author	Linus Torvalds <torvalds@ppc970.osdl.org>	2005-04-16 15:20:36 -0700
committer	Linus Torvalds <torvalds@ppc970.osdl.org>	2005-04-16 15:20:36 -0700
commit	1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree	0bba044c4ce775e45a88a51686b5d9f90697ea9d /arch/m68k/ifpsp060
Linux-2.6.12-rc2 (tag: v2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history, even though we have it. We can create a separate "historical" git archive of that later if we want to, and in the meantime it's about 3.2GB when imported into git - space that would just make the early git days unnecessarily complicated, when we don't have a lot of good infrastructure for it. Let it rip!
Diffstat (limited to 'arch/m68k/ifpsp060')
-rw-r--r--	arch/m68k/ifpsp060/CHANGES	120
-rw-r--r--	arch/m68k/ifpsp060/MISC	201
-rw-r--r--	arch/m68k/ifpsp060/Makefile	10
-rw-r--r--	arch/m68k/ifpsp060/README	71
-rw-r--r--	arch/m68k/ifpsp060/TEST.DOC	208
-rw-r--r--	arch/m68k/ifpsp060/fplsp.doc	231
-rw-r--r--	arch/m68k/ifpsp060/fplsp.sa	1946
-rw-r--r--	arch/m68k/ifpsp060/fpsp.doc	295
-rw-r--r--	arch/m68k/ifpsp060/fpsp.sa	3401
-rw-r--r--	arch/m68k/ifpsp060/fskeleton.S	342
-rw-r--r--	arch/m68k/ifpsp060/ftest.sa	371
-rw-r--r--	arch/m68k/ifpsp060/ilsp.doc	150
-rw-r--r--	arch/m68k/ifpsp060/ilsp.sa	101
-rw-r--r--	arch/m68k/ifpsp060/iskeleton.S	349
-rw-r--r--	arch/m68k/ifpsp060/isp.doc	218
-rw-r--r--	arch/m68k/ifpsp060/isp.sa	392
-rw-r--r--	arch/m68k/ifpsp060/itest.sa	1281
-rw-r--r--	arch/m68k/ifpsp060/os.S	396
-rw-r--r--	arch/m68k/ifpsp060/pfpsp.sa	1730
-rw-r--r--	arch/m68k/ifpsp060/src/README-SRC	12
-rw-r--r--	arch/m68k/ifpsp060/src/fplsp.S	10980
-rw-r--r--	arch/m68k/ifpsp060/src/fpsp.S	24785
-rw-r--r--	arch/m68k/ifpsp060/src/ftest.S	1456
-rw-r--r--	arch/m68k/ifpsp060/src/ilsp.S	932
-rw-r--r--	arch/m68k/ifpsp060/src/isp.S	4299
-rw-r--r--	arch/m68k/ifpsp060/src/itest.S	6386
-rw-r--r--	arch/m68k/ifpsp060/src/pfpsp.S	14745
27 files changed, 75408 insertions, 0 deletions
diff --git a/arch/m68k/ifpsp060/CHANGES b/arch/m68k/ifpsp060/CHANGES
new file mode 100644
index 00000000000..c1e712dfc2e
--- /dev/null
+++ b/arch/m68k/ifpsp060/CHANGES
@@ -0,0 +1,120 @@
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+MOTOROLA MICROPROCESSOR & MEMORY TECHNOLOGY GROUP
+M68000 Hi-Performance Microprocessor Division
+M68060 Software Package
+Production Release P1.00 -- October 10, 1994
+
+M68060 Software Package Copyright © 1993, 1994 Motorola Inc. All rights reserved.
+
+THE SOFTWARE is provided on an "AS IS" basis and without warranty.
+To the maximum extent permitted by applicable law,
+MOTOROLA DISCLAIMS ALL WARRANTIES WHETHER EXPRESS OR IMPLIED,
+INCLUDING IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
+and any warranty against infringement with regard to the SOFTWARE
+(INCLUDING ANY MODIFIED VERSIONS THEREOF) and any accompanying written materials.
+
+To the maximum extent permitted by applicable law,
+IN NO EVENT SHALL MOTOROLA BE LIABLE FOR ANY DAMAGES WHATSOEVER
+(INCLUDING WITHOUT LIMITATION, DAMAGES FOR LOSS OF BUSINESS PROFITS,
+BUSINESS INTERRUPTION, LOSS OF BUSINESS INFORMATION, OR OTHER PECUNIARY LOSS)
+ARISING OF THE USE OR INABILITY TO USE THE SOFTWARE.
+Motorola assumes no responsibility for the maintenance and support of the SOFTWARE.
+
+You are hereby granted a copyright license to use, modify, and distribute the SOFTWARE
+so long as this entire notice is retained without alteration in any modified and/or
+redistributed versions, and that such modified versions are clearly identified as such.
+No licenses are granted by implication, estoppel or otherwise under any patents
+or trademarks of Motorola, Inc.
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+CHANGES SINCE LAST RELEASE:
+---------------------------
+
+1) "movep" emulation where data was being read from memory
+was reading the intermediate bytes. Emulation now only
+reads the required bytes.
+
+2) "flogn", "flog2", and "flog10" of "1" was setting the
+Inexact FPSR bit. Emulation now does not set Inexact for
+this case.
+
+3) For an opclass three FP instruction where the effective addressing
+mode was pre-decrement or post-increment and the address register
+was A0 or A1, the address register was not being updated as a result
+of the operation. This has been corrected.
+
+4) Beta B.2 version had the following erratum:
+
+ Scenario:
+ ---------
+ If {i,d}mem_{read,write}_{byte,word,long}() returns
+ a failing value to the 68060SP, the package ignores
+ this return value and continues with program execution
+ as if it never received a failing value.
+
+ Effect:
+ -------
+ For example, if a user executed "fsin.x ADDR,fp0" where
+ ADDR should cause a "segmentation violation", the memory read
+ requested by the package should return a failing value
+ to the package. Since the package currently ignores this
+ return value, the user program will continue to the
+ next instruction, and the result created in fp0 will be
+ undefined.
+
+ Fix:
+ ----
+ This has been fixed in the current release.
+
+ Notes:
+ ------
+ Upon receiving a non-zero (failing) return value from
+ a {i,d}mem_{read,write}_{byte,word,long}() "call-out",
+ the package creates a 16-byte access error stack frame
+ from the current exception stack frame and exits
+ through the "call-out" _real_access(). This is the process
+ as described in the MC68060 User's Manual.
+
+ For instruction read access errors, the info stacked is:
+ SR = SR at time of exception
+ PC = PC of instruction being emulated
+ VOFF = $4008 (stack frame format type)
+ ADDRESS = PC of instruction being emulated
+ FSLW = FAULT STATUS LONGWORD
+
+ The valid FSLW bits are:
+ bit 27 = 1 (misaligned bit)
+ bit 24 = 1 (read)
+ bit 23 = 0 (write)
+ bit 22:21 = 10 (SIZE = word)
+ bit 20:19 = 00 (TT)
+ bit 18:16 = x10 (TM; x = 1 for supervisor mode)
+ bit 15 = 1 (IO)
+ bit 0 = 1 (Software Emulation Error)
+
+ all other bits are EQUAL TO ZERO and can be set by the _real_access()
+ "call-out" stub by the user as appropriate. The MC68060 User's Manual
+ stated that ONLY "bit 0" would be set. The 060SP attempts to set a few
+ other bits.
+
+ For data read/write access errors, the info stacked is:
+ SR = SR at time of exception
+ PC = PC of instruction being emulated
+ VOFF = $4008 (stack frame format type)
+ ADDRESS = Address of source or destination operand
+ FSLW = FAULT STATUS LONGWORD
+
+ The valid FSLW bits are:
+ bit 27 = 0 (misaligned bit)
+ bit 24 = x (read; 1 if read, 0 if write)
+ bit 23 = x (write; 1 if write, 0 if read)
+ bit 22:21 = xx (SIZE; see MC68060 User's Manual)
+ bit 20:19 = 00 (TT)
+ bit 18:16 = x01 (TM; x = 1 for supervisor mode)
+ bit 15 = 0 (IO)
+ bit 0 = 1 (Software Emulation Error)
+
+ all other bits are EQUAL TO ZERO and can be set by the _real_access()
+ "call-out" stub by the user as appropriate. The MC68060 User's Manual
+ stated that ONLY "bit 0" would be set. The 060SP attempts to set a few
+ other bits.
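+
+ Example:
+ --------
+ As an illustrative sketch only (the label names are hypothetical;
+ the offsets follow from the stacked info listed above: SR at 0,
+ PC at 2, VOFF at 6, ADDRESS at 8, FSLW at 12), a _real_access()
+ "call-out" stub could test the Software Emulation Error bit to
+ distinguish 060SP-created frames from other access errors:
+
+ _real_access:
+	mov.l	0xc(%sp),%d0	# fetch stacked FSLW
+	btst	&0x0,%d0	# Software Emulation Error bit set?
+	beq.b	real_fault	# no: not an 060SP-created frame
+	.			# yes: fault came from 060SP emulation
+ real_fault:
+	.			# system bus-error handling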
diff --git a/arch/m68k/ifpsp060/MISC b/arch/m68k/ifpsp060/MISC
new file mode 100644
index 00000000000..b7e644b94ae
--- /dev/null
+++ b/arch/m68k/ifpsp060/MISC
@@ -0,0 +1,201 @@
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+MOTOROLA MICROPROCESSOR & MEMORY TECHNOLOGY GROUP
+M68000 Hi-Performance Microprocessor Division
+M68060 Software Package
+Production Release P1.00 -- October 10, 1994
+
+M68060 Software Package Copyright © 1993, 1994 Motorola Inc. All rights reserved.
+
+THE SOFTWARE is provided on an "AS IS" basis and without warranty.
+To the maximum extent permitted by applicable law,
+MOTOROLA DISCLAIMS ALL WARRANTIES WHETHER EXPRESS OR IMPLIED,
+INCLUDING IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
+and any warranty against infringement with regard to the SOFTWARE
+(INCLUDING ANY MODIFIED VERSIONS THEREOF) and any accompanying written materials.
+
+To the maximum extent permitted by applicable law,
+IN NO EVENT SHALL MOTOROLA BE LIABLE FOR ANY DAMAGES WHATSOEVER
+(INCLUDING WITHOUT LIMITATION, DAMAGES FOR LOSS OF BUSINESS PROFITS,
+BUSINESS INTERRUPTION, LOSS OF BUSINESS INFORMATION, OR OTHER PECUNIARY LOSS)
+ARISING OF THE USE OR INABILITY TO USE THE SOFTWARE.
+Motorola assumes no responsibility for the maintenance and support of the SOFTWARE.
+
+You are hereby granted a copyright license to use, modify, and distribute the SOFTWARE
+so long as this entire notice is retained without alteration in any modified and/or
+redistributed versions, and that such modified versions are clearly identified as such.
+No licenses are granted by implication, estoppel or otherwise under any patents
+or trademarks of Motorola, Inc.
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+RELEASE FILE VERSIONS:
+-----------------------
+
+fpsp.sa
+----------
+freal.s : 2.4
+hdr.fpu : 2.4
+x_fovfl.s : 2.16
+x_funfl.s : 2.19
+x_funsupp.s : 2.27
+x_effadd.s : 2.21
+x_foperr.s : 2.9
+x_fsnan.s : 2.12
+x_finex.s : 2.14
+x_fdz.s : 2.5
+x_fline.s : 2.5
+x_funimp.s : 2.27
+fsin.s : 2.6
+ftan.s : 2.6
+fatan.s : 2.3
+fasin.s : 2.3
+facos.s : 2.5
+fetox.s : 2.4
+fgetem.s : 2.5
+fcosh.s : 2.4
+fsinh.s : 2.5
+ftanh.s : 2.3
+flogn.s : 2.6
+fatanh.s : 2.4
+flog2.s : 2.4
+ftwotox.s : 2.4
+fmovecr.s : 2.5
+fscale.s : 2.5
+frem_mod.s : 2.6
+fkern.s : 2.6
+fkern2.s : 2.5
+fgen_except.s: 2.7
+foptbl.s : 2.3
+fmul.s : 2.5
+fin.s : 2.4
+fdiv.s : 2.5
+fneg.s : 2.4
+ftst.s : 2.3
+fint.s : 2.3
+fintrz.s : 2.3
+fabs.s : 2.4
+fcmp.s : 2.4
+fsglmul.s : 2.5
+fsgldiv.s : 2.8
+fadd.s : 2.6
+fsub.s : 2.6
+fsqrt.s : 2.4
+fmisc.s : 2.3
+fdbcc.s : 2.8
+ftrapcc.s : 2.5
+fscc.s : 2.6
+fmovm.s : 2.15
+fctrl.s : 2.6
+fcalc_ea.s : 2.7
+fmem.s : 2.9
+fout.s : 2.9
+ireg.s : 2.6
+fdenorm.s : 2.3
+fround.s : 2.4
+fnorm.s : 2.3
+foptag_set.s: 2.4
+fresult.s : 2.3
+fpack.s : 2.6
+fdecbin.s : 2.4
+fbindec.s : 2.5
+fbinstr.s : 2.3
+faccess.s : 2.3
+
+pfpsp.sa
+----------
+freal.s : 2.4
+hdr.fpu : 2.4
+x_fovfl.s : 2.16
+x_funfl.s : 2.19
+x_funsupp.s : 2.27
+x_effadd.s : 2.21
+x_foperr.s : 2.9
+x_fsnan.s : 2.12
+x_finex.s : 2.14
+x_fdz.s : 2.5
+x_fline2.s : 2.3
+fcalc_ea.s : 2.7
+foptbl2.s : 2.4
+fmovm.s : 2.15
+fctrl.s : 2.6
+fmisc.s : 2.3
+fdenorm.s : 2.3
+fround.s : 2.4
+fnorm.s : 2.3
+foptag_set.s: 2.4
+fresult.s : 2.3
+fout.s : 2.9
+fmul.s : 2.5
+fin.s : 2.4
+fdiv.s : 2.5
+fneg.s : 2.4
+ftst.s : 2.3
+fint.s : 2.3
+fintrz.s : 2.3
+fabs.s : 2.4
+fcmp.s : 2.4
+fsglmul.s : 2.5
+fsgldiv.s : 2.8
+fadd.s : 2.6
+fsub.s : 2.6
+fsqrt.s : 2.4
+ireg.s : 2.6
+fpack.s : 2.6
+fdecbin.s : 2.4
+fbindec.s : 2.5
+fbinstr.s : 2.3
+faccess.s : 2.3
+
+fplsp.sa
+----------
+lfptop.s : 2.3
+hdr.fpu : 2.4
+fsin.s : 2.6
+ftan.s : 2.6
+fatan.s : 2.3
+fasin.s : 2.3
+facos.s : 2.5
+fetox.s : 2.4
+fgetem.s : 2.5
+fcosh.s : 2.4
+fsinh.s : 2.5
+ftanh.s : 2.3
+flogn.s : 2.6
+fatanh.s : 2.4
+flog2.s : 2.4
+ftwotox.s : 2.4
+fscale.s : 2.5
+frem_mod.s : 2.6
+l_support.s : 2.15
+fnorm.s : 2.3
+
+isp.sa
+----------
+ireal.s : 2.4
+hdr.int : 2.4
+x_uieh.s : 2.13
+icalc_ea.s : 2.11
+imovep.s : 2.8
+ichk2cmp2.s : 2.6
+idiv64.s : 2.10
+imul64.s :
+icas2.s : 2.11
+icas.s : 2.12
+icas2_core.s: 2.6
+icas_core.s : 2.6
+
+ilsp.sa
+----------
+litop.s : 2.2
+l_idiv64.s : 2.8
+l_imul64.s : 2.6
+l_ichk2cmp2.s: 2.5
+
+ex. files
+----------
+wrk/fskeleton.s: 2.2
+wrk/iskeleton.s: 2.2
+wrk/os.s : 2.1
+
+tests
+----------
+itest.s : 2.2
+ftest.s : 2.1
diff --git a/arch/m68k/ifpsp060/Makefile b/arch/m68k/ifpsp060/Makefile
new file mode 100644
index 00000000000..2fe8472cb5e
--- /dev/null
+++ b/arch/m68k/ifpsp060/Makefile
@@ -0,0 +1,10 @@
+# Makefile for 680x0 Linux 68060 integer/floating point support package
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License. See the file "README.legal" in the main directory of this archive
+# for more details.
+
+obj-y := fskeleton.o iskeleton.o os.o
+
+EXTRA_AFLAGS := -traditional
+EXTRA_LDFLAGS := -x
diff --git a/arch/m68k/ifpsp060/README b/arch/m68k/ifpsp060/README
new file mode 100644
index 00000000000..e3bced429bd
--- /dev/null
+++ b/arch/m68k/ifpsp060/README
@@ -0,0 +1,71 @@
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+MOTOROLA MICROPROCESSOR & MEMORY TECHNOLOGY GROUP
+M68000 Hi-Performance Microprocessor Division
+M68060 Software Package
+Production Release P1.00 -- October 10, 1994
+
+M68060 Software Package Copyright © 1993, 1994 Motorola Inc. All rights reserved.
+
+THE SOFTWARE is provided on an "AS IS" basis and without warranty.
+To the maximum extent permitted by applicable law,
+MOTOROLA DISCLAIMS ALL WARRANTIES WHETHER EXPRESS OR IMPLIED,
+INCLUDING IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
+and any warranty against infringement with regard to the SOFTWARE
+(INCLUDING ANY MODIFIED VERSIONS THEREOF) and any accompanying written materials.
+
+To the maximum extent permitted by applicable law,
+IN NO EVENT SHALL MOTOROLA BE LIABLE FOR ANY DAMAGES WHATSOEVER
+(INCLUDING WITHOUT LIMITATION, DAMAGES FOR LOSS OF BUSINESS PROFITS,
+BUSINESS INTERRUPTION, LOSS OF BUSINESS INFORMATION, OR OTHER PECUNIARY LOSS)
+ARISING OF THE USE OR INABILITY TO USE THE SOFTWARE.
+Motorola assumes no responsibility for the maintenance and support of the SOFTWARE.
+
+You are hereby granted a copyright license to use, modify, and distribute the SOFTWARE
+so long as this entire notice is retained without alteration in any modified and/or
+redistributed versions, and that such modified versions are clearly identified as such.
+No licenses are granted by implication, estoppel or otherwise under any patents
+or trademarks of Motorola, Inc.
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Files in this directory:
+-------------------------
+
+fpsp.sa Full FP Kernel Module - hex image
+fpsp.s Full FP Kernel Module - source code
+fpsp.doc Full FP Kernel Module - on-line documentation
+
+pfpsp.sa Partial FP Kernel Module - hex image
+pfpsp.s Partial FP Kernel Module - source code
+
+fplsp.sa FP Library Module - hex image
+fplsp.s FP Library Module - source code
+fplsp.doc FP Library Module - on-line documentation
+
+isp.sa Integer Unimplemented Kernel Module - hex image
+isp.s Integer Unimplemented Kernel Module - source code
+isp.doc Integer Unimplemented Kernel Module - on-line doc
+
+ilsp.sa Integer Unimplemented Library Module - hex image
+ilsp.s Integer Unimplemented Library Module - source code
+ilsp.doc Integer Unimplemented Library Module - on-line doc
+
+fskeleton.s Sample Call-outs needed by fpsp.sa and pfpsp.sa
+
+iskeleton.s Sample Call-outs needed by isp.sa
+
+os.s Sample Call-outs needed by fpsp.sa, pfpsp.sa, and isp.sa
+
+ftest.sa Simple test program to test that {p}fpsp.sa
+ was connected properly; hex image
+ftest.s above test; source code
+
+itest.sa Simple test program to test that isp.sa was
+ connected properly; hex image
+itest.s above test; source code
+
+test.doc on-line documentation for {i,f}test.sa
+
+README This file
+
+ERRATA Known errata for this release
+
+MISC Release file version numbers
diff --git a/arch/m68k/ifpsp060/TEST.DOC b/arch/m68k/ifpsp060/TEST.DOC
new file mode 100644
index 00000000000..5e5900cb2dc
--- /dev/null
+++ b/arch/m68k/ifpsp060/TEST.DOC
@@ -0,0 +1,208 @@
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+MOTOROLA MICROPROCESSOR & MEMORY TECHNOLOGY GROUP
+M68000 Hi-Performance Microprocessor Division
+M68060 Software Package
+Production Release P1.00 -- October 10, 1994
+
+M68060 Software Package Copyright © 1993, 1994 Motorola Inc. All rights reserved.
+
+THE SOFTWARE is provided on an "AS IS" basis and without warranty.
+To the maximum extent permitted by applicable law,
+MOTOROLA DISCLAIMS ALL WARRANTIES WHETHER EXPRESS OR IMPLIED,
+INCLUDING IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
+and any warranty against infringement with regard to the SOFTWARE
+(INCLUDING ANY MODIFIED VERSIONS THEREOF) and any accompanying written materials.
+
+To the maximum extent permitted by applicable law,
+IN NO EVENT SHALL MOTOROLA BE LIABLE FOR ANY DAMAGES WHATSOEVER
+(INCLUDING WITHOUT LIMITATION, DAMAGES FOR LOSS OF BUSINESS PROFITS,
+BUSINESS INTERRUPTION, LOSS OF BUSINESS INFORMATION, OR OTHER PECUNIARY LOSS)
+ARISING OF THE USE OR INABILITY TO USE THE SOFTWARE.
+Motorola assumes no responsibility for the maintenance and support of the SOFTWARE.
+
+You are hereby granted a copyright license to use, modify, and distribute the SOFTWARE
+so long as this entire notice is retained without alteration in any modified and/or
+redistributed versions, and that such modified versions are clearly identified as such.
+No licenses are granted by implication, estoppel or otherwise under any patents
+or trademarks of Motorola, Inc.
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+68060 SOFTWARE PACKAGE (Kernel version) SIMPLE TESTS
+-----------------------------------------------------
+
+The files itest.sa and ftest.sa contain simple tests to check
+the state of the 68060ISP and 68060FPSP once they have been installed.
+
+Release file format:
+--------------------
+The release files itest.sa and ftest.sa are essentially
+hexadecimal images of the actual tests. This format is the
+ONLY format that will be supported. The hex images were created
+by assembling the source code and then converting the resulting
+binary output images into ASCII text files. The hexadecimal
+numbers are listed using the Motorola Assembly syntax assembler
+directive "dc.l" (define constant longword). The files can be
+converted to other assembly syntaxes by using any word processor
+with a global search and replace function.
+
+To assist in assembling and linking these modules with other modules,
+the installer should add symbolic labels to the top of the files.
+This will allow the calling routines to access the entry points
+of these packages.
+
+The source code itest.s and ftest.s have been included but only
+for documentation purposes.
+
+Release file structure:
+-----------------------
+
+(top of module)
+ -----------------
+ | | - 128 byte-sized section
+ (1) | Call-Out | - 4 bytes per entry (user fills these in)
+ | |
+ -----------------
+ | | - 8 bytes per entry
+ (2) | Entry Point | - user does "bsr" or "jsr" to this address
+ | |
+ -----------------
+ | | - code section
+ (3) ~ ~
+ | |
+ -----------------
+(bottom of module)
+
+The first section of this module is the "Call-out" section. This section
+is NOT INCLUDED in {i,f}test.sa (an example "Call-out" section is provided at
+the end of this file). The purpose of this section is to allow the test
+routines to reference external printing functions that must be provided
+by the host operating system. This section MUST be exactly 128 bytes in
+size. There are 32 fields, each 4 bytes in size. Each field corresponds
+to a function required by the test packages (these functions and their
+location are listed in "68060{ISP,FPSP}-TEST call-outs" below). Each field
+entry should contain the address of the corresponding function RELATIVE to
+the starting address of the "call-out" section. The "Call-out" section must
+sit adjacent to the {i,f}test.sa image in memory. Since itest.sa and ftest.sa
+are individual tests, they each require their own "Call-out" sections.
+
+The second section, the "Entry-point" section, is used by external routines
+to access the test routines. Since the {i,f}test.sa hex files contain
+no symbol names, this section contains function entry points that are fixed
+with respect to the top of the package. The currently defined entry-points
+are listed in section "68060{ISP,FPSP}-TEST entry points" below. A calling
+routine would simply execute a "bsr" or "jsr" that jumped to the selected
+function entry-point.
+
+For example, to run the 060ISP test, write a program that includes the
+itest.sa data and execute something similar to:
+
+ bsr _060ISP_TEST+128+0
+
+(_060ISP_TEST is the starting address of the "Call-out" section; the "Call-out"
+section is 128 bytes long; and the 68060ISP test entry point is located
+0 bytes from the top of the "Entry-point" section.)
+
+The third section is the code section. After entering through an "Entry-point",
+the entry code jumps to the appropriate test code within the code section.
+
+68060ISP-TEST Call-outs:
+------------------------
+0x0: _print_string()
+0x4: _print_number()
+
+68060FPSP-TEST Call-outs:
+-------------------------
+0x0: _print_string()
+0x4: _print_number()
+
+The test packages call _print_string() and _print_number()
+as subroutines and expect the main program to print a string
+or a number to a file or to the screen.
+In "C"-like fashion, the test program calls:
+
+ print_string("Test passed");
+
+ or
+
+ print_number(20);
+
+For _print_string(), the test programs pass a longword address
+of the string on the stack. For _print_number(), the test programs pass
+a longword number to be printed.
+
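+As an illustration only (the label is taken from the example set-up at
+the end of this file; the output mechanism is system-specific), a
+_print_string() call-out could fetch its argument like this:
+
+_print_str:
+	mov.l	0x4(%sp),%a0	# longword string address passed on stack
+	.			# system-specific: output the string at (%a0)
+	rts
+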
+For debugging purposes, after the main program performs a "print"
+for a test package, it should flush the output so that it's not
+buffered. In this way, if the test program crashes, at least the previous
+statements printed will be seen.
+
+68060ISP-TEST Entry-points:
+---------------------------
+0x0: integer test
+
+68060FPSP-TEST Entry-points:
+----------------------------
+0x00: main fp test
+0x08: FP unimplemented test
+0x10: FP enabled snan/operr/ovfl/unfl/dz/inex
+
+The floating-point unit test has 3 entry points which will require
+3 different calls to the package if each of the three following tests
+is desired:
+
+main fp test: tests (1) unimp effective address exception
+ (2) unsupported data type exceptions
+ (3) non-maskable overflow/underflow exceptions
+
+FP unimplemented: tests FP unimplemented exception. this one is
+ separate from the previous tests for systems that don't
+ want FP unimplemented instructions.
+
+FP enabled: tests enabled snan/operr/ovfl/unfl/dz/inex.
+ basically, it enables each of these exceptions and forces
+ each using an implemented FP instruction. this process
+ exercises _fpsp_{snan,operr,ovfl,unfl,dz,inex}() and
+ _real_{snan,operr,ovfl,unfl,dz,inex}(). the test expects
+ _real_XXXX() to do nothing except clear the exception
+ and "rte". if a system's _real_XXXX() handler creates an
+ alternate result, the test will print "failed" but this
+ is acceptable.
+
+Miscellaneous:
+--------------
+Again, itest.sa and ftest.sa are simple tests and do not thoroughly
+test all 68060SP connections. For example, they do not test connections
+to _real_access(), _real_trace(), _real_trap(), etc. because these
+are implemented in system-specific ways and the test packages
+must remain system-independent.
+
+Example test package set-up:
+----------------------------
+_print_str:
+ . # provided by system
+ rts
+
+_print_num:
+ . # provided by system
+ rts
+
+ .
+ .
+ bsr _060FPSP_TEST+128+0
+ .
+ .
+ rts
+
+# beginning of "Call-out" section; provided by integrator.
+# MUST be 128 bytes long.
+_060FPSP_TEST:
+ long _print_str - _060FPSP_TEST
+ long _print_num - _060FPSP_TEST
+ space 120
+
+# ftest.sa starts here; start of "Entry-point" section.
+ long 0x60ff0000, 0x00002346
+ long 0x60ff0000, 0x00018766
+ long 0x60ff0000, 0x00023338
+ long 0x24377299, 0xab2643ea
+ .
+ .
+ .
diff --git a/arch/m68k/ifpsp060/fplsp.doc b/arch/m68k/ifpsp060/fplsp.doc
new file mode 100644
index 00000000000..fb637c43676
--- /dev/null
+++ b/arch/m68k/ifpsp060/fplsp.doc
@@ -0,0 +1,231 @@
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+MOTOROLA MICROPROCESSOR & MEMORY TECHNOLOGY GROUP
+M68000 Hi-Performance Microprocessor Division
+M68060 Software Package
+Production Release P1.00 -- October 10, 1994
+
+M68060 Software Package Copyright © 1993, 1994 Motorola Inc. All rights reserved.
+
+THE SOFTWARE is provided on an "AS IS" basis and without warranty.
+To the maximum extent permitted by applicable law,
+MOTOROLA DISCLAIMS ALL WARRANTIES WHETHER EXPRESS OR IMPLIED,
+INCLUDING IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
+and any warranty against infringement with regard to the SOFTWARE
+(INCLUDING ANY MODIFIED VERSIONS THEREOF) and any accompanying written materials.
+
+To the maximum extent permitted by applicable law,
+IN NO EVENT SHALL MOTOROLA BE LIABLE FOR ANY DAMAGES WHATSOEVER
+(INCLUDING WITHOUT LIMITATION, DAMAGES FOR LOSS OF BUSINESS PROFITS,
+BUSINESS INTERRUPTION, LOSS OF BUSINESS INFORMATION, OR OTHER PECUNIARY LOSS)
+ARISING OF THE USE OR INABILITY TO USE THE SOFTWARE.
+Motorola assumes no responsibility for the maintenance and support of the SOFTWARE.
+
+You are hereby granted a copyright license to use, modify, and distribute the SOFTWARE
+so long as this entire notice is retained without alteration in any modified and/or
+redistributed versions, and that such modified versions are clearly identified as such.
+No licenses are granted by implication, estoppel or otherwise under any patents
+or trademarks of Motorola, Inc.
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+68060 FLOATING-POINT SOFTWARE PACKAGE (Library version)
+--------------------------------------------------------
+
+The file fplsp.sa contains the "Library version" of the
+68060SP Floating-Point Software Package. The routines
+included in this module can be used to emulate the
+FP instructions not implemented in 68060 hardware. These
+instructions normally take exception vector #11
+"FP Unimplemented Instruction".
+
+By re-compiling a program that uses these instructions, and
+making subroutine calls in place of the unimplemented
+instructions, a program can avoid the overhead associated
+with taking the exception.
+
+Release file format:
+--------------------
+The file fplsp.sa is essentially a hexadecimal image of the
+release package. This is the ONLY format which will be supported.
+The hex image was created by assembling the source code and
+then converting the resulting binary output image into an
+ASCII text file. The hexadecimal numbers are listed
+using the Motorola Assembly Syntax assembler directive "dc.l"
+(define constant longword). The file can be converted to other
+assembly syntaxes by using any word processor with a global
+search and replace function.
+
+To assist in assembling and linking this module with other modules,
+the installer should add a symbolic label to the top of the file.
+This will allow calling routines to access the entry points
+of this package.
+
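+For example, a hypothetical installer could simply prepend a label to
+the hex image (the longwords shown are the actual first line of
+fplsp.sa); the name matches the entry-point table at the end of this
+file:
+
+_060FPLSP_TOP:
+	dc.l	$60ff0000,$238e0000,$60ff0000,$24200000
+	.			# remainder of the fplsp.sa image
+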
+The source code fplsp.s has also been included but only for
+documentation purposes.
+
+Release file structure:
+-----------------------
+The file fplsp.sa contains an "Entry-Point" section and a
+code section. The FPLSP has no "Call-Out" section. The first section
+is the "Entry-Point" section. In order to access a function in the
+package, a program must "bsr" or "jsr" to the location listed
+below in "68060FPLSP entry points" that corresponds to the desired
+function. A branch instruction located at the selected entry point
+within the package will then enter the correct emulation code routine.
+
+The entry point addresses at the beginning of the package will remain
+fixed so that a program calling the routines will not have to be
+re-compiled with every new 68060FPLSP release.
+
+There are 3 entry-points for each instruction type: single precision,
+double precision, and extended precision.
+
+As an example, the "fsin" library instruction can be passed an
+extended precision operand if the program executes:
+
+# fsin.x fp0
+
+ fmovm.x &0x01,-(%sp) # pass operand on stack
+ bsr.l _060FPLSP_TOP+0x1a8 # branch to fsin routine
+ add.l &0xc,%sp # clear operand from stack
+
+Upon return, fp0 holds the correct result. The FPSR is
+set correctly. The FPCR is unchanged. The FPIAR is undefined.
+
+Another example. This time, a dyadic operation:
+
+# frem.s %fp1,%fp0
+
+ fmov.s %fp1,-(%sp) # pass src operand
+ fmov.s %fp0,-(%sp) # pass dst operand
+ bsr.l _060FPLSP_TOP+0x168 # branch to frem routine
+ addq.l &0x8,%sp # clear operands from stack
+
+Again, the result is returned in fp0. Note that BOTH operands
+are passed in single precision format.
+
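+The extended-precision form of a dyadic operation follows the same
+pattern; only the operand size and stack adjustment change. A sketch
+(0x160 is the _060LSP__fmodx_ entry point listed below; each extended
+operand occupies 12 bytes on the stack):
+
+# fmod.x %fp1,%fp0
+
+	fmov.x	%fp1,-(%sp)	# pass src operand
+	fmov.x	%fp0,-(%sp)	# pass dst operand
+	bsr.l	_060FPLSP_TOP+0x160	# branch to fmod routine
+	add.l	&0x18,%sp	# clear operands from stack
+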
+Exception reporting:
+--------------------
+The package takes exceptions according to the FPCR value upon subroutine
+entry. If an exception should be reported, then the package forces
+this exception using implemented floating-point instructions.
+For example, if the instruction being emulated should cause a
+floating-point Operand Error exception, then the library routine
+executes an FMUL of a zero and an infinity to force the OPERR
+exception. Although the FPIAR will be undefined for the enabled
+Operand Error exception handler, the user will at least be able
+to record that the event occurred.
+
+Miscellaneous:
+--------------
+The package does not attempt to correctly emulate instructions
+with Signalling NAN inputs. Use of SNANs should be avoided with
+this package.
+
+The fabs/fadd/fdiv/fint/fintrz/fmul/fneg/fsqrt/fsub entry points
+are provided for the convenience of older compilers that make
+subroutine calls for all fp instructions. The code does NOT emulate
+the instruction but rather simply executes it.
+
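+For instance, the sketch below makes the package execute the hardware
+"fabs" on a single-precision operand (0x240 is the _060LSP__fabss_
+entry point listed below):
+
+# fabs.s %fp0
+
+	fmov.s	%fp0,-(%sp)	# pass operand
+	bsr.l	_060FPLSP_TOP+0x240	# branch to fabs routine
+	addq.l	&0x4,%sp	# clear operand from stack
+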
+68060FPLSP entry points:
+------------------------
+_060FPLSP_TOP:
+0x000: _060LSP__facoss_
+0x008: _060LSP__facosd_
+0x010: _060LSP__facosx_
+0x018: _060LSP__fasins_
+0x020: _060LSP__fasind_
+0x028: _060LSP__fasinx_
+0x030: _060LSP__fatans_
+0x038: _060LSP__fatand_
+0x040: _060LSP__fatanx_
+0x048: _060LSP__fatanhs_
+0x050: _060LSP__fatanhd_
+0x058: _060LSP__fatanhx_
+0x060: _060LSP__fcoss_
+0x068: _060LSP__fcosd_
+0x070: _060LSP__fcosx_
+0x078: _060LSP__fcoshs_
+0x080: _060LSP__fcoshd_
+0x088: _060LSP__fcoshx_
+0x090: _060LSP__fetoxs_
+0x098: _060LSP__fetoxd_
+0x0a0: _060LSP__fetoxx_
+0x0a8: _060LSP__fetoxm1s_
+0x0b0: _060LSP__fetoxm1d_
+0x0b8: _060LSP__fetoxm1x_
+0x0c0: _060LSP__fgetexps_
+0x0c8: _060LSP__fgetexpd_
+0x0d0: _060LSP__fgetexpx_
+0x0d8: _060LSP__fgetmans_
+0x0e0: _060LSP__fgetmand_
+0x0e8: _060LSP__fgetmanx_
+0x0f0: _060LSP__flog10s_
+0x0f8: _060LSP__flog10d_
+0x100: _060LSP__flog10x_
+0x108: _060LSP__flog2s_
+0x110: _060LSP__flog2d_
+0x118: _060LSP__flog2x_
+0x120: _060LSP__flogns_
+0x128: _060LSP__flognd_
+0x130: _060LSP__flognx_
+0x138: _060LSP__flognp1s_
+0x140: _060LSP__flognp1d_
+0x148: _060LSP__flognp1x_
+0x150: _060LSP__fmods_
+0x158: _060LSP__fmodd_
+0x160: _060LSP__fmodx_
+0x168: _060LSP__frems_
+0x170: _060LSP__fremd_
+0x178: _060LSP__fremx_
+0x180: _060LSP__fscales_
+0x188: _060LSP__fscaled_
+0x190: _060LSP__fscalex_
+0x198: _060LSP__fsins_
+0x1a0: _060LSP__fsind_
+0x1a8: _060LSP__fsinx_
+0x1b0: _060LSP__fsincoss_
+0x1b8: _060LSP__fsincosd_
+0x1c0: _060LSP__fsincosx_
+0x1c8: _060LSP__fsinhs_
+0x1d0: _060LSP__fsinhd_
+0x1d8: _060LSP__fsinhx_
+0x1e0: _060LSP__ftans_
+0x1e8: _060LSP__ftand_
+0x1f0: _060LSP__ftanx_
+0x1f8: _060LSP__ftanhs_
+0x200: _060LSP__ftanhd_
+0x208: _060LSP__ftanhx_
+0x210: _060LSP__ftentoxs_
+0x218: _060LSP__ftentoxd_
+0x220: _060LSP__ftentoxx_
+0x228: _060LSP__ftwotoxs_
+0x230: _060LSP__ftwotoxd_
+0x238: _060LSP__ftwotoxx_
+
+0x240: _060LSP__fabss_
+0x248: _060LSP__fabsd_
+0x250: _060LSP__fabsx_
+0x258: _060LSP__fadds_
+0x260: _060LSP__faddd_
+0x268: _060LSP__faddx_
+0x270: _060LSP__fdivs_
+0x278: _060LSP__fdivd_
+0x280: _060LSP__fdivx_
+0x288: _060LSP__fints_
+0x290: _060LSP__fintd_
+0x298: _060LSP__fintx_
+0x2a0: _060LSP__fintrzs_
+0x2a8: _060LSP__fintrzd_
+0x2b0: _060LSP__fintrzx_
+0x2b8: _060LSP__fmuls_
+0x2c0: _060LSP__fmuld_
+0x2c8: _060LSP__fmulx_
+0x2d0: _060LSP__fnegs_
+0x2d8: _060LSP__fnegd_
+0x2e0: _060LSP__fnegx_
+0x2e8: _060LSP__fsqrts_
+0x2f0: _060LSP__fsqrtd_
+0x2f8: _060LSP__fsqrtx_
+0x300: _060LSP__fsubs_
+0x308: _060LSP__fsubd_
+0x310: _060LSP__fsubx_
diff --git a/arch/m68k/ifpsp060/fplsp.sa b/arch/m68k/ifpsp060/fplsp.sa
new file mode 100644
index 00000000000..8826df0329e
--- /dev/null
+++ b/arch/m68k/ifpsp060/fplsp.sa
@@ -0,0 +1,1946 @@
+ dc.l $60ff0000,$238e0000,$60ff0000,$24200000
+ dc.l $60ff0000,$24b60000,$60ff0000,$11060000
+ dc.l $60ff0000,$11980000,$60ff0000,$122e0000
+ dc.l $60ff0000,$0f160000,$60ff0000,$0fa80000
+ dc.l $60ff0000,$103e0000,$60ff0000,$12ae0000
+ dc.l $60ff0000,$13400000,$60ff0000,$13d60000
+ dc.l $60ff0000,$05ae0000,$60ff0000,$06400000
+ dc.l $60ff0000,$06d60000,$60ff0000,$213e0000
+ dc.l $60ff0000,$21d00000,$60ff0000,$22660000
+ dc.l $60ff0000,$16160000,$60ff0000,$16a80000
+ dc.l $60ff0000,$173e0000,$60ff0000,$0aee0000
+ dc.l $60ff0000,$0b800000,$60ff0000,$0c160000
+ dc.l $60ff0000,$24a60000,$60ff0000,$25380000
+ dc.l $60ff0000,$25ce0000,$60ff0000,$26660000
+ dc.l $60ff0000,$26f80000,$60ff0000,$278e0000
+ dc.l $60ff0000,$1d160000,$60ff0000,$1da80000
+ dc.l $60ff0000,$1e3e0000,$60ff0000,$1ed60000
+ dc.l $60ff0000,$1f680000,$60ff0000,$1ffe0000
+ dc.l $60ff0000,$1b0e0000,$60ff0000,$1ba00000
+ dc.l $60ff0000,$1c360000,$60ff0000,$08860000
+ dc.l $60ff0000,$09180000,$60ff0000,$09ae0000
+ dc.l $60ff0000,$2bf00000,$60ff0000,$2ca40000
+ dc.l $60ff0000,$2d580000,$60ff0000,$29980000
+ dc.l $60ff0000,$2a4c0000,$60ff0000,$2b000000
+ dc.l $60ff0000,$2e000000,$60ff0000,$2eb40000
+ dc.l $60ff0000,$2f680000,$60ff0000,$029e0000
+ dc.l $60ff0000,$03300000,$60ff0000,$03c60000
+ dc.l $60ff0000,$27660000,$60ff0000,$27fe0000
+ dc.l $60ff0000,$289a0000,$60ff0000,$061e0000
+ dc.l $60ff0000,$06b00000,$60ff0000,$07460000
+ dc.l $60ff0000,$12ee0000,$60ff0000,$13800000
+ dc.l $60ff0000,$14160000,$60ff0000,$0b760000
+ dc.l $60ff0000,$0c080000,$60ff0000,$0c9e0000
+ dc.l $60ff0000,$18460000,$60ff0000,$18d80000
+ dc.l $60ff0000,$196e0000,$60ff0000,$16560000
+ dc.l $60ff0000,$16e80000,$60ff0000,$177e0000
+ dc.l $60ff0000,$72fe0000,$60ff0000,$72fe0000
+ dc.l $60ff0000,$72fe0000,$60ff0000,$71be0000
+ dc.l $60ff0000,$71d40000,$60ff0000,$71ea0000
+ dc.l $60ff0000,$72840000,$60ff0000,$729a0000
+ dc.l $60ff0000,$72b00000,$60ff0000,$72fe0000
+ dc.l $60ff0000,$72fe0000,$60ff0000,$72fe0000
+ dc.l $60ff0000,$72fe0000,$60ff0000,$72fe0000
+ dc.l $60ff0000,$72fe0000,$60ff0000,$71f20000
+ dc.l $60ff0000,$72080000,$60ff0000,$721e0000
+ dc.l $60ff0000,$72860000,$60ff0000,$72860000
+ dc.l $60ff0000,$72860000,$60ff0000,$72860000
+ dc.l $60ff0000,$72860000,$60ff0000,$72860000
+ dc.l $60ff0000,$71600000,$60ff0000,$71760000
+ dc.l $60ff0000,$718c0000,$51fc51fc,$51fc51fc
+ dc.l $51fc51fc,$51fc51fc,$51fc51fc,$51fc51fc
+ dc.l $51fc51fc,$51fc51fc,$51fc51fc,$51fc51fc
+ dc.l $51fc51fc,$51fc51fc,$51fc51fc,$51fc51fc
+ dc.l $51fc51fc,$51fc51fc,$51fc51fc,$51fc51fc
+ dc.l $51fc51fc,$51fc51fc,$51fc51fc,$51fc51fc
+ dc.l $51fc51fc,$51fc51fc,$51fc51fc,$51fc51fc
+ dc.l $51fc51fc,$51fc51fc,$51fc51fc,$51fc51fc
+ dc.l $51fc51fc,$51fc51fc,$51fc51fc,$51fc51fc
+ dc.l $51fc51fc,$51fc51fc,$51fc51fc,$51fc51fc
+ dc.l $51fc51fc,$51fc51fc,$51fc51fc,$51fc51fc
+ dc.l $51fc51fc,$51fc51fc,$51fc51fc,$51fc51fc
+ dc.l $51fc51fc,$51fc51fc,$51fc51fc,$51fc51fc
+ dc.l $51fc51fc,$51fc51fc,$51fc51fc,$51fc51fc
+ dc.l $51fc51fc,$51fc51fc,$51fc51fc,$51fc51fc
+ dc.l $40c62d38,$d3d64634,$3d6f90ae,$b1e75cc7
+ dc.l $40000000,$c90fdaa2,$2168c235,$00000000
+ dc.l $3fff0000,$c90fdaa2,$2168c235,$00000000
+ dc.l $3fe45f30,$6dc9c883,$4e56ff40,$48ee0303
+ dc.l $ff9cf22e,$b800ff60,$f22ef0c0,$ffdcf23c
+ dc.l $90000000,$0000f22e,$44000008,$f22e6800
+ dc.l $ff6c41ee,$ff6c61ff,$00006c76,$1d40ff4e
+ dc.l $120002ae,$00ff00ff,$ff644280,$102eff63
+ dc.l $4a016608,$61ff0000,$2ddc6030,$0c010001
+ dc.l $660861ff,$00007124,$60220c01,$00026608
+ dc.l $61ff0000,$6d226014,$0c010003,$660861ff
+ dc.l $00006f4c,$600661ff,$00002f8e,$4cee0303
+ dc.l $ff9cf22e,$9800ff60,$f22ed040,$ffe84e5e
+ dc.l $4e754e56,$ff4048ee,$0303ff9c,$f22eb800
+ dc.l $ff60f22e,$f0c0ffdc,$f23c9000,$00000000
+ dc.l $f22e5400,$0008f22e,$6800ff6c,$41eeff6c
+ dc.l $61ff0000,$6bdc1d40,$ff4e1200,$02ae00ff
+ dc.l $00ffff64,$4280102e,$ff631d41,$ff4e4a01
+ dc.l $660861ff,$00002d3e,$60300c01,$00016608
+ dc.l $61ff0000,$70866022,$0c010002,$660861ff
+ dc.l $00006c84,$60140c01,$00036608,$61ff0000
+ dc.l $6eae6006,$61ff0000,$2ef04cee,$0303ff9c
+ dc.l $f22e9800,$ff60f22e,$d040ffe8,$4e5e4e75
+ dc.l $4e56ff40,$48ee0303,$ff9cf22e,$b800ff60
+ dc.l $f22ef0c0,$ffdcf23c,$90000000,$000041ee
+ dc.l $ff6c216e,$00080000,$216e000c,$0004216e
+ dc.l $00100008,$61ff0000,$6b381d40,$ff4e1200
+ dc.l $02ae00ff,$00ffff64,$4280102e,$ff634a01
+ dc.l $660861ff,$00002c9e,$60300c01,$00016608
+ dc.l $61ff0000,$6fe66022,$0c010002,$660861ff
+ dc.l $00006be4,$60140c01,$00036608,$61ff0000
+ dc.l $6e0e6006,$61ff0000,$2e504cee,$0303ff9c
+ dc.l $f22e9800,$ff60f22e,$d040ffe8,$4e5e4e75
+ dc.l $4e56ff40,$48ee0303,$ff9cf22e,$b800ff60
+ dc.l $f22ef0c0,$ffdcf23c,$90000000,$0000f22e
+ dc.l $44000008,$f22e6800,$ff6c41ee,$ff6c61ff
+ dc.l $00006a9e,$1d40ff4e,$120002ae,$00ff00ff
+ dc.l $ff644280,$102eff63,$4a016608,$61ff0000
+ dc.l $2c0e6030,$0c010001,$660861ff,$00006fc8
+ dc.l $60220c01,$00026608,$61ff0000,$6b4a6014
+ dc.l $0c010003,$660861ff,$00006d74,$600661ff
+ dc.l $00002dbc,$4cee0303,$ff9cf22e,$9800ff60
+ dc.l $f22ed040,$ffe84e5e,$4e754e56,$ff4048ee
+ dc.l $0303ff9c,$f22eb800,$ff60f22e,$f0c0ffdc
+ dc.l $f23c9000,$00000000,$f22e5400,$0008f22e
+ dc.l $6800ff6c,$41eeff6c,$61ff0000,$6a041d40
+ dc.l $ff4e1200,$02ae00ff,$00ffff64,$4280102e
+ dc.l $ff631d41,$ff4e4a01,$660861ff,$00002b70
+ dc.l $60300c01,$00016608,$61ff0000,$6f2a6022
+ dc.l $0c010002,$660861ff,$00006aac,$60140c01
+ dc.l $00036608,$61ff0000,$6cd66006,$61ff0000
+ dc.l $2d1e4cee,$0303ff9c,$f22e9800,$ff60f22e
+ dc.l $d040ffe8,$4e5e4e75,$4e56ff40,$48ee0303
+ dc.l $ff9cf22e,$b800ff60,$f22ef0c0,$ffdcf23c
+ dc.l $90000000,$000041ee,$ff6c216e,$00080000
+ dc.l $216e000c,$0004216e,$00100008,$61ff0000
+ dc.l $69601d40,$ff4e1200,$02ae00ff,$00ffff64
+ dc.l $4280102e,$ff634a01,$660861ff,$00002ad0
+ dc.l $60300c01,$00016608,$61ff0000,$6e8a6022
+ dc.l $0c010002,$660861ff,$00006a0c,$60140c01
+ dc.l $00036608,$61ff0000,$6c366006,$61ff0000
+ dc.l $2c7e4cee,$0303ff9c,$f22e9800,$ff60f22e
+ dc.l $d040ffe8,$4e5e4e75,$4e56ff40,$48ee0303
+ dc.l $ff9cf22e,$b800ff60,$f22ef0c0,$ffdcf23c
+ dc.l $90000000,$0000f22e,$44000008,$f22e6800
+ dc.l $ff6c41ee,$ff6c61ff,$000068c6,$1d40ff4e
+ dc.l $120002ae,$00ff00ff,$ff644280,$102eff63
+ dc.l $4a016608,$61ff0000,$4e686030,$0c010001
+ dc.l $660861ff,$00006d74,$60220c01,$00026608
+ dc.l $61ff0000,$6d946014,$0c010003,$660861ff
+ dc.l $00006b9c,$600661ff,$00004f14,$4cee0303
+ dc.l $ff9cf22e,$9800ff60,$f22ed040,$ffe84e5e
+ dc.l $4e754e56,$ff4048ee,$0303ff9c,$f22eb800
+ dc.l $ff60f22e,$f0c0ffdc,$f23c9000,$00000000
+ dc.l $f22e5400,$0008f22e,$6800ff6c,$41eeff6c
+ dc.l $61ff0000,$682c1d40,$ff4e1200,$02ae00ff
+ dc.l $00ffff64,$4280102e,$ff631d41,$ff4e4a01
+ dc.l $660861ff,$00004dca,$60300c01,$00016608
+ dc.l $61ff0000,$6cd66022,$0c010002,$660861ff
+ dc.l $00006cf6,$60140c01,$00036608,$61ff0000
+ dc.l $6afe6006,$61ff0000,$4e764cee,$0303ff9c
+ dc.l $f22e9800,$ff60f22e,$d040ffe8,$4e5e4e75
+ dc.l $4e56ff40,$48ee0303,$ff9cf22e,$b800ff60
+ dc.l $f22ef0c0,$ffdcf23c,$90000000,$000041ee
+ dc.l $ff6c216e,$00080000,$216e000c,$0004216e
+ dc.l $00100008,$61ff0000,$67881d40,$ff4e1200
+ dc.l $02ae00ff,$00ffff64,$4280102e,$ff634a01
+ dc.l $660861ff,$00004d2a,$60300c01,$00016608
+ dc.l $61ff0000,$6c366022,$0c010002,$660861ff
+ dc.l $00006c56,$60140c01,$00036608,$61ff0000
+ dc.l $6a5e6006,$61ff0000,$4dd64cee,$0303ff9c
+ dc.l $f22e9800,$ff60f22e,$d040ffe8,$4e5e4e75
+ dc.l $4e56ff40,$48ee0303,$ff9cf22e,$b800ff60
+ dc.l $f22ef0c0,$ffdcf23c,$90000000,$0000f22e
+ dc.l $44000008,$f22e6800,$ff6c41ee,$ff6c61ff
+ dc.l $000066ee,$1d40ff4e,$120002ae,$00ff00ff
+ dc.l $ff644280,$102eff63,$4a016608,$61ff0000
+ dc.l $59b26030,$0c010001,$660861ff,$00006b9c
+ dc.l $60220c01,$00026608,$61ff0000,$6bf26014
+ dc.l $0c010003,$660861ff,$000069c4,$600661ff
+ dc.l $00005ad4,$4cee0303,$ff9cf22e,$9800ff60
+ dc.l $f22ed040,$ffe84e5e,$4e754e56,$ff4048ee
+ dc.l $0303ff9c,$f22eb800,$ff60f22e,$f0c0ffdc
+ dc.l $f23c9000,$00000000,$f22e5400,$0008f22e
+ dc.l $6800ff6c,$41eeff6c,$61ff0000,$66541d40
+ dc.l $ff4e1200,$02ae00ff,$00ffff64,$4280102e
+ dc.l $ff631d41,$ff4e4a01,$660861ff,$00005914
+ dc.l $60300c01,$00016608,$61ff0000,$6afe6022
+ dc.l $0c010002,$660861ff,$00006b54,$60140c01
+ dc.l $00036608,$61ff0000,$69266006,$61ff0000
+ dc.l $5a364cee,$0303ff9c,$f22e9800,$ff60f22e
+ dc.l $d040ffe8,$4e5e4e75,$4e56ff40,$48ee0303
+ dc.l $ff9cf22e,$b800ff60,$f22ef0c0,$ffdcf23c
+ dc.l $90000000,$000041ee,$ff6c216e,$00080000
+ dc.l $216e000c,$0004216e,$00100008,$61ff0000
+ dc.l $65b01d40,$ff4e1200,$02ae00ff,$00ffff64
+ dc.l $4280102e,$ff634a01,$660861ff,$00005874
+ dc.l $60300c01,$00016608,$61ff0000,$6a5e6022
+ dc.l $0c010002,$660861ff,$00006ab4,$60140c01
+ dc.l $00036608,$61ff0000,$68866006,$61ff0000
+ dc.l $59964cee,$0303ff9c,$f22e9800,$ff60f22e
+ dc.l $d040ffe8,$4e5e4e75,$4e56ff40,$48ee0303
+ dc.l $ff9cf22e,$b800ff60,$f22ef0c0,$ffdcf23c
+ dc.l $90000000,$0000f22e,$44000008,$f22e6800
+ dc.l $ff6c41ee,$ff6c61ff,$00006516,$1d40ff4e
+ dc.l $120002ae,$00ff00ff,$ff644280,$102eff63
+ dc.l $4a016608,$61ff0000,$46c46030,$0c010001
+ dc.l $660861ff,$000069c4,$60220c01,$00026608
+ dc.l $61ff0000,$6a246014,$0c010003,$660861ff
+ dc.l $000067ec,$600661ff,$00004948,$4cee0303
+ dc.l $ff9cf22e,$9800ff60,$f22ed040,$ffe84e5e
+ dc.l $4e754e56,$ff4048ee,$0303ff9c,$f22eb800
+ dc.l $ff60f22e,$f0c0ffdc,$f23c9000,$00000000
+ dc.l $f22e5400,$0008f22e,$6800ff6c,$41eeff6c
+ dc.l $61ff0000,$647c1d40,$ff4e1200,$02ae00ff
+ dc.l $00ffff64,$4280102e,$ff631d41,$ff4e4a01
+ dc.l $660861ff,$00004626,$60300c01,$00016608
+ dc.l $61ff0000,$69266022,$0c010002,$660861ff
+ dc.l $00006986,$60140c01,$00036608,$61ff0000
+ dc.l $674e6006,$61ff0000,$48aa4cee,$0303ff9c
+ dc.l $f22e9800,$ff60f22e,$d040ffe8,$4e5e4e75
+ dc.l $4e56ff40,$48ee0303,$ff9cf22e,$b800ff60
+ dc.l $f22ef0c0,$ffdcf23c,$90000000,$000041ee
+ dc.l $ff6c216e,$00080000,$216e000c,$0004216e
+ dc.l $00100008,$61ff0000,$63d81d40,$ff4e1200
+ dc.l $02ae00ff,$00ffff64,$4280102e,$ff634a01
+ dc.l $660861ff,$00004586,$60300c01,$00016608
+ dc.l $61ff0000,$68866022,$0c010002,$660861ff
+ dc.l $000068e6,$60140c01,$00036608,$61ff0000
+ dc.l $66ae6006,$61ff0000,$480a4cee,$0303ff9c
+ dc.l $f22e9800,$ff60f22e,$d040ffe8,$4e5e4e75
+ dc.l $4e56ff40,$48ee0303,$ff9cf22e,$b800ff60
+ dc.l $f22ef0c0,$ffdcf23c,$90000000,$0000f22e
+ dc.l $44000008,$f22e6800,$ff6c41ee,$ff6c61ff
+ dc.l $0000633e,$1d40ff4e,$120002ae,$00ff00ff
+ dc.l $ff644280,$102eff63,$4a016608,$61ff0000
+ dc.l $49c46030,$0c010001,$660861ff,$000067ec
+ dc.l $60220c01,$00026608,$61ff0000,$68546014
+ dc.l $0c010003,$660861ff,$00006614,$600661ff
+ dc.l $00004afa,$4cee0303,$ff9cf22e,$9800ff60
+ dc.l $f22ed040,$ffe84e5e,$4e754e56,$ff4048ee
+ dc.l $0303ff9c,$f22eb800,$ff60f22e,$f0c0ffdc
+ dc.l $f23c9000,$00000000,$f22e5400,$0008f22e
+ dc.l $6800ff6c,$41eeff6c,$61ff0000,$62a41d40
+ dc.l $ff4e1200,$02ae00ff,$00ffff64,$4280102e
+ dc.l $ff631d41,$ff4e4a01,$660861ff,$00004926
+ dc.l $60300c01,$00016608,$61ff0000,$674e6022
+ dc.l $0c010002,$660861ff,$000067b6,$60140c01
+ dc.l $00036608,$61ff0000,$65766006,$61ff0000
+ dc.l $4a5c4cee,$0303ff9c,$f22e9800,$ff60f22e
+ dc.l $d040ffe8,$4e5e4e75,$4e56ff40,$48ee0303
+ dc.l $ff9cf22e,$b800ff60,$f22ef0c0,$ffdcf23c
+ dc.l $90000000,$000041ee,$ff6c216e,$00080000
+ dc.l $216e000c,$0004216e,$00100008,$61ff0000
+ dc.l $62001d40,$ff4e1200,$02ae00ff,$00ffff64
+ dc.l $4280102e,$ff634a01,$660861ff,$00004886
+ dc.l $60300c01,$00016608,$61ff0000,$66ae6022
+ dc.l $0c010002,$660861ff,$00006716,$60140c01
+ dc.l $00036608,$61ff0000,$64d66006,$61ff0000
+ dc.l $49bc4cee,$0303ff9c,$f22e9800,$ff60f22e
+ dc.l $d040ffe8,$4e5e4e75,$4e56ff40,$48ee0303
+ dc.l $ff9cf22e,$b800ff60,$f22ef0c0,$ffdcf23c
+ dc.l $90000000,$0000f22e,$44000008,$f22e6800
+ dc.l $ff6c41ee,$ff6c61ff,$00006166,$1d40ff4e
+ dc.l $120002ae,$00ff00ff,$ff644280,$102eff63
+ dc.l $4a016608,$61ff0000,$391c6030,$0c010001
+ dc.l $660861ff,$00006614,$60220c01,$00026608
+ dc.l $61ff0000,$66b86014,$0c010003,$660861ff
+ dc.l $0000643c,$600661ff,$00003b28,$4cee0303
+ dc.l $ff9cf22e,$9800ff60,$f22ed040,$ffe84e5e
+ dc.l $4e754e56,$ff4048ee,$0303ff9c,$f22eb800
+ dc.l $ff60f22e,$f0c0ffdc,$f23c9000,$00000000
+ dc.l $f22e5400,$0008f22e,$6800ff6c,$41eeff6c
+ dc.l $61ff0000,$60cc1d40,$ff4e1200,$02ae00ff
+ dc.l $00ffff64,$4280102e,$ff631d41,$ff4e4a01
+ dc.l $660861ff,$0000387e,$60300c01,$00016608
+ dc.l $61ff0000,$65766022,$0c010002,$660861ff
+ dc.l $0000661a,$60140c01,$00036608,$61ff0000
+ dc.l $639e6006,$61ff0000,$3a8a4cee,$0303ff9c
+ dc.l $f22e9800,$ff60f22e,$d040ffe8,$4e5e4e75
+ dc.l $4e56ff40,$48ee0303,$ff9cf22e,$b800ff60
+ dc.l $f22ef0c0,$ffdcf23c,$90000000,$000041ee
+ dc.l $ff6c216e,$00080000,$216e000c,$0004216e
+ dc.l $00100008,$61ff0000,$60281d40,$ff4e1200
+ dc.l $02ae00ff,$00ffff64,$4280102e,$ff634a01
+ dc.l $660861ff,$000037de,$60300c01,$00016608
+ dc.l $61ff0000,$64d66022,$0c010002,$660861ff
+ dc.l $0000657a,$60140c01,$00036608,$61ff0000
+ dc.l $62fe6006,$61ff0000,$39ea4cee,$0303ff9c
+ dc.l $f22e9800,$ff60f22e,$d040ffe8,$4e5e4e75
+ dc.l $4e56ff40,$48ee0303,$ff9cf22e,$b800ff60
+ dc.l $f22ef0c0,$ffdcf23c,$90000000,$0000f22e
+ dc.l $44000008,$f22e6800,$ff6c41ee,$ff6c61ff
+ dc.l $00005f8e,$1d40ff4e,$120002ae,$00ff00ff
+ dc.l $ff644280,$102eff63,$4a016608,$61ff0000
+ dc.l $39886030,$0c010001,$660861ff,$0000643c
+ dc.l $60220c01,$00026608,$61ff0000,$603a6014
+ dc.l $0c010003,$660861ff,$00006264,$600661ff
+ dc.l $00003a04,$4cee0303,$ff9cf22e,$9800ff60
+ dc.l $f22ed040,$ffe84e5e,$4e754e56,$ff4048ee
+ dc.l $0303ff9c,$f22eb800,$ff60f22e,$f0c0ffdc
+ dc.l $f23c9000,$00000000,$f22e5400,$0008f22e
+ dc.l $6800ff6c,$41eeff6c,$61ff0000,$5ef41d40
+ dc.l $ff4e1200,$02ae00ff,$00ffff64,$4280102e
+ dc.l $ff631d41,$ff4e4a01,$660861ff,$000038ea
+ dc.l $60300c01,$00016608,$61ff0000,$639e6022
+ dc.l $0c010002,$660861ff,$00005f9c,$60140c01
+ dc.l $00036608,$61ff0000,$61c66006,$61ff0000
+ dc.l $39664cee,$0303ff9c,$f22e9800,$ff60f22e
+ dc.l $d040ffe8,$4e5e4e75,$4e56ff40,$48ee0303
+ dc.l $ff9cf22e,$b800ff60,$f22ef0c0,$ffdcf23c
+ dc.l $90000000,$000041ee,$ff6c216e,$00080000
+ dc.l $216e000c,$0004216e,$00100008,$61ff0000
+ dc.l $5e501d40,$ff4e1200,$02ae00ff,$00ffff64
+ dc.l $4280102e,$ff634a01,$660861ff,$0000384a
+ dc.l $60300c01,$00016608,$61ff0000,$62fe6022
+ dc.l $0c010002,$660861ff,$00005efc,$60140c01
+ dc.l $00036608,$61ff0000,$61266006,$61ff0000
+ dc.l $38c64cee,$0303ff9c,$f22e9800,$ff60f22e
+ dc.l $d040ffe8,$4e5e4e75,$4e56ff40,$48ee0303
+ dc.l $ff9cf22e,$b800ff60,$f22ef0c0,$ffdcf23c
+ dc.l $90000000,$0000f22e,$44000008,$f22e6800
+ dc.l $ff6c41ee,$ff6c61ff,$00005db6,$1d40ff4e
+ dc.l $120002ae,$00ff00ff,$ff644280,$102eff63
+ dc.l $4a016608,$61ff0000,$51d46030,$0c010001
+ dc.l $660861ff,$00006264,$60220c01,$00026608
+ dc.l $61ff0000,$5e626014,$0c010003,$660861ff
+ dc.l $0000608c,$600661ff,$00005224,$4cee0303
+ dc.l $ff9cf22e,$9800ff60,$f22ed040,$ffe84e5e
+ dc.l $4e754e56,$ff4048ee,$0303ff9c,$f22eb800
+ dc.l $ff60f22e,$f0c0ffdc,$f23c9000,$00000000
+ dc.l $f22e5400,$0008f22e,$6800ff6c,$41eeff6c
+ dc.l $61ff0000,$5d1c1d40,$ff4e1200,$02ae00ff
+ dc.l $00ffff64,$4280102e,$ff631d41,$ff4e4a01
+ dc.l $660861ff,$00005136,$60300c01,$00016608
+ dc.l $61ff0000,$61c66022,$0c010002,$660861ff
+ dc.l $00005dc4,$60140c01,$00036608,$61ff0000
+ dc.l $5fee6006,$61ff0000,$51864cee,$0303ff9c
+ dc.l $f22e9800,$ff60f22e,$d040ffe8,$4e5e4e75
+ dc.l $4e56ff40,$48ee0303,$ff9cf22e,$b800ff60
+ dc.l $f22ef0c0,$ffdcf23c,$90000000,$000041ee
+ dc.l $ff6c216e,$00080000,$216e000c,$0004216e
+ dc.l $00100008,$61ff0000,$5c781d40,$ff4e1200
+ dc.l $02ae00ff,$00ffff64,$4280102e,$ff634a01
+ dc.l $660861ff,$00005096,$60300c01,$00016608
+ dc.l $61ff0000,$61266022,$0c010002,$660861ff
+ dc.l $00005d24,$60140c01,$00036608,$61ff0000
+ dc.l $5f4e6006,$61ff0000,$50e64cee,$0303ff9c
+ dc.l $f22e9800,$ff60f22e,$d040ffe8,$4e5e4e75
+ dc.l $4e56ff40,$48ee0303,$ff9cf22e,$b800ff60
+ dc.l $f22ef0c0,$ffdcf23c,$90000000,$0000f22e
+ dc.l $44000008,$f22e6800,$ff6c41ee,$ff6c61ff
+ dc.l $00005bde,$1d40ff4e,$120002ae,$00ff00ff
+ dc.l $ff644280,$102eff63,$4a016608,$61ff0000
+ dc.l $28066030,$0c010001,$660861ff,$0000608c
+ dc.l $60220c01,$00026608,$61ff0000,$5c8a6014
+ dc.l $0c010003,$660861ff,$00005eb4,$600661ff
+ dc.l $00002938,$4cee0303,$ff9cf22e,$9800ff60
+ dc.l $f22ed040,$ffe84e5e,$4e754e56,$ff4048ee
+ dc.l $0303ff9c,$f22eb800,$ff60f22e,$f0c0ffdc
+ dc.l $f23c9000,$00000000,$f22e5400,$0008f22e
+ dc.l $6800ff6c,$41eeff6c,$61ff0000,$5b441d40
+ dc.l $ff4e1200,$02ae00ff,$00ffff64,$4280102e
+ dc.l $ff631d41,$ff4e4a01,$660861ff,$00002768
+ dc.l $60300c01,$00016608,$61ff0000,$5fee6022
+ dc.l $0c010002,$660861ff,$00005bec,$60140c01
+ dc.l $00036608,$61ff0000,$5e166006,$61ff0000
+ dc.l $289a4cee,$0303ff9c,$f22e9800,$ff60f22e
+ dc.l $d040ffe8,$4e5e4e75,$4e56ff40,$48ee0303
+ dc.l $ff9cf22e,$b800ff60,$f22ef0c0,$ffdcf23c
+ dc.l $90000000,$000041ee,$ff6c216e,$00080000
+ dc.l $216e000c,$0004216e,$00100008,$61ff0000
+ dc.l $5aa01d40,$ff4e1200,$02ae00ff,$00ffff64
+ dc.l $4280102e,$ff634a01,$660861ff,$000026c8
+ dc.l $60300c01,$00016608,$61ff0000,$5f4e6022
+ dc.l $0c010002,$660861ff,$00005b4c,$60140c01
+ dc.l $00036608,$61ff0000,$5d766006,$61ff0000
+ dc.l $27fa4cee,$0303ff9c,$f22e9800,$ff60f22e
+ dc.l $d040ffe8,$4e5e4e75,$4e56ff40,$48ee0303
+ dc.l $ff9cf22e,$b800ff60,$f22ef0c0,$ffdcf23c
+ dc.l $90000000,$0000f22e,$44000008,$f22e6800
+ dc.l $ff6c41ee,$ff6c61ff,$00005a06,$1d40ff4e
+ dc.l $120002ae,$00ff00ff,$ff644280,$102eff63
+ dc.l $4a016608,$61ff0000,$39e46030,$0c010001
+ dc.l $660861ff,$00005f30,$60220c01,$00026608
+ dc.l $61ff0000,$5f026014,$0c010003,$660861ff
+ dc.l $00005cdc,$600661ff,$00003b5e,$4cee0303
+ dc.l $ff9cf22e,$9800ff60,$f22ed040,$ffe84e5e
+ dc.l $4e754e56,$ff4048ee,$0303ff9c,$f22eb800
+ dc.l $ff60f22e,$f0c0ffdc,$f23c9000,$00000000
+ dc.l $f22e5400,$0008f22e,$6800ff6c,$41eeff6c
+ dc.l $61ff0000,$596c1d40,$ff4e1200,$02ae00ff
+ dc.l $00ffff64,$4280102e,$ff631d41,$ff4e4a01
+ dc.l $660861ff,$00003946,$60300c01,$00016608
+ dc.l $61ff0000,$5e926022,$0c010002,$660861ff
+ dc.l $00005e64,$60140c01,$00036608,$61ff0000
+ dc.l $5c3e6006,$61ff0000,$3ac04cee,$0303ff9c
+ dc.l $f22e9800,$ff60f22e,$d040ffe8,$4e5e4e75
+ dc.l $4e56ff40,$48ee0303,$ff9cf22e,$b800ff60
+ dc.l $f22ef0c0,$ffdcf23c,$90000000,$000041ee
+ dc.l $ff6c216e,$00080000,$216e000c,$0004216e
+ dc.l $00100008,$61ff0000,$58c81d40,$ff4e1200
+ dc.l $02ae00ff,$00ffff64,$4280102e,$ff634a01
+ dc.l $660861ff,$000038a6,$60300c01,$00016608
+ dc.l $61ff0000,$5df26022,$0c010002,$660861ff
+ dc.l $00005dc4,$60140c01,$00036608,$61ff0000
+ dc.l $5b9e6006,$61ff0000,$3a204cee,$0303ff9c
+ dc.l $f22e9800,$ff60f22e,$d040ffe8,$4e5e4e75
+ dc.l $4e56ff40,$48ee0303,$ff9cf22e,$b800ff60
+ dc.l $f22ef0c0,$ffdcf23c,$90000000,$0000f22e
+ dc.l $44000008,$f22e6800,$ff6c41ee,$ff6c61ff
+ dc.l $0000582e,$1d40ff4e,$120002ae,$00ff00ff
+ dc.l $ff644280,$102eff63,$4a016608,$61ff0000
+ dc.l $522e6030,$0c010001,$660861ff,$00005d58
+ dc.l $60220c01,$00026608,$61ff0000,$5d2a6014
+ dc.l $0c010003,$660861ff,$00005b04,$600661ff
+ dc.l $000052d6,$4cee0303,$ff9cf22e,$9800ff60
+ dc.l $f22ed040,$ffe84e5e,$4e754e56,$ff4048ee
+ dc.l $0303ff9c,$f22eb800,$ff60f22e,$f0c0ffdc
+ dc.l $f23c9000,$00000000,$f22e5400,$0008f22e
+ dc.l $6800ff6c,$41eeff6c,$61ff0000,$57941d40
+ dc.l $ff4e1200,$02ae00ff,$00ffff64,$4280102e
+ dc.l $ff631d41,$ff4e4a01,$660861ff,$00005190
+ dc.l $60300c01,$00016608,$61ff0000,$5cba6022
+ dc.l $0c010002,$660861ff,$00005c8c,$60140c01
+ dc.l $00036608,$61ff0000,$5a666006,$61ff0000
+ dc.l $52384cee,$0303ff9c,$f22e9800,$ff60f22e
+ dc.l $d040ffe8,$4e5e4e75,$4e56ff40,$48ee0303
+ dc.l $ff9cf22e,$b800ff60,$f22ef0c0,$ffdcf23c
+ dc.l $90000000,$000041ee,$ff6c216e,$00080000
+ dc.l $216e000c,$0004216e,$00100008,$61ff0000
+ dc.l $56f01d40,$ff4e1200,$02ae00ff,$00ffff64
+ dc.l $4280102e,$ff634a01,$660861ff,$000050f0
+ dc.l $60300c01,$00016608,$61ff0000,$5c1a6022
+ dc.l $0c010002,$660861ff,$00005bec,$60140c01
+ dc.l $00036608,$61ff0000,$59c66006,$61ff0000
+ dc.l $51984cee,$0303ff9c,$f22e9800,$ff60f22e
+ dc.l $d040ffe8,$4e5e4e75,$4e56ff40,$48ee0303
+ dc.l $ff9cf22e,$b800ff60,$f22ef0c0,$ffdcf23c
+ dc.l $90000000,$0000f22e,$44000008,$f22e6800
+ dc.l $ff6c41ee,$ff6c61ff,$00005656,$1d40ff4e
+ dc.l $120002ae,$00ff00ff,$ff644280,$102eff63
+ dc.l $4a016608,$61ff0000,$514e6030,$0c010001
+ dc.l $660861ff,$00005b80,$60220c01,$00026608
+ dc.l $61ff0000,$5b526014,$0c010003,$660861ff
+ dc.l $0000592c,$600661ff,$0000524c,$4cee0303
+ dc.l $ff9cf22e,$9800ff60,$f22ed040,$ffe84e5e
+ dc.l $4e754e56,$ff4048ee,$0303ff9c,$f22eb800
+ dc.l $ff60f22e,$f0c0ffdc,$f23c9000,$00000000
+ dc.l $f22e5400,$0008f22e,$6800ff6c,$41eeff6c
+ dc.l $61ff0000,$55bc1d40,$ff4e1200,$02ae00ff
+ dc.l $00ffff64,$4280102e,$ff631d41,$ff4e4a01
+ dc.l $660861ff,$000050b0,$60300c01,$00016608
+ dc.l $61ff0000,$5ae26022,$0c010002,$660861ff
+ dc.l $00005ab4,$60140c01,$00036608,$61ff0000
+ dc.l $588e6006,$61ff0000,$51ae4cee,$0303ff9c
+ dc.l $f22e9800,$ff60f22e,$d040ffe8,$4e5e4e75
+ dc.l $4e56ff40,$48ee0303,$ff9cf22e,$b800ff60
+ dc.l $f22ef0c0,$ffdcf23c,$90000000,$000041ee
+ dc.l $ff6c216e,$00080000,$216e000c,$0004216e
+ dc.l $00100008,$61ff0000,$55181d40,$ff4e1200
+ dc.l $02ae00ff,$00ffff64,$4280102e,$ff634a01
+ dc.l $660861ff,$00005010,$60300c01,$00016608
+ dc.l $61ff0000,$5a426022,$0c010002,$660861ff
+ dc.l $00005a14,$60140c01,$00036608,$61ff0000
+ dc.l $57ee6006,$61ff0000,$510e4cee,$0303ff9c
+ dc.l $f22e9800,$ff60f22e,$d040ffe8,$4e5e4e75
+ dc.l $4e56ff40,$48ee0303,$ff9cf22e,$b800ff60
+ dc.l $f22ef0c0,$ffdcf23c,$90000000,$0000f22e
+ dc.l $44000008,$f22e6800,$ff6c41ee,$ff6c61ff
+ dc.l $0000547e,$1d40ff4e,$120002ae,$00ff00ff
+ dc.l $ff644280,$102eff63,$4a016608,$61ff0000
+ dc.l $45026030,$0c010001,$660861ff,$000054c8
+ dc.l $60220c01,$00026608,$61ff0000,$59826014
+ dc.l $0c010003,$660861ff,$00005754,$600661ff
+ dc.l $00004682,$4cee0303,$ff9cf22e,$9800ff60
+ dc.l $f22ed040,$ffe84e5e,$4e754e56,$ff4048ee
+ dc.l $0303ff9c,$f22eb800,$ff60f22e,$f0c0ffdc
+ dc.l $f23c9000,$00000000,$f22e5400,$0008f22e
+ dc.l $6800ff6c,$41eeff6c,$61ff0000,$53e41d40
+ dc.l $ff4e1200,$02ae00ff,$00ffff64,$4280102e
+ dc.l $ff631d41,$ff4e4a01,$660861ff,$00004464
+ dc.l $60300c01,$00016608,$61ff0000,$542a6022
+ dc.l $0c010002,$660861ff,$000058e4,$60140c01
+ dc.l $00036608,$61ff0000,$56b66006,$61ff0000
+ dc.l $45e44cee,$0303ff9c,$f22e9800,$ff60f22e
+ dc.l $d040ffe8,$4e5e4e75,$4e56ff40,$48ee0303
+ dc.l $ff9cf22e,$b800ff60,$f22ef0c0,$ffdcf23c
+ dc.l $90000000,$000041ee,$ff6c216e,$00080000
+ dc.l $216e000c,$0004216e,$00100008,$61ff0000
+ dc.l $53401d40,$ff4e1200,$02ae00ff,$00ffff64
+ dc.l $4280102e,$ff634a01,$660861ff,$000043c4
+ dc.l $60300c01,$00016608,$61ff0000,$538a6022
+ dc.l $0c010002,$660861ff,$00005844,$60140c01
+ dc.l $00036608,$61ff0000,$56166006,$61ff0000
+ dc.l $45444cee,$0303ff9c,$f22e9800,$ff60f22e
+ dc.l $d040ffe8,$4e5e4e75,$4e56ff40,$48ee0303
+ dc.l $ff9cf22e,$b800ff60,$f22ef0c0,$ffdcf23c
+ dc.l $90000000,$0000f22e,$44000008,$f22e6800
+ dc.l $ff6c41ee,$ff6c61ff,$000052a6,$1d40ff4e
+ dc.l $120002ae,$00ff00ff,$ff644280,$102eff63
+ dc.l $4a016608,$61ff0000,$476c6030,$0c010001
+ dc.l $660861ff,$000052f0,$60220c01,$00026608
+ dc.l $61ff0000,$57aa6014,$0c010003,$660861ff
+ dc.l $0000557c,$600661ff,$0000476a,$4cee0303
+ dc.l $ff9cf22e,$9800ff60,$f22ed040,$ffe84e5e
+ dc.l $4e754e56,$ff4048ee,$0303ff9c,$f22eb800
+ dc.l $ff60f22e,$f0c0ffdc,$f23c9000,$00000000
+ dc.l $f22e5400,$0008f22e,$6800ff6c,$41eeff6c
+ dc.l $61ff0000,$520c1d40,$ff4e1200,$02ae00ff
+ dc.l $00ffff64,$4280102e,$ff631d41,$ff4e4a01
+ dc.l $660861ff,$000046ce,$60300c01,$00016608
+ dc.l $61ff0000,$52526022,$0c010002,$660861ff
+ dc.l $0000570c,$60140c01,$00036608,$61ff0000
+ dc.l $54de6006,$61ff0000,$46cc4cee,$0303ff9c
+ dc.l $f22e9800,$ff60f22e,$d040ffe8,$4e5e4e75
+ dc.l $4e56ff40,$48ee0303,$ff9cf22e,$b800ff60
+ dc.l $f22ef0c0,$ffdcf23c,$90000000,$000041ee
+ dc.l $ff6c216e,$00080000,$216e000c,$0004216e
+ dc.l $00100008,$61ff0000,$51681d40,$ff4e1200
+ dc.l $02ae00ff,$00ffff64,$4280102e,$ff634a01
+ dc.l $660861ff,$0000462e,$60300c01,$00016608
+ dc.l $61ff0000,$51b26022,$0c010002,$660861ff
+ dc.l $0000566c,$60140c01,$00036608,$61ff0000
+ dc.l $543e6006,$61ff0000,$462c4cee,$0303ff9c
+ dc.l $f22e9800,$ff60f22e,$d040ffe8,$4e5e4e75
+ dc.l $4e56ff40,$48ee0303,$ff9cf22e,$b800ff60
+ dc.l $f22ef0c0,$ffdcf23c,$90000000,$0000f22e
+ dc.l $44000008,$f22e6800,$ff6c41ee,$ff6c61ff
+ dc.l $000050ce,$1d40ff4e,$120002ae,$00ff00ff
+ dc.l $ff644280,$102eff63,$4a016608,$61ff0000
+ dc.l $45e46030,$0c010001,$660861ff,$00005118
+ dc.l $60220c01,$00026608,$61ff0000,$55d26014
+ dc.l $0c010003,$660861ff,$000053a4,$600661ff
+ dc.l $0000460c,$4cee0303,$ff9cf22e,$9800ff60
+ dc.l $f22ed040,$ffe84e5e,$4e754e56,$ff4048ee
+ dc.l $0303ff9c,$f22eb800,$ff60f22e,$f0c0ffdc
+ dc.l $f23c9000,$00000000,$f22e5400,$0008f22e
+ dc.l $6800ff6c,$41eeff6c,$61ff0000,$50341d40
+ dc.l $ff4e1200,$02ae00ff,$00ffff64,$4280102e
+ dc.l $ff631d41,$ff4e4a01,$660861ff,$00004546
+ dc.l $60300c01,$00016608,$61ff0000,$507a6022
+ dc.l $0c010002,$660861ff,$00005534,$60140c01
+ dc.l $00036608,$61ff0000,$53066006,$61ff0000
+ dc.l $456e4cee,$0303ff9c,$f22e9800,$ff60f22e
+ dc.l $d040ffe8,$4e5e4e75,$4e56ff40,$48ee0303
+ dc.l $ff9cf22e,$b800ff60,$f22ef0c0,$ffdcf23c
+ dc.l $90000000,$000041ee,$ff6c216e,$00080000
+ dc.l $216e000c,$0004216e,$00100008,$61ff0000
+ dc.l $4f901d40,$ff4e1200,$02ae00ff,$00ffff64
+ dc.l $4280102e,$ff634a01,$660861ff,$000044a6
+ dc.l $60300c01,$00016608,$61ff0000,$4fda6022
+ dc.l $0c010002,$660861ff,$00005494,$60140c01
+ dc.l $00036608,$61ff0000,$52666006,$61ff0000
+ dc.l $44ce4cee,$0303ff9c,$f22e9800,$ff60f22e
+ dc.l $d040ffe8,$4e5e4e75,$4e56ff40,$48ee0303
+ dc.l $ff9cf22e,$b800ff60,$f22ef0c0,$ffdcf23c
+ dc.l $90000000,$0000f22e,$44000008,$f22e6800
+ dc.l $ff6c41ee,$ff6c61ff,$00004ef6,$1d40ff4e
+ dc.l $120002ae,$00ff00ff,$ff644280,$102eff63
+ dc.l $4a016608,$61ff0000,$33da6030,$0c010001
+ dc.l $660861ff,$00005420,$60220c01,$00026608
+ dc.l $61ff0000,$53ca6014,$0c010003,$660861ff
+ dc.l $000051cc,$600661ff,$0000344c,$4cee0303
+ dc.l $ff9cf22e,$9800ff60,$f22ed040,$ffe84e5e
+ dc.l $4e754e56,$ff4048ee,$0303ff9c,$f22eb800
+ dc.l $ff60f22e,$f0c0ffdc,$f23c9000,$00000000
+ dc.l $f22e5400,$0008f22e,$6800ff6c,$41eeff6c
+ dc.l $61ff0000,$4e5c1d40,$ff4e1200,$02ae00ff
+ dc.l $00ffff64,$4280102e,$ff631d41,$ff4e4a01
+ dc.l $660861ff,$0000333c,$60300c01,$00016608
+ dc.l $61ff0000,$53826022,$0c010002,$660861ff
+ dc.l $0000532c,$60140c01,$00036608,$61ff0000
+ dc.l $512e6006,$61ff0000,$33ae4cee,$0303ff9c
+ dc.l $f22e9800,$ff60f22e,$d040ffe8,$4e5e4e75
+ dc.l $4e56ff40,$48ee0303,$ff9cf22e,$b800ff60
+ dc.l $f22ef0c0,$ffdcf23c,$90000000,$000041ee
+ dc.l $ff6c216e,$00080000,$216e000c,$0004216e
+ dc.l $00100008,$61ff0000,$4db81d40,$ff4e1200
+ dc.l $02ae00ff,$00ffff64,$4280102e,$ff634a01
+ dc.l $660861ff,$0000329c,$60300c01,$00016608
+ dc.l $61ff0000,$52e26022,$0c010002,$660861ff
+ dc.l $0000528c,$60140c01,$00036608,$61ff0000
+ dc.l $508e6006,$61ff0000,$330e4cee,$0303ff9c
+ dc.l $f22e9800,$ff60f22e,$d040ffe8,$4e5e4e75
+ dc.l $4e56ff40,$48ee0303,$ff9cf22e,$b800ff60
+ dc.l $f22ef0c0,$ffdcf23c,$90000000,$0000f22e
+ dc.l $44000008,$f22e6800,$ff6c41ee,$ff6c61ff
+ dc.l $00004d1e,$1d40ff4e,$120002ae,$00ff00ff
+ dc.l $ff644280,$102eff63,$4a016608,$61ff0000
+ dc.l $27cc6030,$0c010001,$660861ff,$00005284
+ dc.l $60220c01,$00026608,$61ff0000,$4dca6014
+ dc.l $0c010003,$660861ff,$00004ff4,$600661ff
+ dc.l $0000282a,$4cee0303,$ff9cf22e,$9800ff60
+ dc.l $f22ed040,$ffe84e5e,$4e754e56,$ff4048ee
+ dc.l $0303ff9c,$f22eb800,$ff60f22e,$f0c0ffdc
+ dc.l $f23c9000,$00000000,$f22e5400,$0008f22e
+ dc.l $6800ff6c,$41eeff6c,$61ff0000,$4c841d40
+ dc.l $ff4e1200,$02ae00ff,$00ffff64,$4280102e
+ dc.l $ff631d41,$ff4e4a01,$660861ff,$0000272e
+ dc.l $60300c01,$00016608,$61ff0000,$51e66022
+ dc.l $0c010002,$660861ff,$00004d2c,$60140c01
+ dc.l $00036608,$61ff0000,$4f566006,$61ff0000
+ dc.l $278c4cee,$0303ff9c,$f22e9800,$ff60f22e
+ dc.l $d040ffe8,$4e5e4e75,$4e56ff40,$48ee0303
+ dc.l $ff9cf22e,$b800ff60,$f22ef0c0,$ffdcf23c
+ dc.l $90000000,$000041ee,$ff6c216e,$00080000
+ dc.l $216e000c,$0004216e,$00100008,$61ff0000
+ dc.l $4be01d40,$ff4e1200,$02ae00ff,$00ffff64
+ dc.l $4280102e,$ff634a01,$660861ff,$0000268e
+ dc.l $60300c01,$00016608,$61ff0000,$51466022
+ dc.l $0c010002,$660861ff,$00004c8c,$60140c01
+ dc.l $00036608,$61ff0000,$4eb66006,$61ff0000
+ dc.l $26ec4cee,$0303ff9c,$f22e9800,$ff60f22e
+ dc.l $d040ffe8,$4e5e4e75,$4e56ff40,$48ee0303
+ dc.l $ff9cf22e,$b800ff60,$f22ef0c0,$ffdcf23c
+ dc.l $90000000,$0000f22e,$44000008,$f22e6800
+ dc.l $ff6c41ee,$ff6c61ff,$00004b46,$1d40ff4e
+ dc.l $120002ae,$00ff00ff,$ff644280,$102eff63
+ dc.l $4a016608,$61ff0000,$2fb06030,$0c010001
+ dc.l $660861ff,$00004ff4,$60220c01,$00026608
+ dc.l $61ff0000,$4bf26014,$0c010003,$660861ff
+ dc.l $00004e1c,$600661ff,$00002f9a,$4cee0303
+ dc.l $ff9cf22e,$9800ff60,$f22ed040,$ffe84e5e
+ dc.l $4e754e56,$ff4048ee,$0303ff9c,$f22eb800
+ dc.l $ff60f22e,$f0c0ffdc,$f23c9000,$00000000
+ dc.l $f22e5400,$0008f22e,$6800ff6c,$41eeff6c
+ dc.l $61ff0000,$4aac1d40,$ff4e1200,$02ae00ff
+ dc.l $00ffff64,$4280102e,$ff631d41,$ff4e4a01
+ dc.l $660861ff,$00002f12,$60300c01,$00016608
+ dc.l $61ff0000,$4f566022,$0c010002,$660861ff
+ dc.l $00004b54,$60140c01,$00036608,$61ff0000
+ dc.l $4d7e6006,$61ff0000,$2efc4cee,$0303ff9c
+ dc.l $f22e9800,$ff60f22e,$d040ffe8,$4e5e4e75
+ dc.l $4e56ff40,$48ee0303,$ff9cf22e,$b800ff60
+ dc.l $f22ef0c0,$ffdcf23c,$90000000,$000041ee
+ dc.l $ff6c216e,$00080000,$216e000c,$0004216e
+ dc.l $00100008,$61ff0000,$4a081d40,$ff4e1200
+ dc.l $02ae00ff,$00ffff64,$4280102e,$ff634a01
+ dc.l $660861ff,$00002e72,$60300c01,$00016608
+ dc.l $61ff0000,$4eb66022,$0c010002,$660861ff
+ dc.l $00004ab4,$60140c01,$00036608,$61ff0000
+ dc.l $4cde6006,$61ff0000,$2e5c4cee,$0303ff9c
+ dc.l $f22e9800,$ff60f22e,$d040ffe8,$4e5e4e75
+ dc.l $4e56ff40,$48ee0303,$ff9cf22e,$b800ff60
+ dc.l $f22ef0c0,$ffdcf23c,$90000000,$0000f22e
+ dc.l $44000008,$f22e6800,$ff6c41ee,$ff6c61ff
+ dc.l $0000496e,$1d40ff4e,$120002ae,$00ff00ff
+ dc.l $ff644280,$102eff63,$4a016608,$61ff0000
+ dc.l $2e0c6030,$0c010001,$660861ff,$00004e1c
+ dc.l $60220c01,$00026608,$61ff0000,$4a1a6014
+ dc.l $0c010003,$660861ff,$00004c44,$600661ff
+ dc.l $00002e08,$4cee0303,$ff9cf22e,$9800ff60
+ dc.l $f22ed040,$ffe84e5e,$4e754e56,$ff4048ee
+ dc.l $0303ff9c,$f22eb800,$ff60f22e,$f0c0ffdc
+ dc.l $f23c9000,$00000000,$f22e5400,$0008f22e
+ dc.l $6800ff6c,$41eeff6c,$61ff0000,$48d41d40
+ dc.l $ff4e1200,$02ae00ff,$00ffff64,$4280102e
+ dc.l $ff631d41,$ff4e4a01,$660861ff,$00002d6e
+ dc.l $60300c01,$00016608,$61ff0000,$4d7e6022
+ dc.l $0c010002,$660861ff,$0000497c,$60140c01
+ dc.l $00036608,$61ff0000,$4ba66006,$61ff0000
+ dc.l $2d6a4cee,$0303ff9c,$f22e9800,$ff60f22e
+ dc.l $d040ffe8,$4e5e4e75,$4e56ff40,$48ee0303
+ dc.l $ff9cf22e,$b800ff60,$f22ef0c0,$ffdcf23c
+ dc.l $90000000,$000041ee,$ff6c216e,$00080000
+ dc.l $216e000c,$0004216e,$00100008,$61ff0000
+ dc.l $48301d40,$ff4e1200,$02ae00ff,$00ffff64
+ dc.l $4280102e,$ff634a01,$660861ff,$00002cce
+ dc.l $60300c01,$00016608,$61ff0000,$4cde6022
+ dc.l $0c010002,$660861ff,$000048dc,$60140c01
+ dc.l $00036608,$61ff0000,$4b066006,$61ff0000
+ dc.l $2cca4cee,$0303ff9c,$f22e9800,$ff60f22e
+ dc.l $d040ffe8,$4e5e4e75,$4e56ff40,$48ee0303
+ dc.l $ff9cf22e,$b800ff60,$f22ef0c0,$ffdcf23c
+ dc.l $90000000,$0000f22e,$44000008,$f22e6800
+ dc.l $ff6c41ee,$ff6c61ff,$00004796,$1d40ff4e
+ dc.l $120002ae,$00ff00ff,$ff644280,$102eff63
+ dc.l $4a016608,$61ff0000,$0af46030,$0c010001
+ dc.l $660861ff,$00004d18,$60220c01,$00026608
+ dc.l $61ff0000,$4d386014,$0c010003,$660861ff
+ dc.l $00004d34,$600661ff,$00000d58,$4cee0303
+ dc.l $ff9cf22e,$9800ff60,$f227e003,$f21fd040
+ dc.l $f21fd080,$4e5e4e75,$4e56ff40,$48ee0303
+ dc.l $ff9cf22e,$b800ff60,$f22ef0c0,$ffdcf23c
+ dc.l $90000000,$0000f22e,$54000008,$f22e6800
+ dc.l $ff6c41ee,$ff6c61ff,$000046f6,$1d40ff4e
+ dc.l $120002ae,$00ff00ff,$ff644280,$102eff63
+ dc.l $1d41ff4e,$4a016608,$61ff0000,$0a506030
+ dc.l $0c010001,$660861ff,$00004c74,$60220c01
+ dc.l $00026608,$61ff0000,$4c946014,$0c010003
+ dc.l $660861ff,$00004c90,$600661ff,$00000cb4
+ dc.l $4cee0303,$ff9cf22e,$9800ff60,$f227e003
+ dc.l $f21fd040,$f21fd080,$4e5e4e75,$4e56ff40
+ dc.l $48ee0303,$ff9cf22e,$b800ff60,$f22ef0c0
+ dc.l $ffdcf23c,$90000000,$000041ee,$ff6c216e
+ dc.l $00080000,$216e000c,$0004216e,$00100008
+ dc.l $61ff0000,$464c1d40,$ff4e1200,$02ae00ff
+ dc.l $00ffff64,$4280102e,$ff634a01,$660861ff
+ dc.l $000009aa,$60300c01,$00016608,$61ff0000
+ dc.l $4bce6022,$0c010002,$660861ff,$00004bee
+ dc.l $60140c01,$00036608,$61ff0000,$4bea6006
+ dc.l $61ff0000,$0c0e4cee,$0303ff9c,$f22e9800
+ dc.l $ff60f227,$e003f21f,$d040f21f,$d0804e5e
+ dc.l $4e754e56,$ff4048ee,$0303ff9c,$f22eb800
+ dc.l $ff60f22e,$f0c0ffdc,$f23c9000,$00000000
+ dc.l $f22e4400,$0008f22e,$6800ff78,$41eeff78
+ dc.l $61ff0000,$45ac1d40,$ff4ff22e,$4400000c
+ dc.l $f22e6800,$ff6c41ee,$ff6c61ff,$00004592
+ dc.l $1d40ff4e,$220002ae,$00ff00ff,$ff644280
+ dc.l $102eff63,$41eeff6c,$43eeff78,$4a016608
+ dc.l $61ff0000,$4c466030,$0c010001,$660861ff
+ dc.l $00004c64,$60220c01,$00026608,$61ff0000
+ dc.l $4c846014,$0c010003,$660861ff,$00004d16
+ dc.l $600661ff,$00004c14,$4cee0303,$ff9cf22e
+ dc.l $9800ff60,$f22ed040,$ffe84e5e,$4e754e56
+ dc.l $ff4048ee,$0303ff9c,$f22eb800,$ff60f22e
+ dc.l $f0c0ffdc,$f23c9000,$00000000,$f22e5400
+ dc.l $0008f22e,$6800ff78,$41eeff78,$61ff0000
+ dc.l $44f01d40,$ff4ff22e,$54000010,$f22e6800
+ dc.l $ff6c41ee,$ff6c61ff,$000044d6,$1d40ff4e
+ dc.l $220002ae,$00ff00ff,$ff644280,$102eff63
+ dc.l $41eeff6c,$43eeff78,$4a016608,$61ff0000
+ dc.l $4b8a6030,$0c010001,$660861ff,$00004ba8
+ dc.l $60220c01,$00026608,$61ff0000,$4bc86014
+ dc.l $0c010003,$660861ff,$00004c5a,$600661ff
+ dc.l $00004b58,$4cee0303,$ff9cf22e,$9800ff60
+ dc.l $f22ed040,$ffe84e5e,$4e754e56,$ff4048ee
+ dc.l $0303ff9c,$f22eb800,$ff60f22e,$f0c0ffdc
+ dc.l $f23c9000,$00000000,$41eeff78,$216e0008
+ dc.l $0000216e,$000c0004,$216e0010,$000861ff
+ dc.l $0000442e,$1d40ff4f,$41eeff6c,$216e0014
+ dc.l $0000216e,$00180004,$216e001c,$000861ff
+ dc.l $0000440e,$1d40ff4e,$220002ae,$00ff00ff
+ dc.l $ff644280,$102eff63,$41eeff6c,$43eeff78
+ dc.l $4a016608,$61ff0000,$4ac26030,$0c010001
+ dc.l $660861ff,$00004ae0,$60220c01,$00026608
+ dc.l $61ff0000,$4b006014,$0c010003,$660861ff
+ dc.l $00004b92,$600661ff,$00004a90,$4cee0303
+ dc.l $ff9cf22e,$9800ff60,$f22ed040,$ffe84e5e
+ dc.l $4e754e56,$ff4048ee,$0303ff9c,$f22eb800
+ dc.l $ff60f22e,$f0c0ffdc,$f23c9000,$00000000
+ dc.l $f22e4400,$0008f22e,$6800ff78,$41eeff78
+ dc.l $61ff0000,$436c1d40,$ff4ff22e,$4400000c
+ dc.l $f22e6800,$ff6c41ee,$ff6c61ff,$00004352
+ dc.l $1d40ff4e,$220002ae,$00ff00ff,$ff644280
+ dc.l $102eff63,$41eeff6c,$43eeff78,$4a016608
+ dc.l $61ff0000,$491c6030,$0c010001,$660861ff
+ dc.l $0000493a,$60220c01,$00026608,$61ff0000
+ dc.l $495a6014,$0c010003,$660861ff,$00004ad6
+ dc.l $600661ff,$000048ea,$4cee0303,$ff9cf22e
+ dc.l $9800ff60,$f22ed040,$ffe84e5e,$4e754e56
+ dc.l $ff4048ee,$0303ff9c,$f22eb800,$ff60f22e
+ dc.l $f0c0ffdc,$f23c9000,$00000000,$f22e5400
+ dc.l $0008f22e,$6800ff78,$41eeff78,$61ff0000
+ dc.l $42b01d40,$ff4ff22e,$54000010,$f22e6800
+ dc.l $ff6c41ee,$ff6c61ff,$00004296,$1d40ff4e
+ dc.l $220002ae,$00ff00ff,$ff644280,$102eff63
+ dc.l $41eeff6c,$43eeff78,$4a016608,$61ff0000
+ dc.l $48606030,$0c010001,$660861ff,$0000487e
+ dc.l $60220c01,$00026608,$61ff0000,$489e6014
+ dc.l $0c010003,$660861ff,$00004a1a,$600661ff
+ dc.l $0000482e,$4cee0303,$ff9cf22e,$9800ff60
+ dc.l $f22ed040,$ffe84e5e,$4e754e56,$ff4048ee
+ dc.l $0303ff9c,$f22eb800,$ff60f22e,$f0c0ffdc
+ dc.l $f23c9000,$00000000,$41eeff78,$216e0008
+ dc.l $0000216e,$000c0004,$216e0010,$000861ff
+ dc.l $000041ee,$1d40ff4f,$41eeff6c,$216e0014
+ dc.l $0000216e,$00180004,$216e001c,$000861ff
+ dc.l $000041ce,$1d40ff4e,$220002ae,$00ff00ff
+ dc.l $ff644280,$102eff63,$41eeff6c,$43eeff78
+ dc.l $4a016608,$61ff0000,$47986030,$0c010001
+ dc.l $660861ff,$000047b6,$60220c01,$00026608
+ dc.l $61ff0000,$47d66014,$0c010003,$660861ff
+ dc.l $00004952,$600661ff,$00004766,$4cee0303
+ dc.l $ff9cf22e,$9800ff60,$f22ed040,$ffe84e5e
+ dc.l $4e754e56,$ff4048ee,$0303ff9c,$f22eb800
+ dc.l $ff60f22e,$f0c0ffdc,$f23c9000,$00000000
+ dc.l $f22e4400,$0008f22e,$6800ff78,$41eeff78
+ dc.l $61ff0000,$412c1d40,$ff4ff22e,$4400000c
+ dc.l $f22e6800,$ff6c41ee,$ff6c61ff,$00004112
+ dc.l $1d40ff4e,$220002ae,$00ff00ff,$ff644280
+ dc.l $102eff63,$41eeff6c,$43eeff78,$4a016608
+ dc.l $61ff0000,$484a6030,$0c010001,$660861ff
+ dc.l $0000486a,$60220c01,$00026608,$61ff0000
+ dc.l $488a6014,$0c010003,$660861ff,$00004896
+ dc.l $600661ff,$00004818,$4cee0303,$ff9cf22e
+ dc.l $9800ff60,$f22ed040,$ffe84e5e,$4e754e56
+ dc.l $ff4048ee,$0303ff9c,$f22eb800,$ff60f22e
+ dc.l $f0c0ffdc,$f23c9000,$00000000,$f22e5400
+ dc.l $0008f22e,$6800ff78,$41eeff78,$61ff0000
+ dc.l $40701d40,$ff4ff22e,$54000010,$f22e6800
+ dc.l $ff6c41ee,$ff6c61ff,$00004056,$1d40ff4e
+ dc.l $220002ae,$00ff00ff,$ff644280,$102eff63
+ dc.l $41eeff6c,$43eeff78,$4a016608,$61ff0000
+ dc.l $478e6030,$0c010001,$660861ff,$000047ae
+ dc.l $60220c01,$00026608,$61ff0000,$47ce6014
+ dc.l $0c010003,$660861ff,$000047da,$600661ff
+ dc.l $0000475c,$4cee0303,$ff9cf22e,$9800ff60
+ dc.l $f22ed040,$ffe84e5e,$4e754e56,$ff4048ee
+ dc.l $0303ff9c,$f22eb800,$ff60f22e,$f0c0ffdc
+ dc.l $f23c9000,$00000000,$41eeff78,$216e0008
+ dc.l $0000216e,$000c0004,$216e0010,$000861ff
+ dc.l $00003fae,$1d40ff4f,$41eeff6c,$216e0014
+ dc.l $0000216e,$00180004,$216e001c,$000861ff
+ dc.l $00003f8e,$1d40ff4e,$220002ae,$00ff00ff
+ dc.l $ff644280,$102eff63,$41eeff6c,$43eeff78
+ dc.l $4a016608,$61ff0000,$46c66030,$0c010001
+ dc.l $660861ff,$000046e6,$60220c01,$00026608
+ dc.l $61ff0000,$47066014,$0c010003,$660861ff
+ dc.l $00004712,$600661ff,$00004694,$4cee0303
+ dc.l $ff9cf22e,$9800ff60,$f22ed040,$ffe84e5e
+ dc.l $4e75bd6a,$aa77ccc9,$94f53de6,$12097aae
+ dc.l $8da1be5a,$e6452a11,$8ae43ec7,$1de3a534
+ dc.l $1531bf2a,$01a01a01,$8b590000,$00000000
+ dc.l $00003ff8,$00008888,$88888888,$59af0000
+ dc.l $0000bffc,$0000aaaa,$aaaaaaaa,$aa990000
+ dc.l $00003d2a,$c4d0d601,$1ee3bda9,$396f9f45
+ dc.l $ac193e21,$eed90612,$c972be92,$7e4fb79d
+ dc.l $9fcf3efa,$01a01a01,$d4230000,$00000000
+ dc.l $0000bff5,$0000b60b,$60b60b61,$d4380000
+ dc.l $00003ffa,$0000aaaa,$aaaaaaaa,$ab5ebf00
+ dc.l $00002d7c,$00000000,$ff5c6008,$2d7c0000
+ dc.l $0001ff5c,$f2104800,$f22e6800,$ff842210
+ dc.l $32280004,$02817fff,$ffff0c81,$3fd78000
+ dc.l $6c046000,$01780c81,$4004bc7e,$6d046000
+ dc.l $0468f200,$0080f23a,$54a3d186,$43fb0170
+ dc.l $00000866,$f22e6080,$ff58222e,$ff58e981
+ dc.l $d3c1f219,$4828f211,$4428222e,$ff58d2ae
+ dc.l $ff5ce299,$0c810000,$00006d00,$0088f227
+ dc.l $e00cf22e,$6800ff84,$f2000023,$f23a5580
+ dc.l $fed2f23a,$5500fed4,$f2000080,$f20004a3
+ dc.l $e2990281,$80000000,$b3aeff84,$f20005a3
+ dc.l $f2000523,$f23a55a2,$febaf23a,$5522febc
+ dc.l $f20005a3,$f2000523,$f23a55a2,$feb6f23a
+ dc.l $4922fec0,$f2000ca3,$f2000123,$f23a48a2
+ dc.l $fec2f22e,$4823ff84,$f20008a2,$f2000423
+ dc.l $f21fd030,$f2009000,$f22e4822,$ff8460ff
+ dc.l $00004006,$f227e00c,$f2000023,$f23a5500
+ dc.l $fea2f23a,$5580fea4,$f2000080,$f20004a3
+ dc.l $f22e6800,$ff84e299,$02818000,$0000f200
+ dc.l $0523b3ae,$ff840281,$80000000,$f20005a3
+ dc.l $00813f80,$00002d41,$ff54f23a,$5522fe74
+ dc.l $f23a55a2,$fe76f200,$0523f200,$05a3f23a
+ dc.l $5522fe70,$f23a49a2,$fe7af200,$0523f200
+ dc.l $0ca3f23a,$4922fe7c,$f23a44a2,$fe82f200
+ dc.l $0823f200,$0422f22e,$4823ff84,$f21fd030
+ dc.l $f2009000,$f22e4422,$ff5460ff,$00003f6a
+ dc.l $0c813fff,$80006eff,$00000300,$222eff5c
+ dc.l $0c810000,$00006e14,$f2009000,$123c0003
+ dc.l $f22e4800,$ff8460ff,$00003f36,$f23c4400
+ dc.l $3f800000,$f2009000,$f23c4422,$80800000
+ dc.l $60ff0000,$3f2c60ff,$00003f64,$f23c4400
+ dc.l $3f800000,$60ff0000,$3f182d7c,$00000004
+ dc.l $ff5cf210,$4800f22e,$6800ff84,$22103228
+ dc.l $00040281,$7fffffff,$0c813fd7,$80006c04
+ dc.l $60000240,$0c814004,$bc7e6d04,$6000027a
+ dc.l $f2000080,$f23a54a3,$cf9843fb,$01700000
+ dc.l $0678f22e,$6080ff58,$222eff58,$e981d3c1
+ dc.l $f2194828,$f2114428,$222eff58,$e2990c81
+ dc.l $00000000,$6c000106,$f227e004,$f22e6800
+ dc.l $ff84f200,$0023f23a,$5480fce8,$f23a5500
+ dc.l $fd32f200,$00a3f200,$01232f02,$2401e29a
+ dc.l $02828000,$0000b382,$02828000,$0000f23a
+ dc.l $54a2fcc8,$f23a5522,$fd12f200,$00a3b5ae
+ dc.l $ff84241f,$f2000123,$e2990281,$80000000
+ dc.l $2d7c3f80,$0000ff54,$b3aeff54,$f23a54a2
+ dc.l $fca2f23a,$5522fcec,$f20000a3,$f2000123
+ dc.l $f22e6800,$ff90f23a,$54a2fc90,$b3aeff90
+ dc.l $f23a5522,$fcd6f200,$00a3f200,$0123f23a
+ dc.l $54a2fc80,$f23a5522,$fccaf200,$00a3f200
+ dc.l $0123f23a,$48a2fc7c,$f23a4922,$fcc6f200
+ dc.l $00a3f200,$0123f23a,$48a2fc78,$f23a4922
+ dc.l $fcc2f200,$00a3f200,$0823f22e,$48a3ff84
+ dc.l $f23a4422,$fcbaf22e,$4823ff90,$f21fd020
+ dc.l $f2009000,$f22e48a2,$ff8461ff,$00003e22
+ dc.l $f22e4422,$ff5460ff,$00003d9e,$f227e004
+ dc.l $f22e6800,$ff84f200,$0023f23a,$5480fc34
+ dc.l $f23a5500,$fbdef200,$00a3f22e,$6800ff90
+ dc.l $f2000123,$e2990281,$80000000,$f23a54a2
+ dc.l $fc1af23a,$5522fbc4,$b3aeff84,$b3aeff90
+ dc.l $f20000a3,$00813f80,$00002d41,$ff54f200
+ dc.l $0123f23a,$54a2fbfc,$f23a5522,$fba6f200
+ dc.l $00a3f200,$0123f23a,$54a2fbf0,$f23a5522
+ dc.l $fb9af200,$00a3f200,$0123f23a,$54a2fbe4
+ dc.l $f23a5522,$fb8ef200,$00a3f200,$0123f23a
+ dc.l $48a2fbe0,$f23a4922,$fb8af200,$00a3f200
+ dc.l $0123f23a,$48a2fbdc,$f23a4922,$fb86f200
+ dc.l $00a3f200,$0823f23a,$44a2fbd4,$f22e4823
+ dc.l $ff84f22e,$48a3ff90,$f21fd020,$f2009000
+ dc.l $f22e44a2,$ff5461ff,$00003d36,$f22e4822
+ dc.l $ff8460ff,$00003cb2,$0c813fff,$80006e00
+ dc.l $0048f23c,$44803f80,$0000f200,$9000f23c
+ dc.l $44a80080,$000061ff,$00003d06,$f200b000
+ dc.l $123c0003,$f22e4800,$ff8460ff,$00003c72
+ dc.l $2f00f23c,$44803f80,$000061ff,$00003ce2
+ dc.l $201f60ff,$00003ca8,$f227e03c,$2f02f23c
+ dc.l $44800000,$00000c81,$7ffeffff,$66523d7c
+ dc.l $7ffeff84,$2d7cc90f,$daa2ff88,$42aeff8c
+ dc.l $3d7c7fdc,$ff902d7c,$85a308d3,$ff9442ae
+ dc.l $ff98f200,$003af294,$000e002e,$0080ff84
+ dc.l $002e0080,$ff90f22e,$4822ff84,$f2000080
+ dc.l $f22e4822,$ff90f200,$00a8f22e,$48a2ff90
+ dc.l $f22e6800,$ff84322e,$ff842241,$02810000
+ dc.l $7fff0481,$00003fff,$0c810000,$001c6f0e
+ dc.l $04810000,$001b1d7c,$0000ff58,$60084281
+ dc.l $1d7c0001,$ff58243c,$00003ffe,$94812d7c
+ dc.l $a2f9836e,$ff882d7c,$4e44152a,$ff8c3d42
+ dc.l $ff84f200,$0100f22e,$4923ff84,$24094842
+ dc.l $02828000,$00000082,$5f000000,$2d42ff54
+ dc.l $f22e4522,$ff54f22e,$4528ff54,$24010682
+ dc.l $00003fff,$3d42ff84,$2d7cc90f,$daa2ff88
+ dc.l $42aeff8c,$06810000,$3fdd3d41,$ff902d7c
+ dc.l $85a308d3,$ff9442ae,$ff98122e,$ff58f200
+ dc.l $0a00f22e,$4a23ff84,$f2000a80,$f22e4aa3
+ dc.l $ff90f200,$1180f200,$15a2f200,$0e28f200
+ dc.l $0c28f200,$1622f200,$0180f200,$10a8f200
+ dc.l $04220c01,$00006e00,$000ef200,$01a8f200
+ dc.l $0ca26000,$ff0cf22e,$6100ff58,$241ff21f
+ dc.l $d03c222e,$ff5c0c81,$00000004,$6d00fa4c
+ dc.l $6000fc36,$3ea0b759,$f50f8688,$bef2baa5
+ dc.l $a8924f04,$bf346f59,$b39ba65f,$00000000
+ dc.l $00000000,$3ff60000,$e073d3fc,$199c4a00
+ dc.l $00000000,$3ff90000,$d23cd684,$15d95fa1
+ dc.l $00000000,$bffc0000,$8895a6c5,$fb423bca
+ dc.l $00000000,$bffd0000,$eef57e0d,$a84bc8ce
+ dc.l $00000000,$3ffc0000,$a2f9836e,$4e44152a
+ dc.l $00000000,$40010000,$c90fdaa2,$00000000
+ dc.l $00000000,$3fdf0000,$85a308d4,$00000000
+ dc.l $00000000,$c0040000,$c90fdaa2,$2168c235
+ dc.l $21800000,$c0040000,$c2c75bcd,$105d7c23
+ dc.l $a0d00000,$c0040000,$bc7edcf7,$ff523611
+ dc.l $a1e80000,$c0040000,$b6365e22,$ee46f000
+ dc.l $21480000,$c0040000,$afeddf4d,$dd3ba9ee
+ dc.l $a1200000,$c0040000,$a9a56078,$cc3063dd
+ dc.l $21fc0000,$c0040000,$a35ce1a3,$bb251dcb
+ dc.l $21100000,$c0040000,$9d1462ce,$aa19d7b9
+ dc.l $a1580000,$c0040000,$96cbe3f9,$990e91a8
+ dc.l $21e00000,$c0040000,$90836524,$88034b96
+ dc.l $20b00000,$c0040000,$8a3ae64f,$76f80584
+ dc.l $a1880000,$c0040000,$83f2677a,$65ecbf73
+ dc.l $21c40000,$c0030000,$fb53d14a,$a9c2f2c2
+ dc.l $20000000,$c0030000,$eec2d3a0,$87ac669f
+ dc.l $21380000,$c0030000,$e231d5f6,$6595da7b
+ dc.l $a1300000,$c0030000,$d5a0d84c,$437f4e58
+ dc.l $9fc00000,$c0030000,$c90fdaa2,$2168c235
+ dc.l $21000000,$c0030000,$bc7edcf7,$ff523611
+ dc.l $a1680000,$c0030000,$afeddf4d,$dd3ba9ee
+ dc.l $a0a00000,$c0030000,$a35ce1a3,$bb251dcb
+ dc.l $20900000,$c0030000,$96cbe3f9,$990e91a8
+ dc.l $21600000,$c0030000,$8a3ae64f,$76f80584
+ dc.l $a1080000,$c0020000,$fb53d14a,$a9c2f2c2
+ dc.l $1f800000,$c0020000,$e231d5f6,$6595da7b
+ dc.l $a0b00000,$c0020000,$c90fdaa2,$2168c235
+ dc.l $20800000,$c0020000,$afeddf4d,$dd3ba9ee
+ dc.l $a0200000,$c0020000,$96cbe3f9,$990e91a8
+ dc.l $20e00000,$c0010000,$fb53d14a,$a9c2f2c2
+ dc.l $1f000000,$c0010000,$c90fdaa2,$2168c235
+ dc.l $20000000,$c0010000,$96cbe3f9,$990e91a8
+ dc.l $20600000,$c0000000,$c90fdaa2,$2168c235
+ dc.l $1f800000,$bfff0000,$c90fdaa2,$2168c235
+ dc.l $1f000000,$00000000,$00000000,$00000000
+ dc.l $00000000,$3fff0000,$c90fdaa2,$2168c235
+ dc.l $9f000000,$40000000,$c90fdaa2,$2168c235
+ dc.l $9f800000,$40010000,$96cbe3f9,$990e91a8
+ dc.l $a0600000,$40010000,$c90fdaa2,$2168c235
+ dc.l $a0000000,$40010000,$fb53d14a,$a9c2f2c2
+ dc.l $9f000000,$40020000,$96cbe3f9,$990e91a8
+ dc.l $a0e00000,$40020000,$afeddf4d,$dd3ba9ee
+ dc.l $20200000,$40020000,$c90fdaa2,$2168c235
+ dc.l $a0800000,$40020000,$e231d5f6,$6595da7b
+ dc.l $20b00000,$40020000,$fb53d14a,$a9c2f2c2
+ dc.l $9f800000,$40030000,$8a3ae64f,$76f80584
+ dc.l $21080000,$40030000,$96cbe3f9,$990e91a8
+ dc.l $a1600000,$40030000,$a35ce1a3,$bb251dcb
+ dc.l $a0900000,$40030000,$afeddf4d,$dd3ba9ee
+ dc.l $20a00000,$40030000,$bc7edcf7,$ff523611
+ dc.l $21680000,$40030000,$c90fdaa2,$2168c235
+ dc.l $a1000000,$40030000,$d5a0d84c,$437f4e58
+ dc.l $1fc00000,$40030000,$e231d5f6,$6595da7b
+ dc.l $21300000,$40030000,$eec2d3a0,$87ac669f
+ dc.l $a1380000,$40030000,$fb53d14a,$a9c2f2c2
+ dc.l $a0000000,$40040000,$83f2677a,$65ecbf73
+ dc.l $a1c40000,$40040000,$8a3ae64f,$76f80584
+ dc.l $21880000,$40040000,$90836524,$88034b96
+ dc.l $a0b00000,$40040000,$96cbe3f9,$990e91a8
+ dc.l $a1e00000,$40040000,$9d1462ce,$aa19d7b9
+ dc.l $21580000,$40040000,$a35ce1a3,$bb251dcb
+ dc.l $a1100000,$40040000,$a9a56078,$cc3063dd
+ dc.l $a1fc0000,$40040000,$afeddf4d,$dd3ba9ee
+ dc.l $21200000,$40040000,$b6365e22,$ee46f000
+ dc.l $a1480000,$40040000,$bc7edcf7,$ff523611
+ dc.l $21e80000,$40040000,$c2c75bcd,$105d7c23
+ dc.l $20d00000,$40040000,$c90fdaa2,$2168c235
+ dc.l $a1800000,$f2104800,$22103228,$00040281
+ dc.l $7fffffff,$0c813fd7,$80006c04,$60000134
+ dc.l $0c814004,$bc7e6d04,$60000144,$f2000080
+ dc.l $f23a54a3,$c6dc43fa,$fdbcf201,$6080e981
+ dc.l $d3c1f219,$4828f211,$4428ea99,$02818000
+ dc.l $0000f227,$e00c0c81,$00000000,$6d000072
+ dc.l $f2000080,$f20004a3,$f23a5580,$faf8f23a
+ dc.l $5500fafa,$f20005a3,$f2000523,$f23a55a2
+ dc.l $faf4f23a,$4922fafe,$f20005a3,$f2000523
+ dc.l $f23a49a2,$fb00f23a,$4922fb0a,$f20005a3
+ dc.l $f2000523,$f23a49a2,$fb0cf200,$0123f200
+ dc.l $0ca3f200,$0822f23c,$44a23f80,$0000f21f
+ dc.l $d030f200,$9000f200,$042060ff,$0000357a
+ dc.l $f2000080,$f2000023,$f23a5580,$fa88f23a
+ dc.l $5500fa8a,$f20001a3,$f2000123,$f23a55a2
+ dc.l $fa84f23a,$4922fa8e,$f20001a3,$f2000123
+ dc.l $f23a49a2,$fa90f23a,$4922fa9a,$f20001a3
+ dc.l $f2000123,$f23a49a2,$fa9cf200,$0523f200
+ dc.l $0c23f200,$08a2f23c,$44223f80,$0000f21f
+ dc.l $d030f227,$68800a97,$80000000,$f2009000
+ dc.l $f21f4820,$60ff0000,$35000c81,$3fff8000
+ dc.l $6e1cf227,$6800f200,$9000123c,$0003f21f
+ dc.l $480060ff,$000034da,$60ff0000,$3522f227
+ dc.l $e03c2f02,$f23c4480,$00000000,$0c817ffe
+ dc.l $ffff6652,$3d7c7ffe,$ff842d7c,$c90fdaa2
+ dc.l $ff8842ae,$ff8c3d7c,$7fdcff90,$2d7c85a3
+ dc.l $08d3ff94,$42aeff98,$f200003a,$f294000e
+ dc.l $002e0080,$ff84002e,$0080ff90,$f22e4822
+ dc.l $ff84f200,$0080f22e,$4822ff90,$f20000a8
+ dc.l $f22e48a2,$ff90f22e,$6800ff84,$322eff84
+ dc.l $22410281,$00007fff,$04810000,$3fff0c81
+ dc.l $0000001c,$6f0e0481,$0000001b,$1d7c0000
+ dc.l $ff586008,$42811d7c,$0001ff58,$243c0000
+ dc.l $3ffe9481,$2d7ca2f9,$836eff88,$2d7c4e44
+ dc.l $152aff8c,$3d42ff84,$f2000100,$f22e4923
+ dc.l $ff842409,$48420282,$80000000,$00825f00
+ dc.l $00002d42,$ff54f22e,$4522ff54,$f22e4528
+ dc.l $ff542401,$06820000,$3fff3d42,$ff842d7c
+ dc.l $c90fdaa2,$ff8842ae,$ff8c0681,$00003fdd
+ dc.l $3d41ff90,$2d7c85a3,$08d3ff94,$42aeff98
+ dc.l $122eff58,$f2000a00,$f22e4a23,$ff84f200
+ dc.l $0a80f22e,$4aa3ff90,$f2001180,$f20015a2
+ dc.l $f2000e28,$f2000c28,$f2001622,$f2000180
+ dc.l $f20010a8,$f2000422,$0c010000,$6e00000e
+ dc.l $f20001a8,$f2000ca2,$6000ff0c,$f22e6100
+ dc.l $ff54241f,$f21fd03c,$222eff54,$e2996000
+ dc.l $fd72bff6,$687e3149,$87d84002,$ac6934a2
+ dc.l $6db3bfc2,$476f4e1d,$a28e3fb3,$44447f87
+ dc.l $6989bfb7,$44ee7faf,$45db3fbc,$71c64694
+ dc.l $0220bfc2,$49249218,$72f93fc9,$99999999
+ dc.l $8fa9bfd5,$55555555,$5555bfb7,$0bf39853
+ dc.l $9e6a3fbc,$7187962d,$1d7dbfc2,$49248271
+ dc.l $07b83fc9,$99999996,$263ebfd5,$55555555
+ dc.l $55363fff,$0000c90f,$daa22168,$c2350000
+ dc.l $0000bfff,$0000c90f,$daa22168,$c2350000
+ dc.l $00000001,$00008000,$00000000,$00000000
+ dc.l $00008001,$00008000,$00000000,$00000000
+ dc.l $00003ffb,$000083d1,$52c5060b,$7a510000
+ dc.l $00003ffb,$00008bc8,$54456549,$8b8b0000
+ dc.l $00003ffb,$000093be,$40601762,$6b0d0000
+ dc.l $00003ffb,$00009bb3,$078d35ae,$c2020000
+ dc.l $00003ffb,$0000a3a6,$9a525ddc,$e7de0000
+ dc.l $00003ffb,$0000ab98,$e9436276,$56190000
+ dc.l $00003ffb,$0000b389,$e502f9c5,$98620000
+ dc.l $00003ffb,$0000bb79,$7e436b09,$e6fb0000
+ dc.l $00003ffb,$0000c367,$a5c739e5,$f4460000
+ dc.l $00003ffb,$0000cb54,$4c61cff7,$d5c60000
+ dc.l $00003ffb,$0000d33f,$62f82488,$533e0000
+ dc.l $00003ffb,$0000db28,$da816240,$4c770000
+ dc.l $00003ffb,$0000e310,$a4078ad3,$4f180000
+ dc.l $00003ffb,$0000eaf6,$b0a8188e,$e1eb0000
+ dc.l $00003ffb,$0000f2da,$f1949dbe,$79d50000
+ dc.l $00003ffb,$0000fabd,$581361d4,$7e3e0000
+ dc.l $00003ffc,$00008346,$ac210959,$ecc40000
+ dc.l $00003ffc,$00008b23,$2a083042,$82d80000
+ dc.l $00003ffc,$000092fb,$70b8d29a,$e2f90000
+ dc.l $00003ffc,$00009acf,$476f5ccd,$1cb40000
+ dc.l $00003ffc,$0000a29e,$76304954,$f23f0000
+ dc.l $00003ffc,$0000aa68,$c5d08ab8,$52300000
+ dc.l $00003ffc,$0000b22d,$fffd9d53,$9f830000
+ dc.l $00003ffc,$0000b9ed,$ef453e90,$0ea50000
+ dc.l $00003ffc,$0000c1a8,$5f1cc75e,$3ea50000
+ dc.l $00003ffc,$0000c95d,$1be82813,$8de60000
+ dc.l $00003ffc,$0000d10b,$f300840d,$2de40000
+ dc.l $00003ffc,$0000d8b4,$b2ba6bc0,$5e7a0000
+ dc.l $00003ffc,$0000e057,$2a6bb423,$35f60000
+ dc.l $00003ffc,$0000e7f3,$2a70ea9c,$aa8f0000
+ dc.l $00003ffc,$0000ef88,$843264ec,$efaa0000
+ dc.l $00003ffc,$0000f717,$0a28ecc0,$66660000
+ dc.l $00003ffd,$0000812f,$d288332d,$ad320000
+ dc.l $00003ffd,$000088a8,$d1b1218e,$4d640000
+ dc.l $00003ffd,$00009012,$ab3f23e4,$aee80000
+ dc.l $00003ffd,$0000976c,$c3d411e7,$f1b90000
+ dc.l $00003ffd,$00009eb6,$89493889,$a2270000
+ dc.l $00003ffd,$0000a5ef,$72c34487,$361b0000
+ dc.l $00003ffd,$0000ad17,$00baf07a,$72270000
+ dc.l $00003ffd,$0000b42c,$bcfafd37,$efb70000
+ dc.l $00003ffd,$0000bb30,$3a940ba8,$0f890000
+ dc.l $00003ffd,$0000c221,$15c6fcae,$bbaf0000
+ dc.l $00003ffd,$0000c8fe,$f3e68633,$12210000
+ dc.l $00003ffd,$0000cfc9,$8330b400,$0c700000
+ dc.l $00003ffd,$0000d680,$7aa1102c,$5bf90000
+ dc.l $00003ffd,$0000dd23,$99bc3125,$2aa30000
+ dc.l $00003ffd,$0000e3b2,$a8556b8f,$c5170000
+ dc.l $00003ffd,$0000ea2d,$764f6431,$59890000
+ dc.l $00003ffd,$0000f3bf,$5bf8bad1,$a21d0000
+ dc.l $00003ffe,$0000801c,$e39e0d20,$5c9a0000
+ dc.l $00003ffe,$00008630,$a2dada1e,$d0660000
+ dc.l $00003ffe,$00008c1a,$d445f3e0,$9b8c0000
+ dc.l $00003ffe,$000091db,$8f1664f3,$50e20000
+ dc.l $00003ffe,$00009773,$1420365e,$538c0000
+ dc.l $00003ffe,$00009ce1,$c8e6a0b8,$cdba0000
+ dc.l $00003ffe,$0000a228,$32dbcada,$ae090000
+ dc.l $00003ffe,$0000a746,$f2ddb760,$22940000
+ dc.l $00003ffe,$0000ac3e,$c0fb997d,$d6a20000
+ dc.l $00003ffe,$0000b110,$688aebdc,$6f6a0000
+ dc.l $00003ffe,$0000b5bc,$c49059ec,$c4b00000
+ dc.l $00003ffe,$0000ba44,$bc7dd470,$782f0000
+ dc.l $00003ffe,$0000bea9,$4144fd04,$9aac0000
+ dc.l $00003ffe,$0000c2eb,$4abb6616,$28b60000
+ dc.l $00003ffe,$0000c70b,$d54ce602,$ee140000
+ dc.l $00003ffe,$0000cd00,$0549adec,$71590000
+ dc.l $00003ffe,$0000d484,$57d2d8ea,$4ea30000
+ dc.l $00003ffe,$0000db94,$8da712de,$ce3b0000
+ dc.l $00003ffe,$0000e238,$55f969e8,$096a0000
+ dc.l $00003ffe,$0000e877,$1129c435,$32590000
+ dc.l $00003ffe,$0000ee57,$c16e0d37,$9c0d0000
+ dc.l $00003ffe,$0000f3e1,$0211a87c,$37790000
+ dc.l $00003ffe,$0000f919,$039d758b,$8d410000
+ dc.l $00003ffe,$0000fe05,$8b8f6493,$5fb30000
+ dc.l $00003fff,$00008155,$fb497b68,$5d040000
+ dc.l $00003fff,$00008388,$9e3549d1,$08e10000
+ dc.l $00003fff,$0000859c,$fa76511d,$724b0000
+ dc.l $00003fff,$00008795,$2ecfff81,$31e70000
+ dc.l $00003fff,$00008973,$2fd19557,$641b0000
+ dc.l $00003fff,$00008b38,$cad10193,$2a350000
+ dc.l $00003fff,$00008ce7,$a8d8301e,$e6b50000
+ dc.l $00003fff,$00008f46,$a39e2eae,$52810000
+ dc.l $00003fff,$0000922d,$a7d79188,$84870000
+ dc.l $00003fff,$000094d1,$9fcbdedf,$52410000
+ dc.l $00003fff,$0000973a,$b94419d2,$a08b0000
+ dc.l $00003fff,$0000996f,$f00e08e1,$0b960000
+ dc.l $00003fff,$00009b77,$3f951232,$1da70000
+ dc.l $00003fff,$00009d55,$cc320f93,$56240000
+ dc.l $00003fff,$00009f10,$0575006c,$c5710000
+ dc.l $00003fff,$0000a0a9,$c290d97c,$c06c0000
+ dc.l $00003fff,$0000a226,$59ebebc0,$630a0000
+ dc.l $00003fff,$0000a388,$b4aff6ef,$0ec90000
+ dc.l $00003fff,$0000a4d3,$5f1061d2,$92c40000
+ dc.l $00003fff,$0000a608,$95dcfbe3,$187e0000
+ dc.l $00003fff,$0000a72a,$51dc7367,$beac0000
+ dc.l $00003fff,$0000a83a,$51530956,$168f0000
+ dc.l $00003fff,$0000a93a,$20077539,$546e0000
+ dc.l $00003fff,$0000aa9e,$7245023b,$26050000
+ dc.l $00003fff,$0000ac4c,$84ba6fe4,$d58f0000
+ dc.l $00003fff,$0000adce,$4a4a606b,$97120000
+ dc.l $00003fff,$0000af2a,$2dcd8d26,$3c9c0000
+ dc.l $00003fff,$0000b065,$6f81f222,$65c70000
+ dc.l $00003fff,$0000b184,$65150f71,$496a0000
+ dc.l $00003fff,$0000b28a,$aa156f9a,$da350000
+ dc.l $00003fff,$0000b37b,$44ff3766,$b8950000
+ dc.l $00003fff,$0000b458,$c3dce963,$04330000
+ dc.l $00003fff,$0000b525,$529d5622,$46bd0000
+ dc.l $00003fff,$0000b5e2,$cca95f9d,$88cc0000
+ dc.l $00003fff,$0000b692,$cada7aca,$1ada0000
+ dc.l $00003fff,$0000b736,$aea7a692,$58380000
+ dc.l $00003fff,$0000b7cf,$ab287e9f,$7b360000
+ dc.l $00003fff,$0000b85e,$cc66cb21,$98350000
+ dc.l $00003fff,$0000b8e4,$fd5a20a5,$93da0000
+ dc.l $00003fff,$0000b99f,$41f64aff,$9bb50000
+ dc.l $00003fff,$0000ba7f,$1e17842b,$be7b0000
+ dc.l $00003fff,$0000bb47,$12857637,$e17d0000
+ dc.l $00003fff,$0000bbfa,$be8a4788,$df6f0000
+ dc.l $00003fff,$0000bc9d,$0fad2b68,$9d790000
+ dc.l $00003fff,$0000bd30,$6a39471e,$cd860000
+ dc.l $00003fff,$0000bdb6,$c731856a,$f18a0000
+ dc.l $00003fff,$0000be31,$cac502e8,$0d700000
+ dc.l $00003fff,$0000bea2,$d55ce331,$94e20000
+ dc.l $00003fff,$0000bf0b,$10b7c031,$28f00000
+ dc.l $00003fff,$0000bf6b,$7a18dacb,$778d0000
+ dc.l $00003fff,$0000bfc4,$ea4663fa,$18f60000
+ dc.l $00003fff,$0000c018,$1bde8b89,$a4540000
+ dc.l $00003fff,$0000c065,$b066cfbf,$64390000
+ dc.l $00003fff,$0000c0ae,$345f5634,$0ae60000
+ dc.l $00003fff,$0000c0f2,$22919cb9,$e6a70000
+ dc.l $0000f210,$48002210,$32280004,$f22e6800
+ dc.l $ff840281,$7fffffff,$0c813ffb,$80006c04
+ dc.l $600000d0,$0c814002,$ffff6f04,$6000014c
+ dc.l $02aef800,$0000ff88,$00ae0400,$0000ff88
+ dc.l $2d7c0000,$0000ff8c,$f2000080,$f22e48a3
+ dc.l $ff84f22e,$4828ff84,$f23c44a2,$3f800000
+ dc.l $f2000420,$2f022401,$02810000,$78000282
+ dc.l $7fff0000,$04823ffb,$0000e282,$d282ee81
+ dc.l $43faf780,$d3c12d59,$ff902d59,$ff942d59
+ dc.l $ff98222e,$ff840281,$80000000,$83aeff90
+ dc.l $241ff227,$e004f200,$0080f200,$04a3f23a
+ dc.l $5500f6a0,$f2000522,$f2000523,$f20000a3
+ dc.l $f23a5522,$f696f23a,$54a3f698,$f20008a3
+ dc.l $f2000422,$f21fd020,$f2009000,$f22e4822
+ dc.l $ff9060ff,$000029d2,$0c813fff,$80006e00
+ dc.l $008a0c81,$3fd78000,$6d00006c,$f227e00c
+ dc.l $f2000023,$f2000080,$f20004a3,$f23a5500
+ dc.l $f65af23a,$5580f65c,$f2000523,$f20005a3
+ dc.l $f23a5522,$f656f23a,$55a2f658,$f2000523
+ dc.l $f2000ca3,$f23a5522,$f652f23a,$54a2f654
+ dc.l $f2000123,$f22e4823,$ff84f200,$08a2f200
+ dc.l $0423f21f,$d030f200,$9000f22e,$4822ff84
+ dc.l $60ff0000,$2954f200,$9000123c,$0003f22e
+ dc.l $4800ff84,$60ff0000,$29380c81,$40638000
+ dc.l $6e00008e,$f227e00c,$f23c4480,$bf800000
+ dc.l $f20000a0,$f2000400,$f2000023,$f22e6880
+ dc.l $ff84f200,$0080f200,$04a3f23a,$5580f5ec
+ dc.l $f23a5500,$f5eef200,$05a3f200,$0523f23a
+ dc.l $55a2f5e8,$f23a5522,$f5eaf200,$0ca3f200
+ dc.l $0123f23a,$54a2f5e4,$f22e4823,$ff84f200
+ dc.l $08a2f200,$0423f22e,$4822ff84,$f21fd030
+ dc.l $f2009000,$4a106a0c,$f23a4822,$f5d660ff
+ dc.l $000028c6,$f23a4822,$f5ba60ff,$000028b2
+ dc.l $4a106a16,$f23a4800,$f5baf200,$9000f23a
+ dc.l $4822f5c0,$60ff0000,$28a0f23a,$4800f594
+ dc.l $f2009000,$f23a4822,$f5ba60ff,$00002882
+ dc.l $60ff0000,$28baf210,$48002210,$32280004
+ dc.l $02817fff,$ffff0c81,$3fff8000,$6c4e0c81
+ dc.l $3fd78000,$6d00007c,$f23c4480,$3f800000
+ dc.l $f20000a8,$f227e004,$f23c4500,$3f800000
+ dc.l $f2000122,$f20008a3,$f21fd020,$f2000484
+ dc.l $f2000420,$f227e001,$41d761ff,$fffffd66
+ dc.l $dffc0000,$000c60ff,$0000280e,$f2000018
+ dc.l $f23c4438,$3f800000,$f2d20000,$265af23a
+ dc.l $4800b8ae,$22100281,$80000000,$00813f80
+ dc.l $00002f01,$f2009000,$f21f4423,$60ff0000
+ dc.l $27d8f200,$9000123c,$0003f210,$480060ff
+ dc.l $000027be,$60ff0000,$2806f210,$48002210
+ dc.l $32280004,$02817fff,$ffff0c81,$3fff8000
+ dc.l $6c44f23c,$44803f80,$0000f200,$00a2f200
+ dc.l $001af23c,$44223f80,$0000f200,$0420f200
+ dc.l $00042f00,$4280f227,$e00141d7,$61ffffff
+ dc.l $fcc4dffc,$0000000c,$f21f9000,$f2000022
+ dc.l $60ff0000,$276cf200,$0018f23c,$44383f80
+ dc.l $0000f2d2,$000025b0,$4a106a18,$f23a4800
+ dc.l $b7f0f200,$9000f23c,$44220080,$000060ff
+ dc.l $0000273e,$60ff0000,$2988f200,$9000f23a
+ dc.l $4800b7de,$60ff0000,$27283fdc,$000082e3
+ dc.l $08654361,$c4c60000,$00003fa5,$55555555
+ dc.l $4cc13fc5,$55555555,$4a543f81,$11111117
+ dc.l $43853fa5,$55555555,$4f5a3fc5,$55555555
+ dc.l $55550000,$00000000,$00003ec7,$1de3a577
+ dc.l $46823efa,$01a019d7,$cb683f2a,$01a01a01
+ dc.l $9df33f56,$c16c16c1,$70e23f81,$11111111
+ dc.l $11113fa5,$55555555,$55553ffc,$0000aaaa
+ dc.l $aaaaaaaa,$aaab0000,$000048b0,$00000000
+ dc.l $00003730,$00000000,$00003fff,$00008000
+ dc.l $00000000,$00000000,$00003fff,$00008164
+ dc.l $d1f3bc03,$07749f84,$1a9b3fff,$000082cd
+ dc.l $8698ac2b,$a1d89fc1,$d5b93fff,$0000843a
+ dc.l $28c3acde,$4048a072,$83693fff,$000085aa
+ dc.l $c367cc48,$7b141fc5,$c95c3fff,$0000871f
+ dc.l $61969e8d,$10101ee8,$5c9f3fff,$00008898
+ dc.l $0e8092da,$85289fa2,$07293fff,$00008a14
+ dc.l $d575496e,$fd9ca07b,$f9af3fff,$00008b95
+ dc.l $c1e3ea8b,$d6e8a002,$0dcf3fff,$00008d1a
+ dc.l $df5b7e5b,$a9e4205a,$63da3fff,$00008ea4
+ dc.l $398b45cd,$53c01eb7,$00513fff,$00009031
+ dc.l $dc431466,$b1dc1f6e,$b0293fff,$000091c3
+ dc.l $d373ab11,$c338a078,$14943fff,$0000935a
+ dc.l $2b2f13e6,$e92c9eb3,$19b03fff,$000094f4
+ dc.l $efa8fef7,$09602017,$457d3fff,$00009694
+ dc.l $2d372018,$5a001f11,$d5373fff,$00009837
+ dc.l $f0518db8,$a9709fb9,$52dd3fff,$000099e0
+ dc.l $459320b7,$fa641fe4,$30873fff,$00009b8d
+ dc.l $39b9d54e,$55381fa2,$a8183fff,$00009d3e
+ dc.l $d9a72cff,$b7501fde,$494d3fff,$00009ef5
+ dc.l $326091a1,$11ac2050,$48903fff,$0000a0b0
+ dc.l $510fb971,$4fc4a073,$691c3fff,$0000a270
+ dc.l $43030c49,$68181f9b,$7a053fff,$0000a435
+ dc.l $15ae09e6,$80a0a079,$71263fff,$0000a5fe
+ dc.l $d6a9b151,$38eca071,$a1403fff,$0000a7cd
+ dc.l $93b4e965,$3568204f,$62da3fff,$0000a9a1
+ dc.l $5ab4ea7c,$0ef81f28,$3c4a3fff,$0000ab7a
+ dc.l $39b5a93e,$d3389f9a,$7fdc3fff,$0000ad58
+ dc.l $3eea42a1,$4ac8a05b,$3fac3fff,$0000af3b
+ dc.l $78ad690a,$43741fdf,$26103fff,$0000b123
+ dc.l $f581d2ac,$25909f70,$5f903fff,$0000b311
+ dc.l $c412a911,$2488201f,$678a3fff,$0000b504
+ dc.l $f333f9de,$64841f32,$fb133fff,$0000b6fd
+ dc.l $91e328d1,$77902003,$8b303fff,$0000b8fb
+ dc.l $af4762fb,$9ee8200d,$c3cc3fff,$0000baff
+ dc.l $5ab2133e,$45fc9f8b,$2ae63fff,$0000bd08
+ dc.l $a39f580c,$36c0a02b,$bf703fff,$0000bf17
+ dc.l $99b67a73,$1084a00b,$f5183fff,$0000c12c
+ dc.l $4cca6670,$9458a041,$dd413fff,$0000c346
+ dc.l $ccda2497,$64089fdf,$137b3fff,$0000c567
+ dc.l $2a115506,$dadc201f,$15683fff,$0000c78d
+ dc.l $74c8abb9,$b15c1fc1,$3a2e3fff,$0000c9b9
+ dc.l $bd866e2f,$27a4a03f,$8f033fff,$0000cbec
+ dc.l $14fef272,$7c5c1ff4,$907d3fff,$0000ce24
+ dc.l $8c151f84,$80e49e6e,$53e43fff,$0000d063
+ dc.l $33daef2b,$25941fd6,$d45c3fff,$0000d2a8
+ dc.l $1d91f12a,$e45ca076,$edb93fff,$0000d4f3
+ dc.l $5aabcfed,$fa209fa6,$de213fff,$0000d744
+ dc.l $fccad69d,$6af41ee6,$9a2f3fff,$0000d99d
+ dc.l $15c278af,$d7b4207f,$439f3fff,$0000dbfb
+ dc.l $b797daf2,$3754201e,$c2073fff,$0000de60
+ dc.l $f4825e0e,$91249e8b,$e1753fff,$0000e0cc
+ dc.l $deec2a94,$e1102003,$2c4b3fff,$0000e33f
+ dc.l $8972be8a,$5a502004,$dff53fff,$0000e5b9
+ dc.l $06e77c83,$48a81e72,$f47a3fff,$0000e839
+ dc.l $6a503c4b,$dc681f72,$2f223fff,$0000eac0
+ dc.l $c6e7dd24,$3930a017,$e9453fff,$0000ed4f
+ dc.l $301ed994,$2b841f40,$1a5b3fff,$0000efe4
+ dc.l $b99bdcda,$f5cc9fb9,$a9e33fff,$0000f281
+ dc.l $773c59ff,$b1382074,$4c053fff,$0000f525
+ dc.l $7d152486,$cc2c1f77,$3a193fff,$0000f7d0
+ dc.l $df730ad1,$3bb81ffe,$90d53fff,$0000fa83
+ dc.l $b2db722a,$033ca041,$ed223fff,$0000fd3e
+ dc.l $0c0cf486,$c1741f85,$3f3a2210,$02817fff
+ dc.l $00000c81,$3fbe0000,$6c0660ff,$00000108
+ dc.l $32280004,$0c81400c,$b1676d06,$60ff0000
+ dc.l $010cf210,$4800f200,$0080f23c,$442342b8
+ dc.l $aa3bf227,$e00c2d7c,$00000000,$ff58f201
+ dc.l $600043fa,$fbb6f201,$40002d41,$ff540281
+ dc.l $0000003f,$e989d3c1,$222eff54,$ec810641
+ dc.l $3fff3d7a,$fb06ff54,$f2000100,$f23c4423
+ dc.l $bc317218,$f23a4923,$faf2f200,$0422f200
+ dc.l $0822f200,$0080f200,$04a3f23c,$45003ab6
+ dc.l $0b70f200,$0523f200,$0580f23c,$45a33c08
+ dc.l $8895f23a,$5522fad4,$f23a55a2,$fad6f200
+ dc.l $05233d41,$ff842d7c,$80000000,$ff8842ae
+ dc.l $ff8cf200,$05a3f23c,$45223f00,$0000f200
+ dc.l $01a3f200,$0523f200,$0c22f219,$4880f200
+ dc.l $0822f200,$0423f21f,$d030f211,$4422f200
+ dc.l $0422222e,$ff584a81,$6706f22e,$4823ff90
+ dc.l $f2009000,$123c0000,$f22e4823,$ff8460ff
+ dc.l $0000216e,$f210d080,$f2009000,$f23c4422
+ dc.l $3f800000,$60ff0000,$21680c81,$400cb27c
+ dc.l $6e66f210,$4800f200,$0080f23c,$442342b8
+ dc.l $aa3bf227,$e00c2d7c,$00000001,$ff58f201
+ dc.l $600043fa,$faa6f201,$40002d41,$ff540281
+ dc.l $0000003f,$e989d3c1,$222eff54,$ec812d41
+ dc.l $ff54e281,$93aeff54,$06413fff,$3d41ff90
+ dc.l $2d7c8000,$0000ff94,$42aeff98,$222eff54
+ dc.l $06413fff,$6000fed2,$4a106bff,$00001fbc
+ dc.l $60ff0000,$20ae2f10,$02978000,$00000097
+ dc.l $00800000,$f23c4400,$3f800000,$f2009000
+ dc.l $f21f4422,$60ff0000,$20c82210,$02817fff
+ dc.l $00000c81,$3ffd0000,$6c0660ff,$0000015e
+ dc.l $32280004,$0c814004,$c2156f06,$60ff0000
+ dc.l $026cf210,$4800f200,$0080f23c,$442342b8
+ dc.l $aa3bf227,$e00cf201,$600043fa,$f9eef201
+ dc.l $40002d41,$ff540281,$0000003f,$e989d3c1
+ dc.l $222eff54,$ec812d41,$ff54f200,$0100f23c
+ dc.l $4423bc31,$7218f23a,$4923f930,$f2000422
+ dc.l $f2000822,$06413fff,$f2000080,$f20004a3
+ dc.l $f23c4500,$3950097b,$f2000523,$f2000580
+ dc.l $f23c45a3,$3ab60b6a,$f23a5522,$f91ef23a
+ dc.l $55a2f920,$3d41ff84,$2d7c8000,$0000ff88
+ dc.l $42aeff8c,$f2000523,$222eff54,$4441f200
+ dc.l $05a30641,$3ffff23a,$5522f900,$f23c45a2
+ dc.l $3f000000,$f2000523,$00418000,$3d41ff90
+ dc.l $2d7c8000,$0000ff94,$42aeff98,$f2000ca3
+ dc.l $f2000123,$f2000422,$f2000822,$f21fd030
+ dc.l $f2114823,$222eff54,$0c810000,$003f6f1a
+ dc.l $f2294480,$000cf22e,$48a2ff90,$f2000422
+ dc.l $f2114822,$60ff0000,$00340c81,$fffffffd
+ dc.l $6c16f229,$4422000c,$f2114822,$f22e4822
+ dc.l $ff9060ff,$00000016,$f2194880,$f2114422
+ dc.l $f22e48a2,$ff90f200,$0422f200,$9000f22e
+ dc.l $4823ff84,$60ff0000,$1f500c81,$3fbe0000
+ dc.l $6c6c0c81,$00330000,$6d2c2d7c,$80010000
+ dc.l $ff842d7c,$80000000,$ff8842ae,$ff8cf210
+ dc.l $4800f200,$9000123c,$0002f22e,$4822ff84
+ dc.l $60ff0000,$1f0cf210,$4800f23a,$5423f86c
+ dc.l $2d7c8001,$0000ff84,$2d7c8000,$0000ff88
+ dc.l $42aeff8c,$f22e4822,$ff84f200,$9000123c
+ dc.l $0000f23a,$5423f84c,$60ff0000,$1ed4f210
+ dc.l $4800f200,$0023f227,$e00cf23c,$44802f30
+ dc.l $caa8f200,$00a3f23c,$4500310f,$8290f23c
+ dc.l $44a232d7,$3220f200,$0123f200,$00a3f23c
+ dc.l $45223493,$f281f23a,$54a2f7c0,$f2000123
+ dc.l $f20000a3,$f23a5522,$f7baf23a,$54a2f7bc
+ dc.l $f2000123,$f20000a3,$f23a5522,$f7b6f23a
+ dc.l $54a2f7b8,$f2000123,$f20000a3,$f23a5522
+ dc.l $f7b2f23a,$48a2f7b4,$f2000123,$f20000a3
+ dc.l $f2000123,$f21048a3,$f23c4423,$3f000000
+ dc.l $f20008a2,$f21fd030,$f2000422,$f2009000
+ dc.l $f2104822,$60ff0000,$1e302210,$0c810000
+ dc.l $00006e00,$fbacf23c,$4400bf80,$0000f200
+ dc.l $9000f23c,$44220080,$000060ff,$00001e1a
+ dc.l $60ff0000,$1e4a3028,$00000880,$000f0440
+ dc.l $3ffff200,$50006d02,$4e751d7c,$0008ff64
+ dc.l $4e7561ff,$00002342,$44400440,$3ffff200
+ dc.l $50001d7c,$0008ff64,$4e753028,$00000040
+ dc.l $7fff0880,$000e2d68,$0004ff88,$2d680008
+ dc.l $ff8c3d40,$ff84f22e,$4800ff84,$6b024e75
+ dc.l $1d7c0008,$ff644e75,$61ff0000,$22fc60ca
+ dc.l $7ffb0000,$80000000,$00000000,$00000000
+ dc.l $f2104800,$22103228,$00040281,$7fffffff
+ dc.l $0c81400c,$b1676e42,$f2000018,$2f004280
+ dc.l $f227e001,$41d761ff,$fffffad2,$dffc0000
+ dc.l $000cf23c,$44233f00,$0000201f,$f23c4480
+ dc.l $3e800000,$f20000a0,$f2009000,$123c0002
+ dc.l $f2000422,$60ff0000,$1d280c81,$400cb2b3
+ dc.l $6e3cf200,$0018f23a,$5428adb6,$f23a5428
+ dc.l $adb82f00,$4280f227,$e00141d7,$61ffffff
+ dc.l $fa7cdffc,$0000000c,$201ff200,$9000123c
+ dc.l $0000f23a,$4823ff5a,$60ff0000,$1ce460ff
+ dc.l $00001cb0,$f23c4400,$3f800000,$f2009000
+ dc.l $f23c4422,$00800000,$60ff0000,$1cd4f210
+ dc.l $48002210,$32280004,$22410281,$7fffffff
+ dc.l $0c81400c,$b1676e62,$f2000018,$48e78040
+ dc.l $f227e001,$41d74280,$61ffffff,$fbe0dffc
+ dc.l $0000000c,$f23c9000,$00000000,$4cdf0201
+ dc.l $f2000080,$f23c44a2,$3f800000,$f2276800
+ dc.l $f2000420,$22090281,$80000000,$00813f00
+ dc.l $0000f21f,$48222f01,$f2009000,$123c0000
+ dc.l $f21f4423,$60ff0000,$1c480c81,$400cb2b3
+ dc.l $6eff0000,$1bc2f200,$0018f23a,$5428acd2
+ dc.l $2f3c0000,$00002f3c,$80000000,$22090281
+ dc.l $80000000,$00817ffb,$00002f01,$f23a5428
+ dc.l $acb82f00,$4280f227,$e00141d7,$61ffffff
+ dc.l $f97cdffc,$0000000c,$201ff200,$9000123c
+ dc.l $0000f21f,$482360ff,$00001be6,$60ff0000
+ dc.l $1c2ef210,$4800f22e,$6800ff84,$22103228
+ dc.l $00042d41,$ff840281,$7fffffff,$0c813fd7
+ dc.l $80006d00,$00740c81,$3fffddce,$6e00006a
+ dc.l $222eff84,$2d41ff5c,$02817fff,$00000681
+ dc.l $00010000,$2d41ff84,$02ae8000,$0000ff5c
+ dc.l $f22e4800,$ff842f00,$4280f227,$e00141d7
+ dc.l $61ffffff,$fac8dffc,$0000000c,$201ff200
+ dc.l $0080f23c,$44a24000,$0000222e,$ff5cf22e
+ dc.l $6880ff84,$b3aeff84,$f2009000,$f22e4820
+ dc.l $ff8460ff,$00001b52,$0c813fff,$80006d00
+ dc.l $00880c81,$40048aa1,$6e000092,$222eff84
+ dc.l $2d41ff5c,$02817fff,$00000681,$00010000
+ dc.l $2d41ff84,$02ae8000,$0000ff5c,$222eff5c
+ dc.l $f22e4800,$ff842f00,$4280f227,$e00141d7
+ dc.l $61ffffff,$f878dffc,$0000000c,$201f222e
+ dc.l $ff5cf23c,$44223f80,$00000a81,$c0000000
+ dc.l $f2014480,$f20000a0,$222eff5c,$00813f80
+ dc.l $0000f201,$4400f200,$9000123c,$0002f200
+ dc.l $042260ff,$00001ac2,$f2009000,$123c0003
+ dc.l $f22e4800,$ff8460ff,$00001aa6,$222eff84
+ dc.l $02818000,$00000081,$3f800000,$f2014400
+ dc.l $02818000,$00000a81,$80800000,$f2009000
+ dc.l $f2014422,$60ff0000,$1a8060ff,$00001ac0
+ dc.l $3ffe0000,$b17217f7,$d1cf79ac,$00000000
+ dc.l $3f800000,$00000000,$7f800000,$bf800000
+ dc.l $3fc2499a,$b5e4040b,$bfc555b5,$848cb7db
+ dc.l $3fc99999,$987d8730,$bfcfffff,$ff6f7e97
+ dc.l $3fd55555,$555555a4,$bfe00000,$00000008
+ dc.l $3f175496,$add7dad6,$3f3c71c2,$fe80c7e0
+ dc.l $3f624924,$928bccff,$3f899999,$999995ec
+ dc.l $3fb55555,$55555555,$40000000,$00000000
+ dc.l $3f990000,$80000000,$00000000,$00000000
+ dc.l $3ffe0000,$fe03f80f,$e03f80fe,$00000000
+ dc.l $3ff70000,$ff015358,$833c47e2,$00000000
+ dc.l $3ffe0000,$fa232cf2,$52138ac0,$00000000
+ dc.l $3ff90000,$bdc8d83e,$ad88d549,$00000000
+ dc.l $3ffe0000,$f6603d98,$0f6603da,$00000000
+ dc.l $3ffa0000,$9cf43dcf,$f5eafd48,$00000000
+ dc.l $3ffe0000,$f2b9d648,$0f2b9d65,$00000000
+ dc.l $3ffa0000,$da16eb88,$cb8df614,$00000000
+ dc.l $3ffe0000,$ef2eb71f,$c4345238,$00000000
+ dc.l $3ffb0000,$8b29b775,$1bd70743,$00000000
+ dc.l $3ffe0000,$ebbdb2a5,$c1619c8c,$00000000
+ dc.l $3ffb0000,$a8d839f8,$30c1fb49,$00000000
+ dc.l $3ffe0000,$e865ac7b,$7603a197,$00000000
+ dc.l $3ffb0000,$c61a2eb1,$8cd907ad,$00000000
+ dc.l $3ffe0000,$e525982a,$f70c880e,$00000000
+ dc.l $3ffb0000,$e2f2a47a,$de3a18af,$00000000
+ dc.l $3ffe0000,$e1fc780e,$1fc780e2,$00000000
+ dc.l $3ffb0000,$ff64898e,$df55d551,$00000000
+ dc.l $3ffe0000,$dee95c4c,$a037ba57,$00000000
+ dc.l $3ffc0000,$8db956a9,$7b3d0148,$00000000
+ dc.l $3ffe0000,$dbeb61ee,$d19c5958,$00000000
+ dc.l $3ffc0000,$9b8fe100,$f47ba1de,$00000000
+ dc.l $3ffe0000,$d901b203,$6406c80e,$00000000
+ dc.l $3ffc0000,$a9372f1d,$0da1bd17,$00000000
+ dc.l $3ffe0000,$d62b80d6,$2b80d62c,$00000000
+ dc.l $3ffc0000,$b6b07f38,$ce90e46b,$00000000
+ dc.l $3ffe0000,$d3680d36,$80d3680d,$00000000
+ dc.l $3ffc0000,$c3fd0329,$06488481,$00000000
+ dc.l $3ffe0000,$d0b69fcb,$d2580d0b,$00000000
+ dc.l $3ffc0000,$d11de0ff,$15ab18ca,$00000000
+ dc.l $3ffe0000,$ce168a77,$25080ce1,$00000000
+ dc.l $3ffc0000,$de1433a1,$6c66b150,$00000000
+ dc.l $3ffe0000,$cb8727c0,$65c393e0,$00000000
+ dc.l $3ffc0000,$eae10b5a,$7ddc8add,$00000000
+ dc.l $3ffe0000,$c907da4e,$871146ad,$00000000
+ dc.l $3ffc0000,$f7856e5e,$e2c9b291,$00000000
+ dc.l $3ffe0000,$c6980c69,$80c6980c,$00000000
+ dc.l $3ffd0000,$82012ca5,$a68206d7,$00000000
+ dc.l $3ffe0000,$c4372f85,$5d824ca6,$00000000
+ dc.l $3ffd0000,$882c5fcd,$7256a8c5,$00000000
+ dc.l $3ffe0000,$c1e4bbd5,$95f6e947,$00000000
+ dc.l $3ffd0000,$8e44c60b,$4ccfd7de,$00000000
+ dc.l $3ffe0000,$bfa02fe8,$0bfa02ff,$00000000
+ dc.l $3ffd0000,$944ad09e,$f4351af6,$00000000
+ dc.l $3ffe0000,$bd691047,$07661aa3,$00000000
+ dc.l $3ffd0000,$9a3eecd4,$c3eaa6b2,$00000000
+ dc.l $3ffe0000,$bb3ee721,$a54d880c,$00000000
+ dc.l $3ffd0000,$a0218434,$353f1de8,$00000000
+ dc.l $3ffe0000,$b92143fa,$36f5e02e,$00000000
+ dc.l $3ffd0000,$a5f2fcab,$bbc506da,$00000000
+ dc.l $3ffe0000,$b70fbb5a,$19be3659,$00000000
+ dc.l $3ffd0000,$abb3b8ba,$2ad362a5,$00000000
+ dc.l $3ffe0000,$b509e68a,$9b94821f,$00000000
+ dc.l $3ffd0000,$b1641795,$ce3ca97b,$00000000
+ dc.l $3ffe0000,$b30f6352,$8917c80b,$00000000
+ dc.l $3ffd0000,$b7047551,$5d0f1c61,$00000000
+ dc.l $3ffe0000,$b11fd3b8,$0b11fd3c,$00000000
+ dc.l $3ffd0000,$bc952afe,$ea3d13e1,$00000000
+ dc.l $3ffe0000,$af3addc6,$80af3ade,$00000000
+ dc.l $3ffd0000,$c2168ed0,$f458ba4a,$00000000
+ dc.l $3ffe0000,$ad602b58,$0ad602b6,$00000000
+ dc.l $3ffd0000,$c788f439,$b3163bf1,$00000000
+ dc.l $3ffe0000,$ab8f69e2,$8359cd11,$00000000
+ dc.l $3ffd0000,$ccecac08,$bf04565d,$00000000
+ dc.l $3ffe0000,$a9c84a47,$a07f5638,$00000000
+ dc.l $3ffd0000,$d2420487,$2dd85160,$00000000
+ dc.l $3ffe0000,$a80a80a8,$0a80a80b,$00000000
+ dc.l $3ffd0000,$d7894992,$3bc3588a,$00000000
+ dc.l $3ffe0000,$a655c439,$2d7b73a8,$00000000
+ dc.l $3ffd0000,$dcc2c4b4,$9887dacc,$00000000
+ dc.l $3ffe0000,$a4a9cf1d,$96833751,$00000000
+ dc.l $3ffd0000,$e1eebd3e,$6d6a6b9e,$00000000
+ dc.l $3ffe0000,$a3065e3f,$ae7cd0e0,$00000000
+ dc.l $3ffd0000,$e70d785c,$2f9f5bdc,$00000000
+ dc.l $3ffe0000,$a16b312e,$a8fc377d,$00000000
+ dc.l $3ffd0000,$ec1f392c,$5179f283,$00000000
+ dc.l $3ffe0000,$9fd809fd,$809fd80a,$00000000
+ dc.l $3ffd0000,$f12440d3,$e36130e6,$00000000
+ dc.l $3ffe0000,$9e4cad23,$dd5f3a20,$00000000
+ dc.l $3ffd0000,$f61cce92,$346600bb,$00000000
+ dc.l $3ffe0000,$9cc8e160,$c3fb19b9,$00000000
+ dc.l $3ffd0000,$fb091fd3,$8145630a,$00000000
+ dc.l $3ffe0000,$9b4c6f9e,$f03a3caa,$00000000
+ dc.l $3ffd0000,$ffe97042,$bfa4c2ad,$00000000
+ dc.l $3ffe0000,$99d722da,$bde58f06,$00000000
+ dc.l $3ffe0000,$825efced,$49369330,$00000000
+ dc.l $3ffe0000,$9868c809,$868c8098,$00000000
+ dc.l $3ffe0000,$84c37a7a,$b9a905c9,$00000000
+ dc.l $3ffe0000,$97012e02,$5c04b809,$00000000
+ dc.l $3ffe0000,$87224c2e,$8e645fb7,$00000000
+ dc.l $3ffe0000,$95a02568,$095a0257,$00000000
+ dc.l $3ffe0000,$897b8cac,$9f7de298,$00000000
+ dc.l $3ffe0000,$94458094,$45809446,$00000000
+ dc.l $3ffe0000,$8bcf55de,$c4cd05fe,$00000000
+ dc.l $3ffe0000,$92f11384,$0497889c,$00000000
+ dc.l $3ffe0000,$8e1dc0fb,$89e125e5,$00000000
+ dc.l $3ffe0000,$91a2b3c4,$d5e6f809,$00000000
+ dc.l $3ffe0000,$9066e68c,$955b6c9b,$00000000
+ dc.l $3ffe0000,$905a3863,$3e06c43b,$00000000
+ dc.l $3ffe0000,$92aade74,$c7be59e0,$00000000
+ dc.l $3ffe0000,$8f1779d9,$fdc3a219,$00000000
+ dc.l $3ffe0000,$94e9bff6,$15845643,$00000000
+ dc.l $3ffe0000,$8dda5202,$37694809,$00000000
+ dc.l $3ffe0000,$9723a1b7,$20134203,$00000000
+ dc.l $3ffe0000,$8ca29c04,$6514e023,$00000000
+ dc.l $3ffe0000,$995899c8,$90eb8990,$00000000
+ dc.l $3ffe0000,$8b70344a,$139bc75a,$00000000
+ dc.l $3ffe0000,$9b88bdaa,$3a3dae2f,$00000000
+ dc.l $3ffe0000,$8a42f870,$5669db46,$00000000
+ dc.l $3ffe0000,$9db4224f,$ffe1157c,$00000000
+ dc.l $3ffe0000,$891ac73a,$e9819b50,$00000000
+ dc.l $3ffe0000,$9fdadc26,$8b7a12da,$00000000
+ dc.l $3ffe0000,$87f78087,$f78087f8,$00000000
+ dc.l $3ffe0000,$a1fcff17,$ce733bd4,$00000000
+ dc.l $3ffe0000,$86d90544,$7a34acc6,$00000000
+ dc.l $3ffe0000,$a41a9e8f,$5446fb9f,$00000000
+ dc.l $3ffe0000,$85bf3761,$2cee3c9b,$00000000
+ dc.l $3ffe0000,$a633cd7e,$6771cd8b,$00000000
+ dc.l $3ffe0000,$84a9f9c8,$084a9f9d,$00000000
+ dc.l $3ffe0000,$a8489e60,$0b435a5e,$00000000
+ dc.l $3ffe0000,$83993052,$3fbe3368,$00000000
+ dc.l $3ffe0000,$aa59233c,$cca4bd49,$00000000
+ dc.l $3ffe0000,$828cbfbe,$b9a020a3,$00000000
+ dc.l $3ffe0000,$ac656dae,$6bcc4985,$00000000
+ dc.l $3ffe0000,$81848da8,$faf0d277,$00000000
+ dc.l $3ffe0000,$ae6d8ee3,$60bb2468,$00000000
+ dc.l $3ffe0000,$80808080,$80808081,$00000000
+ dc.l $3ffe0000,$b07197a2,$3c46c654,$00000000
+ dc.l $f2104800,$2d7c0000,$0000ff54,$22103228
+ dc.l $00042d50,$ff842d68,$0004ff88,$2d680008
+ dc.l $ff8c0c81,$00000000,$6d000182,$0c813ffe
+ dc.l $f07d6d0a,$0c813fff,$88416f00,$00e2e081
+ dc.l $e0810481,$00003fff,$d2aeff54,$41faf7b2
+ dc.l $f2014080,$2d7c3fff,$0000ff84,$2d6eff88
+ dc.l $ff9402ae,$fe000000,$ff9400ae,$01000000
+ dc.l $ff94222e,$ff940281,$7e000000,$e081e081
+ dc.l $e881d1c1,$f22e4800,$ff842d7c,$3fff0000
+ dc.l $ff9042ae,$ff98f22e,$4828ff90,$f227e00c
+ dc.l $f2104823,$f23a48a3,$f6c8f200,$0100f200
+ dc.l $0923f22e,$6880ff84,$f2000980,$f2000880
+ dc.l $f23a54a3,$f6ccf23a,$5523f6ce,$f23a54a2
+ dc.l $f6d0f23a,$5522f6d2,$f2000ca3,$f2000d23
+ dc.l $f23a54a2,$f6ccf23a,$5522f6ce,$f2000ca3
+ dc.l $d1fc0000,$0010f200,$0d23f200,$00a3f200
+ dc.l $0822f210,$48a2f21f,$d030f200,$0422f200
+ dc.l $9000f22e,$4822ff84,$60ff0000,$10ccf23c
+ dc.l $58380001,$f2c10000,$1318f200,$0080f23a
+ dc.l $44a8f64e,$f23a4422,$f648f200,$04a2f200
+ dc.l $00a0f227,$e00cf200,$0400f200,$0023f22e
+ dc.l $6880ff84,$f2000080,$f20004a3,$f23a5580
+ dc.l $f660f23a,$5500f662,$f20005a3,$f2000523
+ dc.l $f23a55a2,$f65cf23a,$5522f65e,$f2000ca3
+ dc.l $f2000123,$f23a54a2,$f658f22e,$4823ff84
+ dc.l $f20008a2,$f21fd030,$f2000423,$f2009000
+ dc.l $f22e4822,$ff8460ff,$0000103e,$60ff0000
+ dc.l $0e962d7c,$ffffff9c,$ff5448e7,$3f002610
+ dc.l $28280004,$2a280008,$42824a84,$66342805
+ dc.l $42857420,$4286edc4,$6000edac,$d4862d43
+ dc.l $ff842d44,$ff882d45,$ff8c4482,$2d42ff54
+ dc.l $f22e4800,$ff844cdf,$00fc41ee,$ff846000
+ dc.l $fe0c4286,$edc46000,$2406edac,$2e05edad
+ dc.l $44860686,$00000020,$ecaf8887,$2d43ff84
+ dc.l $2d44ff88,$2d45ff8c,$44822d42,$ff54f22e
+ dc.l $4800ff84,$4cdf00fc,$41eeff84,$6000fdce
+ dc.l $f2104800,$f2000018,$f23a4838,$f5a4f292
+ dc.l $0014f200,$9000123c,$0003f210,$480060ff
+ dc.l $00000f7e,$f2104800,$2d7c0000,$0000ff54
+ dc.l $f2000080,$f23a4422,$f508f22e,$6800ff84
+ dc.l $3d6eff88,$ff86222e,$ff840c81,$00000000
+ dc.l $6f0000da,$0c813ffe,$80006d00,$fda20c81
+ dc.l $3fffc000,$6e00fd98,$0c813ffe,$f07d6d00
+ dc.l $001a0c81,$3fff8841,$6e000010,$f20004a2
+ dc.l $f23a4422,$f4bc6000,$fe762d6e,$ff88ff94
+ dc.l $02aefe00,$0000ff94,$00ae0100,$0000ff94
+ dc.l $0c813fff,$80006c44,$f23a4400,$f4fc2d7c
+ dc.l $3fff0000,$ff9042ae,$ff98f22e,$4828ff90
+ dc.l $222eff94,$02817e00,$0000e081,$e081e881
+ dc.l $f20004a2,$f227e00c,$f2000422,$41faf4e2
+ dc.l $d1c1f23a,$4480f466,$6000fd76,$f23a4400
+ dc.l $f4502d7c,$3fff0000,$ff9042ae,$ff98f22e
+ dc.l $4828ff90,$222eff94,$02817e00,$0000e081
+ dc.l $e081e881,$f2000422,$f227e00c,$41faf4a2
+ dc.l $d1c1f23a,$4480f41e,$6000fd36,$0c810000
+ dc.l $00006d10,$f23a4400,$f414f200,$900060ff
+ dc.l $00000c4e,$f23a4400,$f3fcf200,$900060ff
+ dc.l $00000cb4,$60ff0000,$0e962210,$32280004
+ dc.l $02817fff,$ffff0c81,$3fff8000,$6c56f210
+ dc.l $4818f200,$0080f200,$049af200,$0022f23c
+ dc.l $44a23f80,$0000f200,$04202210,$02818000
+ dc.l $00000081,$3f000000,$2f012f00,$4280f227
+ dc.l $e00141d7,$61ffffff,$fe5adffc,$0000000c
+ dc.l $201ff200,$9000123c,$0000f21f,$442360ff
+ dc.l $00000dde,$f2104818,$f23c4438,$3f800000
+ dc.l $f2d20000,$0c3260ff,$00000bb6,$60ff0000
+ dc.l $0e0e3ffd,$0000de5b,$d8a93728,$71950000
+ dc.l $00003fff,$0000b8aa,$3b295c17,$f0bc0000
+ dc.l $0000f23c,$58000001,$f2104838,$f2c10000
+ dc.l $0ff02210,$6d000090,$2f004280,$61ffffff
+ dc.l $fba2f21f,$9000f23a,$4823ffb8,$60ff0000
+ dc.l $0d782210,$6d000070,$2f004280,$61ffffff
+ dc.l $fd34f21f,$9000f23a,$4823ff98,$60ff0000
+ dc.l $0d682210,$6d000050,$22280008,$662e2228
+ dc.l $00040281,$7fffffff,$66223210,$02810000
+ dc.l $7fff0481,$00003fff,$67ff0000,$0f84f200
+ dc.l $9000f201,$400060ff,$00000d1e,$2f004280
+ dc.l $61ffffff,$fb2ef21f,$9000f23a,$4823ff54
+ dc.l $60ff0000,$0d0460ff,$00000b5c,$22106d00
+ dc.l $fff62f00,$428061ff,$fffffcba,$f21f9000
+ dc.l $f23a4823,$ff2e60ff,$00000cee,$406a934f
+ dc.l $0979a371,$3f734413,$509f8000,$bfcd0000
+ dc.l $c0219dc1,$da994fd2,$00000000,$40000000
+ dc.l $935d8ddd,$aaa8ac17,$00000000,$3ffe0000
+ dc.l $b17217f7,$d1cf79ac,$00000000,$3f56c16d
+ dc.l $6f7bd0b2,$3f811112,$302c712c,$3fa55555
+ dc.l $55554cc1,$3fc55555,$55554a54,$3fe00000
+ dc.l $00000000,$00000000,$00000000,$3fff0000
+ dc.l $80000000,$00000000,$3f738000,$3fff0000
+ dc.l $8164d1f3,$bc030773,$3fbef7ca,$3fff0000
+ dc.l $82cd8698,$ac2ba1d7,$3fbdf8a9,$3fff0000
+ dc.l $843a28c3,$acde4046,$3fbcd7c9,$3fff0000
+ dc.l $85aac367,$cc487b15,$bfbde8da,$3fff0000
+ dc.l $871f6196,$9e8d1010,$3fbde85c,$3fff0000
+ dc.l $88980e80,$92da8527,$3fbebbf1,$3fff0000
+ dc.l $8a14d575,$496efd9a,$3fbb80ca,$3fff0000
+ dc.l $8b95c1e3,$ea8bd6e7,$bfba8373,$3fff0000
+ dc.l $8d1adf5b,$7e5ba9e6,$bfbe9670,$3fff0000
+ dc.l $8ea4398b,$45cd53c0,$3fbdb700,$3fff0000
+ dc.l $9031dc43,$1466b1dc,$3fbeeeb0,$3fff0000
+ dc.l $91c3d373,$ab11c336,$3fbbfd6d,$3fff0000
+ dc.l $935a2b2f,$13e6e92c,$bfbdb319,$3fff0000
+ dc.l $94f4efa8,$fef70961,$3fbdba2b,$3fff0000
+ dc.l $96942d37,$20185a00,$3fbe91d5,$3fff0000
+ dc.l $9837f051,$8db8a96f,$3fbe8d5a,$3fff0000
+ dc.l $99e04593,$20b7fa65,$bfbcde7b,$3fff0000
+ dc.l $9b8d39b9,$d54e5539,$bfbebaaf,$3fff0000
+ dc.l $9d3ed9a7,$2cffb751,$bfbd86da,$3fff0000
+ dc.l $9ef53260,$91a111ae,$bfbebedd,$3fff0000
+ dc.l $a0b0510f,$b9714fc2,$3fbcc96e,$3fff0000
+ dc.l $a2704303,$0c496819,$bfbec90b,$3fff0000
+ dc.l $a43515ae,$09e6809e,$3fbbd1db,$3fff0000
+ dc.l $a5fed6a9,$b15138ea,$3fbce5eb,$3fff0000
+ dc.l $a7cd93b4,$e965356a,$bfbec274,$3fff0000
+ dc.l $a9a15ab4,$ea7c0ef8,$3fbea83c,$3fff0000
+ dc.l $ab7a39b5,$a93ed337,$3fbecb00,$3fff0000
+ dc.l $ad583eea,$42a14ac6,$3fbe9301,$3fff0000
+ dc.l $af3b78ad,$690a4375,$bfbd8367,$3fff0000
+ dc.l $b123f581,$d2ac2590,$bfbef05f,$3fff0000
+ dc.l $b311c412,$a9112489,$3fbdfb3c,$3fff0000
+ dc.l $b504f333,$f9de6484,$3fbeb2fb,$3fff0000
+ dc.l $b6fd91e3,$28d17791,$3fbae2cb,$3fff0000
+ dc.l $b8fbaf47,$62fb9ee9,$3fbcdc3c,$3fff0000
+ dc.l $baff5ab2,$133e45fb,$3fbee9aa,$3fff0000
+ dc.l $bd08a39f,$580c36bf,$bfbeaefd,$3fff0000
+ dc.l $bf1799b6,$7a731083,$bfbcbf51,$3fff0000
+ dc.l $c12c4cca,$66709456,$3fbef88a,$3fff0000
+ dc.l $c346ccda,$24976407,$3fbd83b2,$3fff0000
+ dc.l $c5672a11,$5506dadd,$3fbdf8ab,$3fff0000
+ dc.l $c78d74c8,$abb9b15d,$bfbdfb17,$3fff0000
+ dc.l $c9b9bd86,$6e2f27a3,$bfbefe3c,$3fff0000
+ dc.l $cbec14fe,$f2727c5d,$bfbbb6f8,$3fff0000
+ dc.l $ce248c15,$1f8480e4,$bfbcee53,$3fff0000
+ dc.l $d06333da,$ef2b2595,$bfbda4ae,$3fff0000
+ dc.l $d2a81d91,$f12ae45a,$3fbc9124,$3fff0000
+ dc.l $d4f35aab,$cfedfa1f,$3fbeb243,$3fff0000
+ dc.l $d744fcca,$d69d6af4,$3fbde69a,$3fff0000
+ dc.l $d99d15c2,$78afd7b6,$bfb8bc61,$3fff0000
+ dc.l $dbfbb797,$daf23755,$3fbdf610,$3fff0000
+ dc.l $de60f482,$5e0e9124,$bfbd8be1,$3fff0000
+ dc.l $e0ccdeec,$2a94e111,$3fbacb12,$3fff0000
+ dc.l $e33f8972,$be8a5a51,$3fbb9bfe,$3fff0000
+ dc.l $e5b906e7,$7c8348a8,$3fbcf2f4,$3fff0000
+ dc.l $e8396a50,$3c4bdc68,$3fbef22f,$3fff0000
+ dc.l $eac0c6e7,$dd24392f,$bfbdbf4a,$3fff0000
+ dc.l $ed4f301e,$d9942b84,$3fbec01a,$3fff0000
+ dc.l $efe4b99b,$dcdaf5cb,$3fbe8cac,$3fff0000
+ dc.l $f281773c,$59ffb13a,$bfbcbb3f,$3fff0000
+ dc.l $f5257d15,$2486cc2c,$3fbef73a,$3fff0000
+ dc.l $f7d0df73,$0ad13bb9,$bfb8b795,$3fff0000
+ dc.l $fa83b2db,$722a033a,$3fbef84b,$3fff0000
+ dc.l $fd3e0c0c,$f486c175,$bfbef581,$f210d080
+ dc.l $22103228,$0004f22e,$6800ff84,$02817fff
+ dc.l $ffff0c81,$3fb98000,$6c046000,$00880c81
+ dc.l $400d80c0,$6f046000,$007cf200,$0080f23c
+ dc.l $44a34280,$0000f22e,$6080ff54,$2f0243fa
+ dc.l $fbbcf22e,$4080ff54,$222eff54,$24010281
+ dc.l $0000003f,$e981d3c1,$ec822202,$e2819481
+ dc.l $06820000,$3ffff227,$e00cf23c,$44a33c80
+ dc.l $00002d59,$ff842d59,$ff882d59,$ff8c3d59
+ dc.l $ff90f200,$04283d59,$ff94426e,$ff9642ae
+ dc.l $ff98d36e,$ff84f23a,$4823fb22,$d36eff90
+ dc.l $60000100,$0c813fff,$80006e12,$f2009000
+ dc.l $f23c4422,$3f800000,$60ff0000,$07b4222e
+ dc.l $ff840c81,$00000000,$6d0660ff,$00000764
+ dc.l $60ff0000,$0666f200,$9000f23c,$44003f80
+ dc.l $00002210,$00810080,$0001f201,$442260ff
+ dc.l $0000077e,$f210d080,$22103228,$0004f22e
+ dc.l $6800ff84,$02817fff,$ffff0c81,$3fb98000
+ dc.l $6c046000,$ff900c81,$400b9b07,$6f046000
+ dc.l $ff84f200,$0080f23a,$54a3fa62,$f22e6080
+ dc.l $ff542f02,$43fafac6,$f22e4080,$ff54222e
+ dc.l $ff542401,$02810000,$003fe981,$d3c1ec82
+ dc.l $2202e281,$94810682,$00003fff,$f227e00c
+ dc.l $f2000500,$f23a54a3,$fa2c2d59,$ff84f23a
+ dc.l $4923fa2a,$2d59ff88,$2d59ff8c,$f2000428
+ dc.l $3d59ff90,$f2000828,$3d59ff94,$426eff96
+ dc.l $42aeff98,$f23a4823,$fa14d36e,$ff84d36e
+ dc.l $ff90f200,$0080f200,$04a3f23a,$5500fa1e
+ dc.l $f23a5580,$fa20f200,$0523f200,$05a3f23a
+ dc.l $5522fa1a,$f23a55a2,$fa1cf200,$0523f200
+ dc.l $05a3f23a,$5522fa16,$f20001a3,$f2000523
+ dc.l $f2000c22,$f2000822,$f21fd030,$f22e4823
+ dc.l $ff84f22e,$4822ff90,$f22e4822,$ff84f200
+ dc.l $90003d42,$ff84241f,$2d7c8000,$0000ff88
+ dc.l $42aeff8c,$123c0000,$f22e4823,$ff8460ff
+ dc.l $0000063e,$f2009000,$f23c4400,$3f800000
+ dc.l $22100081,$00800001,$f2014422,$60ff0000
+ dc.l $06302f00,$32290000,$5beeff54,$02810000
+ dc.l $7fff3028,$00000240,$7fff0c40,$3fff6d00
+ dc.l $00c00c40,$400c6e00,$00a4f228,$48030000
+ dc.l $f2006000,$f23c8800,$00000000,$4a290004
+ dc.l $6b5e2f00,$3d690000,$ff842d69,$0004ff88
+ dc.l $2d690008,$ff8c41ee,$ff8461ff,$00000b2a
+ dc.l $4480d09f,$f22ed080,$ff840c40,$c0016c36
+ dc.l $f21f9000,$223c8000,$00000480,$ffffc001
+ dc.l $44800c00,$00206c0a,$e0a942a7,$2f0142a7
+ dc.l $60280400,$0020e0a9,$2f0142a7,$42a7601a
+ dc.l $f229d080,$0000f21f,$90000640,$3fff4840
+ dc.l $42a72f3c,$80000000,$2f00f200,$b000123c
+ dc.l $0000f21f,$482360ff,$0000054c,$201fc149
+ dc.l $4a290000,$6bff0000,$041c60ff,$00000464
+ dc.l $4a290004,$6a16201f,$f2009000,$123c0003
+ dc.l $f2294800,$000060ff,$0000051c,$201f2049
+ dc.l $60ff0000,$05860001,$00008000,$00000000
+ dc.l $00000000,$0000422e,$ff652f00,$422eff5c
+ dc.l $600c422e,$ff652f00,$1d7c0001,$ff5c48e7
+ dc.l $3f003628,$00003d43,$ff580283,$00007fff
+ dc.l $28280004,$2a280008,$4a83663c,$263c0000
+ dc.l $3ffe4a84,$66162805,$42850483,$00000020
+ dc.l $4286edc4,$6000edac,$96866022,$4286edc4
+ dc.l $60009686,$edac2e05,$edad4486,$06860000
+ dc.l $0020ecaf,$88876006,$06830000,$3ffe3029
+ dc.l $00003d40,$ff5a322e,$ff58b181,$02810000
+ dc.l $80003d41,$ff5e0280,$00007fff,$22290004
+ dc.l $24290008,$4a80663c,$203c0000,$3ffe4a81
+ dc.l $66162202,$42820480,$00000020,$4286edc1
+ dc.l $6000eda9,$90866022,$4286edc1,$60009086
+ dc.l $eda92e02,$edaa4486,$06860000,$0020ecaf
+ dc.l $82876006,$06800000,$3ffe2d43,$ff542f00
+ dc.l $90834286,$4283227c,$00000000,$4a806c06
+ dc.l $201f6000,$006a588f,$4a866e0e,$b2846608
+ dc.l $b4856604,$60000136,$65089485,$93844286
+ dc.l $52834a80,$670ed683,$d482e391,$55c65289
+ dc.l $538060d4,$202eff54,$4a816616,$22024282
+ dc.l $04800000,$00204286,$edc16000,$eda99086
+ dc.l $601c4286,$edc16000,$6b149086,$eda92e02
+ dc.l $edaa4486,$06860000,$0020ecaf,$82870c80
+ dc.l $000041fe,$6c2a3d40,$ff902d41,$ff942d42
+ dc.l $ff982c2e,$ff543d46,$ff842d44,$ff882d45
+ dc.l $ff8cf22e,$4800ff90,$1d7c0001,$ff5d6036
+ dc.l $2d41ff94,$2d42ff98,$04800000,$3ffe3d40
+ dc.l $ff902c2e,$ff540486,$00003ffe,$2d46ff54
+ dc.l $f22e4800,$ff903d46,$ff842d44,$ff882d45
+ dc.l $ff8c422e,$ff5d4a2e,$ff5c6722,$2c2eff54
+ dc.l $5386b086,$6d186e0e,$b2846608,$b4856604
+ dc.l $6000007a,$6508f22e,$4828ff84,$52833c2e
+ dc.l $ff5a6c04,$f200001a,$42863c2e,$ff5e7e08
+ dc.l $eeae0283,$0000007f,$86861d43,$ff654cdf
+ dc.l $00fc201f,$f2009000,$4a2eff5d,$6710123c
+ dc.l $0000f23a,$4823fdc0,$60ff0000,$02ca123c
+ dc.l $0003f200,$000060ff,$000002bc,$52830c80
+ dc.l $00000008,$6c04e1ab,$60024283,$f23c4400
+ dc.l $00000000,$422eff5d,$6000ff94,$2c030286
+ dc.l $00000001,$4a866700,$ff865283,$3c2eff5a
+ dc.l $0a860000,$80003d46,$ff5a6000,$ff723028
+ dc.l $00000240,$7fff0c40,$7fff6738,$08280007
+ dc.l $00046706,$103c0000,$4e754a40,$66184aa8
+ dc.l $0004660c,$4aa80008,$6606103c,$00014e75
+ dc.l $103c0004,$4e7561ff,$000007f6,$4e75103c
+ dc.l $00064e75,$4aa80008,$66122028,$00040280
+ dc.l $7fffffff,$6606103c,$00024e75,$103c0003
+ dc.l $4e757fff,$0000ffff,$ffffffff,$ffff4a28
+ dc.l $00006a38,$00ae0a00,$0410ff64,$082e0002
+ dc.l $ff62660a,$f23c4400,$ff800000,$4e75f22e
+ dc.l $d080ffdc,$f22e9000,$ff60f23c,$4480bf80
+ dc.l $0000f23c,$44a00000,$00004e75,$00ae0200
+ dc.l $0410ff64,$082e0002,$ff62660a,$f23c4400
+ dc.l $7f800000,$4e75f22e,$d080ffdc,$f22e9000
+ dc.l $ff60f23c,$44803f80,$0000f23c,$44a00000
+ dc.l $00004e75,$00ae0100,$2080ff64,$082e0005
+ dc.l $ff626608,$f23ad080,$ff6a4e75,$f22ed080
+ dc.l $ffdcf22e,$9000ff60,$f227e004,$f23c4500
+ dc.l $7f800000,$f23c4523,$00000000,$f21fd020
+ dc.l $4e757ffe,$0000ffff,$ffffffff,$fffffffe
+ dc.l $0000ffff,$ffffffff,$ffff0000,$00008000
+ dc.l $00000000,$00008000,$00008000,$00000000
+ dc.l $00004a28,$00006a26,$00ae0800,$0a28ff64
+ dc.l $f22e9000,$ff60f23a,$d080ffdc,$f23a4823
+ dc.l $ffcaf200,$a800e198,$1d40ff64,$4e75006e
+ dc.l $0a28ff66,$f22e9000,$ff60f23a,$d080ffac
+ dc.l $f2000023,$f200a800,$e1981d40,$ff644e75
+ dc.l $00ae0000,$1048ff64,$12000201,$00c06700
+ dc.l $005a3d68,$0000ff84,$2d680004,$ff882d68
+ dc.l $0008ff8c,$41eeff84,$48e7c080,$61ff0000
+ dc.l $06184cdf,$01030c01,$00406610,$4aa80008
+ dc.l $66184a28,$00076612,$60000020,$22280008
+ dc.l $02810000,$07ff6700,$001200ae,$00000200
+ dc.l $ff646006,$006e1248,$ff664a28,$00006a22
+ dc.l $f22e9000,$ff60f23a,$d080ff14,$f23a4823
+ dc.l $ff02f200,$a800e198,$00000000,$1d40ff64
+ dc.l $4e75f22e,$9000ff60,$f23ad080,$fee6f23a
+ dc.l $4823fee0,$f200a800,$e1981d40,$ff644e75
+ dc.l $006e1248,$ff66f22e,$9000ff60,$f23ad080
+ dc.l $fec2f23a,$4823febc,$f200a800,$e1981d40
+ dc.l $ff644e75,$f200a800,$81aeff64,$6020f200
+ dc.l $a80081ae,$ff64f294,$000ef281,$0032006e
+ dc.l $0208ff66,$600800ae,$08000208,$ff64082e
+ dc.l $0001ff62,$66024e75,$f22e9000,$ff60f23c
+ dc.l $44803f80,$0000f23a,$48a2fe80,$4e751d7c
+ dc.l $0004ff64,$006e0208,$ff664e75,$f22e9000
+ dc.l $ff60f228,$48000000,$f200a800,$00800000
+ dc.l $0a2881ae,$ff644e75,$f22e9000,$ff60f228
+ dc.l $48000000,$f200a800,$81aeff64,$4e754e75
+ dc.l $f2294800,$00004a29,$00006b08,$1d7c0001
+ dc.l $ff644e75,$1d7c0009,$ff644e75,$f2284800
+ dc.l $00004a28,$00006b08,$1d7c0001,$ff644e75
+ dc.l $1d7c0009,$ff644e75,$f227b000,$f23c9000
+ dc.l $00000000,$f22f4400,$0008f21f,$9000f22f
+ dc.l $44220008,$4e75f227,$b000f23c,$90000000
+ dc.l $0000f22f,$54000008,$f21f9000,$f22f5422
+ dc.l $000c4e75,$f22fd080,$0004f22f,$48220010
+ dc.l $4e75f227,$b000f23c,$90000000,$0000f22f
+ dc.l $44000008,$f21f9000,$f22f4428,$00084e75
+ dc.l $f227b000,$f23c9000,$00000000,$f22f5400
+ dc.l $0008f21f,$9000f22f,$5428000c,$4e75f22f
+ dc.l $d0800004,$f22f4828,$00104e75,$f227b000
+ dc.l $f23c9000,$00000000,$f22f4400,$0008f21f
+ dc.l $9000f22f,$44230008,$4e75f227,$b000f23c
+ dc.l $90000000,$0000f22f,$54000008,$f21f9000
+ dc.l $f22f5423,$000c4e75,$f22fd080,$0004f22f
+ dc.l $48230010,$4e75f227,$b000f23c,$90000000
+ dc.l $0000f22f,$44000008,$f21f9000,$f22f4420
+ dc.l $00084e75,$f227b000,$f23c9000,$00000000
+ dc.l $f22f5400,$0008f21f,$9000f22f,$5420000c
+ dc.l $4e75f22f,$d0800004,$f22f4820,$00104e75
+ dc.l $f22f4418,$00044e75,$f22f5418,$00044e75
+ dc.l $f22f4818,$00044e75,$f22f441a,$00044e75
+ dc.l $f22f541a,$00044e75,$f22f481a,$00044e75
+ dc.l $f22f4404,$00044e75,$f22f5404,$00044e75
+ dc.l $f22f4804,$00044e75,$f22f4401,$00044e75
+ dc.l $f22f5401,$00044e75,$f22f4801,$00044e75
+ dc.l $f22f4403,$00044e75,$f22f5403,$00044e75
+ dc.l $f22f4803,$00044e75,$4a280000,$6b10f23c
+ dc.l $44000000,$00001d7c,$0004ff64,$4e75f23c
+ dc.l $44008000,$00001d7c,$000cff64,$4e754a29
+ dc.l $00006bea,$60d84a28,$00006b10,$f23c4400
+ dc.l $7f800000,$1d7c0002,$ff644e75,$f23c4400
+ dc.l $ff800000,$1d7c000a,$ff644e75,$4a290000
+ dc.l $6bea60d8,$4a280000,$6ba460d0,$4a280000
+ dc.l $6b00fba2,$60c64a28,$00006b16,$60be4a28
+ dc.l $00006b0e,$f23c4400,$3f800000,$422eff64
+ dc.l $4e75f23c,$4400bf80,$00001d7c,$0008ff64
+ dc.l $4e753fff,$0000c90f,$daa22168,$c235bfff
+ dc.l $0000c90f,$daa22168,$c2354a28,$00006b0e
+ dc.l $f2009000,$f23a4800,$ffda6000,$fcf2f200
+ dc.l $9000f23a,$4800ffd8,$6000fcec,$f23c4480
+ dc.l $3f800000,$4a280000,$6a10f23c,$44008000
+ dc.l $00001d7c,$000cff64,$4e75f23c,$44000000
+ dc.l $00001d7c,$0004ff64,$4e75f23a,$4880fa84
+ dc.l $6000fb02,$f2284880,$00006000,$fd30122e
+ dc.l $ff4f67ff,$fffff782,$0c010001,$67000078
+ dc.l $0c010002,$67ffffff,$fade0c01,$000467ff
+ dc.l $fffff766,$60ffffff,$fcea122e,$ff4f67ff
+ dc.l $fffffac4,$0c010001,$67ffffff,$faba0c01
+ dc.l $000267ff,$fffffab0,$0c010004,$67ffffff
+ dc.l $faa660ff,$fffffcbc,$122eff4f,$67ff0000
+ dc.l $00440c01,$000167ff,$0000001e,$0c010002
+ dc.l $67ffffff,$fa820c01,$000467ff,$00000026
+ dc.l $60ffffff,$fc8e1228,$00001029,$0000b101
+ dc.l $02010080,$1d41ff65,$4a006a00,$fe526000
+ dc.l $fe5e422e,$ff652f00,$12280000,$10290000
+ dc.l $b1010201,$00801d41,$ff650c2e,$0004ff4f
+ dc.l $660c41e9,$0000201f,$60ffffff,$fc2ef21f
+ dc.l $9000f229,$48000000,$4a290000,$6b024e75
+ dc.l $1d7c0008,$ff644e75,$122eff4f,$67ffffff
+ dc.l $f6a40c01,$00016700,$ff8e0c01,$000267ff
+ dc.l $fffff9f4,$0c010004,$67ffffff,$f68860ff
+ dc.l $fffffc00,$122eff4f,$67ffffff,$f9da0c01
+ dc.l $000167ff,$fffff9d0,$0c010002,$67ffffff
+ dc.l $f9c60c01,$000467ff,$fffff9bc,$60ffffff
+ dc.l $fbd2122e,$ff4f6700,$ff5a0c01,$00016700
+ dc.l $ff360c01,$000267ff,$fffff99c,$0c010004
+ dc.l $67ffffff,$ff4060ff,$fffffba8,$122eff4f
+ dc.l $67ffffff,$f5000c01,$000167ff,$fffffd92
+ dc.l $0c010002,$67ffffff,$fdb60c01,$000467ff
+ dc.l $fffff4e2,$60ffffff,$fb7a122e,$ff4f67ff
+ dc.l $fffff4d2,$0c010001,$67ffffff,$fd640c01
+ dc.l $000267ff,$fffffd88,$0c010004,$67ffffff
+ dc.l $f4b460ff,$fffffb4c,$122eff4f,$67ffffff
+ dc.l $f9260c01,$000367ff,$fffffb38,$60ffffff
+ dc.l $f916122e,$ff4f0c01,$000367ff,$fffffb24
+ dc.l $60ffffff,$fb3a2f02,$2f032028,$00042228
+ dc.l $0008edc0,$2000671a,$e5a8e9c1,$30228083
+ dc.l $e5a92140,$00042141,$00082002,$261f241f
+ dc.l $4e75edc1,$2000e5a9,$06820000,$00202141
+ dc.l $000442a8,$00082002,$261f241f,$4e75ede8
+ dc.l $00000004,$660eede8,$00000008,$67000074
+ dc.l $06400020,$42813228,$00000241,$7fffb041
+ dc.l $6e1c9240,$30280000,$02408000,$82403141
+ dc.l $000061ff,$ffffff82,$103c0000,$4e750c01
+ dc.l $00206e20,$e9e80840,$00042140,$00042028
+ dc.l $0008e3a8,$21400008,$02688000,$0000103c
+ dc.l $00044e75,$04410020,$20280008,$e3a82140
+ dc.l $000442a8,$00080268,$80000000,$103c0004
+ dc.l $4e750268,$80000000,$103c0001,$4e7551fc
diff --git a/arch/m68k/ifpsp060/fpsp.doc b/arch/m68k/ifpsp060/fpsp.doc
new file mode 100644
index 00000000000..408315209e6
--- /dev/null
+++ b/arch/m68k/ifpsp060/fpsp.doc
@@ -0,0 +1,295 @@
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+MOTOROLA MICROPROCESSOR & MEMORY TECHNOLOGY GROUP
+M68000 Hi-Performance Microprocessor Division
+M68060 Software Package
+Production Release P1.00 -- October 10, 1994
+
+M68060 Software Package Copyright © 1993, 1994 Motorola Inc. All rights reserved.
+
+THE SOFTWARE is provided on an "AS IS" basis and without warranty.
+To the maximum extent permitted by applicable law,
+MOTOROLA DISCLAIMS ALL WARRANTIES WHETHER EXPRESS OR IMPLIED,
+INCLUDING IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
+and any warranty against infringement with regard to the SOFTWARE
+(INCLUDING ANY MODIFIED VERSIONS THEREOF) and any accompanying written materials.
+
+To the maximum extent permitted by applicable law,
+IN NO EVENT SHALL MOTOROLA BE LIABLE FOR ANY DAMAGES WHATSOEVER
+(INCLUDING WITHOUT LIMITATION, DAMAGES FOR LOSS OF BUSINESS PROFITS,
+BUSINESS INTERRUPTION, LOSS OF BUSINESS INFORMATION, OR OTHER PECUNIARY LOSS)
+ARISING OF THE USE OR INABILITY TO USE THE SOFTWARE.
+Motorola assumes no responsibility for the maintenance and support of the SOFTWARE.
+
+You are hereby granted a copyright license to use, modify, and distribute the SOFTWARE
+so long as this entire notice is retained without alteration in any modified and/or
+redistributed versions, and that such modified versions are clearly identified as such.
+No licenses are granted by implication, estoppel or otherwise under any patents
+or trademarks of Motorola, Inc.
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+68060 FLOATING-POINT SOFTWARE PACKAGE (Kernel version)
+-------------------------------------------------------
+
+The file fpsp.sa contains the 68060 Floating-Point Software
+Package. This package is essentially a set of exception handlers
+that can be integrated into an operating system.
+These exception handlers emulate Unimplemented FP instructions,
+instructions using unimplemented data types, and instructions
+using unimplemented addressing modes. In addition, this package
+includes exception handlers to provide full IEEE-754 compliant
+exception handling.
+
+Release file format:
+--------------------
+The file fpsp.sa is essentially a hexadecimal image of the
+release package. This is the ONLY format which will be supported.
+The hex image was created by assembling the source code and
+then converting the resulting binary output image into an
+ASCII text file. The hexadecimal numbers are listed
+using the Motorola Assembly Syntax assembler directive "dc.l"
+(define constant longword). The file can be converted to other
+assembly syntaxes by using any word processor with a global
+search and replace function.
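+
+For example, the first line of the accompanying fpsp.sa (which in
+this copy has already been converted to the GNU assembler ".long"
+syntax) and its Motorola-syntax "dc.l" equivalent differ only in
+the directive name and the radix prefix:
+
+	.long 0x60ff0000,0x17400000,0x60ff0000,0x15f40000
+
+	dc.l  $60ff0000,$17400000,$60ff0000,$15f40000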
+
+To assist in assembling and linking this module with other modules,
+the installer should add a symbolic label to the top of the file.
+This will allow calling routines to access the entry points
+of this package.
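+
+A minimal wrapper illustrating this (the label name _060FPSP_TOP
+matches the examples below; the exact directives depend on the
+assembler used):
+
+	.global	_060FPSP_TOP
+_060FPSP_TOP:
+	| the 128-byte "Call-out" section sits here, followed
+	| immediately by the fpsp.sa image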
+
+The source code fpsp.s has also been included but only for
+documentation purposes.
+
+Release file structure:
+-----------------------
+
+(top of module)
+ -----------------
+ | | - 128 byte-sized section
+ (1) | Call-Out | - 4 bytes per entry (user fills these in)
+ | | - example routines in fskeleton.s
+ -----------------
+ | | - 8 bytes per entry
+ (2) | Entry Point | - user does "bra" or "jmp" to this address
+ | |
+ -----------------
+ | | - code section
+ (3) ~ ~
+ | |
+ -----------------
+(bottom of module)
+
+The first section of this module is the "Call-out" section. This section
+is NOT INCLUDED in fpsp.sa (an example "Call-out" section is provided at
+the end of the file fskeleton.s). The purpose of this section is to allow
+the FPSP routines to reference external functions that must be provided
+by the host operating system. This section MUST be exactly 128 bytes in
+size. There are 32 fields, each 4 bytes in size. Each field corresponds
+to a function required by the FPSP (these functions and their location are
+listed in "68060FPSP call-outs" below). Each field entry should contain
+the address of the corresponding function RELATIVE to the starting address
+of the "call-out" section. The "Call-out" section must sit adjacent to the
+fpsp.sa image in memory.
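+
+Continuing the sketch above, the first few call-out fields could
+be written as follows (the _060_real_* names are the host
+functions listed in "68060FPSP call-outs" below; note that each
+entry is an offset, not an absolute address):
+
+_060FPSP_TOP:
+	dc.l	_060_real_bsun-_060FPSP_TOP	| 0x000
+	dc.l	_060_real_snan-_060FPSP_TOP	| 0x004
+	dc.l	_060_real_operr-_060FPSP_TOP	| 0x008
+	dc.l	_060_real_ovfl-_060FPSP_TOP	| 0x00c
+	| the remaining fields, through offset 0x07c, are
+	| filled in the same way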
+
+The second section, the "Entry-point" section, is used by external routines
+to access the functions within the FPSP. Since the fpsp.sa hex file contains
+no symbol names, this section contains function entry points that are fixed
+with respect to the top of the package. The currently defined entry-points
+are listed in section "68060 FPSP entry points" below. A calling routine
+would simply execute a "bra" or "jmp" that jumps to the selected function
+entry-point.
+
+For example, if the 68060 hardware took a "Line-F Emulator" exception
+(vector #11), the operating system should execute something similar to:
+
+ bra _060FPSP_TOP+128+48
+
+(_060FPSP_TOP is the starting address of the "Call-out" section; the "Call-out"
+section is 128 bytes long; and the F-Line FPSP handler entry point is located
+48 bytes from the top of the "Entry-point" section.)
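+
+A hypothetical stub installed in vector #11 could therefore be as
+small as the following (0x030 = 48, the offset of the
+_060_fpsp_fline entry point listed below):
+
+_my_fline_stub:
+	bra	_060FPSP_TOP+128+0x030	| -> _060_fpsp_fline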
+
+The third section is the code section. After entering through an "Entry-point",
+the entry code jumps to the appropriate emulation code within the code section.
+
+68060FPSP call-outs: (details in fskeleton.s)
+--------------------
+0x000: _060_real_bsun
+0x004: _060_real_snan
+0x008: _060_real_operr
+0x00c: _060_real_ovfl
+0x010: _060_real_unfl
+0x014: _060_real_dz
+0x018: _060_real_inex
+0x01c: _060_real_fline
+0x020: _060_real_fpu_disabled
+0x024: _060_real_trap
+0x028: _060_real_trace
+0x02c: _060_real_access
+0x030: _060_fpsp_done
+
+0x034: (Motorola reserved)
+0x038: (Motorola reserved)
+0x03c: (Motorola reserved)
+
+0x040: _060_imem_read
+0x044: _060_dmem_read
+0x048: _060_dmem_write
+0x04c: _060_imem_read_word
+0x050: _060_imem_read_long
+0x054: _060_dmem_read_byte
+0x058: _060_dmem_read_word
+0x05c: _060_dmem_read_long
+0x060: _060_dmem_write_byte
+0x064: _060_dmem_write_word
+0x068: _060_dmem_write_long
+
+0x06c: (Motorola reserved)
+0x070: (Motorola reserved)
+0x074: (Motorola reserved)
+0x078: (Motorola reserved)
+0x07c: (Motorola reserved)
+
+68060FPSP entry points:
+-----------------------
+0x000: _060_fpsp_snan
+0x008: _060_fpsp_operr
+0x010: _060_fpsp_ovfl
+0x018: _060_fpsp_unfl
+0x020: _060_fpsp_dz
+0x028: _060_fpsp_inex
+0x030: _060_fpsp_fline
+0x038: _060_fpsp_unsupp
+0x040: _060_fpsp_effadd
+
+
+
+Miscellaneous:
+--------------
+
+_060_fpsp_snan:
+----------------
+- documented in 3.5 of 060SP spec.
+- Basic flow:
+ exception taken ---> enter _060_fpsp_snan --|
+ |
+ always exits through _060_real_snan <----
+
+_060_fpsp_operr:
+----------------
+- documented in 3.5 of 060SP spec.
+- Basic flow:
+ exception taken ---> enter _060_fpsp_operr --|
+ |
+ always exits through _060_real_operr <-----
+
+_060_fpsp_dz:
+----------------
+- documented in 3.7 of 060SP spec.
+- Basic flow:
+ exception taken ---> enter _060_fpsp_dz --|
+ |
+ always exits through _060_real_dz <----
+
+_060_fpsp_inex:
+----------------
+- documented in 3.6 of 060SP spec.
+- Basic flow:
+ exception taken ---> enter _060_fpsp_inex --|
+ |
+ always exits through _060_real_inex <----
+
+
+_060_fpsp_ovfl:
+----------------
+- documented in 3.4 of 060SP spec.
+- Basic flow:
+ exception taken ---> enter _060_fpsp_ovfl --|
+ |
+ may exit through _060_real_inex <---|
+ or |
+ may exit through _060_real_ovfl <---|
+ or |
+ may exit through _060_fpsp_done <---|
+
+_060_fpsp_unfl:
+----------------
+- documented in 3.4 of 060SP spec.
+- Basic flow:
+ exception taken ---> enter _060_fpsp_unfl --|
+ |
+ may exit through _060_real_inex <---|
+ or |
+ may exit through _060_real_unfl <---|
+ or |
+ may exit through _060_fpsp_done <---|
+
+
+_060_fpsp_fline:
+-----------------
+- not fully documented in 060SP spec.
+- Basic flow:
+   exception taken ---> enter _060_fpsp_fline, which dispatches
+   on the type of exception stack frame:
+
+   (1) "unimplemented FP instruction" frame:
+         the instruction is emulated, and the handler
+         may exit through _060_real_trace,
+         or may exit through _060_real_trap,
+         or may exit through _060_real_bsun,
+         or may exit through _060_fpsp_done.
+
+   (2) "fpu disabled" frame:
+         always exits through _060_real_fpu_disabled.
+
+   (3) possible F-line illegal:
+         if the opcode is the special case "fmovecr":
+           exits through _060_real_fpu_disabled if the fpu
+           is disabled; otherwise the instruction is handled
+           as in (1) above;
+         otherwise, exits through _060_real_fline.
+
+
+_060_fpsp_unsupp:
+------------------
+- documented in 3.1 of 060SP spec.
+- Basic flow:
+ exception taken ---> enter _060_fpsp_unsupp --|
+ |
+ |
+ may exit through _060_real_snan <----|
+ or |
+ may exit through _060_real_operr <----|
+ or |
+ may exit through _060_real_ovfl <----|
+ or |
+ may exit through _060_real_unfl <----|
+ or |
+ may exit through _060_real_inex <----|
+ or |
+ may exit through _060_real_trace <----|
+ or |
+ may exit through _060_fpsp_done <----|
+
+
+_060_fpsp_effadd:
+------------------
+- documented in 3.3 of 060SP spec.
+- Basic flow:
+ exception taken ---> enter _060_fpsp_effadd --|
+ |
+ |
+ may exit through _060_real_trace <----|
+ or |
+ may exit through _060_real_fpu_disabled <----|
+ or |
+ may exit through _060_fpsp_done <----|
diff --git a/arch/m68k/ifpsp060/fpsp.sa b/arch/m68k/ifpsp060/fpsp.sa
new file mode 100644
index 00000000000..d69486a44bc
--- /dev/null
+++ b/arch/m68k/ifpsp060/fpsp.sa
@@ -0,0 +1,3401 @@
+ .long 0x60ff0000,0x17400000,0x60ff0000,0x15f40000
+ .long 0x60ff0000,0x02b60000,0x60ff0000,0x04700000
+ .long 0x60ff0000,0x1b100000,0x60ff0000,0x19aa0000
+ .long 0x60ff0000,0x1b5a0000,0x60ff0000,0x062e0000
+ .long 0x60ff0000,0x102c0000,0x51fc51fc,0x51fc51fc
+ .long 0x51fc51fc,0x51fc51fc,0x51fc51fc,0x51fc51fc
+ .long 0x51fc51fc,0x51fc51fc,0x51fc51fc,0x51fc51fc
+ .long 0x51fc51fc,0x51fc51fc,0x51fc51fc,0x51fc51fc
+ .long 0x2f00203a,0xff2c487b,0x0930ffff,0xfef8202f
+ .long 0x00044e74,0x00042f00,0x203afef2,0x487b0930
+ .long 0xfffffee2,0x202f0004,0x4e740004,0x2f00203a
+ .long 0xfee0487b,0x0930ffff,0xfecc202f,0x00044e74
+ .long 0x00042f00,0x203afed2,0x487b0930,0xfffffeb6
+ .long 0x202f0004,0x4e740004,0x2f00203a,0xfea4487b
+ .long 0x0930ffff,0xfea0202f,0x00044e74,0x00042f00
+ .long 0x203afe96,0x487b0930,0xfffffe8a,0x202f0004
+ .long 0x4e740004,0x2f00203a,0xfe7c487b,0x0930ffff
+ .long 0xfe74202f,0x00044e74,0x00042f00,0x203afe76
+ .long 0x487b0930,0xfffffe5e,0x202f0004,0x4e740004
+ .long 0x2f00203a,0xfe68487b,0x0930ffff,0xfe48202f
+ .long 0x00044e74,0x00042f00,0x203afe56,0x487b0930
+ .long 0xfffffe32,0x202f0004,0x4e740004,0x2f00203a
+ .long 0xfe44487b,0x0930ffff,0xfe1c202f,0x00044e74
+ .long 0x00042f00,0x203afe32,0x487b0930,0xfffffe06
+ .long 0x202f0004,0x4e740004,0x2f00203a,0xfe20487b
+ .long 0x0930ffff,0xfdf0202f,0x00044e74,0x00042f00
+ .long 0x203afe1e,0x487b0930,0xfffffdda,0x202f0004
+ .long 0x4e740004,0x2f00203a,0xfe0c487b,0x0930ffff
+ .long 0xfdc4202f,0x00044e74,0x00042f00,0x203afdfa
+ .long 0x487b0930,0xfffffdae,0x202f0004,0x4e740004
+ .long 0x2f00203a,0xfde8487b,0x0930ffff,0xfd98202f
+ .long 0x00044e74,0x00042f00,0x203afdd6,0x487b0930
+ .long 0xfffffd82,0x202f0004,0x4e740004,0x2f00203a
+ .long 0xfdc4487b,0x0930ffff,0xfd6c202f,0x00044e74
+ .long 0x00042f00,0x203afdb2,0x487b0930,0xfffffd56
+ .long 0x202f0004,0x4e740004,0x2f00203a,0xfda0487b
+ .long 0x0930ffff,0xfd40202f,0x00044e74,0x00042f00
+ .long 0x203afd8e,0x487b0930,0xfffffd2a,0x202f0004
+ .long 0x4e740004,0x2f00203a,0xfd7c487b,0x0930ffff
+ .long 0xfd14202f,0x00044e74,0x00042f00,0x203afd6a
+ .long 0x487b0930,0xfffffcfe,0x202f0004,0x4e740004
+ .long 0x40c62d38,0xd3d64634,0x3d6f90ae,0xb1e75cc7
+ .long 0x40000000,0xc90fdaa2,0x2168c235,0x00000000
+ .long 0x3fff0000,0xc90fdaa2,0x2168c235,0x00000000
+ .long 0x3fe45f30,0x6dc9c883,0x4e56ff40,0xf32eff6c
+ .long 0x48ee0303,0xff9cf22e,0xbc00ff60,0xf22ef0c0
+ .long 0xffdc2d6e,0xff68ff44,0x206eff44,0x58aeff44
+ .long 0x61ffffff,0xff042d40,0xff40082e,0x0005ff42
+ .long 0x66000116,0x41eeff6c,0x61ff0000,0x051c41ee
+ .long 0xff6c61ff,0x0000c1dc,0x1d40ff4e,0x082e0005
+ .long 0xff436726,0xe9ee0183,0xff4261ff,0x0000bd22
+ .long 0x41eeff78,0x61ff0000,0xc1ba0c00,0x00066606
+ .long 0x61ff0000,0xc11e1d40,0xff4f4280,0x102eff63
+ .long 0x122eff43,0x0241007f,0x02ae00ff,0x01ffff64
+ .long 0xf23c9000,0x00000000,0xf23c8800,0x00000000
+ .long 0x41eeff6c,0x43eeff78,0x223b1530,0x00007112
+ .long 0x4ebb1930,0x0000710a,0xe9ee0183,0xff4261ff
+ .long 0x0000bd4e,0x082e0004,0xff626622,0x082e0001
+ .long 0xff626644,0xf22ed0c0,0xffdcf22e,0x9c00ff60
+ .long 0x4cee0303,0xff9c4e5e,0x60ffffff,0xfcc6f22e
+ .long 0xf040ff6c,0x3d7ce005,0xff6ef22e,0xd0c0ffdc
+ .long 0xf22e9c00,0xff604cee,0x0303ff9c,0xf36eff6c
+ .long 0x4e5e60ff,0xfffffcb2,0xf22ef040,0xff6c1d7c
+ .long 0x00c4000b,0x3d7ce001,0xff6ef22e,0xd0c0ffdc
+ .long 0xf22e9c00,0xff604cee,0x0303ff9c,0xf36eff6c
+ .long 0x4e5e60ff,0xfffffcae,0x1d7c0000,0xff4e4280
+ .long 0x102eff63,0x02aeffff,0x00ffff64,0xf23c9000
+ .long 0x00000000,0xf23c8800,0x00000000,0x41eeff6c
+ .long 0x61ff0000,0xb2ce082e,0x0004ff62,0x6600ff70
+ .long 0x082e0001,0xff626600,0xff90f22e,0xd0c0ffdc
+ .long 0xf22e9c00,0xff604cee,0x0303ff9c,0x4e5e0817
+ .long 0x000767ff,0xfffffc0c,0xf22fa400,0x00083f7c
+ .long 0x20240006,0x60ffffff,0xfcec4e56,0xff40f32e
+ .long 0xff6c48ee,0x0303ff9c,0xf22ebc00,0xff60f22e
+ .long 0xf0c0ffdc,0x2d6eff68,0xff44206e,0xff4458ae
+ .long 0xff4461ff,0xfffffd42,0x2d40ff40,0x082e0005
+ .long 0xff426600,0x013241ee,0xff6c61ff,0x0000035a
+ .long 0x41eeff6c,0x61ff0000,0xc01a1d40,0xff4e082e
+ .long 0x0005ff43,0x672e082e,0x0004ff43,0x6626e9ee
+ .long 0x0183ff42,0x61ff0000,0xbb5841ee,0xff7861ff
+ .long 0x0000bff0,0x0c000006,0x660661ff,0x0000bf54
+ .long 0x1d40ff4f,0x4280102e,0xff63122e,0xff430241
+ .long 0x007f02ae,0x00ff01ff,0xff64f23c,0x90000000
+ .long 0x0000f23c,0x88000000,0x000041ee,0xff6c43ee
+ .long 0xff78223b,0x15300000,0x6f484ebb,0x19300000
+ .long 0x6f40e9ee,0x0183ff42,0x61ff0000,0xbb84082e
+ .long 0x0003ff62,0x6622082e,0x0001ff62,0x664ef22e
+ .long 0xd0c0ffdc,0xf22e9c00,0xff604cee,0x0303ff9c
+ .long 0x4e5e60ff,0xfffffafc,0x082e0003,0xff666700
+ .long 0xffd6f22e,0xf040ff6c,0x3d7ce003,0xff6ef22e
+ .long 0xd0c0ffdc,0xf22e9c00,0xff604cee,0x0303ff9c
+ .long 0xf36eff6c,0x4e5e60ff,0xfffffaf4,0x082e0001
+ .long 0xff666700,0xffaaf22e,0xf040ff6c,0x1d7c00c4
+ .long 0x000b3d7c,0xe001ff6e,0xf22ed0c0,0xffdcf22e
+ .long 0x9c00ff60,0x4cee0303,0xff9cf36e,0xff6c4e5e
+ .long 0x60ffffff,0xfad01d7c,0x0000ff4e,0x4280102e
+ .long 0xff6302ae,0xffff00ff,0xff64f23c,0x90000000
+ .long 0x0000f23c,0x88000000,0x000041ee,0xff6c61ff
+ .long 0x0000b0f0,0x082e0003,0xff626600,0xff66082e
+ .long 0x0001ff62,0x6600ff90,0xf22ed0c0,0xffdcf22e
+ .long 0x9c00ff60,0x4cee0303,0xff9c4e5e,0x08170007
+ .long 0x67ffffff,0xfa2ef22f,0xa4000008,0x3f7c2024
+ .long 0x000660ff,0xfffffb0e,0x4e56ff40,0xf32eff6c
+ .long 0x48ee0303,0xff9cf22e,0xbc00ff60,0xf22ef0c0
+ .long 0xffdc082e,0x00050004,0x66084e68,0x2d48ffd8
+ .long 0x600841ee,0x00102d48,0xffd82d6e,0xff68ff44
+ .long 0x206eff44,0x58aeff44,0x61ffffff,0xfb4c2d40
+ .long 0xff40422e,0xff4a082e,0x0005ff42,0x66000208
+ .long 0xe9ee0006,0xff420c00,0x00136700,0x049e02ae
+ .long 0x00ff00ff,0xff64f23c,0x90000000,0x0000f23c
+ .long 0x88000000,0x000041ee,0xff6c61ff,0x0000013a
+ .long 0x41eeff6c,0x61ff0000,0xbdfa0c00,0x00066606
+ .long 0x61ff0000,0xbd5e1d40,0xff4ee9ee,0x0183ff42
+ .long 0x082e0005,0xff436728,0x0c2e003a,0xff436720
+ .long 0x61ff0000,0xb92c41ee,0xff7861ff,0x0000bdc4
+ .long 0x0c000006,0x660661ff,0x0000bd28,0x1d40ff4f
+ .long 0x4280102e,0xff63e9ee,0x1047ff43,0x41eeff6c
+ .long 0x43eeff78,0x223b1d30,0x00006d36,0x4ebb1930
+ .long 0x00006d2e,0x102eff62,0x6634102e,0xff430200
+ .long 0x00380c00,0x0038670c,0xe9ee0183,0xff4261ff
+ .long 0x0000b95e,0xf22ed0c0,0xffdcf22e,0x9c00ff60
+ .long 0x4cee0303,0xff9c4e5e,0x60ffffff,0xf8e6c02e
+ .long 0xff66edc0,0x06086614,0x082e0004,0xff6667ba
+ .long 0x082e0001,0xff6267b2,0x60000066,0x04800000
+ .long 0x00180c00,0x00066614,0x082e0003,0xff666600
+ .long 0x004a082e,0x0004ff66,0x66000046,0x2f0061ff
+ .long 0x000007e0,0x201f3d7b,0x0222ff6e,0xf22ed0c0
+ .long 0xffdcf22e,0x9c00ff60,0x4cee0303,0xff9cf36e
+ .long 0xff6c4e5e,0x60ffffff,0xf87ae000,0xe006e004
+ .long 0xe005e003,0xe002e001,0xe001303c,0x000460bc
+ .long 0x303c0003,0x60b6e9ee,0x0006ff42,0x0c000011
+ .long 0x67080c00,0x00156750,0x4e753028,0x00000240
+ .long 0x7fff0c40,0x3f806708,0x0c40407f,0x672c4e75
+ .long 0x02a87fff,0xffff0004,0x671861ff,0x0000bbbc
+ .long 0x44400640,0x3f810268,0x80000000,0x81680000
+ .long 0x4e750268,0x80000000,0x4e750228,0x007f0004
+ .long 0x00687fff,0x00004e75,0x30280000,0x02407fff
+ .long 0x0c403c00,0x67080c40,0x43ff67de,0x4e7502a8
+ .long 0x7fffffff,0x00046606,0x4aa80008,0x67c461ff
+ .long 0x0000bb68,0x44400640,0x3c010268,0x80000000
+ .long 0x81680000,0x4e75e9ee,0x00c3ff42,0x0c000003
+ .long 0x670004a2,0x0c000007,0x6700049a,0x02aeffff
+ .long 0x00ffff64,0xf23c9000,0x00000000,0xf23c8800
+ .long 0x00000000,0x302eff6c,0x02407fff,0x671041ee
+ .long 0xff6c61ff,0x0000bb5c,0x1d40ff4e,0x60061d7c
+ .long 0x0004ff4e,0x4280102e,0xff6341ee,0xff6c2d56
+ .long 0xffd461ff,0x0000adec,0x102eff62,0x66000086
+ .long 0x2caeffd4,0x082e0005,0x00046626,0x206effd8
+ .long 0x4e60f22e,0xd0c0ffdc,0xf22e9c00,0xff604cee
+ .long 0x0303ff9c,0x4e5e0817,0x0007667a,0x60ffffff
+ .long 0xf7220c2e,0x0008ff4a,0x66d8f22e,0xf080ff6c
+ .long 0xf22ed0c0,0xffdcf22e,0x9c00ff60,0x4cee0303
+ .long 0xff9c2c56,0x2f6f00c4,0x00b82f6f,0x00c800bc
+ .long 0x2f6f002c,0x00c42f6f,0x003000c8,0x2f6f0034
+ .long 0x00ccdffc,0x000000b8,0x08170007,0x662860ff
+ .long 0xfffff6d0,0xc02eff66,0xedc00608,0x662a082e
+ .long 0x0004ff66,0x6700ff6a,0x082e0001,0xff626700
+ .long 0xff606000,0x01663f7c,0x20240006,0xf22fa400
+ .long 0x000860ff,0xfffff78e,0x04800000,0x0018303b
+ .long 0x020a4efb,0x00064afc,0x00080000,0x0000003a
+ .long 0x00640094,0x00000140,0x0000f22e,0xd0c0ffdc
+ .long 0xf22e9c00,0xff604cee,0x0303ff9c,0x3d7c30d8
+ .long 0x000a3d7c,0xe006ff6e,0xf36eff6c,0x4e5e60ff
+ .long 0xfffff6d4,0xf22ed0c0,0xffdcf22e,0x9c00ff60
+ .long 0x4cee0303,0xff9c3d7c,0x30d0000a,0x3d7ce004
+ .long 0xff6ef36e,0xff6c4e5e,0x60ffffff,0xf694f22e
+ .long 0xf040ff6c,0xf22ed0c0,0xffdcf22e,0x9c00ff60
+ .long 0x4cee0303,0xff9c3d7c,0x30d4000a,0x3d7ce005
+ .long 0xff6ef36e,0xff6c4e5e,0x60ffffff,0xf60c2cae
+ .long 0xffd4082e,0x00050004,0x66000038,0x206effd8
+ .long 0x4e60f22e,0xf040ff6c,0xf22ed0c0,0xffdcf22e
+ .long 0x9c00ff60,0x4cee0303,0xff9c3d7c,0x30cc000a
+ .long 0x3d7ce003,0xff6ef36e,0xff6c4e5e,0x60ffffff
+ .long 0xf5de0c2e,0x0008ff4a,0x66c8f22e,0xf080ff6c
+ .long 0xf22ef040,0xff78f22e,0xd0c0ffdc,0xf22e9c00
+ .long 0xff604cee,0x0303ff9c,0x3d7c30cc,0x000a3d7c
+ .long 0xe003ff7a,0xf36eff78,0x2c562f6f,0x00c400b8
+ .long 0x2f6f00c8,0x00bc2f6f,0x00cc00c0,0x2f6f002c
+ .long 0x00c42f6f,0x003000c8,0x2f6f0034,0x00ccdffc
+ .long 0x000000b8,0x60ffffff,0xf576f22e,0xf040ff6c
+ .long 0xf22ed0c0,0xffdcf22e,0x9c00ff60,0x4cee0303
+ .long 0xff9c3d7c,0x30c4000a,0x3d7ce001,0xff6ef36e
+ .long 0xff6c4e5e,0x60ffffff,0xf55c02ae,0x00ff00ff
+ .long 0xff64f23c,0x90000000,0x0000f23c,0x88000000
+ .long 0x000061ff,0x0000bdba,0x41eeff6c,0x61ff0000
+ .long 0xb9621d40,0xff4ee9ee,0x0183ff42,0x082e0005
+ .long 0xff436728,0x0c2e003a,0xff436720,0x61ff0000
+ .long 0xb4a041ee,0xff7861ff,0x0000b938,0x0c000006
+ .long 0x660661ff,0x0000b89c,0x1d40ff4f,0x4280102e
+ .long 0xff63e9ee,0x1047ff43,0x41eeff6c,0x43eeff78
+ .long 0x223b1d30,0x000068aa,0x4ebb1930,0x000068a2
+ .long 0x102eff62,0x6600008a,0x102eff43,0x02000038
+ .long 0x0c000038,0x670ce9ee,0x0183ff42,0x61ff0000
+ .long 0xb4d0082e,0x00050004,0x6600002a,0x206effd8
+ .long 0x4e60f22e,0xd0c0ffdc,0xf22e9c00,0xff604cee
+ .long 0x0303ff9c,0x4e5e0817,0x00076600,0x012660ff
+ .long 0xfffff440,0x082e0002,0xff4a67d6,0xf22ed0c0
+ .long 0xffdcf22e,0x9c00ff60,0x4cee0303,0xff9c4e5e
+ .long 0x2f6f0004,0x00102f6f,0x0000000c,0xdffc0000
+ .long 0x000c0817,0x00076600,0x00ea60ff,0xfffff404
+ .long 0xc02eff66,0xedc00608,0x6618082e,0x0004ff66
+ .long 0x6700ff66,0x082e0001,0xff626700,0xff5c6000
+ .long 0x006e0480,0x00000018,0x0c000006,0x6d14082e
+ .long 0x0003ff66,0x66000060,0x082e0004,0xff666600
+ .long 0x004e082e,0x00050004,0x66000054,0x206effd8
+ .long 0x4e603d7b,0x022aff6e,0xf22ed0c0,0xffdcf22e
+ .long 0x9c00ff60,0x4cee0303,0xff9cf36e,0xff6c4e5e
+ .long 0x08170007,0x6600006c,0x60ffffff,0xf386e000
+ .long 0xe006e004,0xe005e003,0xe002e001,0xe001303c
+ .long 0x00036000,0xffae303c,0x00046000,0xffa6082e
+ .long 0x0002ff4a,0x67ac3d7b,0x02d6ff6e,0xf22ed0c0
+ .long 0xffdcf22e,0x9c00ff60,0x4cee0303,0xff9cf36e
+ .long 0xff6c4e5e,0x2f6f0004,0x00102f6f,0x0000000c
+ .long 0xdffc0000,0x000c0817,0x00076606,0x60ffffff
+ .long 0xf3223f7c,0x20240006,0xf22fa400,0x000860ff
+ .long 0xfffff402,0x02aeffff,0x00ffff64,0xf23c9000
+ .long 0x00000000,0xf23c8800,0x00000000,0xe9ee0183
+ .long 0xff4261ff,0x0000b22a,0x41eeff6c,0x61ff0000
+ .long 0xb7520c00,0x00066606,0x61ff0000,0xb6b61d40
+ .long 0xff4e4280,0x102eff63,0x41eeff6c,0x2d56ffd4
+ .long 0x61ff0000,0xa94e102e,0xff626600,0x00842cae
+ .long 0xffd4082e,0x00050004,0x6628206e,0xffd84e60
+ .long 0xf22ed0c0,0xffdcf22e,0x9c00ff60,0x4cee0303
+ .long 0xff9c4e5e,0x08170007,0x6600ff68,0x60ffffff
+ .long 0xf282082e,0x0003ff4a,0x67d6f22e,0xd0c0ffdc
+ .long 0xf22e9c00,0xff604cee,0x0303ff9c,0x2c562f6f
+ .long 0x00c400b8,0x2f6f00c8,0x00bc2f6f,0x003800c4
+ .long 0x2f6f003c,0x00c82f6f,0x004000cc,0xdffc0000
+ .long 0x00b80817,0x00076600,0xff1a60ff,0xfffff234
+ .long 0xc02eff66,0xedc00608,0x6700ff74,0x2caeffd4
+ .long 0x0c00001a,0x6e0000e8,0x67000072,0x082e0005
+ .long 0x0004660a,0x206effd8,0x4e606000,0xfb8e0c2e
+ .long 0x0008ff4a,0x6600fb84,0xf22ed0c0,0xffdcf22e
+ .long 0x9c00ff60,0x4cee0303,0xff9c3d7c,0x30d8000a
+ .long 0x3d7ce006,0xff6ef36e,0xff6c2c56,0x2f6f00c4
+ .long 0x00b82f6f,0x00c800bc,0x2f6f00cc,0x00c02f6f
+ .long 0x003800c4,0x2f6f003c,0x00c82f6f,0x004000cc
+ .long 0xdffc0000,0x00b860ff,0xfffff22c,0x082e0005
+ .long 0x00046600,0x000c206e,0xffd84e60,0x6000fb46
+ .long 0x0c2e0008,0xff4a6600,0xfb3cf22e,0xd0c0ffdc
+ .long 0xf22e9c00,0xff604cee,0x0303ff9c,0x3d7c30d0
+ .long 0x000a3d7c,0xe004ff6e,0xf36eff6c,0x2c562f6f
+ .long 0x00c400b8,0x2f6f00c8,0x00bc2f6f,0x00cc00c0
+ .long 0x2f6f0038,0x00c42f6f,0x003c00c8,0x2f6f0040
+ .long 0x00ccdffc,0x000000b8,0x60ffffff,0xf1a4082e
+ .long 0x00050004,0x6600000c,0x206effd8,0x4e606000
+ .long 0xfbda0c2e,0x0008ff4a,0x6600fbd0,0xf22ed0c0
+ .long 0xffdcf22e,0x9c00ff60,0x4cee0303,0xff9c3d7c
+ .long 0x30c4000a,0x3d7ce001,0xff6ef36e,0xff6c2c56
+ .long 0x2f6f00c4,0x00b82f6f,0x00c800bc,0x2f6f00cc
+ .long 0x00c02f6f,0x003800c4,0x2f6f003c,0x00c82f6f
+ .long 0x004000cc,0xdffc0000,0x00b860ff,0xfffff106
+ .long 0xe9ee00c3,0xff420c00,0x00016708,0x0c000005
+ .long 0x67344e75,0x302eff6c,0x02407fff,0x67260c40
+ .long 0x3f806e20,0x44400640,0x3f81222e,0xff70e0a9
+ .long 0x08c1001f,0x2d41ff70,0x026e8000,0xff6c006e
+ .long 0x3f80ff6c,0x4e75302e,0xff6c0240,0x7fff673a
+ .long 0x0c403c00,0x6e344a2e,0xff6c5bee,0xff6e3d40
+ .long 0xff6c4280,0x41eeff6c,0x323c3c01,0x61ff0000
+ .long 0xb156303c,0x3c004a2e,0xff6e6704,0x08c0000f
+ .long 0x08ee0007,0xff703d40,0xff6c4e75,0x082e0005
+ .long 0x000467ff,0xfffff176,0x2d680000,0xff782d68
+ .long 0x0004ff7c,0x2d680008,0xff804281,0x4e752f00
+ .long 0x4e7a0808,0x08000001,0x66000460,0x201f4e56
+ .long 0xff4048ee,0x0303ff9c,0xf22ebc00,0xff60f22e
+ .long 0xf0c0ffdc,0x2d6e0006,0xff44206e,0xff4458ae
+ .long 0xff4461ff,0xfffff152,0x2d40ff40,0x4a406b00
+ .long 0x020e02ae,0x00ff00ff,0xff640800,0x000a6618
+ .long 0x206eff44,0x43eeff6c,0x700c61ff,0xfffff0d2
+ .long 0x4a816600,0x04926048,0x206eff44,0x43eeff6c
+ .long 0x700c61ff,0xfffff0ba,0x4a816600,0x047ae9ee
+ .long 0x004fff6c,0x0c407fff,0x6726102e,0xff6f0200
+ .long 0x000f660c,0x4aaeff70,0x66064aae,0xff746710
+ .long 0x41eeff6c,0x61ff0000,0xb88cf22e,0xf080ff6c
+ .long 0x06ae0000,0x000cff44,0x41eeff6c,0x61ff0000
+ .long 0xb3c21d40,0xff4e0c00,0x0006660a,0x61ff0000
+ .long 0xb3221d40,0xff4e422e,0xff53082e,0x0005ff43
+ .long 0x6748082e,0x0004ff43,0x662ce9ee,0x0183ff42
+ .long 0x61ff0000,0xaeec41ee,0xff7861ff,0x0000b384
+ .long 0x1d40ff4f,0x0c000006,0x662061ff,0x0000b2e4
+ .long 0x1d40ff4f,0x6014082e,0x0003ff43,0x670c50ee
+ .long 0xff53082e,0x0001ff43,0x67c04280,0x102eff63
+ .long 0x122eff43,0x0241007f,0xf23c9000,0x00000000
+ .long 0xf23c8800,0x00000000,0x41eeff6c,0x43eeff78
+ .long 0x223b1530,0x000062ca,0x4ebb1930,0x000062c2
+ .long 0x102eff62,0x66404a2e,0xff53660c,0xe9ee0183
+ .long 0xff4261ff,0x0000aefa,0x2d6e0006,0xff682d6e
+ .long 0xff440006,0xf22ed0c0,0xffdcf22e,0x9c00ff60
+ .long 0x4cee0303,0xff9c4e5e,0x08170007,0x66000096
+ .long 0x60ffffff,0xee6ec02e,0xff66edc0,0x06086612
+ .long 0x082e0004,0xff6667ae,0x082e0001,0xff6267ac
+ .long 0x60340480,0x00000018,0x0c000006,0x6610082e
+ .long 0x0004ff66,0x6620082e,0x0003ff66,0x66203d7b
+ .long 0x0206ff6e,0x601ee002,0xe006e004,0xe005e003
+ .long 0xe002e001,0xe0013d7c,0xe005ff6e,0x60063d7c
+ .long 0xe003ff6e,0x2d6e0006,0xff682d6e,0xff440006
+ .long 0xf22ed0c0,0xffdcf22e,0x9c00ff60,0x4cee0303
+ .long 0xff9cf36e,0xff6c4e5e,0x08170007,0x660660ff
+ .long 0xffffede0,0x2f173f6f,0x00080004,0x3f7c2024
+ .long 0x0006f22f,0xa4000008,0x60ffffff,0xeeb80800
+ .long 0x000e6700,0x01c2082e,0x00050004,0x66164e68
+ .long 0x2d48ffd8,0x61ff0000,0x9564206e,0xffd84e60
+ .long 0x600001aa,0x422eff4a,0x41ee000c,0x2d48ffd8
+ .long 0x61ff0000,0x95480c2e,0x0008ff4a,0x67000086
+ .long 0x0c2e0004,0xff4a6600,0x0184082e,0x00070004
+ .long 0x66363dae,0x00040804,0x2daeff44,0x08063dbc
+ .long 0x00f0080a,0x41f60804,0x2d480004,0xf22ed0c0
+ .long 0xffdcf22e,0x9c00ff60,0x4cee0303,0xff9c4e5e
+ .long 0x2e5f60ff,0xffffed3c,0x3dae0004,0x08002dae
+ .long 0xff440802,0x3dbc2024,0x08062dae,0x00060808
+ .long 0x41f60800,0x2d480004,0xf22ed0c0,0xffdcf22e
+ .long 0x9c00ff60,0x4cee0303,0xff9c4e5e,0x2e5f60ff
+ .long 0xffffedf2,0x1d41000a,0x1d40000b,0xf22ed0c0
+ .long 0xffdcf22e,0x9c00ff60,0x4cee0303,0xff9c2f16
+ .long 0x2f002f01,0x2f2eff44,0x4280102e,0x000b4480
+ .long 0x082e0007,0x0004671c,0x3dae0004,0x08002dae
+ .long 0x00060808,0x2d9f0802,0x3dbc2024,0x08064876
+ .long 0x08006014,0x3dae0004,0x08042d9f,0x08063dbc
+ .long 0x00f0080a,0x48760804,0x4281122e,0x000a4a01
+ .long 0x6a0cf236,0xf080080c,0x06800000,0x000ce309
+ .long 0x6a0cf236,0xf040080c,0x06800000,0x000ce309
+ .long 0x6a0cf236,0xf020080c,0x06800000,0x000ce309
+ .long 0x6a0cf236,0xf010080c,0x06800000,0x000ce309
+ .long 0x6a0cf236,0xf008080c,0x06800000,0x000ce309
+ .long 0x6a0cf236,0xf004080c,0x06800000,0x000ce309
+ .long 0x6a0cf236,0xf002080c,0x06800000,0x000ce309
+ .long 0x6a06f236,0xf001080c,0x222f0004,0x202f0008
+ .long 0x2c6f000c,0x2e5f0817,0x000767ff,0xffffec04
+ .long 0x60ffffff,0xecf061ff,0x00009bda,0xf22ed0c0
+ .long 0xffdcf22e,0x9c00ff60,0x4cee0303,0xff9c082e
+ .long 0x00070004,0x660e2d6e,0xff440006,0x4e5e60ff
+ .long 0xffffebd0,0x2c563f6f,0x00c400c0,0x2f6f00c6
+ .long 0x00c82f6f,0x000400c2,0x3f7c2024,0x00c6dffc
+ .long 0x000000c0,0x60ffffff,0xec9c201f,0x4e56ff40
+ .long 0x48ee0303,0xff9c2d6e,0x0006ff44,0x206eff44
+ .long 0x58aeff44,0x61ffffff,0xed002d40,0xff404a40
+ .long 0x6b047010,0x60260800,0x000e6610,0xe9c014c3
+ .long 0x700c0c01,0x00076614,0x58806010,0x428061ff
+ .long 0x0000967c,0x202eff44,0x90ae0006,0x3d40000a
+ .long 0x4cee0303,0xff9c4e5e,0x518f2f00,0x3f6f000c
+ .long 0x00042f6f,0x000e0006,0x4280302f,0x00122f6f
+ .long 0x00060010,0xd1af0006,0x3f7c402c,0x000a201f
+ .long 0x60ffffff,0xebe44e7a,0x08080800,0x0001660c
+ .long 0xf22e9c00,0xff60f22e,0xd0c0ffdc,0x4cee0303
+ .long 0xff9c4e5e,0x514f2eaf,0x00083f6f,0x000c0004
+ .long 0x3f7c4008,0x00062f6f,0x00020008,0x2f7c0942
+ .long 0x8001000c,0x08170005,0x670608ef,0x0002000d
+ .long 0x60ffffff,0xebd64fee,0xff404e7a,0x18080801
+ .long 0x0001660c,0xf22ed0c0,0xffdcf22f,0x9c000020
+ .long 0x2c562f6f,0x00c400bc,0x3f6f00c8,0x00c03f7c
+ .long 0x400800c2,0x2f4800c4,0x3f4000c8,0x3f7c0001
+ .long 0x00ca4cef,0x0303005c,0xdefc00bc,0x60a64e56
+ .long 0xff40f32e,0xff6c48ee,0x0303ff9c,0xf22ebc00
+ .long 0xff60f22e,0xf0c0ffdc,0x2d6eff68,0xff44206e
+ .long 0xff4458ae,0xff4461ff,0xffffebce,0x2d40ff40
+ .long 0x0800000d,0x662841ee,0xff6c61ff,0xfffff1ea
+ .long 0xf22ed0c0,0xffdcf22e,0x9c00ff60,0x4cee0303
+ .long 0xff9cf36e,0xff6c4e5e,0x60ffffff,0xea94322e
+ .long 0xff6c0241,0x7fff0c41,0x7fff661a,0x4aaeff74
+ .long 0x660c222e,0xff700281,0x7fffffff,0x67082d6e
+ .long 0xff70ff54,0x6012223c,0x7fffffff,0x4a2eff6c
+ .long 0x6a025281,0x2d41ff54,0xe9c004c3,0x122eff41
+ .long 0x307b0206,0x4efb8802,0x006c0000,0x0000ff98
+ .long 0x003e0000,0x00100000,0x102eff54,0x0c010007
+ .long 0x6f16206e,0x000c61ff,0xffffeb86,0x4a8166ff
+ .long 0x0000bca8,0x6000ff6a,0x02410007,0x61ff0000
+ .long 0xa8046000,0xff5c302e,0xff540c01,0x00076f16
+ .long 0x206e000c,0x61ffffff,0xeb6e4a81,0x66ff0000
+ .long 0xbc886000,0xff3c0241,0x000761ff,0x0000a79a
+ .long 0x6000ff2e,0x202eff54,0x0c010007,0x6f16206e
+ .long 0x000c61ff,0xffffeb56,0x4a8166ff,0x0000bc68
+ .long 0x6000ff0e,0x02410007,0x61ff0000,0xa7306000
+ .long 0xff004e56,0xff40f32e,0xff6c48ee,0x0303ff9c
+ .long 0xf22ebc00,0xff60f22e,0xf0c0ffdc,0x2d6eff68
+ .long 0xff44206e,0xff4458ae,0xff4461ff,0xffffea8a
+ .long 0x2d40ff40,0x0800000d,0x6600002a,0x41eeff6c
+ .long 0x61ffffff,0xf0a4f22e,0xd0c0ffdc,0xf22e9c00
+ .long 0xff604cee,0x0303ff9c,0xf36eff6c,0x4e5e60ff
+ .long 0xffffe964,0xe9c004c3,0x122eff41,0x307b0206
+ .long 0x4efb8802,0x007400a6,0x015a0000,0x00420104
+ .long 0x00100000,0x102eff70,0x08c00006,0x0c010007
+ .long 0x6f16206e,0x000c61ff,0xffffea76,0x4a8166ff
+ .long 0x0000bb98,0x6000ffa0,0x02410007,0x61ff0000
+ .long 0xa6f46000,0xff92302e,0xff7008c0,0x000e0c01
+ .long 0x00076f16,0x206e000c,0x61ffffff,0xea5a4a81
+ .long 0x66ff0000,0xbb746000,0xff6e0241,0x000761ff
+ .long 0x0000a686,0x6000ff60,0x202eff70,0x08c0001e
+ .long 0x0c010007,0x6f16206e,0x000c61ff,0xffffea3e
+ .long 0x4a8166ff,0x0000bb50,0x6000ff3c,0x02410007
+ .long 0x61ff0000,0xa6186000,0xff2e0c01,0x00076f2e
+ .long 0x202eff6c,0x02808000,0x00000080,0x7fc00000
+ .long 0x222eff70,0xe0898081,0x206e000c,0x61ffffff
+ .long 0xe9fc4a81,0x66ff0000,0xbb0e6000,0xfefa202e
+ .long 0xff6c0280,0x80000000,0x00807fc0,0x00002f01
+ .long 0x222eff70,0xe0898081,0x221f0241,0x000761ff
+ .long 0x0000a5ba,0x6000fed0,0x202eff6c,0x02808000
+ .long 0x00000080,0x7ff80000,0x222eff70,0x2d40ff84
+ .long 0x700be0a9,0x83aeff84,0x222eff70,0x02810000
+ .long 0x07ffe0b9,0x2d41ff88,0x222eff74,0xe0a983ae
+ .long 0xff8841ee,0xff84226e,0x000c7008,0x61ffffff
+ .long 0xe8cc4a81,0x66ff0000,0xba9c6000,0xfe7a422e
+ .long 0xff4a3d6e,0xff6cff84,0x426eff86,0x202eff70
+ .long 0x08c0001e,0x2d40ff88,0x2d6eff74,0xff8c082e
+ .long 0x00050004,0x66384e68,0x2d48ffd8,0x2d56ffd4
+ .long 0x61ff0000,0x98922248,0x2d48000c,0x206effd8
+ .long 0x4e602cae,0xffd441ee,0xff84700c,0x61ffffff
+ .long 0xe86c4a81,0x66ff0000,0xba4a6000,0xfe1a2d56
+ .long 0xffd461ff,0x00009860,0x22482d48,0x000c2cae
+ .long 0xffd40c2e,0x0008ff4a,0x66ccf22e,0xd0c0ffdc
+ .long 0xf22e9c00,0xff604cee,0x0303ff9c,0xf36eff6c
+ .long 0x2c6effd4,0x2f6f00c4,0x00b82f6f,0x00c800bc
+ .long 0x2f6f00cc,0x00c02f6f,0x004400c4,0x2f6f0048
+ .long 0x00c82f6f,0x004c00cc,0xdffc0000,0x00b860ff
+ .long 0xffffe734,0x4e56ff40,0xf32eff6c,0x48ee0303
+ .long 0xff9cf22e,0xbc00ff60,0xf22ef0c0,0xffdc2d6e
+ .long 0xff68ff44,0x206eff44,0x58aeff44,0x61ffffff
+ .long 0xe7f82d40,0xff400800,0x000d6600,0x0106e9c0
+ .long 0x04c36622,0x0c6e401e,0xff6c661a,0xf23c9000
+ .long 0x00000000,0xf22e4000,0xff70f22e,0x6800ff6c
+ .long 0x3d7ce001,0xff6e41ee,0xff6c61ff,0xffffedea
+ .long 0x02ae00ff,0x01ffff64,0xf23c9000,0x00000000
+ .long 0xf23c8800,0x00000000,0xe9ee1006,0xff420c01
+ .long 0x00176700,0x009641ee,0xff6c61ff,0x0000aa84
+ .long 0x1d40ff4e,0x082e0005,0xff43672e,0x082e0004
+ .long 0xff436626,0xe9ee0183,0xff4261ff,0x0000a5c2
+ .long 0x41eeff78,0x61ff0000,0xaa5a0c00,0x00066606
+ .long 0x61ff0000,0xa9be1d40,0xff4f4280,0x102eff63
+ .long 0x122eff43,0x0241007f,0x41eeff6c,0x43eeff78
+ .long 0x223b1530,0x000059ca,0x4ebb1930,0x000059c2
+ .long 0xe9ee0183,0xff4261ff,0x0000a606,0xf22ed0c0
+ .long 0xffdcf22e,0x9c00ff60,0x4cee0303,0xff9cf36e
+ .long 0xff6c4e5e,0x60ffffff,0xe5cc4280,0x102eff63
+ .long 0x122eff43,0x02810000,0x007f61ff,0x000043ce
+ .long 0x60be1d7c,0x0000ff4e,0x4280102e,0xff6302ae
+ .long 0xffff00ff,0xff6441ee,0xff6c61ff,0x00009be4
+ .long 0x60aa4e56,0xff40f32e,0xff6c48ee,0x0303ff9c
+ .long 0xf22ebc00,0xff60f22e,0xf0c0ffdc,0x2d6eff68
+ .long 0xff44206e,0xff4458ae,0xff4461ff,0xffffe69a
+ .long 0x2d40ff40,0x41eeff6c,0x61ffffff,0xecbcf22e
+ .long 0xd0c0ffdc,0xf22e9c00,0xff604cee,0x0303ff9c
+ .long 0xf36eff6c,0x4e5e60ff,0xffffe592,0x0c6f202c
+ .long 0x000667ff,0x000000aa,0x0c6f402c,0x000667ff
+ .long 0xffffe5a6,0x4e56ff40,0x48ee0303,0xff9c2d6e
+ .long 0x0006ff44,0x206eff44,0x58aeff44,0x61ffffff
+ .long 0xe638e9c0,0x100a0c41,0x03c86664,0xe9c01406
+ .long 0x0c010017,0x665a4e7a,0x08080800,0x0001672a
+ .long 0x4cee0303,0xff9c4e5e,0x518f3eaf,0x00082f6f
+ .long 0x000a0002,0x3f7c402c,0x00062f6f,0x0002000c
+ .long 0x58af0002,0x60ffffff,0xe5404cee,0x0303ff9c
+ .long 0x4e5ef22f,0x84000002,0x58af0002,0x2f172f6f
+ .long 0x00080004,0x1f7c0020,0x000660ff,0x00000012
+ .long 0x4cee0303,0xff9c4e5e,0x60ffffff,0xe4f64e56
+ .long 0xff4048ee,0x0303ff9c,0xf22ebc00,0xff60f22e
+ .long 0xf0c0ffdc,0x082e0005,0x00046608,0x4e682d48
+ .long 0xffd8600c,0x41ee0010,0x2d48ffd8,0x2d48ffd4
+ .long 0x2d6eff68,0xff44206e,0xff4458ae,0xff4461ff
+ .long 0xffffe576,0x2d40ff40,0xf23c9000,0x00000000
+ .long 0xf23c8800,0x00000000,0x422eff4a,0x08000016
+ .long 0x66000182,0x422eff53,0x02ae00ff,0x00ffff64
+ .long 0xe9c01406,0x0c010017,0x670000be,0x61ff0000
+ .long 0x95fc4280,0x102eff63,0x122eff43,0x0241003f
+ .long 0xe749822e,0xff4e43ee,0xff7841ee,0xff6c323b
+ .long 0x132002b2,0x4ebb1120,0x02ac102e,0xff626600
+ .long 0x00a2e9ee,0x0183ff42,0x61ff0000,0xa3e4f22e
+ .long 0xd0c0ffdc,0xf22e9c00,0xff604cee,0x0303ff9c
+ .long 0x0c2e0004,0xff4a672a,0x0c2e0008,0xff4a6722
+ .long 0x4e5e0817,0x000767ff,0xffffe358,0xf327f22f
+ .long 0xa4000014,0xf35f3f7c,0x20240006,0x60ffffff
+ .long 0xe434082e,0x00050004,0x660c2f08,0x206effd8
+ .long 0x4e60205f,0x60ca2f00,0x202effd8,0x90aeffd4
+ .long 0x2dae0008,0x08082dae,0x00040804,0x3d400004
+ .long 0x201f4e5e,0xded760aa,0x4280102e,0xff63122e
+ .long 0xff430281,0x0000007f,0x61ff0000,0x41506000
+ .long 0xff5ac02e,0xff66edc0,0x06086616,0x082e0004
+ .long 0xff666700,0xff4e082e,0x0001ff62,0x6700ff44
+ .long 0x603e0480,0x00000018,0x0c000006,0x6610082e
+ .long 0x0004ff66,0x662a082e,0x0003ff66,0x66302f00
+ .long 0x61ffffff,0xf1ee201f,0x3d7b0206,0xff6e602a
+ .long 0xe002e006,0xe004e005,0xe003e002,0xe001e001
+ .long 0x61ffffff,0xf1ce3d7c,0xe005ff6e,0x600c61ff
+ .long 0xfffff1c0,0x3d7ce003,0xff6ef22e,0xd0c0ffdc
+ .long 0xf22e9c00,0xff604cee,0x0303ff9c,0xf36eff6c
+ .long 0x6000feee,0xe9c01283,0x0c010001,0x67000056
+ .long 0x0c010007,0x66000078,0xe9c01343,0x0c010002
+ .long 0x6d00006c,0x61ff0000,0x82780c2e,0x0002ff4a
+ .long 0x670000d2,0x0c2e0001,0xff4a6600,0x01002d6e
+ .long 0xff68000c,0x3d7c201c,0x000af22e,0xd0c0ffdc
+ .long 0xf22e9c00,0xff604cee,0x0303ff9c,0x4e5e60ff
+ .long 0xffffe2dc,0x206eff44,0x54aeff44,0x61ffffff
+ .long 0xe3524a81,0x6600047c,0x48c061ff,0x00007e60
+ .long 0x0c2e0002,0xff4a6700,0x007c6000,0x00b061ff
+ .long 0x00008562,0x0c2e0002,0xff4a6700,0x0068082e
+ .long 0x00050004,0x660a206e,0xffd84e60,0x6000008e
+ .long 0x0c2e0008,0xff4a6600,0x0084f22e,0xd0c0ffdc
+ .long 0xf22e9c00,0xff604cee,0x0303ff9c,0x4e5e0817
+ .long 0x00076612,0x558f2eaf,0x00022f6f,0x00060004
+ .long 0x60ffffff,0xe17e558f,0x2eaf0002,0x3f6f0006
+ .long 0x00043f7c,0x20240006,0xf22fa400,0x000860ff
+ .long 0xffffe252,0x3d7c00c0,0x000e2d6e,0xff68000a
+ .long 0x3d6e0004,0x00083d7c,0xe000ff6e,0xf22ed0c0
+ .long 0xffdcf22e,0x9c00ff60,0x4cee0303,0xff9cf36e
+ .long 0xff6c4e5e,0x588f60ff,0xffffe180,0xf22ed0c0
+ .long 0xffdcf22e,0x9c00ff60,0x4cee0303,0xff9c4e5e
+ .long 0x08170007,0x660660ff,0xffffe108,0xf22fa400
+ .long 0x00081f7c,0x00240007,0x60ffffff,0xe1e84afc
+ .long 0x01c00000,0x00000000,0x00000000,0x00000000
+ .long 0x00000000,0x00000000,0x00000000,0x00000000
+ .long 0x000028a4,0x4b1e4b4c,0x4f4c2982,0x4f3c0000
+ .long 0x00000000,0x00000000,0x00000000,0x00000000
+ .long 0x00000000,0x00000000,0x00000000,0x00000000
+ .long 0x00000000,0x00000000,0x00000000,0x00000000
+ .long 0x000035c6,0x4b1e4b82,0x4f4c371a,0x4f3c0000
+ .long 0x00000000,0x00000000,0x00000000,0x00000000
+ .long 0x000024b0,0x4b1e4b8c,0x4f4c2766,0x4f3c0000
+ .long 0x00002988,0x4b1e4b94,0x4f4c2af0,0x4f3c0000
+ .long 0x00001ab8,0x4b1e4bd0,0x4f4c1cf6,0x4f3c0000
+ .long 0x00000000,0x00000000,0x00000000,0x00000000
+ .long 0x00001cfc,0x4b1e4744,0x4f4c1daa,0x4f3c0000
+ .long 0x00003720,0x4b1e4744,0x4f4c37a2,0x4f3c0000
+ .long 0x00000468,0x4b1e4744,0x4f4c064c,0x4f3c0000
+ .long 0x00000f2a,0x4b1e4744,0x4f4c108e,0x4f3c0000
+ .long 0x000022e0,0x4b9a4b7a,0x4f4c248c,0x4f3c0000
+ .long 0x00003d02,0x4b9a4b7a,0x4f4c3ddc,0x4f3c0000
+ .long 0x00003dfa,0x4b9a4b7a,0x4f4c3f2a,0x4f3c0000
+ .long 0x00000000,0x00000000,0x00000000,0x00000000
+ .long 0x00003386,0x47324b82,0x4f4c3538,0x4f3c0000
+ .long 0x000037c8,0x47324b82,0x4f4c37f8,0x4f3c0000
+ .long 0x00003818,0x47324b82,0x4f4c3872,0x4f3c0000
+ .long 0x00000000,0x00000000,0x00000000,0x00000000
+ .long 0x00000000,0x00000000,0x00000000,0x00000000
+ .long 0x000027e6,0x4b9a4b52,0x4f4c288a,0x4f3c0000
+ .long 0x00000000,0x00000000,0x00000000,0x00000000
+ .long 0x00000000,0x00000000,0x00000000,0x00000000
+ .long 0x00001db0,0x4bd64744,0x4f4c1e40,0x4f3c0000
+ .long 0x00000472,0x4b9a4744,0x4f4c0652,0x4f3c0000
+ .long 0x0000276c,0x4b1e4744,0x4f4c2788,0x4f3c0000
+ .long 0x000027a0,0x4b1e4744,0x4f4c27ce,0x4f3c0000
+ .long 0x00000000,0x00000000,0x00000000,0x00000000
+ .long 0x00004ca4,0x4cda4d12,0x4ee24ca4,0x4ef40000
+ .long 0x00000000,0x00000000,0x00000000,0x00000000
+ .long 0x00000000,0x00000000,0x00000000,0x00000000
+ .long 0x00000000,0x00000000,0x00000000,0x00000000
+ .long 0x00004dac,0x4de24e1a,0x4ee24dac,0x4ef40000
+ .long 0x00004e4e,0x4e864ebe,0x4ee24e4e,0x4ef40000
+ .long 0x00000000,0x00000000,0x00000000,0x00000000
+ .long 0x00000000,0x00000000,0x00000000,0x00000000
+ .long 0x00000000,0x00000000,0x00000000,0x00000000
+ .long 0x00000000,0x00000000,0x00000000,0x00000000
+ .long 0x00000000,0x00000000,0x00000000,0x00000000
+ .long 0x00000000,0x00000000,0x00000000,0x00000000
+ .long 0x00000000,0x00000000,0x00000000,0x00000000
+ .long 0x00000000,0x00000000,0x00000000,0x00000000
+ .long 0x00000000,0x00000000,0x00000000,0x00000000
+ .long 0x00000660,0x4bf24c20,0x4c3008f6,0x4c400000
+ .long 0x00000660,0x4bf24c20,0x4c3008f6,0x4c400000
+ .long 0x00000660,0x4bf24c20,0x4c3008f6,0x4c400000
+ .long 0x00000660,0x4bf24c20,0x4c3008f6,0x4c400000
+ .long 0x00000660,0x4bf24c20,0x4c3008f6,0x4c400000
+ .long 0x00000660,0x4bf24c20,0x4c3008f6,0x4c400000
+ .long 0x00000660,0x4bf24c20,0x4c3008f6,0x4c400000
+ .long 0x00000660,0x4bf24c20,0x4c3008f6,0x4c400000
+ .long 0x00004cee,0x0303ff9c,0xf22e9c00,0xff60f22e
+ .long 0xd0c0ffdc,0x2d6eff68,0x00064e5e,0x2f173f6f
+ .long 0x00080004,0x3f7c4008,0x00062f6f,0x00020008
+ .long 0x2f7c0942,0x8001000c,0x08170005,0x670608ef
+ .long 0x0002000d,0x60ffffff,0xde32bd6a,0xaa77ccc9
+ .long 0x94f53de6,0x12097aae,0x8da1be5a,0xe6452a11
+ .long 0x8ae43ec7,0x1de3a534,0x1531bf2a,0x01a01a01
+ .long 0x8b590000,0x00000000,0x00003ff8,0x00008888
+ .long 0x88888888,0x59af0000,0x0000bffc,0x0000aaaa
+ .long 0xaaaaaaaa,0xaa990000,0x00003d2a,0xc4d0d601
+ .long 0x1ee3bda9,0x396f9f45,0xac193e21,0xeed90612
+ .long 0xc972be92,0x7e4fb79d,0x9fcf3efa,0x01a01a01
+ .long 0xd4230000,0x00000000,0x0000bff5,0x0000b60b
+ .long 0x60b60b61,0xd4380000,0x00003ffa,0x0000aaaa
+ .long 0xaaaaaaaa,0xab5ebf00,0x00002d7c,0x00000000
+ .long 0xff5c6008,0x2d7c0000,0x0001ff5c,0xf2104800
+ .long 0xf22e6800,0xff842210,0x32280004,0x02817fff
+ .long 0xffff0c81,0x3fd78000,0x6c046000,0x01780c81
+ .long 0x4004bc7e,0x6d046000,0x0468f200,0x0080f23a
+ .long 0x54a3de7e,0x43fb0170,0x00000866,0xf22e6080
+ .long 0xff58222e,0xff58e981,0xd3c1f219,0x4828f211
+ .long 0x4428222e,0xff58d2ae,0xff5ce299,0x0c810000
+ .long 0x00006d00,0x0088f227,0xe00cf22e,0x6800ff84
+ .long 0xf2000023,0xf23a5580,0xfed2f23a,0x5500fed4
+ .long 0xf2000080,0xf20004a3,0xe2990281,0x80000000
+ .long 0xb3aeff84,0xf20005a3,0xf2000523,0xf23a55a2
+ .long 0xfebaf23a,0x5522febc,0xf20005a3,0xf2000523
+ .long 0xf23a55a2,0xfeb6f23a,0x4922fec0,0xf2000ca3
+ .long 0xf2000123,0xf23a48a2,0xfec2f22e,0x4823ff84
+ .long 0xf20008a2,0xf2000423,0xf21fd030,0xf2009000
+ .long 0xf22e4822,0xff8460ff,0x00004364,0xf227e00c
+ .long 0xf2000023,0xf23a5500,0xfea2f23a,0x5580fea4
+ .long 0xf2000080,0xf20004a3,0xf22e6800,0xff84e299
+ .long 0x02818000,0x0000f200,0x0523b3ae,0xff840281
+ .long 0x80000000,0xf20005a3,0x00813f80,0x00002d41
+ .long 0xff54f23a,0x5522fe74,0xf23a55a2,0xfe76f200
+ .long 0x0523f200,0x05a3f23a,0x5522fe70,0xf23a49a2
+ .long 0xfe7af200,0x0523f200,0x0ca3f23a,0x4922fe7c
+ .long 0xf23a44a2,0xfe82f200,0x0823f200,0x0422f22e
+ .long 0x4823ff84,0xf21fd030,0xf2009000,0xf22e4422
+ .long 0xff5460ff,0x000042c8,0x0c813fff,0x80006eff
+ .long 0x00000300,0x222eff5c,0x0c810000,0x00006e14
+ .long 0xf2009000,0x123c0003,0xf22e4800,0xff8460ff
+ .long 0x0000428e,0xf23c4400,0x3f800000,0xf2009000
+ .long 0xf23c4422,0x80800000,0x60ff0000,0x428a60ff
+ .long 0x00004110,0xf23c4400,0x3f800000,0x60ff0000
+ .long 0x42762d7c,0x00000004,0xff5cf210,0x4800f22e
+ .long 0x6800ff84,0x22103228,0x00040281,0x7fffffff
+ .long 0x0c813fd7,0x80006c04,0x60000240,0x0c814004
+ .long 0xbc7e6d04,0x6000027a,0xf2000080,0xf23a54a3
+ .long 0xdc9043fb,0x01700000,0x0678f22e,0x6080ff58
+ .long 0x222eff58,0xe981d3c1,0xf2194828,0xf2114428
+ .long 0x222eff58,0xe2990c81,0x00000000,0x6c000106
+ .long 0xf227e004,0xf22e6800,0xff84f200,0x0023f23a
+ .long 0x5480fce8,0xf23a5500,0xfd32f200,0x00a3f200
+ .long 0x01232f02,0x2401e29a,0x02828000,0x0000b382
+ .long 0x02828000,0x0000f23a,0x54a2fcc8,0xf23a5522
+ .long 0xfd12f200,0x00a3b5ae,0xff84241f,0xf2000123
+ .long 0xe2990281,0x80000000,0x2d7c3f80,0x0000ff54
+ .long 0xb3aeff54,0xf23a54a2,0xfca2f23a,0x5522fcec
+ .long 0xf20000a3,0xf2000123,0xf22e6800,0xff90f23a
+ .long 0x54a2fc90,0xb3aeff90,0xf23a5522,0xfcd6f200
+ .long 0x00a3f200,0x0123f23a,0x54a2fc80,0xf23a5522
+ .long 0xfccaf200,0x00a3f200,0x0123f23a,0x48a2fc7c
+ .long 0xf23a4922,0xfcc6f200,0x00a3f200,0x0123f23a
+ .long 0x48a2fc78,0xf23a4922,0xfcc2f200,0x00a3f200
+ .long 0x0823f22e,0x48a3ff84,0xf23a4422,0xfcbaf22e
+ .long 0x4823ff90,0xf21fd020,0xf2009000,0xf22e48a2
+ .long 0xff8461ff,0x0000448e,0xf22e4422,0xff5460ff
+ .long 0x000040fc,0xf227e004,0xf22e6800,0xff84f200
+ .long 0x0023f23a,0x5480fc34,0xf23a5500,0xfbdef200
+ .long 0x00a3f22e,0x6800ff90,0xf2000123,0xe2990281
+ .long 0x80000000,0xf23a54a2,0xfc1af23a,0x5522fbc4
+ .long 0xb3aeff84,0xb3aeff90,0xf20000a3,0x00813f80
+ .long 0x00002d41,0xff54f200,0x0123f23a,0x54a2fbfc
+ .long 0xf23a5522,0xfba6f200,0x00a3f200,0x0123f23a
+ .long 0x54a2fbf0,0xf23a5522,0xfb9af200,0x00a3f200
+ .long 0x0123f23a,0x54a2fbe4,0xf23a5522,0xfb8ef200
+ .long 0x00a3f200,0x0123f23a,0x48a2fbe0,0xf23a4922
+ .long 0xfb8af200,0x00a3f200,0x0123f23a,0x48a2fbdc
+ .long 0xf23a4922,0xfb86f200,0x00a3f200,0x0823f23a
+ .long 0x44a2fbd4,0xf22e4823,0xff84f22e,0x48a3ff90
+ .long 0xf21fd020,0xf2009000,0xf22e44a2,0xff5461ff
+ .long 0x000043a2,0xf22e4822,0xff8460ff,0x00004010
+ .long 0x0c813fff,0x80006e00,0x0048f23c,0x44803f80
+ .long 0x0000f200,0x9000f23c,0x44a80080,0x000061ff
+ .long 0x00004372,0xf200b000,0x123c0003,0xf22e4800
+ .long 0xff8460ff,0x00003fca,0x2f00f23c,0x44803f80
+ .long 0x000061ff,0x0000434e,0x201f60ff,0x00003e54
+ .long 0xf227e03c,0x2f02f23c,0x44800000,0x00000c81
+ .long 0x7ffeffff,0x66523d7c,0x7ffeff84,0x2d7cc90f
+ .long 0xdaa2ff88,0x42aeff8c,0x3d7c7fdc,0xff902d7c
+ .long 0x85a308d3,0xff9442ae,0xff98f200,0x003af294
+ .long 0x000e002e,0x0080ff84,0x002e0080,0xff90f22e
+ .long 0x4822ff84,0xf2000080,0xf22e4822,0xff90f200
+ .long 0x00a8f22e,0x48a2ff90,0xf22e6800,0xff84322e
+ .long 0xff842241,0x02810000,0x7fff0481,0x00003fff
+ .long 0x0c810000,0x001c6f0e,0x04810000,0x001b1d7c
+ .long 0x0000ff58,0x60084281,0x1d7c0001,0xff58243c
+ .long 0x00003ffe,0x94812d7c,0xa2f9836e,0xff882d7c
+ .long 0x4e44152a,0xff8c3d42,0xff84f200,0x0100f22e
+ .long 0x4923ff84,0x24094842,0x02828000,0x00000082
+ .long 0x5f000000,0x2d42ff54,0xf22e4522,0xff54f22e
+ .long 0x4528ff54,0x24010682,0x00003fff,0x3d42ff84
+ .long 0x2d7cc90f,0xdaa2ff88,0x42aeff8c,0x06810000
+ .long 0x3fdd3d41,0xff902d7c,0x85a308d3,0xff9442ae
+ .long 0xff98122e,0xff58f200,0x0a00f22e,0x4a23ff84
+ .long 0xf2000a80,0xf22e4aa3,0xff90f200,0x1180f200
+ .long 0x15a2f200,0x0e28f200,0x0c28f200,0x1622f200
+ .long 0x0180f200,0x10a8f200,0x04220c01,0x00006e00
+ .long 0x000ef200,0x01a8f200,0x0ca26000,0xff0cf22e
+ .long 0x6100ff58,0x241ff21f,0xd03c222e,0xff5c0c81
+ .long 0x00000004,0x6d00fa4c,0x6000fc36,0x3ea0b759
+ .long 0xf50f8688,0xbef2baa5,0xa8924f04,0xbf346f59
+ .long 0xb39ba65f,0x00000000,0x00000000,0x3ff60000
+ .long 0xe073d3fc,0x199c4a00,0x00000000,0x3ff90000
+ .long 0xd23cd684,0x15d95fa1,0x00000000,0xbffc0000
+ .long 0x8895a6c5,0xfb423bca,0x00000000,0xbffd0000
+ .long 0xeef57e0d,0xa84bc8ce,0x00000000,0x3ffc0000
+ .long 0xa2f9836e,0x4e44152a,0x00000000,0x40010000
+ .long 0xc90fdaa2,0x00000000,0x00000000,0x3fdf0000
+ .long 0x85a308d4,0x00000000,0x00000000,0xc0040000
+ .long 0xc90fdaa2,0x2168c235,0x21800000,0xc0040000
+ .long 0xc2c75bcd,0x105d7c23,0xa0d00000,0xc0040000
+ .long 0xbc7edcf7,0xff523611,0xa1e80000,0xc0040000
+ .long 0xb6365e22,0xee46f000,0x21480000,0xc0040000
+ .long 0xafeddf4d,0xdd3ba9ee,0xa1200000,0xc0040000
+ .long 0xa9a56078,0xcc3063dd,0x21fc0000,0xc0040000
+ .long 0xa35ce1a3,0xbb251dcb,0x21100000,0xc0040000
+ .long 0x9d1462ce,0xaa19d7b9,0xa1580000,0xc0040000
+ .long 0x96cbe3f9,0x990e91a8,0x21e00000,0xc0040000
+ .long 0x90836524,0x88034b96,0x20b00000,0xc0040000
+ .long 0x8a3ae64f,0x76f80584,0xa1880000,0xc0040000
+ .long 0x83f2677a,0x65ecbf73,0x21c40000,0xc0030000
+ .long 0xfb53d14a,0xa9c2f2c2,0x20000000,0xc0030000
+ .long 0xeec2d3a0,0x87ac669f,0x21380000,0xc0030000
+ .long 0xe231d5f6,0x6595da7b,0xa1300000,0xc0030000
+ .long 0xd5a0d84c,0x437f4e58,0x9fc00000,0xc0030000
+ .long 0xc90fdaa2,0x2168c235,0x21000000,0xc0030000
+ .long 0xbc7edcf7,0xff523611,0xa1680000,0xc0030000
+ .long 0xafeddf4d,0xdd3ba9ee,0xa0a00000,0xc0030000
+ .long 0xa35ce1a3,0xbb251dcb,0x20900000,0xc0030000
+ .long 0x96cbe3f9,0x990e91a8,0x21600000,0xc0030000
+ .long 0x8a3ae64f,0x76f80584,0xa1080000,0xc0020000
+ .long 0xfb53d14a,0xa9c2f2c2,0x1f800000,0xc0020000
+ .long 0xe231d5f6,0x6595da7b,0xa0b00000,0xc0020000
+ .long 0xc90fdaa2,0x2168c235,0x20800000,0xc0020000
+ .long 0xafeddf4d,0xdd3ba9ee,0xa0200000,0xc0020000
+ .long 0x96cbe3f9,0x990e91a8,0x20e00000,0xc0010000
+ .long 0xfb53d14a,0xa9c2f2c2,0x1f000000,0xc0010000
+ .long 0xc90fdaa2,0x2168c235,0x20000000,0xc0010000
+ .long 0x96cbe3f9,0x990e91a8,0x20600000,0xc0000000
+ .long 0xc90fdaa2,0x2168c235,0x1f800000,0xbfff0000
+ .long 0xc90fdaa2,0x2168c235,0x1f000000,0x00000000
+ .long 0x00000000,0x00000000,0x00000000,0x3fff0000
+ .long 0xc90fdaa2,0x2168c235,0x9f000000,0x40000000
+ .long 0xc90fdaa2,0x2168c235,0x9f800000,0x40010000
+ .long 0x96cbe3f9,0x990e91a8,0xa0600000,0x40010000
+ .long 0xc90fdaa2,0x2168c235,0xa0000000,0x40010000
+ .long 0xfb53d14a,0xa9c2f2c2,0x9f000000,0x40020000
+ .long 0x96cbe3f9,0x990e91a8,0xa0e00000,0x40020000
+ .long 0xafeddf4d,0xdd3ba9ee,0x20200000,0x40020000
+ .long 0xc90fdaa2,0x2168c235,0xa0800000,0x40020000
+ .long 0xe231d5f6,0x6595da7b,0x20b00000,0x40020000
+ .long 0xfb53d14a,0xa9c2f2c2,0x9f800000,0x40030000
+ .long 0x8a3ae64f,0x76f80584,0x21080000,0x40030000
+ .long 0x96cbe3f9,0x990e91a8,0xa1600000,0x40030000
+ .long 0xa35ce1a3,0xbb251dcb,0xa0900000,0x40030000
+ .long 0xafeddf4d,0xdd3ba9ee,0x20a00000,0x40030000
+ .long 0xbc7edcf7,0xff523611,0x21680000,0x40030000
+ .long 0xc90fdaa2,0x2168c235,0xa1000000,0x40030000
+ .long 0xd5a0d84c,0x437f4e58,0x1fc00000,0x40030000
+ .long 0xe231d5f6,0x6595da7b,0x21300000,0x40030000
+ .long 0xeec2d3a0,0x87ac669f,0xa1380000,0x40030000
+ .long 0xfb53d14a,0xa9c2f2c2,0xa0000000,0x40040000
+ .long 0x83f2677a,0x65ecbf73,0xa1c40000,0x40040000
+ .long 0x8a3ae64f,0x76f80584,0x21880000,0x40040000
+ .long 0x90836524,0x88034b96,0xa0b00000,0x40040000
+ .long 0x96cbe3f9,0x990e91a8,0xa1e00000,0x40040000
+ .long 0x9d1462ce,0xaa19d7b9,0x21580000,0x40040000
+ .long 0xa35ce1a3,0xbb251dcb,0xa1100000,0x40040000
+ .long 0xa9a56078,0xcc3063dd,0xa1fc0000,0x40040000
+ .long 0xafeddf4d,0xdd3ba9ee,0x21200000,0x40040000
+ .long 0xb6365e22,0xee46f000,0xa1480000,0x40040000
+ .long 0xbc7edcf7,0xff523611,0x21e80000,0x40040000
+ .long 0xc2c75bcd,0x105d7c23,0x20d00000,0x40040000
+ .long 0xc90fdaa2,0x2168c235,0xa1800000,0xf2104800
+ .long 0x22103228,0x00040281,0x7fffffff,0x0c813fd7
+ .long 0x80006c04,0x60000134,0x0c814004,0xbc7e6d04
+ .long 0x60000144,0xf2000080,0xf23a54a3,0xd3d443fa
+ .long 0xfdbcf201,0x6080e981,0xd3c1f219,0x4828f211
+ .long 0x4428ea99,0x02818000,0x0000f227,0xe00c0c81
+ .long 0x00000000,0x6d000072,0xf2000080,0xf20004a3
+ .long 0xf23a5580,0xfaf8f23a,0x5500fafa,0xf20005a3
+ .long 0xf2000523,0xf23a55a2,0xfaf4f23a,0x4922fafe
+ .long 0xf20005a3,0xf2000523,0xf23a49a2,0xfb00f23a
+ .long 0x4922fb0a,0xf20005a3,0xf2000523,0xf23a49a2
+ .long 0xfb0cf200,0x0123f200,0x0ca3f200,0x0822f23c
+ .long 0x44a23f80,0x0000f21f,0xd030f200,0x9000f200
+ .long 0x042060ff,0x000038d8,0xf2000080,0xf2000023
+ .long 0xf23a5580,0xfa88f23a,0x5500fa8a,0xf20001a3
+ .long 0xf2000123,0xf23a55a2,0xfa84f23a,0x4922fa8e
+ .long 0xf20001a3,0xf2000123,0xf23a49a2,0xfa90f23a
+ .long 0x4922fa9a,0xf20001a3,0xf2000123,0xf23a49a2
+ .long 0xfa9cf200,0x0523f200,0x0c23f200,0x08a2f23c
+ .long 0x44223f80,0x0000f21f,0xd030f227,0x68800a97
+ .long 0x80000000,0xf2009000,0xf21f4820,0x60ff0000
+ .long 0x385e0c81,0x3fff8000,0x6e1cf227,0x6800f200
+ .long 0x9000123c,0x0003f21f,0x480060ff,0x00003832
+ .long 0x60ff0000,0x36cef227,0xe03c2f02,0xf23c4480
+ .long 0x00000000,0x0c817ffe,0xffff6652,0x3d7c7ffe
+ .long 0xff842d7c,0xc90fdaa2,0xff8842ae,0xff8c3d7c
+ .long 0x7fdcff90,0x2d7c85a3,0x08d3ff94,0x42aeff98
+ .long 0xf200003a,0xf294000e,0x002e0080,0xff84002e
+ .long 0x0080ff90,0xf22e4822,0xff84f200,0x0080f22e
+ .long 0x4822ff90,0xf20000a8,0xf22e48a2,0xff90f22e
+ .long 0x6800ff84,0x322eff84,0x22410281,0x00007fff
+ .long 0x04810000,0x3fff0c81,0x0000001c,0x6f0e0481
+ .long 0x0000001b,0x1d7c0000,0xff586008,0x42811d7c
+ .long 0x0001ff58,0x243c0000,0x3ffe9481,0x2d7ca2f9
+ .long 0x836eff88,0x2d7c4e44,0x152aff8c,0x3d42ff84
+ .long 0xf2000100,0xf22e4923,0xff842409,0x48420282
+ .long 0x80000000,0x00825f00,0x00002d42,0xff54f22e
+ .long 0x4522ff54,0xf22e4528,0xff542401,0x06820000
+ .long 0x3fff3d42,0xff842d7c,0xc90fdaa2,0xff8842ae
+ .long 0xff8c0681,0x00003fdd,0x3d41ff90,0x2d7c85a3
+ .long 0x08d3ff94,0x42aeff98,0x122eff58,0xf2000a00
+ .long 0xf22e4a23,0xff84f200,0x0a80f22e,0x4aa3ff90
+ .long 0xf2001180,0xf20015a2,0xf2000e28,0xf2000c28
+ .long 0xf2001622,0xf2000180,0xf20010a8,0xf2000422
+ .long 0x0c010000,0x6e00000e,0xf20001a8,0xf2000ca2
+ .long 0x6000ff0c,0xf22e6100,0xff54241f,0xf21fd03c
+ .long 0x222eff54,0xe2996000,0xfd72bff6,0x687e3149
+ .long 0x87d84002,0xac6934a2,0x6db3bfc2,0x476f4e1d
+ .long 0xa28e3fb3,0x44447f87,0x6989bfb7,0x44ee7faf
+ .long 0x45db3fbc,0x71c64694,0x0220bfc2,0x49249218
+ .long 0x72f93fc9,0x99999999,0x8fa9bfd5,0x55555555
+ .long 0x5555bfb7,0x0bf39853,0x9e6a3fbc,0x7187962d
+ .long 0x1d7dbfc2,0x49248271,0x07b83fc9,0x99999996
+ .long 0x263ebfd5,0x55555555,0x55363fff,0x0000c90f
+ .long 0xdaa22168,0xc2350000,0x0000bfff,0x0000c90f
+ .long 0xdaa22168,0xc2350000,0x00000001,0x00008000
+ .long 0x00000000,0x00000000,0x00008001,0x00008000
+ .long 0x00000000,0x00000000,0x00003ffb,0x000083d1
+ .long 0x52c5060b,0x7a510000,0x00003ffb,0x00008bc8
+ .long 0x54456549,0x8b8b0000,0x00003ffb,0x000093be
+ .long 0x40601762,0x6b0d0000,0x00003ffb,0x00009bb3
+ .long 0x078d35ae,0xc2020000,0x00003ffb,0x0000a3a6
+ .long 0x9a525ddc,0xe7de0000,0x00003ffb,0x0000ab98
+ .long 0xe9436276,0x56190000,0x00003ffb,0x0000b389
+ .long 0xe502f9c5,0x98620000,0x00003ffb,0x0000bb79
+ .long 0x7e436b09,0xe6fb0000,0x00003ffb,0x0000c367
+ .long 0xa5c739e5,0xf4460000,0x00003ffb,0x0000cb54
+ .long 0x4c61cff7,0xd5c60000,0x00003ffb,0x0000d33f
+ .long 0x62f82488,0x533e0000,0x00003ffb,0x0000db28
+ .long 0xda816240,0x4c770000,0x00003ffb,0x0000e310
+ .long 0xa4078ad3,0x4f180000,0x00003ffb,0x0000eaf6
+ .long 0xb0a8188e,0xe1eb0000,0x00003ffb,0x0000f2da
+ .long 0xf1949dbe,0x79d50000,0x00003ffb,0x0000fabd
+ .long 0x581361d4,0x7e3e0000,0x00003ffc,0x00008346
+ .long 0xac210959,0xecc40000,0x00003ffc,0x00008b23
+ .long 0x2a083042,0x82d80000,0x00003ffc,0x000092fb
+ .long 0x70b8d29a,0xe2f90000,0x00003ffc,0x00009acf
+ .long 0x476f5ccd,0x1cb40000,0x00003ffc,0x0000a29e
+ .long 0x76304954,0xf23f0000,0x00003ffc,0x0000aa68
+ .long 0xc5d08ab8,0x52300000,0x00003ffc,0x0000b22d
+ .long 0xfffd9d53,0x9f830000,0x00003ffc,0x0000b9ed
+ .long 0xef453e90,0x0ea50000,0x00003ffc,0x0000c1a8
+ .long 0x5f1cc75e,0x3ea50000,0x00003ffc,0x0000c95d
+ .long 0x1be82813,0x8de60000,0x00003ffc,0x0000d10b
+ .long 0xf300840d,0x2de40000,0x00003ffc,0x0000d8b4
+ .long 0xb2ba6bc0,0x5e7a0000,0x00003ffc,0x0000e057
+ .long 0x2a6bb423,0x35f60000,0x00003ffc,0x0000e7f3
+ .long 0x2a70ea9c,0xaa8f0000,0x00003ffc,0x0000ef88
+ .long 0x843264ec,0xefaa0000,0x00003ffc,0x0000f717
+ .long 0x0a28ecc0,0x66660000,0x00003ffd,0x0000812f
+ .long 0xd288332d,0xad320000,0x00003ffd,0x000088a8
+ .long 0xd1b1218e,0x4d640000,0x00003ffd,0x00009012
+ .long 0xab3f23e4,0xaee80000,0x00003ffd,0x0000976c
+ .long 0xc3d411e7,0xf1b90000,0x00003ffd,0x00009eb6
+ .long 0x89493889,0xa2270000,0x00003ffd,0x0000a5ef
+ .long 0x72c34487,0x361b0000,0x00003ffd,0x0000ad17
+ .long 0x00baf07a,0x72270000,0x00003ffd,0x0000b42c
+ .long 0xbcfafd37,0xefb70000,0x00003ffd,0x0000bb30
+ .long 0x3a940ba8,0x0f890000,0x00003ffd,0x0000c221
+ .long 0x15c6fcae,0xbbaf0000,0x00003ffd,0x0000c8fe
+ .long 0xf3e68633,0x12210000,0x00003ffd,0x0000cfc9
+ .long 0x8330b400,0x0c700000,0x00003ffd,0x0000d680
+ .long 0x7aa1102c,0x5bf90000,0x00003ffd,0x0000dd23
+ .long 0x99bc3125,0x2aa30000,0x00003ffd,0x0000e3b2
+ .long 0xa8556b8f,0xc5170000,0x00003ffd,0x0000ea2d
+ .long 0x764f6431,0x59890000,0x00003ffd,0x0000f3bf
+ .long 0x5bf8bad1,0xa21d0000,0x00003ffe,0x0000801c
+ .long 0xe39e0d20,0x5c9a0000,0x00003ffe,0x00008630
+ .long 0xa2dada1e,0xd0660000,0x00003ffe,0x00008c1a
+ .long 0xd445f3e0,0x9b8c0000,0x00003ffe,0x000091db
+ .long 0x8f1664f3,0x50e20000,0x00003ffe,0x00009773
+ .long 0x1420365e,0x538c0000,0x00003ffe,0x00009ce1
+ .long 0xc8e6a0b8,0xcdba0000,0x00003ffe,0x0000a228
+ .long 0x32dbcada,0xae090000,0x00003ffe,0x0000a746
+ .long 0xf2ddb760,0x22940000,0x00003ffe,0x0000ac3e
+ .long 0xc0fb997d,0xd6a20000,0x00003ffe,0x0000b110
+ .long 0x688aebdc,0x6f6a0000,0x00003ffe,0x0000b5bc
+ .long 0xc49059ec,0xc4b00000,0x00003ffe,0x0000ba44
+ .long 0xbc7dd470,0x782f0000,0x00003ffe,0x0000bea9
+ .long 0x4144fd04,0x9aac0000,0x00003ffe,0x0000c2eb
+ .long 0x4abb6616,0x28b60000,0x00003ffe,0x0000c70b
+ .long 0xd54ce602,0xee140000,0x00003ffe,0x0000cd00
+ .long 0x0549adec,0x71590000,0x00003ffe,0x0000d484
+ .long 0x57d2d8ea,0x4ea30000,0x00003ffe,0x0000db94
+ .long 0x8da712de,0xce3b0000,0x00003ffe,0x0000e238
+ .long 0x55f969e8,0x096a0000,0x00003ffe,0x0000e877
+ .long 0x1129c435,0x32590000,0x00003ffe,0x0000ee57
+ .long 0xc16e0d37,0x9c0d0000,0x00003ffe,0x0000f3e1
+ .long 0x0211a87c,0x37790000,0x00003ffe,0x0000f919
+ .long 0x039d758b,0x8d410000,0x00003ffe,0x0000fe05
+ .long 0x8b8f6493,0x5fb30000,0x00003fff,0x00008155
+ .long 0xfb497b68,0x5d040000,0x00003fff,0x00008388
+ .long 0x9e3549d1,0x08e10000,0x00003fff,0x0000859c
+ .long 0xfa76511d,0x724b0000,0x00003fff,0x00008795
+ .long 0x2ecfff81,0x31e70000,0x00003fff,0x00008973
+ .long 0x2fd19557,0x641b0000,0x00003fff,0x00008b38
+ .long 0xcad10193,0x2a350000,0x00003fff,0x00008ce7
+ .long 0xa8d8301e,0xe6b50000,0x00003fff,0x00008f46
+ .long 0xa39e2eae,0x52810000,0x00003fff,0x0000922d
+ .long 0xa7d79188,0x84870000,0x00003fff,0x000094d1
+ .long 0x9fcbdedf,0x52410000,0x00003fff,0x0000973a
+ .long 0xb94419d2,0xa08b0000,0x00003fff,0x0000996f
+ .long 0xf00e08e1,0x0b960000,0x00003fff,0x00009b77
+ .long 0x3f951232,0x1da70000,0x00003fff,0x00009d55
+ .long 0xcc320f93,0x56240000,0x00003fff,0x00009f10
+ .long 0x0575006c,0xc5710000,0x00003fff,0x0000a0a9
+ .long 0xc290d97c,0xc06c0000,0x00003fff,0x0000a226
+ .long 0x59ebebc0,0x630a0000,0x00003fff,0x0000a388
+ .long 0xb4aff6ef,0x0ec90000,0x00003fff,0x0000a4d3
+ .long 0x5f1061d2,0x92c40000,0x00003fff,0x0000a608
+ .long 0x95dcfbe3,0x187e0000,0x00003fff,0x0000a72a
+ .long 0x51dc7367,0xbeac0000,0x00003fff,0x0000a83a
+ .long 0x51530956,0x168f0000,0x00003fff,0x0000a93a
+ .long 0x20077539,0x546e0000,0x00003fff,0x0000aa9e
+ .long 0x7245023b,0x26050000,0x00003fff,0x0000ac4c
+ .long 0x84ba6fe4,0xd58f0000,0x00003fff,0x0000adce
+ .long 0x4a4a606b,0x97120000,0x00003fff,0x0000af2a
+ .long 0x2dcd8d26,0x3c9c0000,0x00003fff,0x0000b065
+ .long 0x6f81f222,0x65c70000,0x00003fff,0x0000b184
+ .long 0x65150f71,0x496a0000,0x00003fff,0x0000b28a
+ .long 0xaa156f9a,0xda350000,0x00003fff,0x0000b37b
+ .long 0x44ff3766,0xb8950000,0x00003fff,0x0000b458
+ .long 0xc3dce963,0x04330000,0x00003fff,0x0000b525
+ .long 0x529d5622,0x46bd0000,0x00003fff,0x0000b5e2
+ .long 0xcca95f9d,0x88cc0000,0x00003fff,0x0000b692
+ .long 0xcada7aca,0x1ada0000,0x00003fff,0x0000b736
+ .long 0xaea7a692,0x58380000,0x00003fff,0x0000b7cf
+ .long 0xab287e9f,0x7b360000,0x00003fff,0x0000b85e
+ .long 0xcc66cb21,0x98350000,0x00003fff,0x0000b8e4
+ .long 0xfd5a20a5,0x93da0000,0x00003fff,0x0000b99f
+ .long 0x41f64aff,0x9bb50000,0x00003fff,0x0000ba7f
+ .long 0x1e17842b,0xbe7b0000,0x00003fff,0x0000bb47
+ .long 0x12857637,0xe17d0000,0x00003fff,0x0000bbfa
+ .long 0xbe8a4788,0xdf6f0000,0x00003fff,0x0000bc9d
+ .long 0x0fad2b68,0x9d790000,0x00003fff,0x0000bd30
+ .long 0x6a39471e,0xcd860000,0x00003fff,0x0000bdb6
+ .long 0xc731856a,0xf18a0000,0x00003fff,0x0000be31
+ .long 0xcac502e8,0x0d700000,0x00003fff,0x0000bea2
+ .long 0xd55ce331,0x94e20000,0x00003fff,0x0000bf0b
+ .long 0x10b7c031,0x28f00000,0x00003fff,0x0000bf6b
+ .long 0x7a18dacb,0x778d0000,0x00003fff,0x0000bfc4
+ .long 0xea4663fa,0x18f60000,0x00003fff,0x0000c018
+ .long 0x1bde8b89,0xa4540000,0x00003fff,0x0000c065
+ .long 0xb066cfbf,0x64390000,0x00003fff,0x0000c0ae
+ .long 0x345f5634,0x0ae60000,0x00003fff,0x0000c0f2
+ .long 0x22919cb9,0xe6a70000,0x0000f210,0x48002210
+ .long 0x32280004,0xf22e6800,0xff840281,0x7fffffff
+ .long 0x0c813ffb,0x80006c04,0x600000d0,0x0c814002
+ .long 0xffff6f04,0x6000014c,0x02aef800,0x0000ff88
+ .long 0x00ae0400,0x0000ff88,0x2d7c0000,0x0000ff8c
+ .long 0xf2000080,0xf22e48a3,0xff84f22e,0x4828ff84
+ .long 0xf23c44a2,0x3f800000,0xf2000420,0x2f022401
+ .long 0x02810000,0x78000282,0x7fff0000,0x04823ffb
+ .long 0x0000e282,0xd282ee81,0x43faf780,0xd3c12d59
+ .long 0xff902d59,0xff942d59,0xff98222e,0xff840281
+ .long 0x80000000,0x83aeff90,0x241ff227,0xe004f200
+ .long 0x0080f200,0x04a3f23a,0x5500f6a0,0xf2000522
+ .long 0xf2000523,0xf20000a3,0xf23a5522,0xf696f23a
+ .long 0x54a3f698,0xf20008a3,0xf2000422,0xf21fd020
+ .long 0xf2009000,0xf22e4822,0xff9060ff,0x00002d30
+ .long 0x0c813fff,0x80006e00,0x008a0c81,0x3fd78000
+ .long 0x6d00006c,0xf227e00c,0xf2000023,0xf2000080
+ .long 0xf20004a3,0xf23a5500,0xf65af23a,0x5580f65c
+ .long 0xf2000523,0xf20005a3,0xf23a5522,0xf656f23a
+ .long 0x55a2f658,0xf2000523,0xf2000ca3,0xf23a5522
+ .long 0xf652f23a,0x54a2f654,0xf2000123,0xf22e4823
+ .long 0xff84f200,0x08a2f200,0x0423f21f,0xd030f200
+ .long 0x9000f22e,0x4822ff84,0x60ff0000,0x2cb2f200
+ .long 0x9000123c,0x0003f22e,0x4800ff84,0x60ff0000
+ .long 0x2c900c81,0x40638000,0x6e00008e,0xf227e00c
+ .long 0xf23c4480,0xbf800000,0xf20000a0,0xf2000400
+ .long 0xf2000023,0xf22e6880,0xff84f200,0x0080f200
+ .long 0x04a3f23a,0x5580f5ec,0xf23a5500,0xf5eef200
+ .long 0x05a3f200,0x0523f23a,0x55a2f5e8,0xf23a5522
+ .long 0xf5eaf200,0x0ca3f200,0x0123f23a,0x54a2f5e4
+ .long 0xf22e4823,0xff84f200,0x08a2f200,0x0423f22e
+ .long 0x4822ff84,0xf21fd030,0xf2009000,0x4a106a0c
+ .long 0xf23a4822,0xf5d660ff,0x00002c24,0xf23a4822
+ .long 0xf5ba60ff,0x00002c10,0x4a106a16,0xf23a4800
+ .long 0xf5baf200,0x9000f23a,0x4822f5c0,0x60ff0000
+ .long 0x2bfef23a,0x4800f594,0xf2009000,0xf23a4822
+ .long 0xf5ba60ff,0x00002be0,0x60ff0000,0x2a66f210
+ .long 0x48002210,0x32280004,0x02817fff,0xffff0c81
+ .long 0x3fff8000,0x6c4e0c81,0x3fd78000,0x6d00007c
+ .long 0xf23c4480,0x3f800000,0xf20000a8,0xf227e004
+ .long 0xf23c4500,0x3f800000,0xf2000122,0xf20008a3
+ .long 0xf21fd020,0xf2000484,0xf2000420,0xf227e001
+ .long 0x41d761ff,0xfffffd66,0xdffc0000,0x000c60ff
+ .long 0x00002b6c,0xf2000018,0xf23c4438,0x3f800000
+ .long 0xf2d20000,0x29d4f23a,0x4800c5a6,0x22100281
+ .long 0x80000000,0x00813f80,0x00002f01,0xf2009000
+ .long 0xf21f4423,0x60ff0000,0x2b36f200,0x9000123c
+ .long 0x0003f210,0x480060ff,0x00002b16,0x60ff0000
+ .long 0x29b2f210,0x48002210,0x32280004,0x02817fff
+ .long 0xffff0c81,0x3fff8000,0x6c44f23c,0x44803f80
+ .long 0x0000f200,0x00a2f200,0x001af23c,0x44223f80
+ .long 0x0000f200,0x0420f200,0x00042f00,0x4280f227
+ .long 0xe00141d7,0x61ffffff,0xfcc4dffc,0x0000000c
+ .long 0xf21f9000,0xf2000022,0x60ff0000,0x2acaf200
+ .long 0x0018f23c,0x44383f80,0x0000f2d2,0x0000292a
+ .long 0x4a106a18,0xf23a4800,0xc4e8f200,0x9000f23c
+ .long 0x44220080,0x000060ff,0x00002a9c,0x60ff0000
+ .long 0x2ce8f200,0x9000f23a,0x4800c4d6,0x60ff0000
+ .long 0x2a863fdc,0x000082e3,0x08654361,0xc4c60000
+ .long 0x00003fa5,0x55555555,0x4cc13fc5,0x55555555
+ .long 0x4a543f81,0x11111117,0x43853fa5,0x55555555
+ .long 0x4f5a3fc5,0x55555555,0x55550000,0x00000000
+ .long 0x00003ec7,0x1de3a577,0x46823efa,0x01a019d7
+ .long 0xcb683f2a,0x01a01a01,0x9df33f56,0xc16c16c1
+ .long 0x70e23f81,0x11111111,0x11113fa5,0x55555555
+ .long 0x55553ffc,0x0000aaaa,0xaaaaaaaa,0xaaab0000
+ .long 0x000048b0,0x00000000,0x00003730,0x00000000
+ .long 0x00003fff,0x00008000,0x00000000,0x00000000
+ .long 0x00003fff,0x00008164,0xd1f3bc03,0x07749f84
+ .long 0x1a9b3fff,0x000082cd,0x8698ac2b,0xa1d89fc1
+ .long 0xd5b93fff,0x0000843a,0x28c3acde,0x4048a072
+ .long 0x83693fff,0x000085aa,0xc367cc48,0x7b141fc5
+ .long 0xc95c3fff,0x0000871f,0x61969e8d,0x10101ee8
+ .long 0x5c9f3fff,0x00008898,0x0e8092da,0x85289fa2
+ .long 0x07293fff,0x00008a14,0xd575496e,0xfd9ca07b
+ .long 0xf9af3fff,0x00008b95,0xc1e3ea8b,0xd6e8a002
+ .long 0x0dcf3fff,0x00008d1a,0xdf5b7e5b,0xa9e4205a
+ .long 0x63da3fff,0x00008ea4,0x398b45cd,0x53c01eb7
+ .long 0x00513fff,0x00009031,0xdc431466,0xb1dc1f6e
+ .long 0xb0293fff,0x000091c3,0xd373ab11,0xc338a078
+ .long 0x14943fff,0x0000935a,0x2b2f13e6,0xe92c9eb3
+ .long 0x19b03fff,0x000094f4,0xefa8fef7,0x09602017
+ .long 0x457d3fff,0x00009694,0x2d372018,0x5a001f11
+ .long 0xd5373fff,0x00009837,0xf0518db8,0xa9709fb9
+ .long 0x52dd3fff,0x000099e0,0x459320b7,0xfa641fe4
+ .long 0x30873fff,0x00009b8d,0x39b9d54e,0x55381fa2
+ .long 0xa8183fff,0x00009d3e,0xd9a72cff,0xb7501fde
+ .long 0x494d3fff,0x00009ef5,0x326091a1,0x11ac2050
+ .long 0x48903fff,0x0000a0b0,0x510fb971,0x4fc4a073
+ .long 0x691c3fff,0x0000a270,0x43030c49,0x68181f9b
+ .long 0x7a053fff,0x0000a435,0x15ae09e6,0x80a0a079
+ .long 0x71263fff,0x0000a5fe,0xd6a9b151,0x38eca071
+ .long 0xa1403fff,0x0000a7cd,0x93b4e965,0x3568204f
+ .long 0x62da3fff,0x0000a9a1,0x5ab4ea7c,0x0ef81f28
+ .long 0x3c4a3fff,0x0000ab7a,0x39b5a93e,0xd3389f9a
+ .long 0x7fdc3fff,0x0000ad58,0x3eea42a1,0x4ac8a05b
+ .long 0x3fac3fff,0x0000af3b,0x78ad690a,0x43741fdf
+ .long 0x26103fff,0x0000b123,0xf581d2ac,0x25909f70
+ .long 0x5f903fff,0x0000b311,0xc412a911,0x2488201f
+ .long 0x678a3fff,0x0000b504,0xf333f9de,0x64841f32
+ .long 0xfb133fff,0x0000b6fd,0x91e328d1,0x77902003
+ .long 0x8b303fff,0x0000b8fb,0xaf4762fb,0x9ee8200d
+ .long 0xc3cc3fff,0x0000baff,0x5ab2133e,0x45fc9f8b
+ .long 0x2ae63fff,0x0000bd08,0xa39f580c,0x36c0a02b
+ .long 0xbf703fff,0x0000bf17,0x99b67a73,0x1084a00b
+ .long 0xf5183fff,0x0000c12c,0x4cca6670,0x9458a041
+ .long 0xdd413fff,0x0000c346,0xccda2497,0x64089fdf
+ .long 0x137b3fff,0x0000c567,0x2a115506,0xdadc201f
+ .long 0x15683fff,0x0000c78d,0x74c8abb9,0xb15c1fc1
+ .long 0x3a2e3fff,0x0000c9b9,0xbd866e2f,0x27a4a03f
+ .long 0x8f033fff,0x0000cbec,0x14fef272,0x7c5c1ff4
+ .long 0x907d3fff,0x0000ce24,0x8c151f84,0x80e49e6e
+ .long 0x53e43fff,0x0000d063,0x33daef2b,0x25941fd6
+ .long 0xd45c3fff,0x0000d2a8,0x1d91f12a,0xe45ca076
+ .long 0xedb93fff,0x0000d4f3,0x5aabcfed,0xfa209fa6
+ .long 0xde213fff,0x0000d744,0xfccad69d,0x6af41ee6
+ .long 0x9a2f3fff,0x0000d99d,0x15c278af,0xd7b4207f
+ .long 0x439f3fff,0x0000dbfb,0xb797daf2,0x3754201e
+ .long 0xc2073fff,0x0000de60,0xf4825e0e,0x91249e8b
+ .long 0xe1753fff,0x0000e0cc,0xdeec2a94,0xe1102003
+ .long 0x2c4b3fff,0x0000e33f,0x8972be8a,0x5a502004
+ .long 0xdff53fff,0x0000e5b9,0x06e77c83,0x48a81e72
+ .long 0xf47a3fff,0x0000e839,0x6a503c4b,0xdc681f72
+ .long 0x2f223fff,0x0000eac0,0xc6e7dd24,0x3930a017
+ .long 0xe9453fff,0x0000ed4f,0x301ed994,0x2b841f40
+ .long 0x1a5b3fff,0x0000efe4,0xb99bdcda,0xf5cc9fb9
+ .long 0xa9e33fff,0x0000f281,0x773c59ff,0xb1382074
+ .long 0x4c053fff,0x0000f525,0x7d152486,0xcc2c1f77
+ .long 0x3a193fff,0x0000f7d0,0xdf730ad1,0x3bb81ffe
+ .long 0x90d53fff,0x0000fa83,0xb2db722a,0x033ca041
+ .long 0xed223fff,0x0000fd3e,0x0c0cf486,0xc1741f85
+ .long 0x3f3a2210,0x02817fff,0x00000c81,0x3fbe0000
+ .long 0x6c0660ff,0x00000108,0x32280004,0x0c81400c
+ .long 0xb1676d06,0x60ff0000,0x010cf210,0x4800f200
+ .long 0x0080f23c,0x442342b8,0xaa3bf227,0xe00c2d7c
+ .long 0x00000000,0xff58f201,0x600043fa,0xfbb6f201
+ .long 0x40002d41,0xff540281,0x0000003f,0xe989d3c1
+ .long 0x222eff54,0xec810641,0x3fff3d7a,0xfb06ff54
+ .long 0xf2000100,0xf23c4423,0xbc317218,0xf23a4923
+ .long 0xfaf2f200,0x0422f200,0x0822f200,0x0080f200
+ .long 0x04a3f23c,0x45003ab6,0x0b70f200,0x0523f200
+ .long 0x0580f23c,0x45a33c08,0x8895f23a,0x5522fad4
+ .long 0xf23a55a2,0xfad6f200,0x05233d41,0xff842d7c
+ .long 0x80000000,0xff8842ae,0xff8cf200,0x05a3f23c
+ .long 0x45223f00,0x0000f200,0x01a3f200,0x0523f200
+ .long 0x0c22f219,0x4880f200,0x0822f200,0x0423f21f
+ .long 0xd030f211,0x4422f200,0x0422222e,0xff584a81
+ .long 0x6706f22e,0x4823ff90,0xf2009000,0x123c0000
+ .long 0xf22e4823,0xff8460ff,0x000024c6,0xf210d080
+ .long 0xf2009000,0xf23c4422,0x3f800000,0x60ff0000
+ .long 0x24c60c81,0x400cb27c,0x6e66f210,0x4800f200
+ .long 0x0080f23c,0x442342b8,0xaa3bf227,0xe00c2d7c
+ .long 0x00000001,0xff58f201,0x600043fa,0xfaa6f201
+ .long 0x40002d41,0xff540281,0x0000003f,0xe989d3c1
+ .long 0x222eff54,0xec812d41,0xff54e281,0x93aeff54
+ .long 0x06413fff,0x3d41ff90,0x2d7c8000,0x0000ff94
+ .long 0x42aeff98,0x222eff54,0x06413fff,0x6000fed2
+ .long 0x4a106bff,0x00002370,0x60ff0000,0x24122f10
+ .long 0x02978000,0x00000097,0x00800000,0xf23c4400
+ .long 0x3f800000,0xf2009000,0xf21f4422,0x60ff0000
+ .long 0x24262210,0x02817fff,0x00000c81,0x3ffd0000
+ .long 0x6c0660ff,0x0000015e,0x32280004,0x0c814004
+ .long 0xc2156f06,0x60ff0000,0x026cf210,0x4800f200
+ .long 0x0080f23c,0x442342b8,0xaa3bf227,0xe00cf201
+ .long 0x600043fa,0xf9eef201,0x40002d41,0xff540281
+ .long 0x0000003f,0xe989d3c1,0x222eff54,0xec812d41
+ .long 0xff54f200,0x0100f23c,0x4423bc31,0x7218f23a
+ .long 0x4923f930,0xf2000422,0xf2000822,0x06413fff
+ .long 0xf2000080,0xf20004a3,0xf23c4500,0x3950097b
+ .long 0xf2000523,0xf2000580,0xf23c45a3,0x3ab60b6a
+ .long 0xf23a5522,0xf91ef23a,0x55a2f920,0x3d41ff84
+ .long 0x2d7c8000,0x0000ff88,0x42aeff8c,0xf2000523
+ .long 0x222eff54,0x4441f200,0x05a30641,0x3ffff23a
+ .long 0x5522f900,0xf23c45a2,0x3f000000,0xf2000523
+ .long 0x00418000,0x3d41ff90,0x2d7c8000,0x0000ff94
+ .long 0x42aeff98,0xf2000ca3,0xf2000123,0xf2000422
+ .long 0xf2000822,0xf21fd030,0xf2114823,0x222eff54
+ .long 0x0c810000,0x003f6f1a,0xf2294480,0x000cf22e
+ .long 0x48a2ff90,0xf2000422,0xf2114822,0x60ff0000
+ .long 0x00340c81,0xfffffffd,0x6c16f229,0x4422000c
+ .long 0xf2114822,0xf22e4822,0xff9060ff,0x00000016
+ .long 0xf2194880,0xf2114422,0xf22e48a2,0xff90f200
+ .long 0x0422f200,0x9000f22e,0x4823ff84,0x60ff0000
+ .long 0x22ae0c81,0x3fbe0000,0x6c6c0c81,0x00330000
+ .long 0x6d2c2d7c,0x80010000,0xff842d7c,0x80000000
+ .long 0xff8842ae,0xff8cf210,0x4800f200,0x9000123c
+ .long 0x0002f22e,0x4822ff84,0x60ff0000,0x2264f210
+ .long 0x4800f23a,0x5423f86c,0x2d7c8001,0x0000ff84
+ .long 0x2d7c8000,0x0000ff88,0x42aeff8c,0xf22e4822
+ .long 0xff84f200,0x9000123c,0x0000f23a,0x5423f84c
+ .long 0x60ff0000,0x222cf210,0x4800f200,0x0023f227
+ .long 0xe00cf23c,0x44802f30,0xcaa8f200,0x00a3f23c
+ .long 0x4500310f,0x8290f23c,0x44a232d7,0x3220f200
+ .long 0x0123f200,0x00a3f23c,0x45223493,0xf281f23a
+ .long 0x54a2f7c0,0xf2000123,0xf20000a3,0xf23a5522
+ .long 0xf7baf23a,0x54a2f7bc,0xf2000123,0xf20000a3
+ .long 0xf23a5522,0xf7b6f23a,0x54a2f7b8,0xf2000123
+ .long 0xf20000a3,0xf23a5522,0xf7b2f23a,0x48a2f7b4
+ .long 0xf2000123,0xf20000a3,0xf2000123,0xf21048a3
+ .long 0xf23c4423,0x3f000000,0xf20008a2,0xf21fd030
+ .long 0xf2000422,0xf2009000,0xf2104822,0x60ff0000
+ .long 0x218e2210,0x0c810000,0x00006e00,0xfbacf23c
+ .long 0x4400bf80,0x0000f200,0x9000f23c,0x44220080
+ .long 0x000060ff,0x00002178,0x60ff0000,0x1ff63028
+ .long 0x00000880,0x000f0440,0x3ffff200,0x50006d02
+ .long 0x4e751d7c,0x0008ff64,0x4e7561ff,0x00007cfc
+ .long 0x44400440,0x3ffff200,0x50001d7c,0x0008ff64
+ .long 0x4e753028,0x00000040,0x7fff0880,0x000e2d68
+ .long 0x0004ff88,0x2d680008,0xff8c3d40,0xff84f22e
+ .long 0x4800ff84,0x6b024e75,0x1d7c0008,0xff644e75
+ .long 0x61ff0000,0x7cb660ca,0x7ffb0000,0x80000000
+ .long 0x00000000,0x00000000,0xf2104800,0x22103228
+ .long 0x00040281,0x7fffffff,0x0c81400c,0xb1676e42
+ .long 0xf2000018,0x2f004280,0xf227e001,0x41d761ff
+ .long 0xfffffad2,0xdffc0000,0x000cf23c,0x44233f00
+ .long 0x0000201f,0xf23c4480,0x3e800000,0xf20000a0
+ .long 0xf2009000,0x123c0002,0xf2000422,0x60ff0000
+ .long 0x20800c81,0x400cb2b3,0x6e3cf200,0x0018f23a
+ .long 0x5428baae,0xf23a5428,0xbab02f00,0x4280f227
+ .long 0xe00141d7,0x61ffffff,0xfa7cdffc,0x0000000c
+ .long 0x201ff200,0x9000123c,0x0000f23a,0x4823ff5a
+ .long 0x60ff0000,0x203c60ff,0x00002014,0xf23c4400
+ .long 0x3f800000,0xf2009000,0xf23c4422,0x00800000
+ .long 0x60ff0000,0x2032f210,0x48002210,0x32280004
+ .long 0x22410281,0x7fffffff,0x0c81400c,0xb1676e62
+ .long 0xf2000018,0x48e78040,0xf227e001,0x41d74280
+ .long 0x61ffffff,0xfbe0dffc,0x0000000c,0xf23c9000
+ .long 0x00000000,0x4cdf0201,0xf2000080,0xf23c44a2
+ .long 0x3f800000,0xf2276800,0xf2000420,0x22090281
+ .long 0x80000000,0x00813f00,0x0000f21f,0x48222f01
+ .long 0xf2009000,0x123c0000,0xf21f4423,0x60ff0000
+ .long 0x1fa00c81,0x400cb2b3,0x6eff0000,0x1f4cf200
+ .long 0x0018f23a,0x5428b9ca,0x2f3c0000,0x00002f3c
+ .long 0x80000000,0x22090281,0x80000000,0x00817ffb
+ .long 0x00002f01,0xf23a5428,0xb9b02f00,0x4280f227
+ .long 0xe00141d7,0x61ffffff,0xf97cdffc,0x0000000c
+ .long 0x201ff200,0x9000123c,0x0000f21f,0x482360ff
+ .long 0x00001f3e,0x60ff0000,0x1ddaf210,0x4800f22e
+ .long 0x6800ff84,0x22103228,0x00042d41,0xff840281
+ .long 0x7fffffff,0x0c813fd7,0x80006d00,0x00740c81
+ .long 0x3fffddce,0x6e00006a,0x222eff84,0x2d41ff5c
+ .long 0x02817fff,0x00000681,0x00010000,0x2d41ff84
+ .long 0x02ae8000,0x0000ff5c,0xf22e4800,0xff842f00
+ .long 0x4280f227,0xe00141d7,0x61ffffff,0xfac8dffc
+ .long 0x0000000c,0x201ff200,0x0080f23c,0x44a24000
+ .long 0x0000222e,0xff5cf22e,0x6880ff84,0xb3aeff84
+ .long 0xf2009000,0xf22e4820,0xff8460ff,0x00001eb0
+ .long 0x0c813fff,0x80006d00,0x00880c81,0x40048aa1
+ .long 0x6e000092,0x222eff84,0x2d41ff5c,0x02817fff
+ .long 0x00000681,0x00010000,0x2d41ff84,0x02ae8000
+ .long 0x0000ff5c,0x222eff5c,0xf22e4800,0xff842f00
+ .long 0x4280f227,0xe00141d7,0x61ffffff,0xf878dffc
+ .long 0x0000000c,0x201f222e,0xff5cf23c,0x44223f80
+ .long 0x00000a81,0xc0000000,0xf2014480,0xf20000a0
+ .long 0x222eff5c,0x00813f80,0x0000f201,0x4400f200
+ .long 0x9000123c,0x0002f200,0x042260ff,0x00001e20
+ .long 0xf2009000,0x123c0003,0xf22e4800,0xff8460ff
+ .long 0x00001dfe,0x222eff84,0x02818000,0x00000081
+ .long 0x3f800000,0xf2014400,0x02818000,0x00000a81
+ .long 0x80800000,0xf2009000,0xf2014422,0x60ff0000
+ .long 0x1dde60ff,0x00001c6c,0x3ffe0000,0xb17217f7
+ .long 0xd1cf79ac,0x00000000,0x3f800000,0x00000000
+ .long 0x7f800000,0xbf800000,0x3fc2499a,0xb5e4040b
+ .long 0xbfc555b5,0x848cb7db,0x3fc99999,0x987d8730
+ .long 0xbfcfffff,0xff6f7e97,0x3fd55555,0x555555a4
+ .long 0xbfe00000,0x00000008,0x3f175496,0xadd7dad6
+ .long 0x3f3c71c2,0xfe80c7e0,0x3f624924,0x928bccff
+ .long 0x3f899999,0x999995ec,0x3fb55555,0x55555555
+ .long 0x40000000,0x00000000,0x3f990000,0x80000000
+ .long 0x00000000,0x00000000,0x3ffe0000,0xfe03f80f
+ .long 0xe03f80fe,0x00000000,0x3ff70000,0xff015358
+ .long 0x833c47e2,0x00000000,0x3ffe0000,0xfa232cf2
+ .long 0x52138ac0,0x00000000,0x3ff90000,0xbdc8d83e
+ .long 0xad88d549,0x00000000,0x3ffe0000,0xf6603d98
+ .long 0x0f6603da,0x00000000,0x3ffa0000,0x9cf43dcf
+ .long 0xf5eafd48,0x00000000,0x3ffe0000,0xf2b9d648
+ .long 0x0f2b9d65,0x00000000,0x3ffa0000,0xda16eb88
+ .long 0xcb8df614,0x00000000,0x3ffe0000,0xef2eb71f
+ .long 0xc4345238,0x00000000,0x3ffb0000,0x8b29b775
+ .long 0x1bd70743,0x00000000,0x3ffe0000,0xebbdb2a5
+ .long 0xc1619c8c,0x00000000,0x3ffb0000,0xa8d839f8
+ .long 0x30c1fb49,0x00000000,0x3ffe0000,0xe865ac7b
+ .long 0x7603a197,0x00000000,0x3ffb0000,0xc61a2eb1
+ .long 0x8cd907ad,0x00000000,0x3ffe0000,0xe525982a
+ .long 0xf70c880e,0x00000000,0x3ffb0000,0xe2f2a47a
+ .long 0xde3a18af,0x00000000,0x3ffe0000,0xe1fc780e
+ .long 0x1fc780e2,0x00000000,0x3ffb0000,0xff64898e
+ .long 0xdf55d551,0x00000000,0x3ffe0000,0xdee95c4c
+ .long 0xa037ba57,0x00000000,0x3ffc0000,0x8db956a9
+ .long 0x7b3d0148,0x00000000,0x3ffe0000,0xdbeb61ee
+ .long 0xd19c5958,0x00000000,0x3ffc0000,0x9b8fe100
+ .long 0xf47ba1de,0x00000000,0x3ffe0000,0xd901b203
+ .long 0x6406c80e,0x00000000,0x3ffc0000,0xa9372f1d
+ .long 0x0da1bd17,0x00000000,0x3ffe0000,0xd62b80d6
+ .long 0x2b80d62c,0x00000000,0x3ffc0000,0xb6b07f38
+ .long 0xce90e46b,0x00000000,0x3ffe0000,0xd3680d36
+ .long 0x80d3680d,0x00000000,0x3ffc0000,0xc3fd0329
+ .long 0x06488481,0x00000000,0x3ffe0000,0xd0b69fcb
+ .long 0xd2580d0b,0x00000000,0x3ffc0000,0xd11de0ff
+ .long 0x15ab18ca,0x00000000,0x3ffe0000,0xce168a77
+ .long 0x25080ce1,0x00000000,0x3ffc0000,0xde1433a1
+ .long 0x6c66b150,0x00000000,0x3ffe0000,0xcb8727c0
+ .long 0x65c393e0,0x00000000,0x3ffc0000,0xeae10b5a
+ .long 0x7ddc8add,0x00000000,0x3ffe0000,0xc907da4e
+ .long 0x871146ad,0x00000000,0x3ffc0000,0xf7856e5e
+ .long 0xe2c9b291,0x00000000,0x3ffe0000,0xc6980c69
+ .long 0x80c6980c,0x00000000,0x3ffd0000,0x82012ca5
+ .long 0xa68206d7,0x00000000,0x3ffe0000,0xc4372f85
+ .long 0x5d824ca6,0x00000000,0x3ffd0000,0x882c5fcd
+ .long 0x7256a8c5,0x00000000,0x3ffe0000,0xc1e4bbd5
+ .long 0x95f6e947,0x00000000,0x3ffd0000,0x8e44c60b
+ .long 0x4ccfd7de,0x00000000,0x3ffe0000,0xbfa02fe8
+ .long 0x0bfa02ff,0x00000000,0x3ffd0000,0x944ad09e
+ .long 0xf4351af6,0x00000000,0x3ffe0000,0xbd691047
+ .long 0x07661aa3,0x00000000,0x3ffd0000,0x9a3eecd4
+ .long 0xc3eaa6b2,0x00000000,0x3ffe0000,0xbb3ee721
+ .long 0xa54d880c,0x00000000,0x3ffd0000,0xa0218434
+ .long 0x353f1de8,0x00000000,0x3ffe0000,0xb92143fa
+ .long 0x36f5e02e,0x00000000,0x3ffd0000,0xa5f2fcab
+ .long 0xbbc506da,0x00000000,0x3ffe0000,0xb70fbb5a
+ .long 0x19be3659,0x00000000,0x3ffd0000,0xabb3b8ba
+ .long 0x2ad362a5,0x00000000,0x3ffe0000,0xb509e68a
+ .long 0x9b94821f,0x00000000,0x3ffd0000,0xb1641795
+ .long 0xce3ca97b,0x00000000,0x3ffe0000,0xb30f6352
+ .long 0x8917c80b,0x00000000,0x3ffd0000,0xb7047551
+ .long 0x5d0f1c61,0x00000000,0x3ffe0000,0xb11fd3b8
+ .long 0x0b11fd3c,0x00000000,0x3ffd0000,0xbc952afe
+ .long 0xea3d13e1,0x00000000,0x3ffe0000,0xaf3addc6
+ .long 0x80af3ade,0x00000000,0x3ffd0000,0xc2168ed0
+ .long 0xf458ba4a,0x00000000,0x3ffe0000,0xad602b58
+ .long 0x0ad602b6,0x00000000,0x3ffd0000,0xc788f439
+ .long 0xb3163bf1,0x00000000,0x3ffe0000,0xab8f69e2
+ .long 0x8359cd11,0x00000000,0x3ffd0000,0xccecac08
+ .long 0xbf04565d,0x00000000,0x3ffe0000,0xa9c84a47
+ .long 0xa07f5638,0x00000000,0x3ffd0000,0xd2420487
+ .long 0x2dd85160,0x00000000,0x3ffe0000,0xa80a80a8
+ .long 0x0a80a80b,0x00000000,0x3ffd0000,0xd7894992
+ .long 0x3bc3588a,0x00000000,0x3ffe0000,0xa655c439
+ .long 0x2d7b73a8,0x00000000,0x3ffd0000,0xdcc2c4b4
+ .long 0x9887dacc,0x00000000,0x3ffe0000,0xa4a9cf1d
+ .long 0x96833751,0x00000000,0x3ffd0000,0xe1eebd3e
+ .long 0x6d6a6b9e,0x00000000,0x3ffe0000,0xa3065e3f
+ .long 0xae7cd0e0,0x00000000,0x3ffd0000,0xe70d785c
+ .long 0x2f9f5bdc,0x00000000,0x3ffe0000,0xa16b312e
+ .long 0xa8fc377d,0x00000000,0x3ffd0000,0xec1f392c
+ .long 0x5179f283,0x00000000,0x3ffe0000,0x9fd809fd
+ .long 0x809fd80a,0x00000000,0x3ffd0000,0xf12440d3
+ .long 0xe36130e6,0x00000000,0x3ffe0000,0x9e4cad23
+ .long 0xdd5f3a20,0x00000000,0x3ffd0000,0xf61cce92
+ .long 0x346600bb,0x00000000,0x3ffe0000,0x9cc8e160
+ .long 0xc3fb19b9,0x00000000,0x3ffd0000,0xfb091fd3
+ .long 0x8145630a,0x00000000,0x3ffe0000,0x9b4c6f9e
+ .long 0xf03a3caa,0x00000000,0x3ffd0000,0xffe97042
+ .long 0xbfa4c2ad,0x00000000,0x3ffe0000,0x99d722da
+ .long 0xbde58f06,0x00000000,0x3ffe0000,0x825efced
+ .long 0x49369330,0x00000000,0x3ffe0000,0x9868c809
+ .long 0x868c8098,0x00000000,0x3ffe0000,0x84c37a7a
+ .long 0xb9a905c9,0x00000000,0x3ffe0000,0x97012e02
+ .long 0x5c04b809,0x00000000,0x3ffe0000,0x87224c2e
+ .long 0x8e645fb7,0x00000000,0x3ffe0000,0x95a02568
+ .long 0x095a0257,0x00000000,0x3ffe0000,0x897b8cac
+ .long 0x9f7de298,0x00000000,0x3ffe0000,0x94458094
+ .long 0x45809446,0x00000000,0x3ffe0000,0x8bcf55de
+ .long 0xc4cd05fe,0x00000000,0x3ffe0000,0x92f11384
+ .long 0x0497889c,0x00000000,0x3ffe0000,0x8e1dc0fb
+ .long 0x89e125e5,0x00000000,0x3ffe0000,0x91a2b3c4
+ .long 0xd5e6f809,0x00000000,0x3ffe0000,0x9066e68c
+ .long 0x955b6c9b,0x00000000,0x3ffe0000,0x905a3863
+ .long 0x3e06c43b,0x00000000,0x3ffe0000,0x92aade74
+ .long 0xc7be59e0,0x00000000,0x3ffe0000,0x8f1779d9
+ .long 0xfdc3a219,0x00000000,0x3ffe0000,0x94e9bff6
+ .long 0x15845643,0x00000000,0x3ffe0000,0x8dda5202
+ .long 0x37694809,0x00000000,0x3ffe0000,0x9723a1b7
+ .long 0x20134203,0x00000000,0x3ffe0000,0x8ca29c04
+ .long 0x6514e023,0x00000000,0x3ffe0000,0x995899c8
+ .long 0x90eb8990,0x00000000,0x3ffe0000,0x8b70344a
+ .long 0x139bc75a,0x00000000,0x3ffe0000,0x9b88bdaa
+ .long 0x3a3dae2f,0x00000000,0x3ffe0000,0x8a42f870
+ .long 0x5669db46,0x00000000,0x3ffe0000,0x9db4224f
+ .long 0xffe1157c,0x00000000,0x3ffe0000,0x891ac73a
+ .long 0xe9819b50,0x00000000,0x3ffe0000,0x9fdadc26
+ .long 0x8b7a12da,0x00000000,0x3ffe0000,0x87f78087
+ .long 0xf78087f8,0x00000000,0x3ffe0000,0xa1fcff17
+ .long 0xce733bd4,0x00000000,0x3ffe0000,0x86d90544
+ .long 0x7a34acc6,0x00000000,0x3ffe0000,0xa41a9e8f
+ .long 0x5446fb9f,0x00000000,0x3ffe0000,0x85bf3761
+ .long 0x2cee3c9b,0x00000000,0x3ffe0000,0xa633cd7e
+ .long 0x6771cd8b,0x00000000,0x3ffe0000,0x84a9f9c8
+ .long 0x084a9f9d,0x00000000,0x3ffe0000,0xa8489e60
+ .long 0x0b435a5e,0x00000000,0x3ffe0000,0x83993052
+ .long 0x3fbe3368,0x00000000,0x3ffe0000,0xaa59233c
+ .long 0xcca4bd49,0x00000000,0x3ffe0000,0x828cbfbe
+ .long 0xb9a020a3,0x00000000,0x3ffe0000,0xac656dae
+ .long 0x6bcc4985,0x00000000,0x3ffe0000,0x81848da8
+ .long 0xfaf0d277,0x00000000,0x3ffe0000,0xae6d8ee3
+ .long 0x60bb2468,0x00000000,0x3ffe0000,0x80808080
+ .long 0x80808081,0x00000000,0x3ffe0000,0xb07197a2
+ .long 0x3c46c654,0x00000000,0xf2104800,0x2d7c0000
+ .long 0x0000ff54,0x22103228,0x00042d50,0xff842d68
+ .long 0x0004ff88,0x2d680008,0xff8c0c81,0x00000000
+ .long 0x6d000182,0x0c813ffe,0xf07d6d0a,0x0c813fff
+ .long 0x88416f00,0x00e2e081,0xe0810481,0x00003fff
+ .long 0xd2aeff54,0x41faf7b2,0xf2014080,0x2d7c3fff
+ .long 0x0000ff84,0x2d6eff88,0xff9402ae,0xfe000000
+ .long 0xff9400ae,0x01000000,0xff94222e,0xff940281
+ .long 0x7e000000,0xe081e081,0xe881d1c1,0xf22e4800
+ .long 0xff842d7c,0x3fff0000,0xff9042ae,0xff98f22e
+ .long 0x4828ff90,0xf227e00c,0xf2104823,0xf23a48a3
+ .long 0xf6c8f200,0x0100f200,0x0923f22e,0x6880ff84
+ .long 0xf2000980,0xf2000880,0xf23a54a3,0xf6ccf23a
+ .long 0x5523f6ce,0xf23a54a2,0xf6d0f23a,0x5522f6d2
+ .long 0xf2000ca3,0xf2000d23,0xf23a54a2,0xf6ccf23a
+ .long 0x5522f6ce,0xf2000ca3,0xd1fc0000,0x0010f200
+ .long 0x0d23f200,0x00a3f200,0x0822f210,0x48a2f21f
+ .long 0xd030f200,0x0422f200,0x9000f22e,0x4822ff84
+ .long 0x60ff0000,0x142af23c,0x58380001,0xf2c10000
+ .long 0x1678f200,0x0080f23a,0x44a8f64e,0xf23a4422
+ .long 0xf648f200,0x04a2f200,0x00a0f227,0xe00cf200
+ .long 0x0400f200,0x0023f22e,0x6880ff84,0xf2000080
+ .long 0xf20004a3,0xf23a5580,0xf660f23a,0x5500f662
+ .long 0xf20005a3,0xf2000523,0xf23a55a2,0xf65cf23a
+ .long 0x5522f65e,0xf2000ca3,0xf2000123,0xf23a54a2
+ .long 0xf658f22e,0x4823ff84,0xf20008a2,0xf21fd030
+ .long 0xf2000423,0xf2009000,0xf22e4822,0xff8460ff
+ .long 0x0000139c,0x60ff0000,0x12102d7c,0xffffff9c
+ .long 0xff5448e7,0x3f002610,0x28280004,0x2a280008
+ .long 0x42824a84,0x66342805,0x42857420,0x4286edc4
+ .long 0x6000edac,0xd4862d43,0xff842d44,0xff882d45
+ .long 0xff8c4482,0x2d42ff54,0xf22e4800,0xff844cdf
+ .long 0x00fc41ee,0xff846000,0xfe0c4286,0xedc46000
+ .long 0x2406edac,0x2e05edad,0x44860686,0x00000020
+ .long 0xecaf8887,0x2d43ff84,0x2d44ff88,0x2d45ff8c
+ .long 0x44822d42,0xff54f22e,0x4800ff84,0x4cdf00fc
+ .long 0x41eeff84,0x6000fdce,0xf2104800,0xf2000018
+ .long 0xf23a4838,0xf5a4f292,0x0014f200,0x9000123c
+ .long 0x0003f210,0x480060ff,0x000012d6,0xf2104800
+ .long 0x2d7c0000,0x0000ff54,0xf2000080,0xf23a4422
+ .long 0xf508f22e,0x6800ff84,0x3d6eff88,0xff86222e
+ .long 0xff840c81,0x00000000,0x6f0000da,0x0c813ffe
+ .long 0x80006d00,0xfda20c81,0x3fffc000,0x6e00fd98
+ .long 0x0c813ffe,0xf07d6d00,0x001a0c81,0x3fff8841
+ .long 0x6e000010,0xf20004a2,0xf23a4422,0xf4bc6000
+ .long 0xfe762d6e,0xff88ff94,0x02aefe00,0x0000ff94
+ .long 0x00ae0100,0x0000ff94,0x0c813fff,0x80006c44
+ .long 0xf23a4400,0xf4fc2d7c,0x3fff0000,0xff9042ae
+ .long 0xff98f22e,0x4828ff90,0x222eff94,0x02817e00
+ .long 0x0000e081,0xe081e881,0xf20004a2,0xf227e00c
+ .long 0xf2000422,0x41faf4e2,0xd1c1f23a,0x4480f466
+ .long 0x6000fd76,0xf23a4400,0xf4502d7c,0x3fff0000
+ .long 0xff9042ae,0xff98f22e,0x4828ff90,0x222eff94
+ .long 0x02817e00,0x0000e081,0xe081e881,0xf2000422
+ .long 0xf227e00c,0x41faf4a2,0xd1c1f23a,0x4480f41e
+ .long 0x6000fd36,0x0c810000,0x00006d10,0xf23a4400
+ .long 0xf414f200,0x900060ff,0x00001014,0xf23a4400
+ .long 0xf3fcf200,0x900060ff,0x0000102e,0x60ff0000
+ .long 0x10422210,0x32280004,0x02817fff,0xffff0c81
+ .long 0x3fff8000,0x6c56f210,0x4818f200,0x0080f200
+ .long 0x049af200,0x0022f23c,0x44a23f80,0x0000f200
+ .long 0x04202210,0x02818000,0x00000081,0x3f000000
+ .long 0x2f012f00,0x4280f227,0xe00141d7,0x61ffffff
+ .long 0xfe5adffc,0x0000000c,0x201ff200,0x9000123c
+ .long 0x0000f21f,0x442360ff,0x00001136,0xf2104818
+ .long 0xf23c4438,0x3f800000,0xf2d20000,0x0fac60ff
+ .long 0x00000f7c,0x60ff0000,0x0fba3ffd,0x0000de5b
+ .long 0xd8a93728,0x71950000,0x00003fff,0x0000b8aa
+ .long 0x3b295c17,0xf0bc0000,0x0000f23c,0x58000001
+ .long 0xf2104838,0xf2c10000,0x13502210,0x6d000090
+ .long 0x2f004280,0x61ffffff,0xfba2f21f,0x9000f23a
+ .long 0x4823ffb8,0x60ff0000,0x10d62210,0x6d000070
+ .long 0x2f004280,0x61ffffff,0xfd34f21f,0x9000f23a
+ .long 0x4823ff98,0x60ff0000,0x10c62210,0x6d000050
+ .long 0x22280008,0x662e2228,0x00040281,0x7fffffff
+ .long 0x66223210,0x02810000,0x7fff0481,0x00003fff
+ .long 0x67ff0000,0x12e4f200,0x9000f201,0x400060ff
+ .long 0x0000107c,0x2f004280,0x61ffffff,0xfb2ef21f
+ .long 0x9000f23a,0x4823ff54,0x60ff0000,0x106260ff
+ .long 0x00000ed6,0x22106d00,0xfff62f00,0x428061ff
+ .long 0xfffffcba,0xf21f9000,0xf23a4823,0xff2e60ff
+ .long 0x0000104c,0x406a934f,0x0979a371,0x3f734413
+ .long 0x509f8000,0xbfcd0000,0xc0219dc1,0xda994fd2
+ .long 0x00000000,0x40000000,0x935d8ddd,0xaaa8ac17
+ .long 0x00000000,0x3ffe0000,0xb17217f7,0xd1cf79ac
+ .long 0x00000000,0x3f56c16d,0x6f7bd0b2,0x3f811112
+ .long 0x302c712c,0x3fa55555,0x55554cc1,0x3fc55555
+ .long 0x55554a54,0x3fe00000,0x00000000,0x00000000
+ .long 0x00000000,0x3fff0000,0x80000000,0x00000000
+ .long 0x3f738000,0x3fff0000,0x8164d1f3,0xbc030773
+ .long 0x3fbef7ca,0x3fff0000,0x82cd8698,0xac2ba1d7
+ .long 0x3fbdf8a9,0x3fff0000,0x843a28c3,0xacde4046
+ .long 0x3fbcd7c9,0x3fff0000,0x85aac367,0xcc487b15
+ .long 0xbfbde8da,0x3fff0000,0x871f6196,0x9e8d1010
+ .long 0x3fbde85c,0x3fff0000,0x88980e80,0x92da8527
+ .long 0x3fbebbf1,0x3fff0000,0x8a14d575,0x496efd9a
+ .long 0x3fbb80ca,0x3fff0000,0x8b95c1e3,0xea8bd6e7
+ .long 0xbfba8373,0x3fff0000,0x8d1adf5b,0x7e5ba9e6
+ .long 0xbfbe9670,0x3fff0000,0x8ea4398b,0x45cd53c0
+ .long 0x3fbdb700,0x3fff0000,0x9031dc43,0x1466b1dc
+ .long 0x3fbeeeb0,0x3fff0000,0x91c3d373,0xab11c336
+ .long 0x3fbbfd6d,0x3fff0000,0x935a2b2f,0x13e6e92c
+ .long 0xbfbdb319,0x3fff0000,0x94f4efa8,0xfef70961
+ .long 0x3fbdba2b,0x3fff0000,0x96942d37,0x20185a00
+ .long 0x3fbe91d5,0x3fff0000,0x9837f051,0x8db8a96f
+ .long 0x3fbe8d5a,0x3fff0000,0x99e04593,0x20b7fa65
+ .long 0xbfbcde7b,0x3fff0000,0x9b8d39b9,0xd54e5539
+ .long 0xbfbebaaf,0x3fff0000,0x9d3ed9a7,0x2cffb751
+ .long 0xbfbd86da,0x3fff0000,0x9ef53260,0x91a111ae
+ .long 0xbfbebedd,0x3fff0000,0xa0b0510f,0xb9714fc2
+ .long 0x3fbcc96e,0x3fff0000,0xa2704303,0x0c496819
+ .long 0xbfbec90b,0x3fff0000,0xa43515ae,0x09e6809e
+ .long 0x3fbbd1db,0x3fff0000,0xa5fed6a9,0xb15138ea
+ .long 0x3fbce5eb,0x3fff0000,0xa7cd93b4,0xe965356a
+ .long 0xbfbec274,0x3fff0000,0xa9a15ab4,0xea7c0ef8
+ .long 0x3fbea83c,0x3fff0000,0xab7a39b5,0xa93ed337
+ .long 0x3fbecb00,0x3fff0000,0xad583eea,0x42a14ac6
+ .long 0x3fbe9301,0x3fff0000,0xaf3b78ad,0x690a4375
+ .long 0xbfbd8367,0x3fff0000,0xb123f581,0xd2ac2590
+ .long 0xbfbef05f,0x3fff0000,0xb311c412,0xa9112489
+ .long 0x3fbdfb3c,0x3fff0000,0xb504f333,0xf9de6484
+ .long 0x3fbeb2fb,0x3fff0000,0xb6fd91e3,0x28d17791
+ .long 0x3fbae2cb,0x3fff0000,0xb8fbaf47,0x62fb9ee9
+ .long 0x3fbcdc3c,0x3fff0000,0xbaff5ab2,0x133e45fb
+ .long 0x3fbee9aa,0x3fff0000,0xbd08a39f,0x580c36bf
+ .long 0xbfbeaefd,0x3fff0000,0xbf1799b6,0x7a731083
+ .long 0xbfbcbf51,0x3fff0000,0xc12c4cca,0x66709456
+ .long 0x3fbef88a,0x3fff0000,0xc346ccda,0x24976407
+ .long 0x3fbd83b2,0x3fff0000,0xc5672a11,0x5506dadd
+ .long 0x3fbdf8ab,0x3fff0000,0xc78d74c8,0xabb9b15d
+ .long 0xbfbdfb17,0x3fff0000,0xc9b9bd86,0x6e2f27a3
+ .long 0xbfbefe3c,0x3fff0000,0xcbec14fe,0xf2727c5d
+ .long 0xbfbbb6f8,0x3fff0000,0xce248c15,0x1f8480e4
+ .long 0xbfbcee53,0x3fff0000,0xd06333da,0xef2b2595
+ .long 0xbfbda4ae,0x3fff0000,0xd2a81d91,0xf12ae45a
+ .long 0x3fbc9124,0x3fff0000,0xd4f35aab,0xcfedfa1f
+ .long 0x3fbeb243,0x3fff0000,0xd744fcca,0xd69d6af4
+ .long 0x3fbde69a,0x3fff0000,0xd99d15c2,0x78afd7b6
+ .long 0xbfb8bc61,0x3fff0000,0xdbfbb797,0xdaf23755
+ .long 0x3fbdf610,0x3fff0000,0xde60f482,0x5e0e9124
+ .long 0xbfbd8be1,0x3fff0000,0xe0ccdeec,0x2a94e111
+ .long 0x3fbacb12,0x3fff0000,0xe33f8972,0xbe8a5a51
+ .long 0x3fbb9bfe,0x3fff0000,0xe5b906e7,0x7c8348a8
+ .long 0x3fbcf2f4,0x3fff0000,0xe8396a50,0x3c4bdc68
+ .long 0x3fbef22f,0x3fff0000,0xeac0c6e7,0xdd24392f
+ .long 0xbfbdbf4a,0x3fff0000,0xed4f301e,0xd9942b84
+ .long 0x3fbec01a,0x3fff0000,0xefe4b99b,0xdcdaf5cb
+ .long 0x3fbe8cac,0x3fff0000,0xf281773c,0x59ffb13a
+ .long 0xbfbcbb3f,0x3fff0000,0xf5257d15,0x2486cc2c
+ .long 0x3fbef73a,0x3fff0000,0xf7d0df73,0x0ad13bb9
+ .long 0xbfb8b795,0x3fff0000,0xfa83b2db,0x722a033a
+ .long 0x3fbef84b,0x3fff0000,0xfd3e0c0c,0xf486c175
+ .long 0xbfbef581,0xf210d080,0x22103228,0x0004f22e
+ .long 0x6800ff84,0x02817fff,0xffff0c81,0x3fb98000
+ .long 0x6c046000,0x00880c81,0x400d80c0,0x6f046000
+ .long 0x007cf200,0x0080f23c,0x44a34280,0x0000f22e
+ .long 0x6080ff54,0x2f0243fa,0xfbbcf22e,0x4080ff54
+ .long 0x222eff54,0x24010281,0x0000003f,0xe981d3c1
+ .long 0xec822202,0xe2819481,0x06820000,0x3ffff227
+ .long 0xe00cf23c,0x44a33c80,0x00002d59,0xff842d59
+ .long 0xff882d59,0xff8c3d59,0xff90f200,0x04283d59
+ .long 0xff94426e,0xff9642ae,0xff98d36e,0xff84f23a
+ .long 0x4823fb22,0xd36eff90,0x60000100,0x0c813fff
+ .long 0x80006e12,0xf2009000,0xf23c4422,0x3f800000
+ .long 0x60ff0000,0x0b12222e,0xff840c81,0x00000000
+ .long 0x6d0660ff,0x00000ac8,0x60ff0000,0x0a1af200
+ .long 0x9000f23c,0x44003f80,0x00002210,0x00810080
+ .long 0x0001f201,0x442260ff,0x00000adc,0xf210d080
+ .long 0x22103228,0x0004f22e,0x6800ff84,0x02817fff
+ .long 0xffff0c81,0x3fb98000,0x6c046000,0xff900c81
+ .long 0x400b9b07,0x6f046000,0xff84f200,0x0080f23a
+ .long 0x54a3fa62,0xf22e6080,0xff542f02,0x43fafac6
+ .long 0xf22e4080,0xff54222e,0xff542401,0x02810000
+ .long 0x003fe981,0xd3c1ec82,0x2202e281,0x94810682
+ .long 0x00003fff,0xf227e00c,0xf2000500,0xf23a54a3
+ .long 0xfa2c2d59,0xff84f23a,0x4923fa2a,0x2d59ff88
+ .long 0x2d59ff8c,0xf2000428,0x3d59ff90,0xf2000828
+ .long 0x3d59ff94,0x426eff96,0x42aeff98,0xf23a4823
+ .long 0xfa14d36e,0xff84d36e,0xff90f200,0x0080f200
+ .long 0x04a3f23a,0x5500fa1e,0xf23a5580,0xfa20f200
+ .long 0x0523f200,0x05a3f23a,0x5522fa1a,0xf23a55a2
+ .long 0xfa1cf200,0x0523f200,0x05a3f23a,0x5522fa16
+ .long 0xf20001a3,0xf2000523,0xf2000c22,0xf2000822
+ .long 0xf21fd030,0xf22e4823,0xff84f22e,0x4822ff90
+ .long 0xf22e4822,0xff84f200,0x90003d42,0xff84241f
+ .long 0x2d7c8000,0x0000ff88,0x42aeff8c,0x123c0000
+ .long 0xf22e4823,0xff8460ff,0x00000996,0xf2009000
+ .long 0xf23c4400,0x3f800000,0x22100081,0x00800001
+ .long 0xf2014422,0x60ff0000,0x098e2f01,0xe8082200
+ .long 0x02410003,0x0240000c,0x48403001,0x221f4a01
+ .long 0x671e0c01,0x000a6f12,0x0c01000e,0x6f3c0c01
+ .long 0x002f6f06,0x0c01003f,0x6f6260ff,0x00000baa
+ .long 0x4a00660c,0x41fb0170,0x000000d6,0x60000086
+ .long 0x0c000003,0x670a41fb,0x01700000,0x00d06074
+ .long 0x41fb0170,0x000000d2,0x606a0401,0x000b4a00
+ .long 0x661041fb,0x01700000,0x00cc0c01,0x00026f54
+ .long 0x605a0c00,0x0003670a,0x41fb0170,0x000000f2
+ .long 0x60e841fb,0x01700000,0x012460de,0x04010030
+ .long 0x4a006616,0x41fb0170,0x0000014e,0x0c010001
+ .long 0x6f220c01,0x00076f24,0x601a0c00,0x0003670a
+ .long 0x41fb0170,0x000001f2,0x60e241fb,0x01700000
+ .long 0x02a860d8,0x00ae0000,0x0208ff64,0xc2fc000c
+ .long 0x48404a00,0x6608f230,0xd0801000,0x4e754840
+ .long 0x3d701000,0xff902d70,0x1004ff94,0x2d701008
+ .long 0xff982200,0x428041ee,0xff904268,0x000261ff
+ .long 0x000062c6,0xf210d080,0x4e7551fc,0x40000000
+ .long 0xc90fdaa2,0x2168c235,0x40000000,0xc90fdaa2
+ .long 0x2168c234,0x40000000,0xc90fdaa2,0x2168c235
+ .long 0x3ffd0000,0x9a209a84,0xfbcff798,0x40000000
+ .long 0xadf85458,0xa2bb4a9a,0x3fff0000,0xb8aa3b29
+ .long 0x5c17f0bc,0x3ffd0000,0xde5bd8a9,0x37287195
+ .long 0x00000000,0x00000000,0x00000000,0x3ffd0000
+ .long 0x9a209a84,0xfbcff798,0x40000000,0xadf85458
+ .long 0xa2bb4a9a,0x3fff0000,0xb8aa3b29,0x5c17f0bb
+ .long 0x3ffd0000,0xde5bd8a9,0x37287195,0x00000000
+ .long 0x00000000,0x00000000,0x3ffd0000,0x9a209a84
+ .long 0xfbcff799,0x40000000,0xadf85458,0xa2bb4a9b
+ .long 0x3fff0000,0xb8aa3b29,0x5c17f0bc,0x3ffd0000
+ .long 0xde5bd8a9,0x37287195,0x00000000,0x00000000
+ .long 0x00000000,0x3ffe0000,0xb17217f7,0xd1cf79ac
+ .long 0x40000000,0x935d8ddd,0xaaa8ac17,0x3fff0000
+ .long 0x80000000,0x00000000,0x40020000,0xa0000000
+ .long 0x00000000,0x40050000,0xc8000000,0x00000000
+ .long 0x400c0000,0x9c400000,0x00000000,0x40190000
+ .long 0xbebc2000,0x00000000,0x40340000,0x8e1bc9bf
+ .long 0x04000000,0x40690000,0x9dc5ada8,0x2b70b59e
+ .long 0x40d30000,0xc2781f49,0xffcfa6d5,0x41a80000
+ .long 0x93ba47c9,0x80e98ce0,0x43510000,0xaa7eebfb
+ .long 0x9df9de8e,0x46a30000,0xe319a0ae,0xa60e91c7
+ .long 0x4d480000,0xc9767586,0x81750c17,0x5a920000
+ .long 0x9e8b3b5d,0xc53d5de5,0x75250000,0xc4605202
+ .long 0x8a20979b,0x3ffe0000,0xb17217f7,0xd1cf79ab
+ .long 0x40000000,0x935d8ddd,0xaaa8ac16,0x3fff0000
+ .long 0x80000000,0x00000000,0x40020000,0xa0000000
+ .long 0x00000000,0x40050000,0xc8000000,0x00000000
+ .long 0x400c0000,0x9c400000,0x00000000,0x40190000
+ .long 0xbebc2000,0x00000000,0x40340000,0x8e1bc9bf
+ .long 0x04000000,0x40690000,0x9dc5ada8,0x2b70b59d
+ .long 0x40d30000,0xc2781f49,0xffcfa6d5,0x41a80000
+ .long 0x93ba47c9,0x80e98cdf,0x43510000,0xaa7eebfb
+ .long 0x9df9de8d,0x46a30000,0xe319a0ae,0xa60e91c6
+ .long 0x4d480000,0xc9767586,0x81750c17,0x5a920000
+ .long 0x9e8b3b5d,0xc53d5de4,0x75250000,0xc4605202
+ .long 0x8a20979a,0x3ffe0000,0xb17217f7,0xd1cf79ac
+ .long 0x40000000,0x935d8ddd,0xaaa8ac17,0x3fff0000
+ .long 0x80000000,0x00000000,0x40020000,0xa0000000
+ .long 0x00000000,0x40050000,0xc8000000,0x00000000
+ .long 0x400c0000,0x9c400000,0x00000000,0x40190000
+ .long 0xbebc2000,0x00000000,0x40340000,0x8e1bc9bf
+ .long 0x04000000,0x40690000,0x9dc5ada8,0x2b70b59e
+ .long 0x40d30000,0xc2781f49,0xffcfa6d6,0x41a80000
+ .long 0x93ba47c9,0x80e98ce0,0x43510000,0xaa7eebfb
+ .long 0x9df9de8e,0x46a30000,0xe319a0ae,0xa60e91c7
+ .long 0x4d480000,0xc9767586,0x81750c18,0x5a920000
+ .long 0x9e8b3b5d,0xc53d5de5,0x75250000,0xc4605202
+ .long 0x8a20979b,0x2f003229,0x00005bee,0xff540281
+ .long 0x00007fff,0x30280000,0x02407fff,0x0c403fff
+ .long 0x6d0000c0,0x0c40400c,0x6e0000a4,0xf2284803
+ .long 0x0000f200,0x6000f23c,0x88000000,0x00004a29
+ .long 0x00046b5e,0x2f003d69,0x0000ff84,0x2d690004
+ .long 0xff882d69,0x0008ff8c,0x41eeff84,0x61ff0000
+ .long 0x60ba4480,0xd09ff22e,0xd080ff84,0x0c40c001
+ .long 0x6c36f21f,0x9000223c,0x80000000,0x0480ffff
+ .long 0xc0014480,0x0c000020,0x6c0ae0a9,0x42a72f01
+ .long 0x42a76028,0x04000020,0xe0a92f01,0x42a742a7
+ .long 0x601af229,0xd0800000,0xf21f9000,0x06403fff
+ .long 0x484042a7,0x2f3c8000,0x00002f00,0xf200b000
+ .long 0x123c0000,0xf21f4823,0x60ff0000,0x04ce201f
+ .long 0xc1494a29,0x00006bff,0x0000038c,0x60ff0000
+ .long 0x03c44a29,0x00046a16,0x201ff200,0x9000123c
+ .long 0x0003f229,0x48000000,0x60ff0000,0x049e201f
+ .long 0x204960ff,0x000002e2,0x00010000,0x80000000
+ .long 0x00000000,0x00000000,0x422eff65,0x2f00422e
+ .long 0xff5c600c,0x422eff65,0x2f001d7c,0x0001ff5c
+ .long 0x48e73f00,0x36280000,0x3d43ff58,0x02830000
+ .long 0x7fff2828,0x00042a28,0x00084a83,0x663c263c
+ .long 0x00003ffe,0x4a846616,0x28054285,0x04830000
+ .long 0x00204286,0xedc46000,0xedac9686,0x60224286
+ .long 0xedc46000,0x9686edac,0x2e05edad,0x44860686
+ .long 0x00000020,0xecaf8887,0x60060683,0x00003ffe
+ .long 0x30290000,0x3d40ff5a,0x322eff58,0xb1810281
+ .long 0x00008000,0x3d41ff5e,0x02800000,0x7fff2229
+ .long 0x00042429,0x00084a80,0x663c203c,0x00003ffe
+ .long 0x4a816616,0x22024282,0x04800000,0x00204286
+ .long 0xedc16000,0xeda99086,0x60224286,0xedc16000
+ .long 0x9086eda9,0x2e02edaa,0x44860686,0x00000020
+ .long 0xecaf8287,0x60060680,0x00003ffe,0x2d43ff54
+ .long 0x2f009083,0x42864283,0x227c0000,0x00004a80
+ .long 0x6c06201f,0x6000006a,0x588f4a86,0x6e0eb284
+ .long 0x6608b485,0x66046000,0x01366508,0x94859384
+ .long 0x42865283,0x4a80670e,0xd683d482,0xe39155c6
+ .long 0x52895380,0x60d4202e,0xff544a81,0x66162202
+ .long 0x42820480,0x00000020,0x4286edc1,0x6000eda9
+ .long 0x9086601c,0x4286edc1,0x60006b14,0x9086eda9
+ .long 0x2e02edaa,0x44860686,0x00000020,0xecaf8287
+ .long 0x0c800000,0x41fe6c2a,0x3d40ff90,0x2d41ff94
+ .long 0x2d42ff98,0x2c2eff54,0x3d46ff84,0x2d44ff88
+ .long 0x2d45ff8c,0xf22e4800,0xff901d7c,0x0001ff5d
+ .long 0x60362d41,0xff942d42,0xff980480,0x00003ffe
+ .long 0x3d40ff90,0x2c2eff54,0x04860000,0x3ffe2d46
+ .long 0xff54f22e,0x4800ff90,0x3d46ff84,0x2d44ff88
+ .long 0x2d45ff8c,0x422eff5d,0x4a2eff5c,0x67222c2e
+ .long 0xff545386,0xb0866d18,0x6e0eb284,0x6608b485
+ .long 0x66046000,0x007a6508,0xf22e4828,0xff845283
+ .long 0x3c2eff5a,0x6c04f200,0x001a4286,0x3c2eff5e
+ .long 0x7e08eeae,0x02830000,0x007f8686,0x1d43ff65
+ .long 0x4cdf00fc,0x201ff200,0x90004a2e,0xff5d6710
+ .long 0x123c0000,0xf23a4823,0xfdc060ff,0x0000024c
+ .long 0x123c0003,0xf2000000,0x60ff0000,0x023e5283
+ .long 0x0c800000,0x00086c04,0xe1ab6002,0x4283f23c
+ .long 0x44000000,0x0000422e,0xff5d6000,0xff942c03
+ .long 0x02860000,0x00014a86,0x6700ff86,0x52833c2e
+ .long 0xff5a0a86,0x00008000,0x3d46ff5a,0x6000ff72
+ .long 0x7fff0000,0xffffffff,0xffffffff,0x4a280000
+ .long 0x6b12f23c,0x44007f80,0x000000ae,0x02000410
+ .long 0xff644e75,0xf23c4400,0xff800000,0x00ae0a00
+ .long 0x0410ff64,0x4e7500ae,0x01002080,0xff64f23a
+ .long 0xd080ffbe,0x4e7500ae,0x00000800,0xff646008
+ .long 0x00ae0000,0x0a28ff64,0x22482200,0x020100c0
+ .long 0x660e4a28,0x00006a18,0x08ee0003,0xff646010
+ .long 0x2f094a28,0x00005bc1,0x61ff0000,0x0196225f
+ .long 0xf210d080,0x102eff62,0x0200000a,0x66024e75
+ .long 0x3d690000,0xff842d69,0x0004ff88,0x2d690008
+ .long 0xff8c41ee,0xff8461ff,0x00005cd0,0x06800000
+ .long 0x6000026e,0x8000ff84,0x816eff84,0xf22ed040
+ .long 0xff844e75,0x00ae0000,0x0a28ff64,0x4a105bc1
+ .long 0x61ff0000,0x013ef210,0xd080f23c,0x44800000
+ .long 0x00004e75,0x00ae0000,0x0a28ff64,0x51c161ff
+ .long 0x00000120,0xf210d080,0xf23c4480,0x00000000
+ .long 0x4e7500ae,0x00001048,0xff641200,0x020100c0
+ .long 0x675c4a28,0x00046b24,0x3d680000,0xff842d68
+ .long 0x0004ff88,0x2d680008,0xff8c41ee,0xff8448e7
+ .long 0xc08061ff,0x00005c44,0x4cdf0103,0x0c010040
+ .long 0x660e4aa8,0x00086614,0x4a280007,0x660e601e
+ .long 0x22280008,0x02810000,0x07ff6712,0x00ae0000
+ .long 0x0200ff64,0x600800ae,0x00001248,0xff644a28
+ .long 0x00005bc1,0x61ff0000,0x5f261d40,0xff64f210
+ .long 0xd080f23c,0x44800000,0x00004e75,0x00ae0000
+ .long 0x1248ff64,0x51c161ff,0x00005f04,0x1d40ff64
+ .long 0xf210d080,0xf23c4480,0x00000000,0x4e75f327
+ .long 0x4a2f0002,0x6b2edffc,0x0000000c,0xf294000e
+ .long 0xf2810014,0x006e0208,0xff664e75,0x00ae0800
+ .long 0x0208ff64,0x4e751d7c,0x0004ff64,0x006e0208
+ .long 0xff664e75,0x006e0208,0xff6661ff,0x00000bae
+ .long 0xdffc0000,0x000c4e75,0xf3274a2f,0x00026bea
+ .long 0xdffc0000,0x000cf200,0xa80081ae,0xff644e75
+ .long 0x00ae0000,0x0a28ff64,0x02410010,0xe8080200
+ .long 0x000f8001,0x2200e309,0x1d7b000a,0xff6441fb
+ .long 0x16204e75,0x04040400,0x04040400,0x04040400
+ .long 0x00000000,0x0c0c080c,0x0c0c080c,0x0c0c080c
+ .long 0x00000000,0x00000000,0x00000000,0x00000000
+ .long 0x00000000,0x00000000,0x00000000,0x00000000
+ .long 0x00000000,0x00000000,0x00000000,0x00000000
+ .long 0x00000000,0x00000000,0x00000001,0x00000000
+ .long 0x3f810000,0x00000000,0x00000000,0x00000000
+ .long 0x3f810000,0x00000000,0x00000000,0x00000000
+ .long 0x3f810000,0x00000000,0x00000000,0x00000000
+ .long 0x3f810000,0x00000100,0x00000000,0x00000000
+ .long 0x3c010000,0x00000000,0x00000000,0x00000000
+ .long 0x3c010000,0x00000000,0x00000000,0x00000000
+ .long 0x3c010000,0x00000000,0x00000000,0x00000000
+ .long 0x3c010000,0x00000000,0x00000800,0x00000000
+ .long 0x00000000,0x00000000,0x00000000,0x00000000
+ .long 0x00000000,0x00000000,0x00000000,0x00000000
+ .long 0x00000000,0x00000000,0x00000000,0x00000000
+ .long 0x00000000,0x00000000,0x00000000,0x00000000
+ .long 0x80000000,0x00000000,0x00000000,0x00000000
+ .long 0x80000000,0x00000000,0x00000000,0x00000000
+ .long 0x80000000,0x00000000,0x00000001,0x00000000
+ .long 0x80000000,0x00000000,0x00000000,0x00000000
+ .long 0xbf810000,0x00000000,0x00000000,0x00000000
+ .long 0xbf810000,0x00000000,0x00000000,0x00000000
+ .long 0xbf810000,0x00000100,0x00000000,0x00000000
+ .long 0xbf810000,0x00000000,0x00000000,0x00000000
+ .long 0xbc010000,0x00000000,0x00000000,0x00000000
+ .long 0xbc010000,0x00000000,0x00000000,0x00000000
+ .long 0xbc010000,0x00000000,0x00000800,0x00000000
+ .long 0xbc010000,0x00000000,0x00000000,0x00000000
+ .long 0x4a280000,0x6b10f23c,0x44000000,0x00001d7c
+ .long 0x0004ff64,0x4e75f23c,0x44008000,0x00001d7c
+ .long 0x000cff64,0x4e754a29,0x00006bea,0x60d84a28
+ .long 0x00006b10,0xf23c4400,0x7f800000,0x1d7c0002
+ .long 0xff644e75,0xf23c4400,0xff800000,0x1d7c000a
+ .long 0xff644e75,0x4a290000,0x6bea60d8,0x4a280000
+ .long 0x6ba460d0,0x4a280000,0x6b00fbbc,0x60c64a28
+ .long 0x00006b16,0x60be4a28,0x00006b0e,0xf23c4400
+ .long 0x3f800000,0x422eff64,0x4e75f23c,0x4400bf80
+ .long 0x00001d7c,0x0008ff64,0x4e753fff,0x0000c90f
+ .long 0xdaa22168,0xc235bfff,0x0000c90f,0xdaa22168
+ .long 0xc2354a28,0x00006b0e,0xf2009000,0xf23a4800
+ .long 0xffda6000,0xfcf0f200,0x9000f23a,0x4800ffd8
+ .long 0x6000fcea,0xf23c4480,0x3f800000,0x4a280000
+ .long 0x6a10f23c,0x44008000,0x00001d7c,0x000cff64
+ .long 0x6040f23c,0x44000000,0x00001d7c,0x0004ff64
+ .long 0x6030f23a,0x4880faea,0x61ff0000,0x00286000
+ .long 0xfb16f228,0x48800000,0x61ff0000,0x00186000
+ .long 0x030ef228,0x48800000,0x61ff0000,0x00086000
+ .long 0x02ee102e,0xff430240,0x0007303b,0x02064efb
+ .long 0x00020010,0x00180020,0x0026002c,0x00320038
+ .long 0x003ef22e,0xf040ffdc,0x4e75f22e,0xf040ffe8
+ .long 0x4e75f200,0x05004e75,0xf2000580,0x4e75f200
+ .long 0x06004e75,0xf2000680,0x4e75f200,0x07004e75
+ .long 0xf2000780,0x4e75122e,0xff4f67ff,0xfffff7dc
+ .long 0x0c010001,0x67000096,0x0c010002,0x67ffffff
+ .long 0xfa880c01,0x000467ff,0xfffff7c0,0x0c010005
+ .long 0x67ff0000,0x024060ff,0x0000024a,0x122eff4f
+ .long 0x67ffffff,0xfa640c01,0x000167ff,0xfffffa5a
+ .long 0x0c010002,0x67ffffff,0xfa500c01,0x000467ff
+ .long 0xfffffa46,0x0c010003,0x67ff0000,0x021860ff
+ .long 0x00000202,0x122eff4f,0x67ff0000,0x004e0c01
+ .long 0x000167ff,0x00000028,0x0c010002,0x67ffffff
+ .long 0xfa180c01,0x000467ff,0x00000030,0x0c010003
+ .long 0x67ff0000,0x01e060ff,0x000001ca,0x12280000
+ .long 0x10290000,0xb1010201,0x00801d41,0xff654a00
+ .long 0x6a00fdc4,0x6000fdd0,0x422eff65,0x2f001228
+ .long 0x00001029,0x0000b101,0x02010080,0x1d41ff65
+ .long 0x0c2e0004,0xff4f660c,0x41e90000,0x201f60ff
+ .long 0xfffff9c6,0xf21f9000,0xf2294800,0x00004a29
+ .long 0x00006b02,0x4e751d7c,0x0008ff64,0x4e75122e
+ .long 0xff4f67ff,0xfffff6e0,0x0c010001,0x6700ff8e
+ .long 0x0c010002,0x67ffffff,0xf9800c01,0x000467ff
+ .long 0xfffff6c4,0x0c010003,0x67ff0000,0x014860ff
+ .long 0x00000132,0x122eff4f,0x67ffffff,0xf95c0c01
+ .long 0x000167ff,0xfffff952,0x0c010002,0x67ffffff
+ .long 0xf9480c01,0x000467ff,0xfffff93e,0x0c010003
+ .long 0x67ff0000,0x011060ff,0x000000fa,0x122eff4f
+ .long 0x6700ff46,0x0c010001,0x6700ff22,0x0c010002
+ .long 0x67ffffff,0xf9140c01,0x000467ff,0xffffff2c
+ .long 0x0c010003,0x67ff0000,0x00dc60ff,0x000000c6
+ .long 0x122eff4f,0x67ffffff,0xf51e0c01,0x000167ff
+ .long 0xfffffce6,0x0c010002,0x67ffffff,0xfd0a0c01
+ .long 0x000467ff,0xfffff500,0x0c010003,0x67ff0000
+ .long 0x00a460ff,0x0000008e,0x122eff4f,0x67ffffff
+ .long 0xf4e60c01,0x000167ff,0xfffffcae,0x0c010002
+ .long 0x67ffffff,0xfcd20c01,0x000467ff,0xfffff4c8
+ .long 0x0c010003,0x67ff0000,0x006c60ff,0x00000056
+ .long 0x122eff4f,0x67ffffff,0xf8800c01,0x000367ff
+ .long 0x00000052,0x0c010005,0x67ff0000,0x003860ff
+ .long 0xfffff866,0x122eff4f,0x0c010003,0x67340c01
+ .long 0x0005671e,0x6058122e,0xff4f0c01,0x00036708
+ .long 0x0c010005,0x670c6036,0x00ae0100,0x4080ff64
+ .long 0x6010f229,0x48000000,0xf200a800,0x81aeff64
+ .long 0x4e75f229,0x48000000,0x4a290000,0x6b081d7c
+ .long 0x0001ff64,0x4e751d7c,0x0009ff64,0x4e75f228
+ .long 0x48000000,0xf200a800,0x81aeff64,0x4e75f228
+ .long 0x48000000,0x4a280000,0x6bdc1d7c,0x0001ff64
+ .long 0x4e751d7c,0x0009ff64,0x4e75122e,0xff4e67ff
+ .long 0xffffd936,0x0c010001,0x67ffffff,0xfba60c01
+ .long 0x000267ff,0xfffffbca,0x0c010004,0x67ffffff
+ .long 0xd9f60c01,0x000367ff,0xffffffb6,0x60ffffff
+ .long 0xffa0122e,0xff4e67ff,0xffffe620,0x0c010001
+ .long 0x67ffffff,0xfb6e0c01,0x000267ff,0xfffffbc8
+ .long 0x0c010004,0x67ffffff,0xe7560c01,0x000367ff
+ .long 0xffffff7e,0x60ffffff,0xff68122e,0xff4e67ff
+ .long 0xffffd4d2,0x0c010001,0x67ffffff,0xfb360c01
+ .long 0x000267ff,0xfffffb9a,0x0c010004,0x67ffffff
+ .long 0xd76a0c01,0x000367ff,0xffffff46,0x60ffffff
+ .long 0xff30122e,0xff4e67ff,0xffffd972,0x0c010001
+ .long 0x67ffffff,0xfafe0c01,0x000267ff,0xfffffb6a
+ .long 0x0c010004,0x67ffffff,0xdabc0c01,0x000367ff
+ .long 0xffffff0e,0x60ffffff,0xfef8122e,0xff4e67ff
+ .long 0xffffca6a,0x0c010001,0x67ffffff,0xfac60c01
+ .long 0x000267ff,0xfffffb6e,0x0c010004,0x67ffffff
+ .long 0xcc8a0c01,0x000367ff,0xfffffed6,0x60ffffff
+ .long 0xfec0122e,0xff4e67ff,0xffffcc76,0x0c010001
+ .long 0x67ffffff,0xfa8e0c01,0x000267ff,0xfffff6aa
+ .long 0x0c010004,0x67ffffff,0xcd060c01,0x000367ff
+ .long 0xfffffe9e,0x60ffffff,0xfe88122e,0xff4e67ff
+ .long 0xffffe662,0x0c010001,0x67ffffff,0xfa560c01
+ .long 0x000267ff,0xfffff672,0x0c010004,0x67ffffff
+ .long 0xe6c60c01,0x000367ff,0xfffffe66,0x60ffffff
+ .long 0xfe50122e,0xff4e67ff,0xffffb372,0x0c010001
+ .long 0x67ffffff,0xfa1e0c01,0x000267ff,0xfffff63a
+ .long 0x0c010004,0x67ffffff,0xb5380c01,0x000367ff
+ .long 0xfffffe2e,0x60ffffff,0xfe18122e,0xff4e67ff
+ .long 0xffffbdfc,0x0c010001,0x67ffffff,0xf9e60c01
+ .long 0x000267ff,0xfffff602,0x0c010004,0x67ffffff
+ .long 0xbf420c01,0x000367ff,0xfffffdf6,0x60ffffff
+ .long 0xfde0122e,0xff4e67ff,0xffffd17a,0x0c010001
+ .long 0x67ffffff,0xfa2a0c01,0x000267ff,0xfffffa00
+ .long 0x0c010004,0x67ffffff,0xd3080c01,0x000367ff
+ .long 0xfffffdbe,0x60ffffff,0xfda8122e,0xff4e67ff
+ .long 0xffffeb64,0x0c010001,0x67ffffff,0xf9f20c01
+ .long 0x000267ff,0xfffff9c8,0x0c010004,0x67ffffff
+ .long 0xec200c01,0x000367ff,0xfffffd86,0x60ffffff
+ .long 0xfd70122e,0xff4e67ff,0xffffec24,0x0c010001
+ .long 0x67ffffff,0xf9ba0c01,0x000267ff,0xfffff990
+ .long 0x0c010004,0x67ffffff,0xed360c01,0x000367ff
+ .long 0xfffffd4e,0x60ffffff,0xfd38122e,0xff4e67ff
+ .long 0xffffe178,0x0c010001,0x67ffffff,0xf51a0c01
+ .long 0x000267ff,0xfffff960,0x0c010004,0x67ffffff
+ .long 0xe30c0c01,0x000367ff,0xfffffd16,0x60ffffff
+ .long 0xfd00122e,0xff4e67ff,0xffffe582,0x0c010001
+ .long 0x67ffffff,0xf4e20c01,0x000267ff,0xfffff928
+ .long 0x0c010004,0x67ffffff,0xe5940c01,0x000367ff
+ .long 0xfffffcde,0x60ffffff,0xfcc8122e,0xff4e67ff
+ .long 0xffffe59a,0x0c010001,0x67ffffff,0xf4aa0c01
+ .long 0x000267ff,0xfffff8f0,0x0c010004,0x67ffffff
+ .long 0xe5d60c01,0x000367ff,0xfffffca6,0x60ffffff
+ .long 0xfc90122e,0xff4e67ff,0xffffd530,0x0c010001
+ .long 0x67ffffff,0xf8da0c01,0x000267ff,0xfffff888
+ .long 0x0c010004,0x67ffffff,0xd5b60c01,0x000367ff
+ .long 0xfffffc6e,0x60ffffff,0xfc58122e,0xff4e67ff
+ .long 0xffffcac2,0x0c010001,0x67ffffff,0xf8de0c01
+ .long 0x000267ff,0xfffff442,0x0c010004,0x67ffffff
+ .long 0xcb340c01,0x000367ff,0xfffffc36,0x60ffffff
+ .long 0xfc20122e,0xff4e67ff,0xffffb14c,0x0c010001
+ .long 0x67ffffff,0xf86a0c01,0x000267ff,0xfffff40a
+ .long 0x0c010004,0x67ffffff,0xb30e0c01,0x000367ff
+ .long 0xfffffbfe,0x60ffffff,0xfbe8122e,0xff4e67ff
+ .long 0xffffd40e,0x0c010001,0x67ffffff,0xf7b60c01
+ .long 0x000267ff,0xfffff3d2,0x0c010004,0x67ffffff
+ .long 0xd40c0c01,0x000367ff,0xfffffbc6,0x60ffffff
+ .long 0xfbb0122e,0xff4e67ff,0xffffd40a,0x0c010001
+ .long 0x67ffffff,0xf77e0c01,0x000267ff,0xfffff39a
+ .long 0x0c010004,0x67ffffff,0xd41a0c01,0x000367ff
+ .long 0xfffffb8e,0x60ffffff,0xfb78122e,0xff4e67ff
+ .long 0xffffb292,0x0c010001,0x67ffffff,0xf81a0c01
+ .long 0x000267ff,0xfffff83e,0x0c010004,0x67ffffff
+ .long 0xb50a0c01,0x000367ff,0xfffff83a,0x60ffffff
+ .long 0xf844122e,0xff4e67ff,0xfffff89e,0x0c010001
+ .long 0x67ffffff,0xf8ca0c01,0x000267ff,0xfffff8f8
+ .long 0x0c010004,0x67ffffff,0xf8800c01,0x000367ff
+ .long 0xfffffab4,0x60ffffff,0xfac0122e,0xff4e67ff
+ .long 0xfffff96e,0x0c010001,0x67ffffff,0xf99a0c01
+ .long 0x000267ff,0xfffff9c8,0x0c010004,0x67ffffff
+ .long 0xf9500c01,0x000367ff,0xfffffa7c,0x60ffffff
+ .long 0xfa88122e,0xff4e67ff,0xfffff9d8,0x0c010001
+ .long 0x67ffffff,0xfa060c01,0x000267ff,0xfffffa34
+ .long 0x0c010004,0x67ffffff,0xf9ba0c01,0x000367ff
+ .long 0xfffffa44,0x60ffffff,0xfa500c2f,0x00070003
+ .long 0x673e1d7c,0x0000ff4e,0x1d7c0000,0xff4ff22e
+ .long 0xf080ff78,0x41ef0004,0x43eeff78,0x0c010003
+ .long 0x67160c01,0x00026708,0x61ff0000,0x02004e75
+ .long 0x61ff0000,0x1b9e4e75,0x61ff0000,0x05e44e75
+ .long 0x1d7c0004,0xff4e60c0,0x4afc006d,0x000005d2
+ .long 0x00000fc8,0xfffffa6e,0x0000106c,0x00002314
+ .long 0x00000000,0xfffffaa6,0x00000000,0xfffffade
+ .long 0xfffffb16,0xfffffb4e,0x00000000,0xfffffb86
+ .long 0xfffffbbe,0xfffffbf6,0xfffffc2e,0xfffffc66
+ .long 0xfffffc9e,0xfffffcd6,0x00000000,0xfffffd0e
+ .long 0xfffffd46,0xfffffd7e,0x00000000,0x00001112
+ .long 0xfffffdb6,0x00000ca8,0x00000000,0xfffffdee
+ .long 0xfffffe26,0xfffffe5e,0xfffffe96,0x0000089e
+ .long 0xffffff06,0x00001b84,0x000001de,0x00001854
+ .long 0xffffff3e,0xffffff76,0x00001512,0x00001f4c
+ .long 0x00000000,0x00000000,0x00000000,0x00000000
+ .long 0x00000000,0x00000000,0x00000000,0xfffffece
+ .long 0xfffffece,0xfffffece,0xfffffece,0xfffffece
+ .long 0xfffffece,0xfffffece,0xfffffece,0x000013b0
+ .long 0x00000000,0x00000f56,0x00000000,0x00000000
+ .long 0x00000000,0x00000000,0x00000000,0x000005c0
+ .long 0x00002302,0x00000000,0x00000000,0x000005ca
+ .long 0x0000230c,0x00000000,0x00000000,0x00000000
+ .long 0x00000000,0x00000000,0x00000000,0x00000000
+ .long 0x00000000,0x00000000,0x00000000,0x00000000
+ .long 0x00000000,0x00000000,0x00000000,0x00000000
+ .long 0x00000000,0x00000000,0x00000000,0x00001100
+ .long 0x00000000,0x00000c96,0x00000000,0x0000110a
+ .long 0x00000000,0x00000ca0,0x00000000,0x0000088c
+ .long 0x00000000,0x00001b72,0x000001cc,0x00000896
+ .long 0x00000000,0x00001b7c,0x000001d6,0x00001f3a
+ .long 0x00000000,0x00000000,0x00000000,0x00001f44
+ .long 0xffffc001,0xffffff81,0xfffffc01,0x00004000
+ .long 0x0000007f,0x000003ff,0x02000030,0x00000040
+ .long 0x60080200,0x00300000,0x00802d40,0xff5c4241
+ .long 0x122eff4f,0xe709822e,0xff4e6600,0x02e43d69
+ .long 0x0000ff90,0x2d690004,0xff942d69,0x0008ff98
+ .long 0x3d680000,0xff842d68,0x0004ff88,0x2d680008
+ .long 0xff8c61ff,0x000024ce,0x2f0061ff,0x00002572
+ .long 0xd197322e,0xff5eec09,0x201fb0bb,0x14846700
+ .long 0x011e6d00,0x0062b0bb,0x14846700,0x021a6e00
+ .long 0x014af22e,0xd080ff90,0xf22e9000,0xff5cf23c
+ .long 0x88000000,0x0000f22e,0x4823ff84,0xf201a800
+ .long 0xf23c9000,0x00000000,0x83aeff64,0xf22ef080
+ .long 0xff842f02,0x322eff84,0x24010281,0x00007fff
+ .long 0x02428000,0x92808242,0x3d41ff84,0x241ff22e
+ .long 0xd080ff84,0x4e75f22e,0xd080ff90,0xf22e9000
+ .long 0xff5cf23c,0x88000000,0x0000f22e,0x4823ff84
+ .long 0xf201a800,0xf23c9000,0x00000000,0x83aeff64
+ .long 0x00ae0000,0x1048ff64,0x122eff62,0x02010013
+ .long 0x661c082e,0x0003ff64,0x56c1202e,0xff5c61ff
+ .long 0x00004fcc,0x812eff64,0xf210d080,0x4e75222e
+ .long 0xff5c0201,0x00c06634,0xf22ef080,0xff842f02
+ .long 0x322eff84,0x34010281,0x00007fff,0x92800481
+ .long 0x00006000,0x02417fff,0x02428000,0x82423d41
+ .long 0xff84241f,0xf22ed040,0xff8460a6,0xf22ed080
+ .long 0xff90222e,0xff5c0201,0x0030f201,0x9000f22e
+ .long 0x4823ff84,0xf23c9000,0x00000000,0x60aaf22e
+ .long 0xd080ff90,0xf22e9000,0xff5cf23c,0x88000000
+ .long 0x0000f22e,0x4823ff84,0xf201a800,0xf23c9000
+ .long 0x00000000,0x83aeff64,0xf2000098,0xf23c58b8
+ .long 0x0002f293,0xff3c6000,0xfee408ee,0x0003ff66
+ .long 0xf22ed080,0xff90f23c,0x90000000,0x0010f23c
+ .long 0x88000000,0x0000f22e,0x4823ff84,0xf201a800
+ .long 0xf23c9000,0x00000000,0x83aeff64,0x122eff62
+ .long 0x0201000b,0x6620f22e,0xf080ff84,0x41eeff84
+ .long 0x222eff5c,0x61ff0000,0x4dd8812e,0xff64f22e
+ .long 0xd080ff84,0x4e75f22e,0xd040ff90,0x222eff5c
+ .long 0x020100c0,0x6652f22e,0x9000ff5c,0xf23c8800
+ .long 0x00000000,0xf22e48a3,0xff84f23c,0x90000000
+ .long 0x0000f22e,0xf040ff84,0x2f02322e,0xff842401
+ .long 0x02810000,0x7fff0242,0x80009280,0x06810000
+ .long 0x60000241,0x7fff8242,0x3d41ff84,0x241ff22e
+ .long 0xd040ff84,0x6000ff80,0x222eff5c,0x02010030
+ .long 0xf2019000,0x60a6f22e,0xd080ff90,0xf22e9000
+ .long 0xff5cf23c,0x88000000,0x0000f22e,0x4823ff84
+ .long 0xf201a800,0xf23c9000,0x00000000,0x83aeff64
+ .long 0xf2000098,0xf23c58b8,0x0002f292,0xfde0f294
+ .long 0xfefaf22e,0xd040ff90,0x222eff5c,0x020100c0
+ .long 0x00010010,0xf2019000,0xf23c8800,0x00000000
+ .long 0xf22e48a3,0xff84f23c,0x90000000,0x0000f200
+ .long 0x0498f23c,0x58b80002,0xf293fda2,0x6000febc
+ .long 0x323b120a,0x4efb1006,0x4afc0030,0xfd120072
+ .long 0x00cc006c,0xfd120066,0x00000000,0x00720072
+ .long 0x0060006c,0x00720066,0x00000000,0x009e0060
+ .long 0x009e006c,0x009e0066,0x00000000,0x006c006c
+ .long 0x006c006c,0x006c0066,0x00000000,0xfd120072
+ .long 0x00cc006c,0xfd120066,0x00000000,0x00660066
+ .long 0x00660066,0x00660066,0x00000000,0x60ff0000
+ .long 0x230e60ff,0x00002284,0x60ff0000,0x227e1028
+ .long 0x00001229,0x0000b101,0x6a10f23c,0x44008000
+ .long 0x00001d7c,0x000cff64,0x4e75f23c,0x44000000
+ .long 0x00001d7c,0x0004ff64,0x4e75f229,0xd0800000
+ .long 0x10280000,0x12290000,0xb1016a10,0xf2000018
+ .long 0xf200001a,0x1d7c000a,0xff644e75,0xf2000018
+ .long 0x1d7c0002,0xff644e75,0xf228d080,0x00001028
+ .long 0x00001229,0x0000b101,0x6ae260d0,0x02000030
+ .long 0x00000040,0x60080200,0x00300000,0x00802d40
+ .long 0xff5c122e,0xff4e6600,0x02620200,0x00c06600
+ .long 0x007c4a28,0x00006a06,0x08ee0003,0xff64f228
+ .long 0xd0800000,0x4e750200,0x00c06600,0x006008ee
+ .long 0x0003ff66,0x4a280000,0x6a0608ee,0x0003ff64
+ .long 0xf228d080,0x0000082e,0x0003ff62,0x66024e75
+ .long 0x3d680000,0xff842d68,0x0004ff88,0x2d680008
+ .long 0xff8c41ee,0xff8461ff,0x00004950,0x44400640
+ .long 0x6000322e,0xff840241,0x80000240,0x7fff8041
+ .long 0x3d40ff84,0xf22ed040,0xff844e75,0x0c000040
+ .long 0x667e3d68,0x0000ff84,0x2d680004,0xff882d68
+ .long 0x0008ff8c,0x61ff0000,0x206c0c80,0x0000007f
+ .long 0x6c000092,0x0c80ffff,0xff816700,0x01786d00
+ .long 0x00f4f23c,0x88000000,0x0000f22e,0x9000ff5c
+ .long 0xf22e4800,0xff84f201,0xa800f23c,0x90000000
+ .long 0x000083ae,0xff642f02,0xf22ef080,0xff84322e
+ .long 0xff843401,0x02810000,0x7fff9280,0x02428000
+ .long 0x84413d42,0xff84241f,0xf22ed080,0xff844e75
+ .long 0x3d680000,0xff842d68,0x0004ff88,0x2d680008
+ .long 0xff8c61ff,0x00001fee,0x0c800000,0x03ff6c00
+ .long 0x00140c80,0xfffffc01,0x670000fa,0x6d000076
+ .long 0x6000ff80,0x08ee0003,0xff664a2e,0xff846a06
+ .long 0x08ee0003,0xff64122e,0xff620201,0x000b661a
+ .long 0x41eeff84,0x222eff5c,0x61ff0000,0x4a74812e
+ .long 0xff64f22e,0xd080ff84,0x4e752d6e,0xff88ff94
+ .long 0x2d6eff8c,0xff98322e,0xff842f02,0x34010281
+ .long 0x00007fff,0x92800242,0x80000681,0x00006000
+ .long 0x02417fff,0x84413d42,0xff90f22e,0xd040ff90
+ .long 0x241f60ac,0xf23c8800,0x00000000,0xf22e9000
+ .long 0xff5cf22e,0x4800ff84,0xf23c9000,0x00000000
+ .long 0xf201a800,0x83aeff64,0x00ae0000,0x1048ff64
+ .long 0x122eff62,0x02010013,0x661c082e,0x0003ff64
+ .long 0x56c1202e,0xff5c61ff,0x00004ae4,0x812eff64
+ .long 0xf210d080,0x4e752f02,0x322eff84,0x24010281
+ .long 0x00007fff,0x02428000,0x92800481,0x00006000
+ .long 0x02417fff,0x82423d41,0xff84241f,0xf22ed040
+ .long 0xff8460b6,0xf23c8800,0x00000000,0xf22e9000
+ .long 0xff5cf22e,0x4800ff84,0xf201a800,0xf23c9000
+ .long 0x00000000,0x83aeff64,0xf2000098,0xf23c58b8
+ .long 0x0002f293,0xff746000,0xfe7e0c01,0x00046700
+ .long 0xfdb60c01,0x000567ff,0x00001f98,0x0c010003
+ .long 0x67ff0000,0x1fa2f228,0x48000000,0xf200a800
+ .long 0xe1981d40,0xff644e75,0x51fc51fc,0x51fc51fc
+ .long 0x00003fff,0x0000007e,0x000003fe,0xffffc001
+ .long 0xffffff81,0xfffffc01,0x02000030,0x00000040
+ .long 0x60080200,0x00300000,0x00802d40,0xff5c4241
+ .long 0x122eff4f,0xe709822e,0xff4e6600,0x02d63d69
+ .long 0x0000ff90,0x2d690004,0xff942d69,0x0008ff98
+ .long 0x3d680000,0xff842d68,0x0004ff88,0x2d680008
+ .long 0xff8c61ff,0x00001e0e,0x2f0061ff,0x00001eb2
+ .long 0x4497d197,0x322eff5e,0xec09201f,0xb0bb148e
+ .long 0x6f000074,0xb0bb1520,0xff7a6700,0x020c6e00
+ .long 0x013cf22e,0xd080ff90,0xf22e9000,0xff5cf23c
+ .long 0x88000000,0x0000f22e,0x4820ff84,0xf201a800
+ .long 0xf23c9000,0x00000000,0x83aeff64,0xf22ef080
+ .long 0xff842f02,0x322eff84,0x24010281,0x00007fff
+ .long 0x02428000,0x92808242,0x3d41ff84,0x241ff22e
+ .long 0xd080ff84,0x4e750000,0x7fff0000,0x407f0000
+ .long 0x43ff201f,0x60c62f00,0xf22ed080,0xff90f22e
+ .long 0x9000ff5c,0xf23c8800,0x00000000,0xf22e4820
+ .long 0xff84f200,0xa800f23c,0x90000000,0x000081ae
+ .long 0xff64f227,0xe0013017,0xdffc0000,0x000c0280
+ .long 0x00007fff,0x9097b0bb,0x14ae6db6,0x201f00ae
+ .long 0x00001048,0xff64122e,0xff620201,0x0013661c
+ .long 0x082e0003,0xff6456c1,0x202eff5c,0x61ff0000
+ .long 0x48de812e,0xff64f210,0xd0804e75,0x222eff5c
+ .long 0x020100c0,0x6634f22e,0xf080ff84,0x2f02322e
+ .long 0xff843401,0x02810000,0x7fff9280,0x04810000
+ .long 0x60000241,0x7fff0242,0x80008242,0x3d41ff84
+ .long 0x241ff22e,0xd040ff84,0x60a6f22e,0xd080ff90
+ .long 0x222eff5c,0x02010030,0xf2019000,0xf22e4820
+ .long 0xff84f23c,0x90000000,0x000060aa,0x08ee0003
+ .long 0xff66f22e,0xd080ff90,0xf23c9000,0x00000010
+ .long 0xf23c8800,0x00000000,0xf22e4820,0xff84f201
+ .long 0xa800f23c,0x90000000,0x000083ae,0xff64122e
+ .long 0xff620201,0x000b6620,0xf22ef080,0xff8441ee
+ .long 0xff84222e,0xff5c61ff,0x00004726,0x812eff64
+ .long 0xf22ed080,0xff844e75,0xf22ed040,0xff90222e
+ .long 0xff5c0201,0x00c06652,0xf22e9000,0xff5cf23c
+ .long 0x88000000,0x0000f22e,0x48a0ff84,0xf23c9000
+ .long 0x00000000,0xf22ef040,0xff842f02,0x322eff84
+ .long 0x24010281,0x00007fff,0x02428000,0x92800681
+ .long 0x00006000,0x02417fff,0x82423d41,0xff84241f
+ .long 0xf22ed040,0xff846000,0xff80222e,0xff5c0201
+ .long 0x0030f201,0x900060a6,0xf22ed080,0xff90f22e
+ .long 0x9000ff5c,0xf23c8800,0x00000000,0xf22e4820
+ .long 0xff84f201,0xa800f23c,0x90000000,0x000083ae
+ .long 0xff64f200,0x0098f23c,0x58b80001,0xf292fdee
+ .long 0xf294fefa,0xf22ed040,0xff90222e,0xff5c0201
+ .long 0x00c00001,0x0010f201,0x9000f23c,0x88000000
+ .long 0x0000f22e,0x48a0ff84,0xf23c9000,0x00000000
+ .long 0xf2000498,0xf23c58b8,0x0001f293,0xfdb06000
+ .long 0xfebc323b,0x120a4efb,0x10064afc,0x0030fd20
+ .long 0x009e0072,0x0060fd20,0x00660000,0x00000072
+ .long 0x006c0072,0x00600072,0x00660000,0x000000d0
+ .long 0x00d0006c,0x006000d0,0x00660000,0x00000060
+ .long 0x00600060,0x00600060,0x00660000,0x0000fd20
+ .long 0x009e0072,0x0060fd20,0x00660000,0x00000066
+ .long 0x00660066,0x00660066,0x00660000,0x000060ff
+ .long 0x00001bd8,0x60ff0000,0x1bd260ff,0x00001c50
+ .long 0x10280000,0x12290000,0xb1016a10,0xf23c4400
+ .long 0x80000000,0x1d7c000c,0xff644e75,0xf23c4400
+ .long 0x00000000,0x1d7c0004,0xff644e75,0x006e0410
+ .long 0xff661028,0x00001229,0x0000b101,0x6a10f23c
+ .long 0x4400ff80,0x00001d7c,0x000aff64,0x4e75f23c
+ .long 0x44007f80,0x00001d7c,0x0002ff64,0x4e751029
+ .long 0x00001228,0x0000b101,0x6a16f229,0xd0800000
+ .long 0xf2000018,0xf200001a,0x1d7c000a,0xff644e75
+ .long 0xf229d080,0x0000f200,0x00181d7c,0x0002ff64
+ .long 0x4e750200,0x00300000,0x00406008,0x02000030
+ .long 0x00000080,0x2d40ff5c,0x122eff4e,0x66000276
+ .long 0x020000c0,0x66000090,0x2d680004,0xff882d68
+ .long 0x0008ff8c,0x30280000,0x0a408000,0x6a061d7c
+ .long 0x0008ff64,0x3d40ff84,0xf22ed080,0xff844e75
+ .long 0x020000c0,0x666008ee,0x0003ff66,0x2d680004
+ .long 0xff882d68,0x0008ff8c,0x30280000,0x0a408000
+ .long 0x6a061d7c,0x0008ff64,0x3d40ff84,0xf22ed080
+ .long 0xff84082e,0x0003ff62,0x66024e75,0x41eeff84
+ .long 0x61ff0000,0x42664440,0x06406000,0x322eff84
+ .long 0x02418000,0x02407fff,0x80413d40,0xff84f22e
+ .long 0xd040ff84,0x4e750c00,0x0040667e,0x3d680000
+ .long 0xff842d68,0x0004ff88,0x2d680008,0xff8c61ff
+ .long 0x00001982,0x0c800000,0x007f6c00,0x00900c80
+ .long 0xffffff81,0x67000178,0x6d0000f4,0xf23c8800
+ .long 0x00000000,0xf22e9000,0xff5cf22e,0x481aff84
+ .long 0xf201a800,0xf23c9000,0x00000000,0x83aeff64
+ .long 0x2f02f22e,0xf080ff84,0x322eff84,0x34010281
+ .long 0x00007fff,0x92800242,0x80008441,0x3d42ff84
+ .long 0x241ff22e,0xd080ff84,0x4e753d68,0x0000ff84
+ .long 0x2d680004,0xff882d68,0x0008ff8c,0x61ff0000
+ .long 0x19040c80,0x000003ff,0x6c120c80,0xfffffc01
+ .long 0x670000fc,0x6d000078,0x6000ff82,0x08ee0003
+ .long 0xff660a2e,0x0080ff84,0x6a0608ee,0x0003ff64
+ .long 0x122eff62,0x0201000b,0x661a41ee,0xff84222e
+ .long 0xff5c61ff,0x0000438a,0x812eff64,0xf22ed080
+ .long 0xff844e75,0x2d6eff88,0xff942d6e,0xff8cff98
+ .long 0x322eff84,0x2f022401,0x02810000,0x7fff0242
+ .long 0x80009280,0x06810000,0x60000241,0x7fff8242
+ .long 0x3d41ff90,0xf22ed040,0xff90241f,0x60acf23c
+ .long 0x88000000,0x0000f22e,0x9000ff5c,0xf22e481a
+ .long 0xff84f23c,0x90000000,0x0000f201,0xa80083ae
+ .long 0xff6400ae,0x00001048,0xff64122e,0xff620201
+ .long 0x0013661c,0x082e0003,0xff6456c1,0x202eff5c
+ .long 0x61ff0000,0x43fa812e,0xff64f210,0xd0804e75
+ .long 0x2f02322e,0xff842401,0x02810000,0x7fff0242
+ .long 0x80009280,0x04810000,0x60000241,0x7fff8242
+ .long 0x3d41ff84,0xf22ed040,0xff84241f,0x60b6f23c
+ .long 0x88000000,0x0000f22e,0x9000ff5c,0xf22e481a
+ .long 0xff84f201,0xa800f23c,0x90000000,0x000083ae
+ .long 0xff64f200,0x0098f23c,0x58b80002,0xf293ff74
+ .long 0x6000fe7e,0x0c010004,0x6700fdb6,0x0c010005
+ .long 0x67ff0000,0x18ae0c01,0x000367ff,0x000018b8
+ .long 0xf228481a,0x0000f200,0xa800e198,0x1d40ff64
+ .long 0x4e75122e,0xff4e6610,0x4a280000,0x6b024e75
+ .long 0x1d7c0008,0xff644e75,0x0c010001,0x67400c01
+ .long 0x00026724,0x0c010005,0x67ff0000,0x18660c01
+ .long 0x000367ff,0x00001870,0x4a280000,0x6b024e75
+ .long 0x1d7c0008,0xff644e75,0x4a280000,0x6b081d7c
+ .long 0x0002ff64,0x4e751d7c,0x000aff64,0x4e754a28
+ .long 0x00006b08,0x1d7c0004,0xff644e75,0x1d7c000c
+ .long 0xff644e75,0x122eff4e,0x66280200,0x0030f200
+ .long 0x9000f23c,0x88000000,0x0000f228,0x48010000
+ .long 0xf23c9000,0x00000000,0xf200a800,0x81aeff64
+ .long 0x4e750c01,0x0001672e,0x0c010002,0x674e0c01
+ .long 0x00046710,0x0c010005,0x67ff0000,0x17d660ff
+ .long 0x000017e4,0x3d680000,0xff841d7c,0x0080ff88
+ .long 0x41eeff84,0x60a44a28,0x00006b10,0xf23c4400
+ .long 0x00000000,0x1d7c0004,0xff644e75,0xf23c4400
+ .long 0x80000000,0x1d7c000c,0xff644e75,0xf228d080
+ .long 0x00004a28,0x00006b08,0x1d7c0002,0xff644e75
+ .long 0x1d7c000a,0xff644e75,0x122eff4e,0x6618f23c
+ .long 0x88000000,0x0000f228,0x48030000,0xf200a800
+ .long 0x81aeff64,0x4e750c01,0x0001672e,0x0c010002
+ .long 0x674e0c01,0x00046710,0x0c010005,0x67ff0000
+ .long 0x174260ff,0x00001750,0x3d680000,0xff841d7c
+ .long 0x0080ff88,0x41eeff84,0x60b44a28,0x00006b10
+ .long 0xf23c4400,0x00000000,0x1d7c0004,0xff644e75
+ .long 0xf23c4400,0x80000000,0x1d7c000c,0xff644e75
+ .long 0xf228d080,0x00004a28,0x00006b08,0x1d7c0002
+ .long 0xff644e75,0x1d7c000a,0xff644e75,0x02000030
+ .long 0x00000040,0x60080200,0x00300000,0x00802d40
+ .long 0xff5c122e,0xff4e6600,0x025c0200,0x00c0667e
+ .long 0x2d680004,0xff882d68,0x0008ff8c,0x32280000
+ .long 0x0881000f,0x3d41ff84,0xf22ed080,0xff844e75
+ .long 0x020000c0,0x665808ee,0x0003ff66,0x2d680004
+ .long 0xff882d68,0x0008ff8c,0x30280000,0x0880000f
+ .long 0x3d40ff84,0xf22ed080,0xff84082e,0x0003ff62
+ .long 0x66024e75,0x41eeff84,0x61ff0000,0x3e0e4440
+ .long 0x06406000,0x322eff84,0x02418000,0x02407fff
+ .long 0x80413d40,0xff84f22e,0xd040ff84,0x4e750c00
+ .long 0x0040667e,0x3d680000,0xff842d68,0x0004ff88
+ .long 0x2d680008,0xff8c61ff,0x0000152a,0x0c800000
+ .long 0x007f6c00,0x00900c80,0xffffff81,0x67000170
+ .long 0x6d0000ec,0xf23c8800,0x00000000,0xf22e9000
+ .long 0xff5cf22e,0x4818ff84,0xf201a800,0xf23c9000
+ .long 0x00000000,0x83aeff64,0x2f02f22e,0xf080ff84
+ .long 0x322eff84,0x24010281,0x00007fff,0x92800242
+ .long 0x80008441,0x3d42ff84,0x241ff22e,0xd080ff84
+ .long 0x4e753d68,0x0000ff84,0x2d680004,0xff882d68
+ .long 0x0008ff8c,0x61ff0000,0x14ac0c80,0x000003ff
+ .long 0x6c120c80,0xfffffc01,0x670000f4,0x6d000070
+ .long 0x6000ff82,0x08ee0003,0xff6608ae,0x0007ff84
+ .long 0x122eff62,0x0201000b,0x661a41ee,0xff84222e
+ .long 0xff5c61ff,0x00003f3a,0x812eff64,0xf22ed080
+ .long 0xff844e75,0x2d6eff88,0xff942d6e,0xff8cff98
+ .long 0x322eff84,0x2f022401,0x02810000,0x7fff0242
+ .long 0x80009280,0x06810000,0x60000241,0x7fff8242
+ .long 0x3d41ff90,0xf22ed040,0xff90241f,0x60acf23c
+ .long 0x88000000,0x0000f22e,0x9000ff5c,0xf22e4818
+ .long 0xff84f23c,0x90000000,0x0000f201,0xa80083ae
+ .long 0xff6400ae,0x00001048,0xff64122e,0xff620201
+ .long 0x0013661c,0x082e0003,0xff6456c1,0x202eff5c
+ .long 0x61ff0000,0x3faa812e,0xff64f210,0xd0804e75
+ .long 0x2f02322e,0xff842401,0x02810000,0x7fff0242
+ .long 0x80009280,0x04810000,0x60000241,0x7fff8242
+ .long 0x3d41ff84,0xf22ed040,0xff84241f,0x60b6f23c
+ .long 0x88000000,0x0000f22e,0x9000ff5c,0xf22e4818
+ .long 0xff84f201,0xa800f23c,0x90000000,0x000083ae
+ .long 0xff64f200,0x0098f23c,0x58b80002,0xf293ff74
+ .long 0x6000fe86,0x0c010004,0x6700fdc6,0x0c010005
+ .long 0x67ff0000,0x145e0c01,0x000367ff,0x00001468
+ .long 0xf2284818,0x00000c01,0x00026708,0x1d7c0004
+ .long 0xff644e75,0x1d7c0002,0xff644e75,0x4241122e
+ .long 0xff4fe709,0x822eff4e,0x6618f229,0xd0800000
+ .long 0xf2284838,0x0000f200,0xa800e198,0x1d40ff64
+ .long 0x4e75323b,0x120a4efb,0x10064afc,0x0030ffdc
+ .long 0xffdcffdc,0x006000f8,0x006e0000,0x0000ffdc
+ .long 0xffdcffdc,0x0060007c,0x006e0000,0x0000ffdc
+ .long 0xffdcffdc,0x0060007c,0x006e0000,0x00000060
+ .long 0x00600060,0x00600060,0x006e0000,0x00000114
+ .long 0x009c009c,0x006000bc,0x006e0000,0x0000006e
+ .long 0x006e006e,0x006e006e,0x006e0000,0x000061ff
+ .long 0x00001388,0x022e00f7,0xff644e75,0x61ff0000
+ .long 0x137a022e,0x00f7ff64,0x4e753d68,0x0000ff84
+ .long 0x20280004,0x08c0001f,0x2d40ff88,0x2d680008
+ .long 0xff8c41ee,0xff846000,0xff422d69,0x0000ff84
+ .long 0x20290004,0x08c0001f,0x2d40ff88,0x2d690008
+ .long 0xff8c43ee,0xff846000,0xff223d69,0x0000ff90
+ .long 0x3d680000,0xff842029,0x000408c0,0x001f2d40
+ .long 0xff942028,0x000408c0,0x001f2d40,0xff882d69
+ .long 0x0008ff98,0x2d680008,0xff8c43ee,0xff9041ee
+ .long 0xff846000,0xfee61028,0x00001229,0x0000b101
+ .long 0x6b00ff78,0x4a006b02,0x4e751d7c,0x0008ff64
+ .long 0x4e751028,0x00001229,0x0000b101,0x6b00ff7c
+ .long 0x4a006a02,0x4e751d7c,0x0008ff64,0x4e752d40
+ .long 0xff5c4241,0x122eff4f,0xe709822e,0xff4e6600
+ .long 0x02a03d69,0x0000ff90,0x2d690004,0xff942d69
+ .long 0x0008ff98,0x3d680000,0xff842d68,0x0004ff88
+ .long 0x2d680008,0xff8c61ff,0x0000119a,0x2f0061ff
+ .long 0x0000123e,0xd09f0c80,0xffffc001,0x670000f8
+ .long 0x6d000064,0x0c800000,0x40006700,0x01da6e00
+ .long 0x0122f22e,0xd080ff90,0xf22e9000,0xff5cf23c
+ .long 0x88000000,0x0000f22e,0x4827ff84,0xf201a800
+ .long 0xf23c9000,0x00000000,0x83aeff64,0xf22ef080
+ .long 0xff842f02,0x322eff84,0x24010281,0x00007fff
+ .long 0x02428000,0x92808242,0x3d41ff84,0x241ff22e
+ .long 0xd080ff84,0x4e75f22e,0xd080ff90,0xf22e9000
+ .long 0xff5cf23c,0x88000000,0x0000f22e,0x4827ff84
+ .long 0xf201a800,0xf23c9000,0x00000000,0x83aeff64
+ .long 0x00ae0000,0x1048ff64,0x122eff62,0x02010013
+ .long 0x6620082e,0x0003ff64,0x56c1202e,0xff5c0200
+ .long 0x003061ff,0x00003c98,0x812eff64,0xf210d080
+ .long 0x4e75f22e,0xf080ff84,0x2f02322e,0xff842401
+ .long 0x02810000,0x7fff9280,0x04810000,0x60000241
+ .long 0x7fff0242,0x80008242,0x3d41ff84,0x241ff22e
+ .long 0xd040ff84,0x60acf22e,0xd080ff90,0xf22e9000
+ .long 0xff5cf23c,0x88000000,0x0000f22e,0x4827ff84
+ .long 0xf201a800,0xf23c9000,0x00000000,0x83aeff64
+ .long 0xf2000098,0xf23c58b8,0x0002f293,0xff646000
+ .long 0xff0c08ee,0x0003ff66,0xf22ed080,0xff90f23c
+ .long 0x90000000,0x0010f23c,0x88000000,0x0000f22e
+ .long 0x4827ff84,0xf201a800,0xf23c9000,0x00000000
+ .long 0x83aeff64,0x122eff62,0x0201000b,0x6620f22e
+ .long 0xf080ff84,0x41eeff84,0x222eff5c,0x61ff0000
+ .long 0x3b56812e,0xff64f22e,0xd080ff84,0x4e75f22e
+ .long 0xd040ff90,0xf22e9000,0xff5cf23c,0x88000000
+ .long 0x0000f22e,0x48a7ff84,0xf23c9000,0x00000000
+ .long 0xf22ef040,0xff842f02,0x322eff84,0x24010281
+ .long 0x00007fff,0x02428000,0x92800681,0x00006000
+ .long 0x02417fff,0x82423d41,0xff84241f,0xf22ed040
+ .long 0xff846000,0xff8af22e,0xd080ff90,0xf22e9000
+ .long 0xff5cf23c,0x88000000,0x0000f22e,0x4827ff84
+ .long 0xf201a800,0xf23c9000,0x00000000,0x83aeff64
+ .long 0xf2000098,0xf23c58b8,0x0002f292,0xfe20f294
+ .long 0xff12f22e,0xd040ff90,0x222eff5c,0x020100c0
+ .long 0x00010010,0xf2019000,0xf23c8800,0x00000000
+ .long 0xf22e48a7,0xff84f23c,0x90000000,0x0000f200
+ .long 0x0498f23c,0x58b80002,0xf293fde2,0x6000fed4
+ .long 0x323b120a,0x4efb1006,0x4afc0030,0xfd560072
+ .long 0x0078006c,0xfd560066,0x00000000,0x00720072
+ .long 0x0060006c,0x00720066,0x00000000,0x007e0060
+ .long 0x007e006c,0x007e0066,0x00000000,0x006c006c
+ .long 0x006c006c,0x006c0066,0x00000000,0xfd560072
+ .long 0x0078006c,0xfd560066,0x00000000,0x00660066
+ .long 0x00660066,0x00660066,0x00000000,0x60ff0000
+ .long 0x101e60ff,0x00000f94,0x60ff0000,0x0f8e60ff
+ .long 0xffffed0e,0x60ffffff,0xed6260ff,0xffffed2e
+ .long 0x2d40ff5c,0x4241122e,0xff4fe709,0x822eff4e
+ .long 0x6600027c,0x3d690000,0xff902d69,0x0004ff94
+ .long 0x2d690008,0xff983d68,0x0000ff84,0x2d680004
+ .long 0xff882d68,0x0008ff8c,0x61ff0000,0x0e582f00
+ .long 0x61ff0000,0x0efc4497,0xd197322e,0xff5eec09
+ .long 0x201f0c80,0xffffc001,0x6f000064,0x0c800000
+ .long 0x3fff6700,0x01b66e00,0x0100f22e,0xd080ff90
+ .long 0xf22e9000,0xff5cf23c,0x88000000,0x0000f22e
+ .long 0x4824ff84,0xf201a800,0xf23c9000,0x00000000
+ .long 0x83aeff64,0xf22ef080,0xff842f02,0x322eff84
+ .long 0x24010281,0x00007fff,0x02428000,0x92808242
+ .long 0x3d41ff84,0x241ff22e,0xd080ff84,0x4e75f22e
+ .long 0xd080ff90,0xf22e9000,0xff5cf23c,0x88000000
+ .long 0x0000f22e,0x4824ff84,0xf201a800,0xf23c9000
+ .long 0x00000000,0x83aeff64,0xf227e001,0x3217dffc
+ .long 0x0000000c,0x02810000,0x7fff9280,0x0c810000
+ .long 0x7fff6d90,0x006e1048,0xff66122e,0xff620201
+ .long 0x00136620,0x082e0003,0xff6456c1,0x202eff5c
+ .long 0x02000030,0x61ff0000,0x3936812e,0xff64f210
+ .long 0xd0804e75,0xf22ef080,0xff842f02,0x322eff84
+ .long 0x24010281,0x00007fff,0x02428000,0x92800481
+ .long 0x00006000,0x02417fff,0x82423d41,0xff84241f
+ .long 0xf22ed040,0xff8460ac,0x08ee0003,0xff66f22e
+ .long 0xd080ff90,0xf23c9000,0x00000010,0xf23c8800
+ .long 0x00000000,0xf22e4824,0xff84f201,0xa800f23c
+ .long 0x90000000,0x000083ae,0xff64122e,0xff620201
+ .long 0x000b6620,0xf22ef080,0xff8441ee,0xff84222e
+ .long 0xff5c61ff,0x00003830,0x812eff64,0xf22ed080
+ .long 0xff844e75,0xf22ed040,0xff90f22e,0x9000ff5c
+ .long 0xf23c8800,0x00000000,0xf22e48a4,0xff84f23c
+ .long 0x90000000,0x0000f22e,0xf040ff84,0x2f02322e
+ .long 0xff842401,0x02810000,0x7fff0242,0x80009280
+ .long 0x06810000,0x60000241,0x7fff8242,0x3d41ff84
+ .long 0x241ff22e,0xd040ff84,0x608af22e,0xd080ff90
+ .long 0xf22e9000,0xff5cf23c,0x88000000,0x0000f22e
+ .long 0x4824ff84,0xf201a800,0xf23c9000,0x00000000
+ .long 0x83aeff64,0xf2000098,0xf23c58b8,0x0001f292
+ .long 0xfe44f294,0xff14f22e,0xd040ff90,0x42810001
+ .long 0x0010f201,0x9000f23c,0x88000000,0x0000f22e
+ .long 0x48a4ff84,0xf23c9000,0x00000000,0xf2000498
+ .long 0xf23c58b8,0x0001f293,0xfe0c6000,0xfedc323b
+ .long 0x120a4efb,0x10064afc,0x0030fd7a,0x00720078
+ .long 0x0060fd7a,0x00660000,0x00000078,0x006c0078
+ .long 0x00600078,0x00660000,0x0000007e,0x007e006c
+ .long 0x0060007e,0x00660000,0x00000060,0x00600060
+ .long 0x00600060,0x00660000,0x0000fd7a,0x00720078
+ .long 0x0060fd7a,0x00660000,0x00000066,0x00660066
+ .long 0x00660066,0x00660000,0x000060ff,0x00000c7c
+ .long 0x60ff0000,0x0c7660ff,0x00000cf4,0x60ffffff
+ .long 0xf0ce60ff,0xfffff09c,0x60ffffff,0xf0f40200
+ .long 0x00300000,0x00406008,0x02000030,0x00000080
+ .long 0x2d40ff5c,0x4241122e,0xff4fe709,0x822eff4e
+ .long 0x6600024c,0x61ff0000,0x0a5cf22e,0xd080ff90
+ .long 0xf23c8800,0x00000000,0xf22e9000,0xff5cf22e
+ .long 0x4822ff84,0xf23c9000,0x00000000,0xf201a800
+ .long 0x83aeff64,0xf281003c,0x2f02f227,0xe001322e
+ .long 0xff5eec09,0x34170282,0x00007fff,0x9480b4bb
+ .long 0x14246c38,0xb4bb142a,0x6d0000b8,0x67000184
+ .long 0x32170241,0x80008242,0x3e81f21f,0xd080241f
+ .long 0x4e754e75,0x00007fff,0x0000407f,0x000043ff
+ .long 0x00000000,0x00003f81,0x00003c01,0x00ae0000
+ .long 0x1048ff64,0x122eff62,0x02010013,0x6624dffc
+ .long 0x0000000c,0x082e0003,0xff6456c1,0x202eff5c
+ .long 0x61ff0000,0x366a812e,0xff64f210,0xd080241f
+ .long 0x4e75122e,0xff5c0201,0x00c0661a,0x32170241
+ .long 0x80000482,0x00006000,0x02427fff,0x82423e81
+ .long 0xf21fd040,0x60bef22e,0xd080ff90,0x222eff5c
+ .long 0x02010030,0xf2019000,0xf22e4822,0xff84f23c
+ .long 0x90000000,0x0000dffc,0x0000000c,0xf227e001
+ .long 0x60ba08ee,0x0003ff66,0xdffc0000,0x000cf22e
+ .long 0xd080ff90,0xf23c9000,0x00000010,0xf23c8800
+ .long 0x00000000,0xf22e4822,0xff84f23c,0x90000000
+ .long 0x0000f201,0xa80083ae,0xff64122e,0xff620201
+ .long 0x000b6622,0xf22ef080,0xff8441ee,0xff84222e
+ .long 0xff5c61ff,0x000034ba,0x812eff64,0xf22ed080
+ .long 0xff84241f,0x4e75f22e,0xd040ff90,0x222eff5c
+ .long 0x020100c0,0x664ef22e,0x9000ff5c,0xf23c8800
+ .long 0x00000000,0xf22e48a2,0xff84f23c,0x90000000
+ .long 0x0000f22e,0xf040ff84,0x322eff84,0x24010281
+ .long 0x00007fff,0x02428000,0x92800681,0x00006000
+ .long 0x02417fff,0x82423d41,0xff84f22e,0xd040ff84
+ .long 0x6000ff82,0x222eff5c,0x02010030,0xf2019000
+ .long 0x60aa222e,0xff5c0201,0x00c06700,0xfe74222f
+ .long 0x00040c81,0x80000000,0x6600fe66,0x4aaf0008
+ .long 0x6600fe5e,0x082e0001,0xff666700,0xfe54f22e
+ .long 0xd040ff90,0x222eff5c,0x020100c0,0x00010010
+ .long 0xf2019000,0xf23c8800,0x00000000,0xf22e48a2
+ .long 0xff84f23c,0x90000000,0x0000f200,0x0018f200
+ .long 0x0498f200,0x0438f292,0xfeca6000,0xfe14323b
+ .long 0x120a4efb,0x10064afc,0x0030fdaa,0x00e4011c
+ .long 0x0060fdaa,0x00660000,0x000000bc,0x006c011c
+ .long 0x006000bc,0x00660000,0x00000130,0x0130010c
+ .long 0x00600130,0x00660000,0x00000060,0x00600060
+ .long 0x00600060,0x00660000,0x0000fdaa,0x00e4011c
+ .long 0x0060fdaa,0x00660000,0x00000066,0x00660066
+ .long 0x00660066,0x00660000,0x000060ff,0x0000097c
+ .long 0x60ff0000,0x09761028,0x00001229,0x0000b101
+ .long 0x6b000016,0x4a006b2e,0xf23c4400,0x00000000
+ .long 0x1d7c0004,0xff644e75,0x122eff5f,0x02010030
+ .long 0x0c010020,0x6710f23c,0x44000000,0x00001d7c
+ .long 0x0004ff64,0x4e75f23c,0x44008000,0x00001d7c
+ .long 0x000cff64,0x4e753d68,0x0000ff84,0x2d680004
+ .long 0xff882d68,0x0008ff8c,0x61ff0000,0x0828426e
+ .long 0xff9042ae,0xff9442ae,0xff986000,0xfcce3d69
+ .long 0x0000ff90,0x2d690004,0xff942d69,0x0008ff98
+ .long 0x61ff0000,0x08ac426e,0xff8442ae,0xff8842ae
+ .long 0xff8c6000,0xfca61028,0x00001229,0x0000b300
+ .long 0x6bff0000,0x094af228,0xd0800000,0x4a280000
+ .long 0x6a1c1d7c,0x000aff64,0x4e75f229,0xd0800000
+ .long 0x4a290000,0x6a081d7c,0x000aff64,0x4e751d7c
+ .long 0x0002ff64,0x4e750200,0x00300000,0x00406008
+ .long 0x02000030,0x00000080,0x2d40ff5c,0x4241122e
+ .long 0xff4fe709,0x822eff4e,0x6600024c,0x61ff0000
+ .long 0x0694f22e,0xd080ff90,0xf23c8800,0x00000000
+ .long 0xf22e9000,0xff5cf22e,0x4828ff84,0xf23c9000
+ .long 0x00000000,0xf201a800,0x83aeff64,0xf281003c
+ .long 0x2f02f227,0xe001322e,0xff5eec09,0x34170282
+ .long 0x00007fff,0x9480b4bb,0x14246c38,0xb4bb142a
+ .long 0x6d0000b8,0x67000184,0x32170241,0x80008242
+ .long 0x3e81f21f,0xd080241f,0x4e754e75,0x00007fff
+ .long 0x0000407f,0x000043ff,0x00000000,0x00003f81
+ .long 0x00003c01,0x00ae0000,0x1048ff64,0x122eff62
+ .long 0x02010013,0x6624dffc,0x0000000c,0x082e0003
+ .long 0xff6456c1,0x202eff5c,0x61ff0000,0x32a2812e
+ .long 0xff64f210,0xd080241f,0x4e75122e,0xff5c0201
+ .long 0x00c0661a,0x32170241,0x80000482,0x00006000
+ .long 0x02427fff,0x82423e81,0xf21fd040,0x60bef22e
+ .long 0xd080ff90,0x222eff5c,0x02010030,0xf2019000
+ .long 0xf22e4828,0xff84f23c,0x90000000,0x0000dffc
+ .long 0x0000000c,0xf227e001,0x60ba08ee,0x0003ff66
+ .long 0xdffc0000,0x000cf22e,0xd080ff90,0xf23c9000
+ .long 0x00000010,0xf23c8800,0x00000000,0xf22e4828
+ .long 0xff84f23c,0x90000000,0x0000f201,0xa80083ae
+ .long 0xff64122e,0xff620201,0x000b6622,0xf22ef080
+ .long 0xff8441ee,0xff84222e,0xff5c61ff,0x000030f2
+ .long 0x812eff64,0xf22ed080,0xff84241f,0x4e75f22e
+ .long 0xd040ff90,0x222eff5c,0x020100c0,0x664ef22e
+ .long 0x9000ff5c,0xf23c8800,0x00000000,0xf22e48a8
+ .long 0xff84f23c,0x90000000,0x0000f22e,0xf040ff84
+ .long 0x322eff84,0x24010281,0x00007fff,0x02428000
+ .long 0x92800681,0x00006000,0x02417fff,0x82423d41
+ .long 0xff84f22e,0xd040ff84,0x6000ff82,0x222eff5c
+ .long 0x02010030,0xf2019000,0x60aa222e,0xff5c0201
+ .long 0x00c06700,0xfe74222f,0x00040c81,0x80000000
+ .long 0x6600fe66,0x4aaf0008,0x6600fe5e,0x082e0001
+ .long 0xff666700,0xfe54f22e,0xd040ff90,0x222eff5c
+ .long 0x020100c0,0x00010010,0xf2019000,0xf23c8800
+ .long 0x00000000,0xf22e48a8,0xff84f23c,0x90000000
+ .long 0x0000f200,0x0018f200,0x0498f200,0x0438f292
+ .long 0xfeca6000,0xfe14323b,0x120a4efb,0x10064afc
+ .long 0x0030fdaa,0x00e2011a,0x0060fdaa,0x00660000
+ .long 0x000000ba,0x006c011a,0x006000ba,0x00660000
+ .long 0x00000130,0x0130010a,0x00600130,0x00660000
+ .long 0x00000060,0x00600060,0x00600060,0x00660000
+ .long 0x0000fdaa,0x00e2011a,0x0060fdaa,0x00660000
+ .long 0x00000066,0x00660066,0x00660066,0x00660000
+ .long 0x000060ff,0x000005b4,0x60ff0000,0x05ae1028
+ .long 0x00001229,0x0000b300,0x6a144a00,0x6b2ef23c
+ .long 0x44000000,0x00001d7c,0x0004ff64,0x4e75122e
+ .long 0xff5f0201,0x00300c01,0x00206710,0xf23c4400
+ .long 0x00000000,0x1d7c0004,0xff644e75,0xf23c4400
+ .long 0x80000000,0x1d7c000c,0xff644e75,0x3d680000
+ .long 0xff842d68,0x0004ff88,0x2d680008,0xff8c61ff
+ .long 0x00000462,0x426eff90,0x42aeff94,0x42aeff98
+ .long 0x6000fcd0,0x3d690000,0xff902d69,0x0004ff94
+ .long 0x2d690008,0xff9861ff,0x000004e6,0x426eff84
+ .long 0x42aeff88,0x42aeff8c,0x6000fca8,0x10280000
+ .long 0x12290000,0xb3006aff,0x00000584,0xf228d080
+ .long 0x0000f200,0x001af293,0x001e1d7c,0x000aff64
+ .long 0x4e75f229,0xd0800000,0x4a290000,0x6a081d7c
+ .long 0x000aff64,0x4e751d7c,0x0002ff64,0x4e750200
+ .long 0x00300000,0x00406008,0x02000030,0x00000080
+ .long 0x2d40ff5c,0x4241122e,0xff4e6600,0x02744a28
+ .long 0x00006bff,0x00000528,0x020000c0,0x6648f22e
+ .long 0x9000ff5c,0xf23c8800,0x00000000,0xf2104804
+ .long 0xf201a800,0x83aeff64,0x4e754a28,0x00006bff
+ .long 0x000004fc,0x020000c0,0x661c3d68,0x0000ff84
+ .long 0x2d680004,0xff882d68,0x0008ff8c,0x61ff0000
+ .long 0x03ae6000,0x003e0c00,0x00406600,0x00843d68
+ .long 0x0000ff84,0x2d680004,0xff882d68,0x0008ff8c
+ .long 0x61ff0000,0x038a0c80,0x0000007e,0x67000098
+ .long 0x6e00009e,0x0c80ffff,0xff806700,0x01a46d00
+ .long 0x0120f23c,0x88000000,0x0000f22e,0x9000ff5c
+ .long 0xf22e4804,0xff84f201,0xa800f23c,0x90000000
+ .long 0x000083ae,0xff642f02,0xf22ef080,0xff84322e
+ .long 0xff842401,0x02810000,0x7fff9280,0x02428000
+ .long 0x84413d42,0xff84241f,0xf22ed080,0xff844e75
+ .long 0x3d680000,0xff842d68,0x0004ff88,0x2d680008
+ .long 0xff8c61ff,0x00000308,0x0c800000,0x03fe6700
+ .long 0x00166e1c,0x0c80ffff,0xfc006700,0x01246d00
+ .long 0x00a06000,0xff7e082e,0x0000ff85,0x6600ff74
+ .long 0x08ee0003,0xff66f23c,0x90000000,0x0010f23c
+ .long 0x88000000,0x0000f22e,0x4804ff84,0xf201a800
+ .long 0xf23c9000,0x00000000,0x83aeff64,0x122eff62
+ .long 0x0201000b,0x6620f22e,0xf080ff84,0x41eeff84
+ .long 0x222eff5c,0x61ff0000,0x2d28812e,0xff64f22e
+ .long 0xd080ff84,0x4e752d6e,0xff88ff94,0x2d6eff8c
+ .long 0xff98322e,0xff842f02,0x24010281,0x00007fff
+ .long 0x02428000,0x92800681,0x00006000,0x02417fff
+ .long 0x82423d41,0xff90f22e,0xd040ff90,0x241f60a6
+ .long 0xf23c8800,0x00000000,0xf22e9000,0xff5cf22e
+ .long 0x4804ff84,0xf23c9000,0x00000000,0xf201a800
+ .long 0x83aeff64,0x00ae0000,0x1048ff64,0x122eff62
+ .long 0x02010013,0x661c082e,0x0003ff64,0x56c1202e
+ .long 0xff5c61ff,0x00002d98,0x812eff64,0xf210d080
+ .long 0x4e752f02,0x322eff84,0x24010281,0x00007fff
+ .long 0x02428000,0x92800481,0x00006000,0x02417fff
+ .long 0x82423d41,0xff84f22e,0xd040ff84,0x241f60b6
+ .long 0x082e0000,0xff856600,0xff78f23c,0x88000000
+ .long 0x0000f22e,0x9000ff5c,0xf22e4804,0xff84f201
+ .long 0xa800f23c,0x90000000,0x000083ae,0xff64f200
+ .long 0x0080f23c,0x58b80001,0xf293ff6a,0x6000fe48
+ .long 0x0c010004,0x6700fdb4,0x0c010001,0x67160c01
+ .long 0x00026736,0x0c010005,0x67ff0000,0x023660ff
+ .long 0x00000244,0x4a280000,0x6b10f23c,0x44000000
+ .long 0x00001d7c,0x0004ff64,0x4e75f23c,0x44008000
+ .long 0x00001d7c,0x000cff64,0x4e754a28,0x00006bff
+ .long 0x0000026c,0xf228d080,0x00001d7c,0x0002ff64
+ .long 0x4e752d68,0x0004ff88,0x2d690004,0xff942d68
+ .long 0x0008ff8c,0x2d690008,0xff983028,0x00003229
+ .long 0x00003d40,0xff843d41,0xff900240,0x7fff0241
+ .long 0x7fff3d40,0xff543d41,0xff56b041,0x6cff0000
+ .long 0x005c61ff,0x0000015a,0x2f000c2e,0x0004ff4e
+ .long 0x661041ee,0xff8461ff,0x00002940,0x44403d40
+ .long 0xff54302e,0xff560440,0x0042b06e,0xff546c1a
+ .long 0x302eff54,0xd06f0002,0x322eff84,0x02418000
+ .long 0x80413d40,0xff84201f,0x4e75026e,0x8000ff84
+ .long 0x08ee0000,0xff85201f,0x4e7561ff,0x00000056
+ .long 0x2f000c2e,0x0004ff4f,0x661041ee,0xff9061ff
+ .long 0x000028e8,0x44403d40,0xff56302e,0xff540440
+ .long 0x0042b06e,0xff566c1a,0x302eff56,0xd06f0002
+ .long 0x322eff90,0x02418000,0x80413d40,0xff90201f
+ .long 0x4e75026e,0x8000ff90,0x08ee0000,0xff91201f
+ .long 0x4e75322e,0xff843001,0x02810000,0x7fff0240
+ .long 0x80000040,0x3fff3d40,0xff840c2e,0x0004ff4e
+ .long 0x670a203c,0x00003fff,0x90814e75,0x41eeff84
+ .long 0x61ff0000,0x28764480,0x220060e6,0x0c2e0004
+ .long 0xff4e673a,0x322eff84,0x02810000,0x7fff026e
+ .long 0x8000ff84,0x08010000,0x6712006e,0x3fffff84
+ .long 0x203c0000,0x3fff9081,0xe2804e75,0x006e3ffe
+ .long 0xff84203c,0x00003ffe,0x9081e280,0x4e7541ee
+ .long 0xff8461ff,0x00002824,0x08000000,0x6710006e
+ .long 0x3fffff84,0x06800000,0x3fffe280,0x4e75006e
+ .long 0x3ffeff84,0x06800000,0x3ffee280,0x4e75322e
+ .long 0xff903001,0x02810000,0x7fff0240,0x80000040
+ .long 0x3fff3d40,0xff900c2e,0x0004ff4f,0x670a203c
+ .long 0x00003fff,0x90814e75,0x41eeff90,0x61ff0000
+ .long 0x27ca4480,0x220060e6,0x0c2e0005,0xff4f6732
+ .long 0x0c2e0003,0xff4f673e,0x0c2e0003,0xff4e6714
+ .long 0x08ee0006,0xff7000ae,0x01004080,0xff6441ee
+ .long 0xff6c6042,0x00ae0100,0x0000ff64,0x41eeff6c
+ .long 0x603400ae,0x01004080,0xff6408ee,0x0006ff7c
+ .long 0x41eeff78,0x602041ee,0xff780c2e,0x0005ff4e
+ .long 0x66ff0000,0x000c00ae,0x00004080,0xff6400ae
+ .long 0x01000000,0xff640828,0x00070000,0x670800ae
+ .long 0x08000000,0xff64f210,0xd0804e75,0x00ae0100
+ .long 0x2080ff64,0xf23bd080,0x01700000,0x00084e75
+ .long 0x7fff0000,0xffffffff,0xffffffff,0x2d40ff54
+ .long 0x302eff42,0x4281122e,0xff64e099,0xf2018800
+ .long 0x323b0206,0x4efb1002,0x02340040,0x02f8030c
+ .long 0x03200334,0x0348035c,0x03660352,0x033e032a
+ .long 0x03160302,0x004a0238,0x023a0276,0x0054009e
+ .long 0x0102014c,0x01b201fc,0x021801d8,0x018c0128
+ .long 0x00de007a,0x02b6025a,0xf2810006,0x6000032a
+ .long 0x4e75f28e,0x00066000,0x03204e75,0xf2920022
+ .long 0x082e0000,0xff646700,0x031000ae,0x00008080
+ .long 0xff64082e,0x0007ff62,0x6600032c,0x600002fa
+ .long 0x4e75f29d,0x00066000,0x02f0082e,0x0000ff64
+ .long 0x671200ae,0x00008080,0xff64082e,0x0007ff62
+ .long 0x66000304,0x4e75f293,0x0022082e,0x0000ff64
+ .long 0x670002c6,0x00ae0000,0x8080ff64,0x082e0007
+ .long 0xff626600,0x02e26000,0x02b0082e,0x0000ff64
+ .long 0x671200ae,0x00008080,0xff64082e,0x0007ff62
+ .long 0x660002c4,0x4e75f29c,0x00066000,0x028c082e
+ .long 0x0000ff64,0x671200ae,0x00008080,0xff64082e
+ .long 0x0007ff62,0x660002a0,0x4e75f294,0x0022082e
+ .long 0x0000ff64,0x67000262,0x00ae0000,0x8080ff64
+ .long 0x082e0007,0xff626600,0x027e6000,0x024c4e75
+ .long 0xf29b0006,0x60000242,0x082e0000,0xff646712
+ .long 0x00ae0000,0x8080ff64,0x082e0007,0xff626600
+ .long 0x02564e75,0xf2950022,0x082e0000,0xff646700
+ .long 0x021800ae,0x00008080,0xff64082e,0x0007ff62
+ .long 0x66000234,0x60000202,0x082e0000,0xff646712
+ .long 0x00ae0000,0x8080ff64,0x082e0007,0xff626600
+ .long 0x02164e75,0xf29a0006,0x600001de,0x082e0000
+ .long 0xff646700,0x001400ae,0x00008080,0xff64082e
+ .long 0x0007ff62,0x660001f0,0x4e75f296,0x0022082e
+ .long 0x0000ff64,0x670001b2,0x00ae0000,0x8080ff64
+ .long 0x082e0007,0xff626600,0x01ce6000,0x019c4e75
+ .long 0xf2990006,0x60000192,0x082e0000,0xff646712
+ .long 0x00ae0000,0x8080ff64,0x082e0007,0xff626600
+ .long 0x01a64e75,0xf2970018,0x00ae0000,0x8080ff64
+ .long 0x082e0007,0xff626600,0x018e6000,0x015c4e75
+ .long 0xf2980006,0x60000152,0x00ae0000,0x8080ff64
+ .long 0x082e0007,0xff626600,0x016e4e75,0x6000013a
+ .long 0x4e75082e,0x0000ff64,0x6700012e,0x00ae0000
+ .long 0x8080ff64,0x082e0007,0xff626600,0x014a6000
+ .long 0x0118082e,0x0000ff64,0x671200ae,0x00008080
+ .long 0xff64082e,0x0007ff62,0x6600012c,0x4e75f291
+ .long 0x0022082e,0x0000ff64,0x670000ee,0x00ae0000
+ .long 0x8080ff64,0x082e0007,0xff626600,0x010a6000
+ .long 0x00d8082e,0x0000ff64,0x671200ae,0x00008080
+ .long 0xff64082e,0x0007ff62,0x660000ec,0x4e75f29e
+ .long 0x0022082e,0x0000ff64,0x670000ae,0x00ae0000
+ .long 0x8080ff64,0x082e0007,0xff626600,0x00ca6000
+ .long 0x0098082e,0x0000ff64,0x67000014,0x00ae0000
+ .long 0x8080ff64,0x082e0007,0xff626600,0x00aa4e75
+ .long 0xf2820006,0x60000072,0x4e75f28d,0x00066000
+ .long 0x00684e75,0xf2830006,0x6000005e,0x4e75f28c
+ .long 0x00066000,0x00544e75,0xf2840006,0x6000004a
+ .long 0x4e75f28b,0x00066000,0x00404e75,0xf2850006
+ .long 0x60000036,0x4e75f28a,0x00066000,0x002c4e75
+ .long 0xf2860006,0x60000022,0x4e75f289,0x00066000
+ .long 0x00184e75,0xf2870006,0x6000000e,0x4e75f288
+ .long 0x00066000,0x00044e75,0x122eff41,0x02410007
+ .long 0x61ff0000,0x1d665340,0x61ff0000,0x1dd00c40
+ .long 0xffff6602,0x4e75202e,0xff54d0ae,0xff685880
+ .long 0x2d400006,0x4e751d7c,0x0002ff4a,0x4e75302e
+ .long 0xff424281,0x122eff64,0xe099f201,0x8800323b
+ .long 0x02064efb,0x1002021e,0x004002e4,0x02f002fc
+ .long 0x03080314,0x03200326,0x031a030e,0x030202f6
+ .long 0x02ea0046,0x02200224,0x0260004c,0x009200f8
+ .long 0x013e01a4,0x01ea0202,0x01c4017e,0x011800d2
+ .long 0x006c02a2,0x0240f281,0x02ea4e75,0xf28e02e4
+ .long 0x4e75f292,0x02de082e,0x0000ff64,0x671200ae
+ .long 0x00008080,0xff64082e,0x0007ff62,0x660002cc
+ .long 0x4e75f29d,0x00044e75,0x082e0000,0xff646700
+ .long 0x02b200ae,0x00008080,0xff64082e,0x0007ff62
+ .long 0x660002a8,0x6000029c,0xf293001e,0x082e0000
+ .long 0xff646712,0x00ae0000,0x8080ff64,0x082e0007
+ .long 0xff626600,0x02864e75,0x082e0000,0xff646700
+ .long 0x027200ae,0x00008080,0xff64082e,0x0007ff62
+ .long 0x66000268,0x6000025c,0xf29c0004,0x4e75082e
+ .long 0x0000ff64,0x6700024c,0x00ae0000,0x8080ff64
+ .long 0x082e0007,0xff626600,0x02426000,0x0236f294
+ .long 0x0232082e,0x0000ff64,0x671200ae,0x00008080
+ .long 0xff64082e,0x0007ff62,0x66000220,0x4e75f29b
+ .long 0x00044e75,0x082e0000,0xff646700,0x020600ae
+ .long 0x00008080,0xff64082e,0x0007ff62,0x660001fc
+ .long 0x600001f0,0xf295001e,0x082e0000,0xff646712
+ .long 0x00ae0000,0x8080ff64,0x082e0007,0xff626600
+ .long 0x01da4e75,0x082e0000,0xff646700,0x01c600ae
+ .long 0x00008080,0xff64082e,0x0007ff62,0x660001bc
+ .long 0x600001b0,0xf29a0004,0x4e75082e,0x0000ff64
+ .long 0x670001a0,0x00ae0000,0x8080ff64,0x082e0007
+ .long 0xff626600,0x01966000,0x018af296,0x0186082e
+ .long 0x0000ff64,0x671200ae,0x00008080,0xff64082e
+ .long 0x0007ff62,0x66000174,0x4e75f299,0x00044e75
+ .long 0x082e0000,0xff646700,0x015a00ae,0x00008080
+ .long 0xff64082e,0x0007ff62,0x66000150,0x60000144
+ .long 0xf2970140,0x00ae0000,0x8080ff64,0x082e0007
+ .long 0xff626600,0x01364e75,0xf2980004,0x4e7500ae
+ .long 0x00008080,0xff64082e,0x0007ff62,0x6600011c
+ .long 0x60000110,0x4e756000,0x010a082e,0x0000ff64
+ .long 0x671200ae,0x00008080,0xff64082e,0x0007ff62
+ .long 0x660000f8,0x4e75082e,0x0000ff64,0x670000e4
+ .long 0x00ae0000,0x8080ff64,0x082e0007,0xff626600
+ .long 0x00da6000,0x00cef291,0x0020082e,0x0000ff64
+ .long 0x67000014,0x00ae0000,0x8080ff64,0x082e0007
+ .long 0xff626600,0x00b64e75,0x082e0000,0xff646700
+ .long 0x00a200ae,0x00008080,0xff64082e,0x0007ff62
+ .long 0x66000098,0x6000008c,0xf29e0020,0x082e0000
+ .long 0xff646700,0x001400ae,0x00008080,0xff64082e
+ .long 0x0007ff62,0x66000074,0x4e75082e,0x0000ff64
+ .long 0x67000060,0x00ae0000,0x8080ff64,0x082e0007
+ .long 0xff626600,0x00566000,0x004af282,0x00464e75
+ .long 0xf28d0040,0x4e75f283,0x003a4e75,0xf28c0034
+ .long 0x4e75f284,0x002e4e75,0xf28b0028,0x4e75f285
+ .long 0x00224e75,0xf28a001c,0x4e75f286,0x00164e75
+ .long 0xf2890010,0x4e75f287,0x000a4e75,0xf2880004
+ .long 0x4e751d7c,0x0001ff4a,0x4e751d7c,0x0002ff4a
+ .long 0x4e75302e,0xff424281,0x122eff64,0xe099f201
+ .long 0x8800323b,0x02064efb,0x10020208,0x004002ac
+ .long 0x02cc02ec,0x030c032c,0x034c035c,0x033c031c
+ .long 0x02fc02dc,0x02bc0050,0x020e0214,0x02440060
+ .long 0x00a400fa,0x013e0194,0x01d801f0,0x01b60172
+ .long 0x011c00d8,0x00820278,0x022cf281,0x00084200
+ .long 0x6000032e,0x50c06000,0x0328f28e,0x00084200
+ .long 0x6000031e,0x50c06000,0x0318f292,0x001a4200
+ .long 0x082e0000,0xff646700,0x030800ae,0x00008080
+ .long 0xff646000,0x02f250c0,0x600002f6,0xf29d0008
+ .long 0x42006000,0x02ec50c0,0x082e0000,0xff646700
+ .long 0x02e000ae,0x00008080,0xff646000,0x02caf293
+ .long 0x001a4200,0x082e0000,0xff646700,0x02c400ae
+ .long 0x00008080,0xff646000,0x02ae50c0,0x082e0000
+ .long 0xff646700,0x02ac00ae,0x00008080,0xff646000
+ .long 0x0296f29c,0x00084200,0x60000296,0x50c0082e
+ .long 0x0000ff64,0x6700028a,0x00ae0000,0x8080ff64
+ .long 0x60000274,0xf294001a,0x4200082e,0x0000ff64
+ .long 0x6700026e,0x00ae0000,0x8080ff64,0x60000258
+ .long 0x50c06000,0x025cf29b,0x00084200,0x60000252
+ .long 0x50c0082e,0x0000ff64,0x67000246,0x00ae0000
+ .long 0x8080ff64,0x60000230,0xf295001a,0x4200082e
+ .long 0x0000ff64,0x6700022a,0x00ae0000,0x8080ff64
+ .long 0x60000214,0x50c0082e,0x0000ff64,0x67000212
+ .long 0x00ae0000,0x8080ff64,0x600001fc,0xf29a0008
+ .long 0x42006000,0x01fc50c0,0x082e0000,0xff646700
+ .long 0x01f000ae,0x00008080,0xff646000,0x01daf296
+ .long 0x001a4200,0x082e0000,0xff646700,0x01d400ae
+ .long 0x00008080,0xff646000,0x01be50c0,0x600001c2
+ .long 0xf2990008,0x42006000,0x01b850c0,0x082e0000
+ .long 0xff646700,0x01ac00ae,0x00008080,0xff646000
+ .long 0x0196f297,0x00104200,0x00ae0000,0x8080ff64
+ .long 0x60000184,0x50c06000,0x0188f298,0x00084200
+ .long 0x6000017e,0x50c000ae,0x00008080,0xff646000
+ .long 0x01664200,0x6000016a,0x50c06000,0x01644200
+ .long 0x082e0000,0xff646700,0x015800ae,0x00008080
+ .long 0xff646000,0x014250c0,0x082e0000,0xff646700
+ .long 0x014000ae,0x00008080,0xff646000,0x012af291
+ .long 0x001a4200,0x082e0000,0xff646700,0x012400ae
+ .long 0x00008080,0xff646000,0x010e50c0,0x082e0000
+ .long 0xff646700,0x010c00ae,0x00008080,0xff646000
+ .long 0x00f6f29e,0x001a4200,0x082e0000,0xff646700
+ .long 0x00f000ae,0x00008080,0xff646000,0x00da50c0
+ .long 0x082e0000,0xff646700,0x00d800ae,0x00008080
+ .long 0xff646000,0x00c2f282,0x00084200,0x600000c2
+ .long 0x50c06000,0x00bcf28d,0x00084200,0x600000b2
+ .long 0x50c06000,0x00acf283,0x00084200,0x600000a2
+ .long 0x50c06000,0x009cf28c,0x00084200,0x60000092
+ .long 0x50c06000,0x008cf284,0x00084200,0x60000082
+ .long 0x50c06000,0x007cf28b,0x00084200,0x60000072
+ .long 0x50c06000,0x006cf285,0x00084200,0x60000062
+ .long 0x50c06000,0x005cf28a,0x00084200,0x60000052
+ .long 0x50c06000,0x004cf286,0x00084200,0x60000042
+ .long 0x50c06000,0x003cf289,0x00084200,0x60000032
+ .long 0x50c06000,0x002cf287,0x00084200,0x60000022
+ .long 0x50c06000,0x001cf288,0x00084200,0x60000012
+ .long 0x50c06000,0x000c082e,0x0007ff62,0x66000088
+ .long 0x2040122e,0xff412001,0x02010038,0x66102200
+ .long 0x02410007,0x200861ff,0x0000172a,0x4e750c01
+ .long 0x0018671a,0x0c010020,0x67382008,0x206e000c
+ .long 0x61ffffff,0x5a7c4a81,0x66000054,0x4e752008
+ .long 0x206e000c,0x61ffffff,0x5a684a81,0x66000040
+ .long 0x122eff41,0x02410007,0x700161ff,0x00001722
+ .long 0x4e752008,0x206e000c,0x61ffffff,0x5a444a81
+ .long 0x6600001c,0x122eff41,0x02410007,0x700161ff
+ .long 0x0000174e,0x4e751d7c,0x0002ff4a,0x4e753d7c
+ .long 0x00a1000a,0x60ff0000,0x2b86122e,0xff430241
+ .long 0x0070e809,0x61ff0000,0x15b20280,0x000000ff
+ .long 0x2f00103b,0x09200148,0x2f0061ff,0x00000340
+ .long 0x201f221f,0x67000134,0x082e0005,0xff426700
+ .long 0x00b8082e,0x0004ff42,0x6600001a,0x123b1120
+ .long 0x021e082e,0x00050004,0x670a0c2e,0x0008ff4a
+ .long 0x66024e75,0x22489fc0,0x41d74a01,0x6a0c20ee
+ .long 0xffdc20ee,0xffe020ee,0xffe4e309,0x6a0c20ee
+ .long 0xffe820ee,0xffec20ee,0xfff0e309,0x6a0af210
+ .long 0xf020d1fc,0x0000000c,0xe3096a0a,0xf210f010
+ .long 0xd1fc0000,0x000ce309,0x6a0af210,0xf008d1fc
+ .long 0x0000000c,0xe3096a0a,0xf210f004,0xd1fc0000
+ .long 0x000ce309,0x6a0af210,0xf002d1fc,0x0000000c
+ .long 0xe3096a0a,0xf210f001,0xd1fc0000,0x000c2d49
+ .long 0xff5441d7,0x2f0061ff,0xffff58b2,0x201fdfc0
+ .long 0x4a816600,0x071e4e75,0x2d48ff54,0x9fc043d7
+ .long 0x2f012f00,0x61ffffff,0x587e201f,0x4a816600
+ .long 0x070e221f,0x41d74a01,0x6a0c2d58,0xffdc2d58
+ .long 0xffe02d58,0xffe4e309,0x6a0c2d58,0xffe82d58
+ .long 0xffec2d58,0xfff0e309,0x6a04f218,0xd020e309
+ .long 0x6a04f218,0xd010e309,0x6a04f218,0xd008e309
+ .long 0x6a04f218,0xd004e309,0x6a04f218,0xd002e309
+ .long 0x6a04f218,0xd001dfc0,0x4e754e75,0x000c0c18
+ .long 0x0c181824,0x0c181824,0x18242430,0x0c181824
+ .long 0x18242430,0x18242430,0x2430303c,0x0c181824
+ .long 0x18242430,0x18242430,0x2430303c,0x18242430
+ .long 0x2430303c,0x2430303c,0x303c3c48,0x0c181824
+ .long 0x18242430,0x18242430,0x2430303c,0x18242430
+ .long 0x2430303c,0x2430303c,0x303c3c48,0x18242430
+ .long 0x2430303c,0x2430303c,0x303c3c48,0x2430303c
+ .long 0x303c3c48,0x303c3c48,0x3c484854,0x0c181824
+ .long 0x18242430,0x18242430,0x2430303c,0x18242430
+ .long 0x2430303c,0x2430303c,0x303c3c48,0x18242430
+ .long 0x2430303c,0x2430303c,0x303c3c48,0x2430303c
+ .long 0x303c3c48,0x303c3c48,0x3c484854,0x18242430
+ .long 0x2430303c,0x2430303c,0x303c3c48,0x2430303c
+ .long 0x303c3c48,0x303c3c48,0x3c484854,0x2430303c
+ .long 0x303c3c48,0x303c3c48,0x3c484854,0x303c3c48
+ .long 0x3c484854,0x3c484854,0x48545460,0x008040c0
+ .long 0x20a060e0,0x109050d0,0x30b070f0,0x088848c8
+ .long 0x28a868e8,0x189858d8,0x38b878f8,0x048444c4
+ .long 0x24a464e4,0x149454d4,0x34b474f4,0x0c8c4ccc
+ .long 0x2cac6cec,0x1c9c5cdc,0x3cbc7cfc,0x028242c2
+ .long 0x22a262e2,0x129252d2,0x32b272f2,0x0a8a4aca
+ .long 0x2aaa6aea,0x1a9a5ada,0x3aba7afa,0x068646c6
+ .long 0x26a666e6,0x169656d6,0x36b676f6,0x0e8e4ece
+ .long 0x2eae6eee,0x1e9e5ede,0x3ebe7efe,0x018141c1
+ .long 0x21a161e1,0x119151d1,0x31b171f1,0x098949c9
+ .long 0x29a969e9,0x199959d9,0x39b979f9,0x058545c5
+ .long 0x25a565e5,0x159555d5,0x35b575f5,0x0d8d4dcd
+ .long 0x2dad6ded,0x1d9d5ddd,0x3dbd7dfd,0x038343c3
+ .long 0x23a363e3,0x139353d3,0x33b373f3,0x0b8b4bcb
+ .long 0x2bab6beb,0x1b9b5bdb,0x3bbb7bfb,0x078747c7
+ .long 0x27a767e7,0x179757d7,0x37b777f7,0x0f8f4fcf
+ .long 0x2faf6fef,0x1f9f5fdf,0x3fbf7fff,0x2040302e
+ .long 0xff403200,0x0240003f,0x02810000,0x0007303b
+ .long 0x020a4efb,0x00064afc,0x00400000,0x00000000
+ .long 0x00000000,0x00000000,0x00000000,0x00000000
+ .long 0x00000000,0x00000000,0x00000080,0x0086008c
+ .long 0x00900094,0x0098009c,0x00a000a6,0x00b600c6
+ .long 0x00d200de,0x00ea00f6,0x01020118,0x01260134
+ .long 0x013e0148,0x0152015c,0x0166017a,0x019801b6
+ .long 0x01d201ee,0x020a0226,0x02420260,0x02600260
+ .long 0x02600260,0x02600260,0x026002c0,0x02da02f4
+ .long 0x03140000,0x00000000,0x0000206e,0xffa44e75
+ .long 0x206effa8,0x4e75204a,0x4e75204b,0x4e75204c
+ .long 0x4e75204d,0x4e752056,0x4e75206e,0xffd84e75
+ .long 0x202effa4,0x2200d288,0x2d41ffa4,0x20404e75
+ .long 0x202effa8,0x2200d288,0x2d41ffa8,0x20404e75
+ .long 0x200a2200,0xd2882441,0x20404e75,0x200b2200
+ .long 0xd2882641,0x20404e75,0x200c2200,0xd2882841
+ .long 0x20404e75,0x200d2200,0xd2882a41,0x20404e75
+ .long 0x20162200,0xd2882c81,0x20404e75,0x1d7c0004
+ .long 0xff4a202e,0xffd82200,0xd2882d41,0xffd82040
+ .long 0x4e75202e,0xffa49088,0x2d40ffa4,0x20404e75
+ .long 0x202effa8,0x90882d40,0xffa82040,0x4e75200a
+ .long 0x90882440,0x20404e75,0x200b9088,0x26402040
+ .long 0x4e75200c,0x90882840,0x20404e75,0x200d9088
+ .long 0x2a402040,0x4e752016,0x90882c80,0x20404e75
+ .long 0x1d7c0008,0xff4a202e,0xffd89088,0x2d40ffd8
+ .long 0x20404e75,0x206eff44,0x54aeff44,0x61ffffff
+ .long 0x54a24a81,0x66ffffff,0x68203040,0xd1eeffa4
+ .long 0x4e75206e,0xff4454ae,0xff4461ff,0xffff5484
+ .long 0x4a8166ff,0xffff6802,0x3040d1ee,0xffa84e75
+ .long 0x206eff44,0x54aeff44,0x61ffffff,0x54664a81
+ .long 0x66ffffff,0x67e43040,0xd1ca4e75,0x206eff44
+ .long 0x54aeff44,0x61ffffff,0x544a4a81,0x66ffffff
+ .long 0x67c83040,0xd1cb4e75,0x206eff44,0x54aeff44
+ .long 0x61ffffff,0x542e4a81,0x66ffffff,0x67ac3040
+ .long 0xd1cc4e75,0x206eff44,0x54aeff44,0x61ffffff
+ .long 0x54124a81,0x66ffffff,0x67903040,0xd1cd4e75
+ .long 0x206eff44,0x54aeff44,0x61ffffff,0x53f64a81
+ .long 0x66ffffff,0x67743040,0xd1d64e75,0x206eff44
+ .long 0x54aeff44,0x61ffffff,0x53da4a81,0x66ffffff
+ .long 0x67583040,0xd1eeffd8,0x4e755081,0x61ff0000
+ .long 0x0fda2f00,0x206eff44,0x54aeff44,0x61ffffff
+ .long 0x53b24a81,0x66ffffff,0x6730205f,0x08000008
+ .long 0x660000e6,0x2d40ff54,0x2200e959,0x0241000f
+ .long 0x61ff0000,0x0fa62f02,0x242eff54,0x0802000b
+ .long 0x660248c0,0x2202ef59,0x02810000,0x0003e3a8
+ .long 0x49c2d082,0xd1c0241f,0x4e75206e,0xff4454ae
+ .long 0xff4461ff,0xffff535c,0x4a8166ff,0xffff66da
+ .long 0x30404e75,0x206eff44,0x58aeff44,0x61ffffff
+ .long 0x53584a81,0x66ffffff,0x66c02040,0x4e75206e
+ .long 0xff4454ae,0xff4461ff,0xffff5328,0x4a8166ff
+ .long 0xffff66a6,0x3040d1ee,0xff445588,0x4e75206e
+ .long 0xff4454ae,0xff4461ff,0xffff5308,0x4a8166ff
+ .long 0xffff6686,0x206eff44,0x55880800,0x00086600
+ .long 0x00382d40,0xff542200,0xe9590241,0x000f61ff
+ .long 0x00000ef8,0x2f02242e,0xff540802,0x000b6602
+ .long 0x48c02202,0xef590281,0x00000003,0xe3a849c2
+ .long 0xd082d1c0,0x241f4e75,0x08000006,0x670c48e7
+ .long 0x3c002a00,0x26084282,0x60282d40,0xff54e9c0
+ .long 0x140461ff,0x00000eb4,0x48e73c00,0x24002a2e
+ .long 0xff542608,0x0805000b,0x660248c2,0xe9c50542
+ .long 0xe1aa0805,0x00076702,0x4283e9c5,0x06820c00
+ .long 0x00026d34,0x6718206e,0xff4458ae,0xff4461ff
+ .long 0xffff5276,0x4a8166ff,0x000000b0,0x6018206e
+ .long 0xff4454ae,0xff4461ff,0xffff5248,0x4a8166ff
+ .long 0x00000098,0x48c0d680,0xe9c50782,0x6700006e
+ .long 0x0c000002,0x6d346718,0x206eff44,0x58aeff44
+ .long 0x61ffffff,0x52344a81,0x66ff0000,0x006e601c
+ .long 0x206eff44,0x54aeff44,0x61ffffff,0x52064a81
+ .long 0x66ff0000,0x005648c0,0x60024280,0x28000805
+ .long 0x00026714,0x204361ff,0xffff5240,0x4a816600
+ .long 0x0028d082,0xd0846018,0xd6822043,0x61ffffff
+ .long 0x522a4a81,0x66000012,0xd0846004,0xd6822003
+ .long 0x20404cdf,0x003c4e75,0x20434cdf,0x003c303c
+ .long 0x010160ff,0xffff6582,0x4cdf003c,0x60ffffff
+ .long 0x652861ff,0x000023c6,0x303c00e1,0x600a61ff
+ .long 0x000023ba,0x303c0161,0x206eff54,0x60ffffff
+ .long 0x6558102e,0xff420c00,0x009c6700,0x00b20c00
+ .long 0x00986700,0x00740c00,0x00946736,0x206eff44
+ .long 0x58aeff44,0x61ffffff,0x51704a81,0x66ffffff
+ .long 0x64d82d40,0xff64206e,0xff4458ae,0xff4461ff
+ .long 0xffff5156,0x4a8166ff,0xffff64be,0x2d40ff68
+ .long 0x4e75206e,0xff4458ae,0xff4461ff,0xffff513a
+ .long 0x4a8166ff,0xffff64a2,0x2d40ff60,0x206eff44
+ .long 0x58aeff44,0x61ffffff,0x51204a81,0x66ffffff
+ .long 0x64882d40,0xff684e75,0x206eff44,0x58aeff44
+ .long 0x61ffffff,0x51044a81,0x66ffffff,0x646c2d40
+ .long 0xff60206e,0xff4458ae,0xff4461ff,0xffff50ea
+ .long 0x4a8166ff,0xffff6452,0x2d40ff64,0x4e75206e
+ .long 0xff4458ae,0xff4461ff,0xffff50ce,0x4a8166ff
+ .long 0xffff6436,0x2d40ff60,0x206eff44,0x58aeff44
+ .long 0x61ffffff,0x50b44a81,0x66ffffff,0x641c2d40
+ .long 0xff64206e,0xff4458ae,0xff4461ff,0xffff509a
+ .long 0x4a8166ff,0xffff6402,0x2d40ff68,0x4e752040
+ .long 0x102eff41,0x22000240,0x00380281,0x00000007
+ .long 0x0c000018,0x67240c00,0x0020672c,0x80410c00
+ .long 0x003c6706,0x206e000c,0x4e751d7c,0x0080ff4a
+ .long 0x41f60162,0xff680004,0x4e752008,0x61ff0000
+ .long 0x0d70206e,0x000c4e75,0x200861ff,0x00000db2
+ .long 0x206e000c,0x0c00000c,0x67024e75,0x51882d48
+ .long 0x000c4e75,0x102eff41,0x22000240,0x00380281
+ .long 0x00000007,0x0c000018,0x670e0c00,0x00206700
+ .long 0x0076206e,0x000c4e75,0x323b120e,0x206e000c
+ .long 0x4efb1006,0x4afc0008,0x0010001a,0x0024002c
+ .long 0x0034003c,0x0044004e,0x06ae0000,0x000cffa4
+ .long 0x4e7506ae,0x0000000c,0xffa84e75,0xd5fc0000
+ .long 0x000c4e75,0xd7fc0000,0x000c4e75,0xd9fc0000
+ .long 0x000c4e75,0xdbfc0000,0x000c4e75,0x06ae0000
+ .long 0x000cffd4,0x4e751d7c,0x0004ff4a,0x06ae0000
+ .long 0x000cffd8,0x4e75323b,0x1214206e,0x000c5188
+ .long 0x51ae000c,0x4efb1006,0x4afc0008,0x00100016
+ .long 0x001c0020,0x00240028,0x002c0032,0x2d48ffa4
+ .long 0x4e752d48,0xffa84e75,0x24484e75,0x26484e75
+ .long 0x28484e75,0x2a484e75,0x2d48ffd4,0x4e752d48
+ .long 0xffd81d7c,0x0008ff4a,0x4e75082e,0x0006ff42
+ .long 0x6664102e,0xff430800,0x0005672c,0x08000004
+ .long 0x670a0240,0x007f0c40,0x0038661c,0xe9ee0183
+ .long 0xff4261ff,0x00000d6a,0x61ff0000,0x12060c00
+ .long 0x00066722,0x1d40ff4f,0xe9ee00c3,0xff4261ff
+ .long 0x00000cbe,0x61ff0000,0x11ea0c00,0x0006670e
+ .long 0x1d40ff4e,0x4e7561ff,0x00001148,0x60d661ff
+ .long 0x00001140,0x60ea302e,0xff420800,0x0005672c
+ .long 0x08000004,0x670a0240,0x007f0c40,0x0038661c
+ .long 0xe9ee0183,0xff4261ff,0x00000d06,0x61ff0000
+ .long 0x11a20c00,0x00066726,0x1d40ff4f,0xe9ee00c3
+ .long 0xff42e9ee,0x1283ff40,0x660000be,0x422eff4e
+ .long 0xe9ee1343,0xff40303b,0x02124efb,0x000e61ff
+ .long 0x000010e0,0x60d24afc,0x00080010,0x006a0000
+ .long 0x0000002e,0x0000004c,0x000061ff,0x00000a5c
+ .long 0xf2004000,0xf22ef080,0xff6cf281,0x00044e75
+ .long 0x1d7c0001,0xff4e4e75,0x61ff0000,0x0a3ef200
+ .long 0x5000f22e,0xf080ff6c,0xf2810004,0x4e751d7c
+ .long 0x0001ff4e,0x4e7561ff,0x00000a20,0xf2005800
+ .long 0xf22ef080,0xff6cf281,0x00044e75,0x1d7c0001
+ .long 0xff4e4e75,0x61ff0000,0x0a022d40,0xff5441ee
+ .long 0xff5461ff,0x000011de,0x1d40ff4e,0x0c000005
+ .long 0x670001a4,0x0c000004,0x6700015e,0xf2104400
+ .long 0xf22ef080,0xff6c4e75,0x422eff4e,0x303b020a
+ .long 0x4efb0006,0x4afc0008,0x001000e2,0x027202b0
+ .long 0x005601a0,0x009c0000,0x700461ff,0xfffffd22
+ .long 0x0c2e0080,0xff4a6726,0x61ffffff,0x4dde4a81
+ .long 0x66ff0000,0x1eecf200,0x4000f22e,0xf080ff6c
+ .long 0xf2810004,0x4e751d7c,0x0001ff4e,0x4e7561ff
+ .long 0xffff4d76,0x4a8166ff,0xffff6e8a,0x60d87002
+ .long 0x61ffffff,0xfcdc0c2e,0x0080ff4a,0x672661ff
+ .long 0xffff4d82,0x4a8166ff,0x00001e98,0xf2005000
+ .long 0xf22ef080,0xff6cf281,0x00044e75,0x1d7c0001
+ .long 0xff4e4e75,0x61ffffff,0x4d1a4a81,0x66ffffff
+ .long 0x6e4460d8,0x700161ff,0xfffffc96,0x0c2e0080
+ .long 0xff4a6726,0x61ffffff,0x4d264a81,0x66ff0000
+ .long 0x1e42f200,0x5800f22e,0xf080ff6c,0xf2810004
+ .long 0x4e751d7c,0x0001ff4e,0x4e7561ff,0xffff4cd4
+ .long 0x4a8166ff,0xffff6dfe,0x60d87004,0x61ffffff
+ .long 0xfc500c2e,0x0080ff4a,0x673e61ff,0xffff4d0c
+ .long 0x2d40ff54,0x4a8166ff,0x00001e16,0x41eeff54
+ .long 0x61ff0000,0x10a01d40,0xff4e0c00,0x00046700
+ .long 0x00280c00,0x00056700,0x005ef22e,0x4400ff54
+ .long 0xf22ef080,0xff6c4e75,0x61ffffff,0x4c8c4a81
+ .long 0x66ffffff,0x6da060c4,0x426eff6c,0xe9d00257
+ .long 0xe1882d40,0xff7042ae,0xff74426e,0xff6c0810
+ .long 0x00076706,0x08ee0007,0xff6c41ee,0xff6c61ff
+ .long 0x00000e78,0x323c3f81,0x9240836e,0xff6c1d7c
+ .long 0x0000ff4e,0x4e753d7c,0x7fffff6c,0xe9d00257
+ .long 0xe1882d40,0xff7042ae,0xff740810,0x00076706
+ .long 0x08ee0007,0xff6c4e75,0x700861ff,0xfffffb92
+ .long 0x0c2e0080,0xff4a6740,0x43eeff54,0x700861ff
+ .long 0xffff4bc4,0x4a8166ff,0x00001d64,0x41eeff54
+ .long 0x61ff0000,0x0f701d40,0xff4e0c00,0x00046700
+ .long 0x002e0c00,0x00056700,0x0068f22e,0x5400ff54
+ .long 0xf22ef080,0xff6c4e75,0x43eeff54,0x700861ff
+ .long 0xffff4b6e,0x4a8166ff,0xffff6cda,0x60be426e
+ .long 0xff6ce9d0,0x031f2d40,0xff70e9e8,0x02d50004
+ .long 0x720be3a8,0x2d40ff74,0x08100007,0x670608ee
+ .long 0x0007ff6c,0x41eeff6c,0x61ff0000,0x0dae323c
+ .long 0x3c019240,0x836eff6c,0x1d7c0000,0xff4e4e75
+ .long 0x3d7c7fff,0xff6ce9d0,0x031f2d40,0xff70e9e8
+ .long 0x02d50004,0x720be3a8,0x2d40ff74,0x08100007
+ .long 0x670608ee,0x0007ff6c,0x4e75700c,0x61ffffff
+ .long 0xfac043ee,0xff6c700c,0x61ffffff,0x4afa4a81
+ .long 0x66ff0000,0x1ca841ee,0xff6c61ff,0x00000e24
+ .long 0x0c000006,0x67061d40,0xff4e4e75,0x61ff0000
+ .long 0x0d821d40,0xff4e4e75,0x61ff0000,0x125441ee
+ .long 0xff6c61ff,0x00000dfc,0x0c000006,0x67061d40
+ .long 0xff4e4e75,0x61ff0000,0x0d5a1d40,0xff4e4e75
+ .long 0xe9ee10c3,0xff42327b,0x120a4efb,0x98064afc
+ .long 0x000800e0,0x01e00148,0x06200078,0x041a0010
+ .long 0x06204a2e,0xff4e664c,0xf228d080,0x0000f200
+ .long 0x9000f200,0x7800f23c,0x90000000,0x0000f201
+ .long 0xa800836e,0xff66122e,0xff410201,0x00386714
+ .long 0x206e000c,0x61ffffff,0x4ae84a81,0x66ff0000
+ .long 0x1c0a4e75,0x122eff41,0x02410007,0x61ff0000
+ .long 0x07644e75,0x22280000,0x02818000,0x00000081
+ .long 0x00800000,0xf2014400,0x60a44a2e,0xff4e664c
+ .long 0xf228d080,0x0000f200,0x9000f200,0x7000f23c
+ .long 0x90000000,0x0000f201,0xa800836e,0xff66122e
+ .long 0xff410201,0x00386714,0x206e000c,0x61ffffff
+ .long 0x4a964a81,0x66ff0000,0x1bb04e75,0x122eff41
+ .long 0x02410007,0x61ff0000,0x06c04e75,0x22280000
+ .long 0x02818000,0x00000081,0x00800000,0xf2014400
+ .long 0x60a44a2e,0xff4e664c,0xf228d080,0x0000f200
+ .long 0x9000f200,0x6000f23c,0x90000000,0x0000f201
+ .long 0xa800836e,0xff66122e,0xff410201,0x00386714
+ .long 0x206e000c,0x61ffffff,0x4a444a81,0x66ff0000
+ .long 0x1b564e75,0x122eff41,0x02410007,0x61ff0000
+ .long 0x061c4e75,0x22280000,0x02818000,0x00000081
+ .long 0x00800000,0xf2014400,0x60a43d68,0x0000ff84
+ .long 0x426eff86,0x2d680004,0xff882d68,0x0008ff8c
+ .long 0xf228d080,0x000061ff,0xfffff94c,0x224841ee
+ .long 0xff84700c,0x0c2e0008,0xff4a6726,0x61ffffff
+ .long 0x492c4a81,0x66000052,0x4a2eff4e,0x66024e75
+ .long 0x08ee0003,0xff66102e,0xff620200,0x000a6616
+ .long 0x4e7561ff,0xffff5788,0x4a816600,0x002c4a2e
+ .long 0xff4e66dc,0x4e7541ee,0xff8461ff,0x00000b3c
+ .long 0x44400240,0x7fff026e,0x8000ff84,0x816eff84
+ .long 0xf22ed040,0xff844e75,0x2caeffd4,0x60ff0000
+ .long 0x1ab20200,0x00300000,0x00402d40,0xff5c3028
+ .long 0x00000240,0x7fff0c40,0x407e6e00,0x00e66700
+ .long 0x01520c40,0x3f816d00,0x0058f228,0xd0800000
+ .long 0xf22e9000,0xff5cf23c,0x88000000,0x0000f200
+ .long 0x6400f23c,0x90000000,0x0000f201,0xa800836e
+ .long 0xff66122e,0xff410201,0x00386714,0x206e000c
+ .long 0x61ffffff,0x49184a81,0x66ff0000,0x1a2a4e75
+ .long 0x122eff41,0x02410007,0x61ff0000,0x04f04e75
+ .long 0x08ee0003,0xff663d68,0x0000ff84,0x2d680004
+ .long 0xff882d68,0x0008ff8c,0x2f084280,0x0c2e0004
+ .long 0xff4e660a,0x41eeff84,0x61ff0000,0x0a6e41ee
+ .long 0xff84222e,0xff5c61ff,0x00000c86,0x41eeff84
+ .long 0x61ff0000,0x034c122e,0xff410201,0x00386714
+ .long 0x206e000c,0x61ffffff,0x48a44a81,0x66ff0000
+ .long 0x19b6600e,0x122eff41,0x02410007,0x61ff0000
+ .long 0x047c122e,0xff620201,0x000a6600,0x00b8588f
+ .long 0x4e754a28,0x0007660e,0x4aa80008,0x6608006e
+ .long 0x1048ff66,0x6006006e,0x1248ff66,0x2f084a28
+ .long 0x00005bc1,0x202eff5c,0x61ff0000,0x0d12f210
+ .long 0xd080f200,0x6400122e,0xff410201,0x00386714
+ .long 0x206e000c,0x61ffffff,0x48344a81,0x66ff0000
+ .long 0x1946600e,0x122eff41,0x02410007,0x61ff0000
+ .long 0x040c122e,0xff620201,0x000a6600,0x007c588f
+ .long 0x4e753228,0x00000241,0x80000041,0x3fff3d41
+ .long 0xff842d68,0x0004ff88,0x2d680008,0xff8cf22e
+ .long 0x9000ff5c,0xf22e4800,0xff84f23c,0x90000000
+ .long 0x0000f200,0x0018f23c,0x58380002,0xf294fe7c
+ .long 0x6000ff50,0x205f3d68,0x0000ff84,0x2d680004
+ .long 0xff882d68,0x0008ff8c,0x0c2e0004,0xff4e662c
+ .long 0x41eeff84,0x61ff0000,0x09424480,0x02407fff
+ .long 0xefee004f,0xff846014,0x205f3d68,0x0000ff84
+ .long 0x2d680004,0xff882d68,0x0008ff8c,0x08ae0007
+ .long 0xff8456ee,0xff8641ee,0xff84122e,0xff5fe809
+ .long 0x0241000c,0x4841122e,0xff5fe809,0x02410003
+ .long 0x428061ff,0x00000782,0x4a2eff86,0x670608ee
+ .long 0x0007ff84,0xf22ed040,0xff844e75,0x02000030
+ .long 0x00000080,0x2d40ff5c,0x30280000,0x02407fff
+ .long 0x0c4043fe,0x6e0000c8,0x67000120,0x0c403c01
+ .long 0x6d000046,0xf228d080,0x0000f22e,0x9000ff5c
+ .long 0xf23c8800,0x00000000,0xf22e7400,0xff54f23c
+ .long 0x90000000,0x0000f200,0xa800816e,0xff66226e
+ .long 0x000c41ee,0xff547008,0x61ffffff,0x46304a81
+ .long 0x66ff0000,0x18004e75,0x08ee0003,0xff663d68
+ .long 0x0000ff84,0x2d680004,0xff882d68,0x0008ff8c
+ .long 0x2f084280,0x0c2e0004,0xff4e660a,0x41eeff84
+ .long 0x61ff0000,0x084641ee,0xff84222e,0xff5c61ff
+ .long 0x00000a5e,0x41eeff84,0x61ff0000,0x00d22d40
+ .long 0xff542d41,0xff58226e,0x000c41ee,0xff547008
+ .long 0x61ffffff,0x45c84a81,0x66ff0000,0x1798122e
+ .long 0xff620201,0x000a6600,0xfe9c588f,0x4e753028
+ .long 0x000a0240,0x07ff6608,0x006e1048,0xff666006
+ .long 0x006e1248,0xff662f08,0x4a280000,0x5bc1202e
+ .long 0xff5c61ff,0x00000af8,0xf210d080,0xf22e7400
+ .long 0xff54226e,0x000c41ee,0xff547008,0x61ffffff
+ .long 0x456c4a81,0x66ff0000,0x173c122e,0xff620201
+ .long 0x000a6600,0xfe74588f,0x4e753228,0x00000241
+ .long 0x80000041,0x3fff3d41,0xff842d68,0x0004ff88
+ .long 0x2d680008,0xff8cf22e,0x9000ff5c,0xf22e4800
+ .long 0xff84f23c,0x90000000,0x0000f200,0x0018f23c
+ .long 0x58380002,0xf294feae,0x6000ff64,0x42803028
+ .long 0x00000440,0x3fff0640,0x03ff4a28,0x00046b02
+ .long 0x53404840,0xe9884a28,0x00006a04,0x08c0001f
+ .long 0x22280004,0xe9c11054,0x80812d40,0xff542228
+ .long 0x00047015,0xe1a92d41,0xff582228,0x0008e9c1
+ .long 0x0015222e,0xff588280,0x202eff54,0x4e754280
+ .long 0x30280000,0x04403fff,0x0640007f,0x4a280004
+ .long 0x6b025340,0x4840ef88,0x4a280000,0x6a0408c0
+ .long 0x001f2228,0x00040281,0x7fffff00,0xe0898081
+ .long 0x4e7561ff,0xfffff490,0x2f08102e,0xff4e6600
+ .long 0x0082082e,0x0004ff42,0x6712122e,0xff43e809
+ .long 0x02410007,0x61ff0000,0x00926004,0x102eff43
+ .long 0xebc00647,0x2f0041ee,0xff6c61ff,0x00000ed0
+ .long 0x02aecfff,0xf00fff84,0x201f4a2e,0xff876616
+ .long 0x4aaeff88,0x66104aae,0xff8c660a,0x4a806606
+ .long 0x026ef000,0xff8441ee,0xff84225f,0x700c0c2e
+ .long 0x0008ff4a,0x670e61ff,0xffff4412,0x4a816600
+ .long 0xfb384e75,0x61ffffff,0x52864a81,0x6600fb2a
+ .long 0x4e750c00,0x00046700,0xff7a41ee,0xff6c426e
+ .long 0xff6e0c00,0x00056702,0x60c0006e,0x4080ff66
+ .long 0x08ee0006,0xff7060b2,0x303b1206,0x4efb0002
+ .long 0x00200026,0x002c0030,0x00340038,0x003c0040
+ .long 0x0044004a,0x00500054,0x0058005c,0x00600064
+ .long 0x202eff9c,0x4e75202e,0xffa04e75,0x20024e75
+ .long 0x20034e75,0x20044e75,0x20054e75,0x20064e75
+ .long 0x20074e75,0x202effa4,0x4e75202e,0xffa84e75
+ .long 0x200a4e75,0x200b4e75,0x200c4e75,0x200d4e75
+ .long 0x20164e75,0x202effd8,0x4e75323b,0x12064efb
+ .long 0x10020010,0x0016001c,0x00200024,0x0028002c
+ .long 0x00302d40,0xff9c4e75,0x2d40ffa0,0x4e752400
+ .long 0x4e752600,0x4e752800,0x4e752a00,0x4e752c00
+ .long 0x4e752e00,0x4e75323b,0x12064efb,0x10020010
+ .long 0x0016001c,0x00200024,0x0028002c,0x00303d40
+ .long 0xff9e4e75,0x3d40ffa2,0x4e753400,0x4e753600
+ .long 0x4e753800,0x4e753a00,0x4e753c00,0x4e753e00
+ .long 0x4e75323b,0x12064efb,0x10020010,0x0016001c
+ .long 0x00200024,0x0028002c,0x00301d40,0xff9f4e75
+ .long 0x1d40ffa3,0x4e751400,0x4e751600,0x4e751800
+ .long 0x4e751a00,0x4e751c00,0x4e751e00,0x4e75323b
+ .long 0x12064efb,0x10020010,0x0016001c,0x00200024
+ .long 0x0028002c,0x0030d1ae,0xffa44e75,0xd1aeffa8
+ .long 0x4e75d5c0,0x4e75d7c0,0x4e75d9c0,0x4e75dbc0
+ .long 0x4e75d196,0x4e751d7c,0x0004ff4a,0x0c000001
+ .long 0x6706d1ae,0xffd84e75,0x54aeffd8,0x4e75323b
+ .long 0x12064efb,0x10020010,0x0016001c,0x00200024
+ .long 0x0028002c,0x003091ae,0xffa44e75,0x91aeffa8
+ .long 0x4e7595c0,0x4e7597c0,0x4e7599c0,0x4e759bc0
+ .long 0x4e759196,0x4e751d7c,0x0008ff4a,0x0c000001
+ .long 0x670691ae,0xffd84e75,0x55aeffd8,0x4e75303b
+ .long 0x02064efb,0x00020010,0x00280040,0x004c0058
+ .long 0x00640070,0x007c2d6e,0xffdcff6c,0x2d6effe0
+ .long 0xff702d6e,0xffe4ff74,0x41eeff6c,0x4e752d6e
+ .long 0xffe8ff6c,0x2d6effec,0xff702d6e,0xfff0ff74
+ .long 0x41eeff6c,0x4e75f22e,0xf020ff6c,0x41eeff6c
+ .long 0x4e75f22e,0xf010ff6c,0x41eeff6c,0x4e75f22e
+ .long 0xf008ff6c,0x41eeff6c,0x4e75f22e,0xf004ff6c
+ .long 0x41eeff6c,0x4e75f22e,0xf002ff6c,0x41eeff6c
+ .long 0x4e75f22e,0xf001ff6c,0x41eeff6c,0x4e75303b
+ .long 0x02064efb,0x00020010,0x00280040,0x004c0058
+ .long 0x00640070,0x007c2d6e,0xffdcff78,0x2d6effe0
+ .long 0xff7c2d6e,0xffe4ff80,0x41eeff78,0x4e752d6e
+ .long 0xffe8ff78,0x2d6effec,0xff7c2d6e,0xfff0ff80
+ .long 0x41eeff78,0x4e75f22e,0xf020ff78,0x41eeff78
+ .long 0x4e75f22e,0xf010ff78,0x41eeff78,0x4e75f22e
+ .long 0xf008ff78,0x41eeff78,0x4e75f22e,0xf004ff78
+ .long 0x41eeff78,0x4e75f22e,0xf002ff78,0x41eeff78
+ .long 0x4e75f22e,0xf001ff78,0x41eeff78,0x4e75303b
+ .long 0x02064efb,0x00020010,0x00180020,0x002a0034
+ .long 0x003e0048,0x0052f22e,0xf080ffdc,0x4e75f22e
+ .long 0xf080ffe8,0x4e75f227,0xe001f21f,0xd0204e75
+ .long 0xf227e001,0xf21fd010,0x4e75f227,0xe001f21f
+ .long 0xd0084e75,0xf227e001,0xf21fd004,0x4e75f227
+ .long 0xe001f21f,0xd0024e75,0xf227e001,0xf21fd001
+ .long 0x4e750000,0x3f813c01,0xe408323b,0x02f63001
+ .long 0x90680000,0x0c400042,0x6a164280,0x082e0001
+ .long 0xff666704,0x08c0001d,0x61ff0000,0x001a4e75
+ .long 0x203c2000,0x00003141,0x000042a8,0x000442a8
+ .long 0x00084e75,0x2d680008,0xff542d40,0xff582001
+ .long 0x92680000,0x6f100c41,0x00206d10,0x0c410040
+ .long 0x6d506000,0x009a202e,0xff584e75,0x2f023140
+ .long 0x00007020,0x90410c41,0x001d6d08,0x142eff58
+ .long 0x852eff57,0xe9e82020,0x0004e9e8,0x18000004
+ .long 0xe9ee0800,0xff542142,0x00042141,0x0008e8c0
+ .long 0x009e6704,0x08c0001d,0x0280e000,0x0000241f
+ .long 0x4e752f02,0x31400000,0x04410020,0x70209041
+ .long 0x142eff58,0x852eff57,0xe9e82020,0x0004e9e8
+ .long 0x18000004,0xe8c1009e,0x660ce8ee,0x081fff54
+ .long 0x66042001,0x60062001,0x08c0001d,0x42a80004
+ .long 0x21420008,0x0280e000,0x0000241f,0x4e753140
+ .long 0x00000c41,0x00416d12,0x672442a8,0x000442a8
+ .long 0x0008203c,0x20000000,0x4e752028,0x00042200
+ .long 0x0280c000,0x00000281,0x3fffffff,0x60122028
+ .long 0x00040280,0x80000000,0xe2880281,0x7fffffff
+ .long 0x66164aa8,0x00086610,0x4a2eff58,0x660a42a8
+ .long 0x000442a8,0x00084e75,0x08c0001d,0x42a80004
+ .long 0x42a80008,0x4e7561ff,0x00000110,0x4a806700
+ .long 0x00fa006e,0x0208ff66,0x327b1206,0x4efb9802
+ .long 0x004000ea,0x00240008,0x4a280002,0x6b0000dc
+ .long 0x70ff4841,0x0c010004,0x6700003e,0x6e000094
+ .long 0x60000064,0x4a280002,0x6a0000c0,0x70ff4841
+ .long 0x0c010004,0x67000022,0x6e000078,0x60000048
+ .long 0xe3806400,0x00a64841,0x0c010004,0x6700000a
+ .long 0x6e000060,0x60000030,0x06a80000,0x01000004
+ .long 0x640ce4e8,0x0004e4e8,0x00065268,0x00004a80
+ .long 0x66060268,0xfe000006,0x02a8ffff,0xff000004
+ .long 0x42a80008,0x4e7552a8,0x0008641a,0x52a80004
+ .long 0x6414e4e8,0x0004e4e8,0x0006e4e8,0x0008e4e8
+ .long 0x000a5268,0x00004a80,0x66060228,0x00fe000b
+ .long 0x4e7506a8,0x00000800,0x0008641a,0x52a80004
+ .long 0x6414e4e8,0x0004e4e8,0x0006e4e8,0x0008e4e8
+ .long 0x000a5268,0x00004a80,0x66060268,0xf000000a
+ .long 0x02a8ffff,0xf8000008,0x4e754841,0x0c010004
+ .long 0x6700ff86,0x6eea4e75,0x48414a01,0x66044841
+ .long 0x4e7548e7,0x30000c01,0x00046622,0xe9e83602
+ .long 0x0004741e,0xe5ab2428,0x00040282,0x0000003f
+ .long 0x66284aa8,0x00086622,0x4a80661e,0x6020e9e8
+ .long 0x35420008,0x741ee5ab,0x24280008,0x02820000
+ .long 0x01ff6606,0x4a806602,0x600408c3,0x001d2003
+ .long 0x4cdf000c,0x48414e75,0x2f022f03,0x20280004
+ .long 0x22280008,0xedc02000,0x671ae5a8,0xe9c13022
+ .long 0x8083e5a9,0x21400004,0x21410008,0x2002261f
+ .long 0x241f4e75,0xedc12000,0xe5a90682,0x00000020
+ .long 0x21410004,0x42a80008,0x2002261f,0x241f4e75
+ .long 0xede80000,0x0004660e,0xede80000,0x00086700
+ .long 0x00740640,0x00204281,0x32280000,0x02417fff
+ .long 0xb0416e1c,0x92403028,0x00000240,0x80008240
+ .long 0x31410000,0x61ffffff,0xff82103c,0x00004e75
+ .long 0x0c010020,0x6e20e9e8,0x08400004,0x21400004
+ .long 0x20280008,0xe3a82140,0x00080268,0x80000000
+ .long 0x103c0004,0x4e750441,0x00202028,0x0008e3a8
+ .long 0x21400004,0x42a80008,0x02688000,0x0000103c
+ .long 0x00044e75,0x02688000,0x0000103c,0x00014e75
+ .long 0x30280000,0x02407fff,0x0c407fff,0x67480828
+ .long 0x00070004,0x6706103c,0x00004e75,0x4a406618
+ .long 0x4aa80004,0x660c4aa8,0x00086606,0x103c0001
+ .long 0x4e75103c,0x00044e75,0x4aa80004,0x66124aa8
+ .long 0x0008660c,0x02688000,0x0000103c,0x00014e75
+ .long 0x103c0006,0x4e754aa8,0x00086612,0x20280004
+ .long 0x02807fff,0xffff6606,0x103c0002,0x4e750828
+ .long 0x00060004,0x6706103c,0x00034e75,0x103c0005
+ .long 0x4e752028,0x00002200,0x02807ff0,0x0000670e
+ .long 0x0c807ff0,0x00006728,0x103c0000,0x4e750281
+ .long 0x000fffff,0x66ff0000,0x00144aa8,0x000466ff
+ .long 0x0000000a,0x103c0001,0x4e75103c,0x00044e75
+ .long 0x0281000f,0xffff66ff,0x00000014,0x4aa80004
+ .long 0x66ff0000,0x000a103c,0x00024e75,0x08010013
+ .long 0x66ff0000,0x000a103c,0x00054e75,0x103c0003
+ .long 0x4e752028,0x00002200,0x02807f80,0x0000670e
+ .long 0x0c807f80,0x0000671e,0x103c0000,0x4e750281
+ .long 0x007fffff,0x66ff0000,0x000a103c,0x00014e75
+ .long 0x103c0004,0x4e750281,0x007fffff,0x66ff0000
+ .long 0x000a103c,0x00024e75,0x08010016,0x66ff0000
+ .long 0x000a103c,0x00054e75,0x103c0003,0x4e752f01
+ .long 0x08280007,0x000056e8,0x00023228,0x00000241
+ .long 0x7fff9240,0x31410000,0x2f08202f,0x00040240
+ .long 0x00c0e848,0x61ffffff,0xfae22057,0x322f0006
+ .long 0x024100c0,0xe8494841,0x322f0006,0x02410030
+ .long 0xe84961ff,0xfffffc22,0x205f08a8,0x00070000
+ .long 0x4a280002,0x670a08e8,0x00070000,0x42280002
+ .long 0x42804aa8,0x0004660a,0x4aa80008,0x660408c0
+ .long 0x0002082e,0x0001ff66,0x670608ee,0x0005ff67
+ .long 0x588f4e75,0x2f010828,0x00070000,0x56e80002
+ .long 0x32280000,0x02417fff,0x92403141,0x00002f08
+ .long 0x428061ff,0xfffffa64,0x2057323c,0x00044841
+ .long 0x322f0006,0x02410030,0xe84961ff,0xfffffbaa
+ .long 0x205f08a8,0x00070000,0x4a280002,0x670a08e8
+ .long 0x00070000,0x42280002,0x42804aa8,0x0004660a
+ .long 0x4aa80008,0x660408c0,0x0002082e,0x0001ff66
+ .long 0x670608ee,0x0005ff67,0x588f4e75,0x02410010
+ .long 0xe8088200,0x3001e309,0x600e0241,0x00108200
+ .long 0x48408200,0x3001e309,0x103b0008,0x41fb1620
+ .long 0x4e750200,0x00020200,0x00020200,0x00020000
+ .long 0x00000a08,0x0a080a08,0x0a080a08,0x0a087fff
+ .long 0x00000000,0x00000000,0x00000000,0x00007ffe
+ .long 0x0000ffff,0xffffffff,0xffff0000,0x00007ffe
+ .long 0x0000ffff,0xffffffff,0xffff0000,0x00007fff
+ .long 0x00000000,0x00000000,0x00000000,0x00007fff
+ .long 0x00000000,0x00000000,0x00000000,0x0000407e
+ .long 0x0000ffff,0xff000000,0x00000000,0x0000407e
+ .long 0x0000ffff,0xff000000,0x00000000,0x00007fff
+ .long 0x00000000,0x00000000,0x00000000,0x00007fff
+ .long 0x00000000,0x00000000,0x00000000,0x000043fe
+ .long 0x0000ffff,0xffffffff,0xf8000000,0x000043fe
+ .long 0x0000ffff,0xffffffff,0xf8000000,0x00007fff
+ .long 0x00000000,0x00000000,0x00000000,0x00000000
+ .long 0x00000000,0x00000000,0x00000000,0x00000000
+ .long 0x00000000,0x00000000,0x00000000,0x00000000
+ .long 0x00000000,0x00000000,0x00000000,0x00000000
+ .long 0x00000000,0x00000000,0x00000000,0x0000ffff
+ .long 0x00000000,0x00000000,0x00000000,0x0000fffe
+ .long 0x0000ffff,0xffffffff,0xffff0000,0x0000ffff
+ .long 0x00000000,0x00000000,0x00000000,0x0000fffe
+ .long 0x0000ffff,0xffffffff,0xffff0000,0x0000ffff
+ .long 0x00000000,0x00000000,0x00000000,0x0000c07e
+ .long 0x0000ffff,0xff000000,0x00000000,0x0000ffff
+ .long 0x00000000,0x00000000,0x00000000,0x0000c07e
+ .long 0x0000ffff,0xff000000,0x00000000,0x0000ffff
+ .long 0x00000000,0x00000000,0x00000000,0x0000c3fe
+ .long 0x0000ffff,0xffffffff,0xf8000000,0x0000ffff
+ .long 0x00000000,0x00000000,0x00000000,0x0000c3fe
+ .long 0x0000ffff,0xffffffff,0xf8000000,0x0000700c
+ .long 0x61ffffff,0xe82c43ee,0xff6c700c,0x61ffffff
+ .long 0x38664a81,0x66ff0000,0x0a14e9ee,0x004fff6c
+ .long 0x0c407fff,0x66024e75,0x102eff6f,0x0200000f
+ .long 0x660e4aae,0xff706608,0x4aaeff74,0x66024e75
+ .long 0x41eeff6c,0x61ff0000,0x001cf22e,0xf080ff6c
+ .long 0x4e750000,0x00000203,0x02030203,0x03020302
+ .long 0x02032d68,0x0000ff84,0x2d680004,0xff882d68
+ .long 0x0008ff8c,0x41eeff84,0x48e73c00,0xf227e001
+ .long 0x74027604,0x28104281,0x4c3c1001,0x0000000a
+ .long 0xe9c408c4,0xd2805803,0x51caffee,0x0804001e
+ .long 0x67024481,0x04810000,0x00106c0e,0x44810084
+ .long 0x40000000,0x00904000,0x00002f01,0x7201f23c
+ .long 0x44000000,0x0000e9d0,0x0704f200,0x58222830
+ .long 0x1c007600,0x7407f23c,0x44234120,0x0000e9c4
+ .long 0x08c4f200,0x58225803,0x51caffec,0x52810c81
+ .long 0x00000002,0x6fd80810,0x001f6704,0xf200001a
+ .long 0x22170c81,0x0000001b,0x6f0000e4,0x0810001e
+ .long 0x66744281,0x2810e9c4,0x07046624,0x52817a01
+ .long 0x28305c00,0x66085081,0x52852830,0x5c004283
+ .long 0x7407e9c4,0x08c46608,0x58835281,0x51cafff4
+ .long 0x20012217,0x92806c10,0x44812810,0x00844000
+ .long 0x00000090,0x40000000,0x43fb0170,0x00000666
+ .long 0x4283f23c,0x44803f80,0x00007403,0xe2806406
+ .long 0xf23148a3,0x38000683,0x0000000c,0x4a8066ec
+ .long 0xf2000423,0x60684281,0x7a022830,0x5c006608
+ .long 0x53855081,0x28305c00,0x761c7407,0xe9c408c4
+ .long 0x66085983,0x528151ca,0xfff42001,0x22179280
+ .long 0x6e104481,0x28100284,0xbfffffff,0x0290bfff
+ .long 0xffff43fb,0x01700000,0x05fc4283,0xf23c4480
+ .long 0x3f800000,0x7403e280,0x6406f231,0x48a33800
+ .long 0x06830000,0x000c4a80,0x66ecf200,0x0420262e
+ .long 0xff60e9c3,0x26822810,0xe582e9c4,0x0002d480
+ .long 0x43fafe50,0x10312800,0x4283efc3,0x0682f203
+ .long 0x9000e280,0x640a43fb,0x01700000,0x06446016
+ .long 0xe280640a,0x43fb0170,0x000006d2,0x600843fb
+ .long 0x01700000,0x05902001,0x6a084480,0x00904000
+ .long 0x00004283,0xf23c4480,0x3f800000,0xe2806406
+ .long 0xf23148a3,0x38000683,0x0000000c,0x4a8066ec
+ .long 0x0810001e,0x6706f200,0x04206004,0xf2000423
+ .long 0xf200a800,0x08800009,0x6706006e,0x0108ff66
+ .long 0x588ff21f,0xd0404cdf,0x003cf23c,0x90000000
+ .long 0x0000f23c,0x88000000,0x00004e75,0x3ffd0000
+ .long 0x9a209a84,0xfbcff798,0x00000000,0x3ffd0000
+ .long 0x9a209a84,0xfbcff799,0x00000000,0x3f800000
+ .long 0x00000000,0x00000000,0x00000000,0x40000000
+ .long 0x00000000,0x00000000,0x00000000,0x41200000
+ .long 0x00000000,0x00000000,0x00000000,0x459a2800
+ .long 0x00000000,0x00000000,0x00000000,0x00000000
+ .long 0x03030202,0x03020203,0x02030302,0x48e73f20
+ .long 0xf227e007,0xf23c9000,0x00000020,0x2d50ff58
+ .long 0x2e00422e,0xff500c2e,0x0004ff4e,0x66000030
+ .long 0x30100240,0x7fff2228,0x00042428,0x00085340
+ .long 0xe38ae391,0x4a816cf6,0x4a406e04,0x50eeff50
+ .long 0x02407fff,0x30802141,0x00042142,0x00082d50
+ .long 0xff902d68,0x0004ff94,0x2d680008,0xff9802ae
+ .long 0x7fffffff,0xff904a2e,0xff506708,0x2c3cffff
+ .long 0xecbb6038,0x302eff90,0x3d7c3fff,0xff90f22e
+ .long 0x4800ff90,0x04403fff,0xf2005022,0xf23a4428
+ .long 0xff1cf293,0x000ef23a,0x4823ff02,0xf2066000
+ .long 0x600af23a,0x4823fee6,0xf2066000,0xf23c8800
+ .long 0x00000000,0x42454a87,0x6f042807,0x60062806
+ .long 0x98875284,0x4a846f18,0x0c840000,0x00116f12
+ .long 0x78114a87,0x6f0c00ae,0x00002080,0xff646002
+ .long 0x78014a87,0x6e06be86,0x6d022c07,0x20065280
+ .long 0x90844845,0x42454242,0x4a806c14,0x52450c80
+ .long 0xffffecd4,0x6e080680,0x00000018,0x74184480
+ .long 0xf23a4480,0xfe98e9ee,0x1682ff60,0xe349d245
+ .long 0xe3494aae,0xff586c02,0x528145fa,0xfec01632
+ .long 0x1800e98b,0xf2039000,0xe88b4a03,0x660a43fb
+ .long 0x01700000,0x03706016,0xe20b640a,0x43fb0170
+ .long 0x000003fe,0x600843fb,0x01700000,0x04904283
+ .long 0xe2886406,0xf23148a3,0x38000683,0x0000000c
+ .long 0x4a8066ec,0xf23c8800,0x00000000,0xf23c9000
+ .long 0x00000010,0xf2104800,0xf2000018,0x4a456608
+ .long 0xf2000420,0x6000008e,0x4a2eff50,0x67000072
+ .long 0xf227e002,0x36170243,0x7fff0050,0x8000d650
+ .long 0x04433fff,0xd6690024,0x04433fff,0xd6690030
+ .long 0x04433fff,0x6b000048,0x02578000,0x87570250
+ .long 0x7fff2f28,0x00082f28,0x00042f3c,0x3fff0000
+ .long 0xf21fd080,0xf21f4823,0x2f29002c,0x2f290028
+ .long 0x2f3c3fff,0x00002f29,0x00382f29,0x00342f3c
+ .long 0x3fff0000,0xf21f4823,0xf21f4823,0x601660fe
+ .long 0x4a42670c,0xf2294823,0x0024f229,0x48230030
+ .long 0xf2000423,0xf200a800,0xf22e6800,0xff9045ee
+ .long 0xff900800,0x0009670e,0x00aa0000,0x00010008
+ .long 0xf22e4800,0xff902d6e,0xff60ff54,0x02ae0000
+ .long 0x0030ff60,0x48e7c0c0,0x2f2eff54,0x2f2eff58
+ .long 0x41eeff90,0xf2106800,0x4aaeff58,0x6c060090
+ .long 0x80000000,0x2f2eff64,0xf22e9000,0xff60f23c
+ .long 0x88000000,0x0000f22e,0x4801ff90,0xf200a800
+ .long 0x816eff66,0x1d57ff64,0x588f2d5f,0xff582d5f
+ .long 0xff544cdf,0x03032d6e,0xff58ff90,0x2d6eff54
+ .long 0xff604845,0x4a4566ff,0x00000086,0xf23a4500
+ .long 0xfcec2004,0x53804283,0xe2886406,0xf2314923
+ .long 0x38000683,0x0000000c,0x4a8066ec,0x4a2eff50
+ .long 0x670af200,0x001860ff,0x00000028,0xf2000018
+ .long 0xf2000838,0xf293001a,0x53863a3c,0x0001f23c
+ .long 0x90000000,0x0020f23a,0x4523fcc2,0x6000fda8
+ .long 0xf23a4523,0xfcb8f200,0x0838f294,0x005cf292
+ .long 0x000cf23a,0x4420fca6,0x5286604c,0x52863a3c
+ .long 0x0001f23c,0x90000000,0x00206000,0xfd7af23a
+ .long 0x4500fc6a,0x20044283,0xe2886406,0xf2314923
+ .long 0x38000683,0x0000000c,0x4a8066ec,0xf2000018
+ .long 0xf2000838,0xf28e0012,0xf23a4420,0xfc605286
+ .long 0x5284f23a,0x4523fc56,0xf23c9000,0x00000010
+ .long 0xf2000820,0x41eeff84,0xf2106800,0x24280004
+ .long 0x26280008,0x42a80004,0x42a80008,0x20104840
+ .long 0x67140480,0x00003ffd,0x4a806e0a,0x4480e28a
+ .long 0xe29351c8,0xfffa4a82,0x66044a83,0x67104281
+ .long 0x06830000,0x0080d581,0x0283ffff,0xff802004
+ .long 0x568861ff,0x000002b0,0x4a2eff50,0x6728f200
+ .long 0x003af281,0x000cf206,0x4000f200,0x0018602e
+ .long 0x4a876d08,0xf23a4400,0xfbe46022,0xf2064000
+ .long 0xf2000018,0x6018f200,0x003af28e,0x000af23a
+ .long 0x4400fb9a,0x6008f206,0x4000f200,0x0018f229
+ .long 0x48200018,0xf22e6800,0xff90242a,0x0004262a
+ .long 0x00083012,0x670e0440,0x3ffd4440,0xe28ae293
+ .long 0x51c8fffa,0x42810683,0x00000080,0xd5810283
+ .long 0xffffff80,0x700441ee,0xff5461ff,0x00000228
+ .long 0x202eff54,0x720ce2a8,0xefee010c,0xff84e2a8
+ .long 0xefee0404,0xff844a00,0x670800ae,0x00002080
+ .long 0xff644280,0x022e000f,0xff844aae,0xff586c02
+ .long 0x70024a86,0x6c025280,0xefee0002,0xff84f23c
+ .long 0x88000000,0x0000f21f,0xd0e04cdf,0x04fc4e75
+ .long 0x40020000,0xa0000000,0x00000000,0x40050000
+ .long 0xc8000000,0x00000000,0x400c0000,0x9c400000
+ .long 0x00000000,0x40190000,0xbebc2000,0x00000000
+ .long 0x40340000,0x8e1bc9bf,0x04000000,0x40690000
+ .long 0x9dc5ada8,0x2b70b59e,0x40d30000,0xc2781f49
+ .long 0xffcfa6d5,0x41a80000,0x93ba47c9,0x80e98ce0
+ .long 0x43510000,0xaa7eebfb,0x9df9de8e,0x46a30000
+ .long 0xe319a0ae,0xa60e91c7,0x4d480000,0xc9767586
+ .long 0x81750c17,0x5a920000,0x9e8b3b5d,0xc53d5de5
+ .long 0x75250000,0xc4605202,0x8a20979b,0x40020000
+ .long 0xa0000000,0x00000000,0x40050000,0xc8000000
+ .long 0x00000000,0x400c0000,0x9c400000,0x00000000
+ .long 0x40190000,0xbebc2000,0x00000000,0x40340000
+ .long 0x8e1bc9bf,0x04000000,0x40690000,0x9dc5ada8
+ .long 0x2b70b59e,0x40d30000,0xc2781f49,0xffcfa6d6
+ .long 0x41a80000,0x93ba47c9,0x80e98ce0,0x43510000
+ .long 0xaa7eebfb,0x9df9de8e,0x46a30000,0xe319a0ae
+ .long 0xa60e91c7,0x4d480000,0xc9767586,0x81750c18
+ .long 0x5a920000,0x9e8b3b5d,0xc53d5de5,0x75250000
+ .long 0xc4605202,0x8a20979b,0x40020000,0xa0000000
+ .long 0x00000000,0x40050000,0xc8000000,0x00000000
+ .long 0x400c0000,0x9c400000,0x00000000,0x40190000
+ .long 0xbebc2000,0x00000000,0x40340000,0x8e1bc9bf
+ .long 0x04000000,0x40690000,0x9dc5ada8,0x2b70b59d
+ .long 0x40d30000,0xc2781f49,0xffcfa6d5,0x41a80000
+ .long 0x93ba47c9,0x80e98cdf,0x43510000,0xaa7eebfb
+ .long 0x9df9de8d,0x46a30000,0xe319a0ae,0xa60e91c6
+ .long 0x4d480000,0xc9767586,0x81750c17,0x5a920000
+ .long 0x9e8b3b5d,0xc53d5de4,0x75250000,0xc4605202
+ .long 0x8a20979a,0x48e7ff00,0x7e015380,0x28022a03
+ .long 0xe9c21003,0xe782e9c3,0x6003e783,0x8486e385
+ .long 0xe3944846,0xd346d685,0x4e71d584,0x4e71d346
+ .long 0x48464a47,0x67124847,0xe947de41,0x10c74847
+ .long 0x424751c8,0xffc86012,0x48473e01,0x48475247
+ .long 0x51c8ffba,0x4847e94f,0x10c74cdf,0x00ff4e75
+ .long 0x70016100,0x00d63d7c,0x0121000a,0x6000007e
+ .long 0x70026100,0x00c63d7c,0x0141000a,0x606e7004
+ .long 0x610000b8,0x3d7c0101,0x000a6060,0x70086100
+ .long 0x00aa3d7c,0x0161000a,0x6052700c,0x6100009c
+ .long 0x3d7c0161,0x000a6044,0x70016100,0x008e3d7c
+ .long 0x00a1000a,0x60367002,0x61000080,0x3d7c00c1
+ .long 0x000a6028,0x70046100,0x00723d7c,0x0081000a
+ .long 0x601a7008,0x61000064,0x3d7c00e1,0x000a600c
+ .long 0x700c6100,0x00563d7c,0x00e1000a,0x2d6eff68
+ .long 0x0006f22e,0xd0c0ffdc,0xf22e9c00,0xff604cee
+ .long 0x0303ff9c,0x4e5e2f17,0x2f6f0008,0x00042f6f
+ .long 0x000c0008,0x2f7c0000,0x0001000c,0x3f6f0006
+ .long 0x000c3f7c,0x40080006,0x08170005,0x670608ef
+ .long 0x0002000d,0x60ffffff,0x2d82122e,0xff410201
+ .long 0x00380c01,0x00186700,0x000c0c01,0x00206700
+ .long 0x00604e75,0x122eff41,0x02410007,0x323b1206
+ .long 0x4efb1002,0x00100016,0x001c0020,0x00240028
+ .long 0x002c0030,0x91aeffa4,0x4e7591ae,0xffa84e75
+ .long 0x95c04e75,0x97c04e75,0x99c04e75,0x9bc04e75
+ .long 0x91964e75,0x0c2e0030,0x000a6612,0x082e0005
+ .long 0x0004660a,0x4e7a8800,0x91c04e7b,0x88004e75
+ .long 0x448060a0,0x00000000,0x00000000,0x00000000
diff --git a/arch/m68k/ifpsp060/fskeleton.S b/arch/m68k/ifpsp060/fskeleton.S
new file mode 100644
index 00000000000..a45a4ff9d2a
--- /dev/null
+++ b/arch/m68k/ifpsp060/fskeleton.S
@@ -0,0 +1,342 @@
+|~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+|MOTOROLA MICROPROCESSOR & MEMORY TECHNOLOGY GROUP
+|M68000 Hi-Performance Microprocessor Division
+|M68060 Software Package
+|Production Release P1.00 -- October 10, 1994
+|
+|M68060 Software Package Copyright © 1993, 1994 Motorola Inc. All rights reserved.
+|
+|THE SOFTWARE is provided on an "AS IS" basis and without warranty.
+|To the maximum extent permitted by applicable law,
+|MOTOROLA DISCLAIMS ALL WARRANTIES WHETHER EXPRESS OR IMPLIED,
+|INCLUDING IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
+|and any warranty against infringement with regard to the SOFTWARE
+|(INCLUDING ANY MODIFIED VERSIONS THEREOF) and any accompanying written materials.
+|
+|To the maximum extent permitted by applicable law,
+|IN NO EVENT SHALL MOTOROLA BE LIABLE FOR ANY DAMAGES WHATSOEVER
+|(INCLUDING WITHOUT LIMITATION, DAMAGES FOR LOSS OF BUSINESS PROFITS,
+|BUSINESS INTERRUPTION, LOSS OF BUSINESS INFORMATION, OR OTHER PECUNIARY LOSS)
+|ARISING OF THE USE OR INABILITY TO USE THE SOFTWARE.
+|Motorola assumes no responsibility for the maintenance and support of the SOFTWARE.
+|
+|You are hereby granted a copyright license to use, modify, and distribute the SOFTWARE
+|so long as this entire notice is retained without alteration in any modified and/or
+|redistributed versions, and that such modified versions are clearly identified as such.
+|No licenses are granted by implication, estoppel or otherwise under any patents
+|or trademarks of Motorola, Inc.
+|~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+| fskeleton.s
+|
+| This file contains:
+| (1) example "Call-out"s
+| (2) example package entry code
+| (3) example "Call-out" table
+|
+
+#include <linux/linkage.h>
+
+|################################
+| (1) EXAMPLE CALL-OUTS #
+| #
+| _060_fpsp_done() #
+| _060_real_ovfl() #
+| _060_real_unfl() #
+| _060_real_operr() #
+| _060_real_snan() #
+| _060_real_dz() #
+| _060_real_inex() #
+| _060_real_bsun() #
+| _060_real_fline() #
+| _060_real_fpu_disabled() #
+| _060_real_trap() #
+|################################
+
+|
+| _060_fpsp_done():
+|
+| This is the main exit point for the 68060 Floating-Point
+| Software Package. For a normal exit, all 060FPSP routines call this
+| routine. The operating system can do system dependent clean-up or
+| simply exit, as the sample code below does by branching to _060_isp_done.
+|
+ .global _060_fpsp_done
+_060_fpsp_done:
+ bral _060_isp_done | do the same as isp_done
+
+|
+| _060_real_ovfl():
+|
+| This is the exit point for the 060FPSP when an enabled overflow exception
+| is present. The routine below should point to the operating system handler
+| for enabled overflow conditions. The exception stack frame is an overflow
+| stack frame. The FP state frame holds the EXCEPTIONAL OPERAND.
+|
+| The sample routine below simply clears the exception status bit and
+| branches to the trap handler.
+|
+ .global _060_real_ovfl
+_060_real_ovfl:
+ fsave -(%sp)
+ move.w #0x6000,0x2(%sp)
+ frestore (%sp)+
+ bral trap | jump to trap handler
+
+
+|
+| _060_real_unfl():
+|
+| This is the exit point for the 060FPSP when an enabled underflow exception
+| is present. The routine below should point to the operating system handler
+| for enabled underflow conditions. The exception stack frame is an underflow
+| stack frame. The FP state frame holds the EXCEPTIONAL OPERAND.
+|
+| The sample routine below simply clears the exception status bit and
+| branches to the trap handler.
+|
+ .global _060_real_unfl
+_060_real_unfl:
+ fsave -(%sp)
+ move.w #0x6000,0x2(%sp)
+ frestore (%sp)+
+ bral trap | jump to trap handler
+
+|
+| _060_real_operr():
+|
+| This is the exit point for the 060FPSP when an enabled operand error exception
+| is present. The routine below should point to the operating system handler
+| for enabled operand error exceptions. The exception stack frame is an operand error
+| stack frame. The FP state frame holds the source operand of the faulting
+| instruction.
+|
+| The sample routine below simply clears the exception status bit and
+| branches to the trap handler.
+|
+ .global _060_real_operr
+_060_real_operr:
+ fsave -(%sp)
+ move.w #0x6000,0x2(%sp)
+ frestore (%sp)+
+ bral trap | jump to trap handler
+
+|
+| _060_real_snan():
+|
+| This is the exit point for the 060FPSP when an enabled signalling NaN exception
+| is present. The routine below should point to the operating system handler
+| for enabled signalling NaN exceptions. The exception stack frame is a signalling NaN
+| stack frame. The FP state frame holds the source operand of the faulting
+| instruction.
+|
+| The sample routine below simply clears the exception status bit and
+| branches to the trap handler.
+|
+ .global _060_real_snan
+_060_real_snan:
+ fsave -(%sp)
+ move.w #0x6000,0x2(%sp)
+ frestore (%sp)+
+ bral trap | jump to trap handler
+
+|
+| _060_real_dz():
+|
+| This is the exit point for the 060FPSP when an enabled divide-by-zero exception
+| is present. The routine below should point to the operating system handler
+| for enabled divide-by-zero exceptions. The exception stack frame is a divide-by-zero
+| stack frame. The FP state frame holds the source operand of the faulting
+| instruction.
+|
+| The sample routine below simply clears the exception status bit and
+| branches to the trap handler.
+|
+ .global _060_real_dz
+_060_real_dz:
+ fsave -(%sp)
+ move.w #0x6000,0x2(%sp)
+ frestore (%sp)+
+ bral trap | jump to trap handler
+
+|
+| _060_real_inex():
+|
+| This is the exit point for the 060FPSP when an enabled inexact exception
+| is present. The routine below should point to the operating system handler
+| for enabled inexact exceptions. The exception stack frame is an inexact
+| stack frame. The FP state frame holds the source operand of the faulting
+| instruction.
+|
+| The sample routine below simply clears the exception status bit and
+| branches to the trap handler.
+|
+ .global _060_real_inex
+_060_real_inex:
+ fsave -(%sp)
+ move.w #0x6000,0x2(%sp)
+ frestore (%sp)+
+ bral trap | jump to trap handler
+
+|
+| _060_real_bsun():
+|
+| This is the exit point for the 060FPSP when an enabled bsun exception
+| is present. The routine below should point to the operating system handler
+| for enabled bsun exceptions. The exception stack frame is a bsun
+| stack frame.
+|
+| The sample routine below clears the NaN bit in the FPSR and branches to
+| the trap handler. (Returning with a plain "rte" instead would re-execute
+| the instruction that caused the bsun, but with the NaN FPSR bit cleared.)
+|
+ .global _060_real_bsun
+_060_real_bsun:
+| fsave -(%sp)
+
+ fmove.l %fpsr,-(%sp)
+ andi.b #0xfe,(%sp)
+ fmove.l (%sp)+,%fpsr
+
+ bral trap | jump to trap handler
+
+|
+| _060_real_fline():
+|
+| This is the exit point for the 060FPSP when an F-Line Illegal exception is
+| encountered. Three different types of exceptions can enter the F-Line exception
+| vector number 11: FP Unimplemented Instructions, FP implemented instructions when
+| the FPU is disabled, and F-Line Illegal instructions. The 060FPSP module
+| _fpsp_fline() distinguishes between the three and acts appropriately. F-Line
+| Illegals branch here.
+|
+ .global _060_real_fline
+_060_real_fline:
+ bral trap | jump to trap handler
+
+|
+| _060_real_fpu_disabled():
+|
+| This is the exit point for the 060FPSP when an FPU disabled exception is
+| encountered. Three different types of exceptions can enter the F-Line exception
+| vector number 11: FP Unimplemented Instructions, FP implemented instructions when
+| the FPU is disabled, and F-Line Illegal instructions. The 060FPSP module
+| _fpsp_fline() distinguishes between the three and acts appropriately. FPU disabled
+| exceptions branch here.
+|
+| The sample code below enables the FPU, sets the PC field in the exception stack
+| frame to the PC of the instruction causing the exception, and does an "rte".
+| The execution of the instruction then proceeds with an enabled floating-point
+| unit.
+|
+ .global _060_real_fpu_disabled
+_060_real_fpu_disabled:
+	move.l		%d0,-(%sp)	| enable the fpu
+ .long 0x4E7A0808 |movec pcr,%d0
+ bclr #0x1,%d0
+ .long 0x4E7B0808 |movec %d0,pcr
+ move.l (%sp)+,%d0
+
+ move.l 0xc(%sp),0x2(%sp) | set "Current PC"
+ rte
+
+|
+| _060_real_trap():
+|
+| This is the exit point for the 060FPSP when an emulated "ftrapcc" instruction
+| discovers that the trap condition is true and it should branch to the operating
+| system handler for the trap exception vector number 7.
+|
+| The sample code below simply branches to the trap handler.
+|
+ .global _060_real_trap
+_060_real_trap:
+ bral trap | jump to trap handler
+
+|############################################################################
+
+|#################################
+| (2) EXAMPLE PACKAGE ENTRY CODE #
+|#################################
+
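+| Note: the call-out table in section (3) below occupies 0x80 (128) bytes,
+| so the package's own entry points begin at _FP_CALL_TOP+0x80; each "bra.l"
+| below adds that handler's offset within the package.
+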
+ .global _060_fpsp_snan
+_060_fpsp_snan:
+ bra.l _FP_CALL_TOP+0x80+0x00
+
+ .global _060_fpsp_operr
+_060_fpsp_operr:
+ bra.l _FP_CALL_TOP+0x80+0x08
+
+ .global _060_fpsp_ovfl
+_060_fpsp_ovfl:
+ bra.l _FP_CALL_TOP+0x80+0x10
+
+ .global _060_fpsp_unfl
+_060_fpsp_unfl:
+ bra.l _FP_CALL_TOP+0x80+0x18
+
+ .global _060_fpsp_dz
+_060_fpsp_dz:
+ bra.l _FP_CALL_TOP+0x80+0x20
+
+ .global _060_fpsp_inex
+_060_fpsp_inex:
+ bra.l _FP_CALL_TOP+0x80+0x28
+
+ .global _060_fpsp_fline
+_060_fpsp_fline:
+ bra.l _FP_CALL_TOP+0x80+0x30
+
+ .global _060_fpsp_unsupp
+_060_fpsp_unsupp:
+ bra.l _FP_CALL_TOP+0x80+0x38
+
+ .global _060_fpsp_effadd
+_060_fpsp_effadd:
+ bra.l _FP_CALL_TOP+0x80+0x40
+
+|############################################################################
+
+|###############################
+| (3) EXAMPLE CALL-OUT SECTION #
+|###############################
+
+| The size of this section MUST be 128 bytes!!!
+
+_FP_CALL_TOP:
+ .long _060_real_bsun - _FP_CALL_TOP
+ .long _060_real_snan - _FP_CALL_TOP
+ .long _060_real_operr - _FP_CALL_TOP
+ .long _060_real_ovfl - _FP_CALL_TOP
+ .long _060_real_unfl - _FP_CALL_TOP
+ .long _060_real_dz - _FP_CALL_TOP
+ .long _060_real_inex - _FP_CALL_TOP
+ .long _060_real_fline - _FP_CALL_TOP
+ .long _060_real_fpu_disabled - _FP_CALL_TOP
+ .long _060_real_trap - _FP_CALL_TOP
+ .long _060_real_trace - _FP_CALL_TOP
+ .long _060_real_access - _FP_CALL_TOP
+ .long _060_fpsp_done - _FP_CALL_TOP
+
+ .long 0x00000000, 0x00000000, 0x00000000
+
+ .long _060_imem_read - _FP_CALL_TOP
+ .long _060_dmem_read - _FP_CALL_TOP
+ .long _060_dmem_write - _FP_CALL_TOP
+ .long _060_imem_read_word - _FP_CALL_TOP
+ .long _060_imem_read_long - _FP_CALL_TOP
+ .long _060_dmem_read_byte - _FP_CALL_TOP
+ .long _060_dmem_read_word - _FP_CALL_TOP
+ .long _060_dmem_read_long - _FP_CALL_TOP
+ .long _060_dmem_write_byte - _FP_CALL_TOP
+ .long _060_dmem_write_word - _FP_CALL_TOP
+ .long _060_dmem_write_long - _FP_CALL_TOP
+
+ .long 0x00000000
+
+ .long 0x00000000, 0x00000000, 0x00000000, 0x00000000
+
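+| Sanity check: the table above is exactly 32 longwords (13 call-outs,
+| 3 reserved, 11 memory-access call-outs, 5 reserved) = 128 bytes. A
+| hypothetical assembly-time check, assuming GNU as ".if"/".err" support
+| and an "_FP_CALL_END" label placed here (both illustrative, not part of
+| the package):
+|
+|_FP_CALL_END:
+|	.if	(_FP_CALL_END - _FP_CALL_TOP) != 128
+|	.err				| call-out table must be 128 bytes
+|	.endif
+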
+|############################################################################
+
+| 060 FPSP KERNEL PACKAGE NEEDS TO GO HERE!!!
+
+#include "fpsp.sa"
diff --git a/arch/m68k/ifpsp060/ftest.sa b/arch/m68k/ifpsp060/ftest.sa
new file mode 100644
index 00000000000..b365bc2fdec
--- /dev/null
+++ b/arch/m68k/ifpsp060/ftest.sa
@@ -0,0 +1,371 @@
+ dc.l $60ff0000,$00d40000,$60ff0000,$016c0000
+ dc.l $60ff0000,$01a80000,$54657374,$696e6720
+ dc.l $36383036,$30204650,$53502073,$74617274
+ dc.l $65643a0a,$00546573,$74696e67,$20363830
+ dc.l $36302046,$50535020,$756e696d,$706c656d
+ dc.l $656e7465,$6420696e,$73747275,$6374696f
+ dc.l $6e207374,$61727465,$643a0a00,$54657374
+ dc.l $696e6720,$36383036,$30204650,$53502065
+ dc.l $78636570,$74696f6e,$20656e61,$626c6564
+ dc.l $20737461,$72746564,$3a0a0070,$61737365
+ dc.l $640a0020,$6661696c,$65640a00,$4a80660e
+ dc.l $487affe9,$61ff0000,$1642588f,$4e752f01
+ dc.l $61ff0000,$164c588f,$487affd9,$61ff0000
+ dc.l $162a588f,$4e754e56,$fe8048e7,$3f3cf227
+ dc.l $e0ff487a,$ff3461ff,$00001610,$588f42ae
+ dc.l $fea0487b,$01700000,$058061ff,$000015fc
+ dc.l $588f61ff,$00000588,$61ffffff,$ffa242ae
+ dc.l $fea0487b,$01700000,$126c61ff,$000015dc
+ dc.l $588f61ff,$00001280,$61ffffff,$ff8242ae
+ dc.l $fea0487b,$01700000,$0b6461ff,$000015bc
+ dc.l $61ff0000,$0b7261ff,$ffffff64,$42aefea0
+ dc.l $487b0170,$00000de2,$61ff0000,$159e61ff
+ dc.l $00000df0,$61ffffff,$ff464cdf,$3cfcf21f
+ dc.l $d0ff4e5e,$4e754e56,$fe8048e7,$3f3cf227
+ dc.l $e0ff487a,$feb161ff,$00001570,$588f42ae
+ dc.l $fea0487b,$01700000,$00fe61ff,$0000155c
+ dc.l $588f61ff,$00000110,$61ffffff,$ff024cdf
+ dc.l $3cfcf21f,$d0ff4e5e,$4e754e56,$fe8048e7
+ dc.l $3f3cf227,$e0ff487a,$fea461ff,$0000152c
+ dc.l $588f42ae,$fea0487b,$01700000,$0f1461ff
+ dc.l $00001518,$61ff0000,$0f1a61ff,$fffffec0
+ dc.l $42aefea0,$487b0170,$00000fd2,$61ff0000
+ dc.l $14fa61ff,$00000fd8,$61ffffff,$fea242ae
+ dc.l $fea0487b,$01700000,$0b6061ff,$000014dc
+ dc.l $61ff0000,$0b6a61ff,$fffffe84,$42aefea0
+ dc.l $487b0170,$00000c22,$61ff0000,$14be61ff
+ dc.l $00000c2c,$61ffffff,$fe6642ae,$fea0487b
+ dc.l $01700000,$105661ff,$000014a0,$61ff0000
+ dc.l $105a61ff,$fffffe48,$42aefea0,$487b0170
+ dc.l $00000da2,$61ff0000,$148261ff,$00000da8
+ dc.l $61ffffff,$fe2a4cdf,$3cfcf21f,$d0ff4e5e
+ dc.l $4e750955,$6e696d70,$6c656d65,$6e746564
+ dc.l $20465020,$696e7374,$72756374,$696f6e73
+ dc.l $2e2e2e00,$52aefea0,$4cfb3fff,$01700000
+ dc.l $1390f23b,$d0ff0170,$000013c6,$f23b9c00
+ dc.l $01700000,$141c3d7c,$0000fea6,$48ee7fff
+ dc.l $ff80f22e,$f0ffff20,$f22ebc00,$feb42d7c
+ dc.l $40000000,$fe802d7c,$c90fdaa2,$fe842d7c
+ dc.l $2168c235,$fe8844fc,$0000f22e,$480efe80
+ dc.l $42eefea4,$48ee7fff,$ffc0f22e,$f0fffec0
+ dc.l $f22ebc00,$fea82d7c,$bfbf0000,$ff202d7c
+ dc.l $80000000,$ff242d7c,$00000000,$ff282d7c
+ dc.l $08000208,$feb841fa,$ffc22d48,$febc61ff
+ dc.l $00001288,$4a0066ff,$000012ae,$61ff0000
+ dc.l $12b04a00,$66ff0000,$12a052ae,$fea04cfb
+ dc.l $3fff0170,$000012da,$f23bd0ff,$01700000
+ dc.l $1310f23b,$9c000170,$00001366,$3d7c0000
+ dc.l $fea648ee,$7fffff80,$f22ef0ff,$ff20f22e
+ dc.l $bc00feb4,$2d7c3ffe,$0000fe80,$2d7cc90f
+ dc.l $daa2fe84,$2d7c2168,$c235fe88,$44fc0000
+ dc.l $f22e480f,$fe8042ee,$fea448ee,$7fffffc0
+ dc.l $f22ef0ff,$fec0f22e,$bc00fea8,$2d7c3fff
+ dc.l $0000ff20,$2d7c8000,$0000ff24,$2d7c0000
+ dc.l $0000ff28,$2d7c0000,$0208feb8,$41faffc2
+ dc.l $2d48febc,$61ff0000,$11d24a00,$66ff0000
+ dc.l $11f861ff,$000011fa,$4a0066ff,$000011ea
+ dc.l $52aefea0,$4cfb3fff,$01700000,$1224f23b
+ dc.l $d0ff0170,$0000125a,$f23b9c00,$01700000
+ dc.l $12b03d7c,$0000fea6,$48ee7fff,$ff80f22e
+ dc.l $f0ffff20,$f22ebc00,$feb444fc,$0000f200
+ dc.l $5c3142ee,$fea448ee,$7fffffc0,$f22ef0ff
+ dc.l $fec0f22e,$bc00fea8,$2d7c4000,$0000ff20
+ dc.l $2d7c935d,$8dddff24,$2d7caaa8,$ac17ff28
+ dc.l $2d7c0000,$0208feb8,$41faffc4,$2d48febc
+ dc.l $61ff0000,$11364a00,$66ff0000,$115c61ff
+ dc.l $0000115e,$4a0066ff,$0000114e,$52aefea0
+ dc.l $4cfb3fff,$01700000,$1188f23b,$d0ff0170
+ dc.l $000011be,$f23b9c00,$01700000,$1214f23c
+ dc.l $88000f00,$00007e00,$3d7c0000,$fea648ee
+ dc.l $7fffff80,$f22ef0ff,$ff20f22e,$bc00feb4
+ dc.l $44fc0000,$f2470012,$42eefea4,$48ee7fff
+ dc.l $ffc0f22e,$f0fffec0,$f22ebc00,$fea82d7c
+ dc.l $0f008080,$feb841fa,$ffdc2d48,$febc61ff
+ dc.l $000010a8,$4a0066ff,$000010ce,$61ff0000
+ dc.l $10d04a00,$66ff0000,$10c052ae,$fea04cfb
+ dc.l $3fff0170,$000010fa,$f23bd0ff,$01700000
+ dc.l $1130f23b,$9c000170,$00001186,$f23c8800
+ dc.l $0f000000,$7e023d7c,$0000fea6,$48ee7fff
+ dc.l $ff80f22e,$f0ffff20,$f22ebc00,$feb444fc
+ dc.l $0000f24f,$0012fffc,$42eefea4,$48ee7fff
+ dc.l $ffc0f22e,$f0fffec0,$f22ebc00,$fea83d7c
+ dc.l $ffffff9e,$2d7c0f00,$8080feb8,$41faffd4
+ dc.l $2d48febc,$61ff0000,$10124a00,$66ff0000
+ dc.l $103861ff,$0000103a,$4a0066ff,$0000102a
+ dc.l $52aefea0,$4cfb3fff,$01700000,$1064f23b
+ dc.l $d0ff0170,$0000109a,$f23b9c00,$01700000
+ dc.l $10f0f23c,$88000f00,$00003d7c,$0000fea6
+ dc.l $48ee7fff,$ff80f22e,$f0ffff20,$f22ebc00
+ dc.l $feb444fc,$0000f27b,$0012abcd,$ef0142ee
+ dc.l $fea448ee,$7fffffc0,$f22ef0ff,$fec0f22e
+ dc.l $bc00fea8,$2d7c0f00,$8080feb8,$41faffd8
+ dc.l $2d48febc,$61ff0000,$0f824a00,$66ff0000
+ dc.l $0fa861ff,$00000faa,$4a0066ff,$00000f9a
+ dc.l $42804e75,$09556e69,$6d706c65,$6d656e74
+ dc.l $6564203c,$65613e2e,$2e2e0000,$52aefea0
+ dc.l $4cfb3fff,$01700000,$0fb8f23b,$d0ff0170
+ dc.l $00000fee,$f23b9c00,$01700000,$10443d7c
+ dc.l $0000fea6,$48ee7fff,$ff80f22e,$f0ffff20
+ dc.l $f22ebc00,$feb4f23c,$58000002,$44fc0000
+ dc.l $f23c4823,$c0000000,$80000000,$00000000
+ dc.l $42eefea4,$48ee7fff,$ffc0f22e,$f0fffec0
+ dc.l $f22ebc00,$fea82d7c,$c0010000,$ff202d7c
+ dc.l $80000000,$ff242d7c,$00000000,$ff282d7c
+ dc.l $08000000,$feb841fa,$ffb82d48,$febc61ff
+ dc.l $00000eb8,$4a0066ff,$00000ede,$61ff0000
+ dc.l $0ee04a00,$66ff0000,$0ed052ae,$fea04cfb
+ dc.l $3fff0170,$00000f0a,$f23bd0ff,$01700000
+ dc.l $0f40f23b,$9c000170,$00000f96,$3d7c0000
+ dc.l $fea648ee,$7fffff80,$f22ef0ff,$ff20f22e
+ dc.l $bc00feb4,$44fc0000,$f23c4c18,$c1230001
+ dc.l $23456789,$12345678,$42eefea4,$48ee7fff
+ dc.l $ffc0f22e,$f0fffec0,$f22ebc00,$fea82d7c
+ dc.l $3e660000,$ff202d7c,$d0ed23e8,$ff242d7c
+ dc.l $d14035bc,$ff282d7c,$00000108,$feb841fa
+ dc.l $ffb82d48,$febc61ff,$00000e10,$4a0066ff
+ dc.l $00000e36,$61ff0000,$0e384a00,$66ff0000
+ dc.l $0e2852ae,$fea04cfb,$3fff0170,$00000e62
+ dc.l $f23bd0ff,$01700000,$0e98f23b,$9c000170
+ dc.l $00000eee,$3d7c0000,$fea644fc,$000048ee
+ dc.l $7fffff80,$f22ef0ff,$ff20f22e,$bc00feb4
+ dc.l $f23c9800,$ffffffff,$ffffffff,$42eefea4
+ dc.l $48ee7fff,$ffc0f22e,$f0fffec0,$f22ebc00
+ dc.l $fea82d7c,$0000fff0,$feb42d7c,$0ffffff8
+ dc.l $feb861ff,$00000d84,$4a0066ff,$00000daa
+ dc.l $61ff0000,$0dac4a00,$66ff0000,$0d9c52ae
+ dc.l $fea04cfb,$3fff0170,$00000dd6,$f23bd0ff
+ dc.l $01700000,$0e0cf23b,$9c000170,$00000e62
+ dc.l $3d7c0000,$fea644fc,$000048ee,$7fffff80
+ dc.l $f22ef0ff,$ff20f22e,$bc00feb4,$f23c9400
+ dc.l $ffffffff,$ffffffff,$42eefea4,$48ee7fff
+ dc.l $ffc0f22e,$f0fffec0,$f22ebc00,$fea82d7c
+ dc.l $0000fff0,$feb42d7c,$ffffffff,$febc61ff
+ dc.l $00000cf8,$4a0066ff,$00000d1e,$61ff0000
+ dc.l $0d204a00,$66ff0000,$0d1052ae,$fea04cfb
+ dc.l $3fff0170,$00000d4a,$f23bd0ff,$01700000
+ dc.l $0d80f23b,$9c000170,$00000dd6,$3d7c0000
+ dc.l $fea644fc,$000048ee,$7fffff80,$f22ef0ff
+ dc.l $ff20f22e,$bc00feb4,$f23c8c00,$ffffffff
+ dc.l $ffffffff,$42eefea4,$48ee7fff,$ffc0f22e
+ dc.l $f0fffec0,$f22ebc00,$fea82d7c,$0ffffff8
+ dc.l $feb82d7c,$ffffffff,$febc61ff,$00000c6c
+ dc.l $4a0066ff,$00000c92,$61ff0000,$0c944a00
+ dc.l $66ff0000,$0c8452ae,$fea04cfb,$3fff0170
+ dc.l $00000cbe,$f23bd0ff,$01700000,$0cf4f23b
+ dc.l $9c000170,$00000d4a,$3d7c0000,$fea644fc
+ dc.l $000048ee,$7fffff80,$f22ef0ff,$ff20f22e
+ dc.l $bc00feb4,$f23c9c00,$ffffffff,$ffffffff
+ dc.l $ffffffff,$42eefea4,$48ee7fff,$ffc0f22e
+ dc.l $f0fffec0,$f22ebc00,$fea82d7c,$0000fff0
+ dc.l $feb42d7c,$0ffffff8,$feb82d7c,$ffffffff
+ dc.l $febc61ff,$00000bd4,$4a0066ff,$00000bfa
+ dc.l $61ff0000,$0bfc4a00,$66ff0000,$0bec52ae
+ dc.l $fea04cfb,$3fff0170,$00000c26,$f23bd0ff
+ dc.l $01700000,$0c5cf23b,$9c000170,$00000cb2
+ dc.l $f23c5800,$0001f23c,$58800002,$f23c5900
+ dc.l $0003f23c,$59800004,$f23c5a00,$0005f23c
+ dc.l $5a800006,$f23c5b00,$0007f23c,$5b800008
+ dc.l $f23c8400,$00000000,$70aa3d7c,$0000fea6
+ dc.l $48eeffff,$ff80f22e,$bc00feb4,$f22ef0ff
+ dc.l $ff2044fc,$0000f227,$e80042ee,$fea4f22e
+ dc.l $bc00fea8,$f23c4480,$7f800000,$f23c4580
+ dc.l $7f800000,$f23c4680,$7f800000,$f23c4780
+ dc.l $7f800000,$f21f4880,$f21f4980,$f21f4a80
+ dc.l $f21f4b80,$48eeffff,$ffc0f22e,$f0fffec0
+ dc.l $61ff0000,$0af64a00,$66ff0000,$0b1c61ff
+ dc.l $00000b1e,$4a0066ff,$00000b0e,$52aefea0
+ dc.l $4cfb3fff,$01700000,$0b48f23b,$d0ff0170
+ dc.l $00000b7e,$f23b9c00,$01700000,$0bd4f23c
+ dc.l $58000001,$f23c5880,$0002f23c,$59000003
+ dc.l $f23c5980,$0004f23c,$5a000005,$f23c5a80
+ dc.l $0006f23c,$5b000007,$f23c5b80,$0008f227
+ dc.l $6b00f227,$6a00f227,$6900f227,$6800f22e
+ dc.l $f0ffff20,$f23c4700,$7f800000,$f23c4600
+ dc.l $7f800000,$f23c4500,$7f800000,$f23c4400
+ dc.l $7f800000,$f23c8400,$00000000,$f23c8800
+ dc.l $00000000,$70aa3d7c,$0000fea6,$48eeffff
+ dc.l $ff80f22e,$bc00feb4,$44fc0000,$f21fd800
+ dc.l $42eefea4,$f22ebc00,$fea848ee,$ffffffc0
+ dc.l $f22ef0ff,$fec061ff,$00000a10,$4a0066ff
+ dc.l $00000a36,$61ff0000,$0a384a00,$66ff0000
+ dc.l $0a2852ae,$fea04cfb,$3fff0170,$00000a62
+ dc.l $f23bd0ff,$01700000,$0a98f23b,$9c000170
+ dc.l $00000aee,$f23c5800,$0001f23c,$58800002
+ dc.l $f23c5900,$0003f23c,$59800004,$f23c5a00
+ dc.l $0005f23c,$5a800006,$f23c5b00,$0007f23c
+ dc.l $5b800008,$f23c8400,$00000000,$203cffff
+ dc.l $ff003d7c,$0000fea6,$48eeffff,$ff80f22e
+ dc.l $bc00feb4,$f22ef0ff,$ff2044fc,$0000f227
+ dc.l $e80042ee,$fea4f22e,$bc00fea8,$48eeffff
+ dc.l $ffc0f22e,$f0fffec0,$61ff0000,$095e4a00
+ dc.l $66ff0000,$098461ff,$00000986,$4a0066ff
+ dc.l $00000976,$42804e75,$094e6f6e,$2d6d6173
+ dc.l $6b61626c,$65206f76,$6572666c,$6f772e2e
+ dc.l $2e0051fc,$52aefea0,$4cfb3fff,$01700000
+ dc.l $0990f23b,$d0ff0170,$000009c6,$f23b9c00
+ dc.l $01700000,$0a1c3d7c,$0000fea6,$48ee7fff
+ dc.l $ff80f22e,$f0ffff20,$f22ebc00,$feb4f23c
+ dc.l $58000002,$2d7c7ffe,$0000fe80,$2d7c8000
+ dc.l $0000fe84,$2d7c0000,$0000fe88,$44fc0000
+ dc.l $f22e4823,$fe8042ee,$fea448ee,$7fffffc0
+ dc.l $f22ef0ff,$fec0f22e,$bc00fea8,$2d7c7fff
+ dc.l $0000ff20,$2d7c0000,$0000ff24,$2d7c0000
+ dc.l $0000ff28,$2d7c0200,$1048feb8,$41faffc2
+ dc.l $2d48febc,$61ff0000,$08824a00,$66ff0000
+ dc.l $08a861ff,$000008aa,$4a0066ff,$0000089a
+ dc.l $42804e75,$09456e61,$626c6564,$206f7665
+ dc.l $72666c6f,$772e2e2e,$000051fc,$52aefea0
+ dc.l $4cfb3fff,$01700000,$08b8f23b,$d0ff0170
+ dc.l $000008ee,$f23b9c00,$01700000,$09443d7c
+ dc.l $0000fea6,$48ee7fff,$ff80f22e,$f0ffff20
+ dc.l $f23c9000,$00001000,$f22ebc00,$feb4f23c
+ dc.l $58000002,$2d7c7ffe,$0000fe80,$2d7c8000
+ dc.l $0000fe84,$2d7c0000,$0000fe88,$44fc0000
+ dc.l $f22e4823,$fe8042ee,$fea448ee,$7fffffc0
+ dc.l $f22ef0ff,$fec0f22e,$bc00fea8,$2d7c7fff
+ dc.l $0000ff20,$2d7c0000,$0000ff24,$2d7c0000
+ dc.l $0000ff28,$2d7c0200,$1048feb8,$41faffc2
+ dc.l $2d48febc,$61ff0000,$07a24a00,$66ff0000
+ dc.l $07c861ff,$000007ca,$4a0066ff,$000007ba
+ dc.l $42804e75,$09456e61,$626c6564,$20756e64
+ dc.l $6572666c,$6f772e2e,$2e0051fc,$52aefea0
+ dc.l $4cfb3fff,$01700000,$07d8f23b,$d0ff0170
+ dc.l $0000080e,$f23b9c00,$01700000,$08643d7c
+ dc.l $0000fea6,$48ee7fff,$ff80f22e,$f0ffff20
+ dc.l $f23c9000,$00000800,$f22ebc00,$feb42d7c
+ dc.l $00000000,$fe802d7c,$80000000,$fe842d7c
+ dc.l $00000000,$fe88f22e,$d080fe80,$44fc0000
+ dc.l $f23c5820,$000242ee,$fea448ee,$7fffffc0
+ dc.l $f22ef0ff,$fec0f22e,$bc00fea8,$2d7c0000
+ dc.l $0000ff20,$2d7c4000,$0000ff24,$2d7c0000
+ dc.l $0000ff28,$2d7c0000,$0800feb8,$41faffc2
+ dc.l $2d48febc,$61ff0000,$06c24a00,$66ff0000
+ dc.l $06e861ff,$000006ea,$4a0066ff,$000006da
+ dc.l $42804e75,$094e6f6e,$2d6d6173,$6b61626c
+ dc.l $6520756e,$64657266,$6c6f772e,$2e2e0000
+ dc.l $52aefea0,$4cfb3fff,$01700000,$06f4f23b
+ dc.l $d0ff0170,$0000072a,$f23b9c00,$01700000
+ dc.l $07803d7c,$0000fea6,$48ee7fff,$ff80f22e
+ dc.l $f0ffff20,$f22ebc00,$feb42d7c,$00000000
+ dc.l $fe802d7c,$80000000,$fe842d7c,$00000000
+ dc.l $fe88f22e,$d080fe80,$44fc0000,$f23c5820
+ dc.l $000242ee,$fea448ee,$7fffffc0,$f22ef0ff
+ dc.l $fec0f22e,$bc00fea8,$2d7c0000,$0000ff20
+ dc.l $2d7c4000,$0000ff24,$2d7c0000,$0000ff28
+ dc.l $2d7c0000,$0800feb8,$41faffc2,$2d48febc
+ dc.l $61ff0000,$05e64a00,$66ff0000,$060c61ff
+ dc.l $0000060e,$4a0066ff,$000005fe,$42804e75
+ dc.l $09456e61,$626c6564,$20696e65,$78616374
+ dc.l $2e2e2e00,$52aefea0,$4cfb3fff,$01700000
+ dc.l $0620f23b,$d0ff0170,$00000656,$f23b9c00
+ dc.l $01700000,$06ac3d7c,$0000fea6,$48ee7fff
+ dc.l $ff80f22e,$f0ffff20,$f23c9000,$00000200
+ dc.l $f22ebc00,$feb42d7c,$50000000,$fe802d7c
+ dc.l $80000000,$fe842d7c,$00000000,$fe88f22e
+ dc.l $d080fe80,$44fc0000,$f23c5822,$000242ee
+ dc.l $fea448ee,$7fffffc0,$f22ef0ff,$fec0f22e
+ dc.l $bc00fea8,$2d7c5000,$0000ff20,$2d7c8000
+ dc.l $0000ff24,$2d7c0000,$0000ff28,$2d7c0000
+ dc.l $0208feb8,$41faffc2,$2d48febc,$61ff0000
+ dc.l $050a4a00,$66ff0000,$053061ff,$00000532
+ dc.l $4a0066ff,$00000522,$42804e75,$09456e61
+ dc.l $626c6564,$20534e41,$4e2e2e2e,$000051fc
+ dc.l $52aefea0,$4cfb3fff,$01700000,$0544f23b
+ dc.l $d0ff0170,$0000057a,$f23b9c00,$01700000
+ dc.l $05d03d7c,$0000fea6,$48ee7fff,$ff80f22e
+ dc.l $f0ffff20,$f23c9000,$00004000,$f22ebc00
+ dc.l $feb42d7c,$ffff0000,$fe802d7c,$00000000
+ dc.l $fe842d7c,$00000001,$fe88f22e,$d080fe80
+ dc.l $44fc0000,$f23c5822,$000242ee,$fea448ee
+ dc.l $7fffffc0,$f22ef0ff,$fec0f22e,$bc00fea8
+ dc.l $2d7cffff,$0000ff20,$2d7c0000,$0000ff24
+ dc.l $2d7c0000,$0001ff28,$2d7c0900,$4080feb8
+ dc.l $41faffc2,$2d48febc,$61ff0000,$042e4a00
+ dc.l $66ff0000,$045461ff,$00000456,$4a0066ff
+ dc.l $00000446,$42804e75,$09456e61,$626c6564
+ dc.l $204f5045,$52522e2e,$2e0051fc,$52aefea0
+ dc.l $4cfb3fff,$01700000,$0468f23b,$d0ff0170
+ dc.l $0000049e,$f23b9c00,$01700000,$04f43d7c
+ dc.l $0000fea6,$48ee7fff,$ff80f22e,$f0ffff20
+ dc.l $f23c9000,$00002000,$f22ebc00,$feb42d7c
+ dc.l $ffff0000,$fe802d7c,$00000000,$fe842d7c
+ dc.l $00000000,$fe88f22e,$d080fe80,$44fc0000
+ dc.l $f23c4422,$7f800000,$42eefea4,$48ee7fff
+ dc.l $ffc0f22e,$f0fffec0,$f22ebc00,$fea82d7c
+ dc.l $ffff0000,$ff202d7c,$00000000,$ff242d7c
+ dc.l $00000000,$ff282d7c,$01002080,$feb841fa
+ dc.l $ffc02d48,$febc61ff,$00000350,$4a0066ff
+ dc.l $00000376,$61ff0000,$03784a00,$66ff0000
+ dc.l $03684280,$4e750945,$6e61626c,$65642044
+ dc.l $5a2e2e2e,$000051fc,$52aefea0,$4cfb3fff
+ dc.l $01700000,$038cf23b,$d0ff0170,$000003c2
+ dc.l $f23b9c00,$01700000,$04183d7c,$0000fea6
+ dc.l $48ee7fff,$ff80f22e,$f0ffff20,$f23c9000
+ dc.l $00000400,$f22ebc00,$feb42d7c,$40000000
+ dc.l $fe802d7c,$80000000,$fe842d7c,$00000000
+ dc.l $fe88f22e,$d080fe80,$44fc0000,$f23c5820
+ dc.l $000042ee,$fea448ee,$7fffffc0,$f22ef0ff
+ dc.l $fec0f22e,$bc00fea8,$2d7c4000,$0000ff20
+ dc.l $2d7c8000,$0000ff24,$2d7c0000,$0000ff28
+ dc.l $2d7c0200,$0410feb8,$41faffc2,$2d48febc
+ dc.l $61ff0000,$02764a00,$66ff0000,$029c61ff
+ dc.l $0000029e,$4a0066ff,$0000028e,$42804e75
+ dc.l $09556e69,$6d706c65,$6d656e74,$65642064
+ dc.l $61746120,$74797065,$2f666f72,$6d61742e
+ dc.l $2e2e0000,$52aefea0,$4cfb3fff,$01700000
+ dc.l $02a0f23b,$d0ff0170,$000002d6,$f23b9c00
+ dc.l $01700000,$032c3d7c,$0000fea6,$48ee7fff
+ dc.l $ff80f22e,$f0ffff20,$f22ebc00,$feb42d7c
+ dc.l $c03f0000,$fe802d7c,$00000000,$fe842d7c
+ dc.l $00000001,$fe88f23c,$58000002,$44fc0000
+ dc.l $f22e4823,$fe8042ee,$fea448ee,$7fffffc0
+ dc.l $f22ef0ff,$fec0f22e,$bc00fea8,$2d7cc001
+ dc.l $0000ff20,$2d7c8000,$0000ff24,$2d7c0000
+ dc.l $0000ff28,$2d7c0800,$0000feb8,$41faffc2
+ dc.l $2d48febc,$61ff0000,$01924a00,$66ff0000
+ dc.l $01b861ff,$000001ba,$4a0066ff,$000001aa
+ dc.l $52aefea0,$4cfb3fff,$01700000,$01e4f23b
+ dc.l $d0ff0170,$0000021a,$f23b9c00,$01700000
+ dc.l $02703d7c,$0000fea6,$48ee7fff,$ff80f22e
+ dc.l $f0ffff20,$f22ebc00,$feb42d7c,$80000000
+ dc.l $fe802d7c,$01000000,$fe842d7c,$00000000
+ dc.l $fe88f23c,$40007fff,$ffff44fc,$0000f22e
+ dc.l $4823fe80,$42eefea4,$48ee7fff,$ffc0f22e
+ dc.l $f0fffec0,$f22ebc00,$fea82d7c,$80170000
+ dc.l $ff202d7c,$fffffffe,$ff242d7c,$00000000
+ dc.l $ff282d7c,$08000000,$feb841fa,$ffc22d48
+ dc.l $febc61ff,$000000d4,$4a0066ff,$000000fa
+ dc.l $61ff0000,$00fc4a00,$66ff0000,$00ec52ae
+ dc.l $fea04cfb,$3fff0170,$00000126,$f23bd0ff
+ dc.l $01700000,$015cf23b,$9c000170,$000001b2
+ dc.l $3d7c0000,$fea648ee,$7fffff80,$f22ef0ff
+ dc.l $ff20f22e,$bc00feb4,$2d7cc123,$0001fe80
+ dc.l $2d7c2345,$6789fe84,$2d7c1234,$5678fe88
+ dc.l $44fc0000,$f22e4c18,$fe8042ee,$fea448ee
+ dc.l $7fffffc0,$f22ef0ff,$fec0f22e,$bc00fea8
+ dc.l $2d7c3e66,$0000ff20,$2d7cd0ed,$23e8ff24
+ dc.l $2d7cd140,$35bcff28,$2d7c0000,$0108feb8
+ dc.l $41faffc2,$2d48febc,$61ff0000,$001e4a00
+ dc.l $66ff0000,$004461ff,$00000046,$4a0066ff
+ dc.l $00000036,$42804e75,$41eeff80,$43eeffc0
+ dc.l $700eb189,$66ff0000,$001c51c8,$fff6302e
+ dc.l $fea6322e,$fea4b041,$66ff0000,$00084280
+ dc.l $4e757001,$4e75222e,$fea07001,$4e7541ee
+ dc.l $ff2043ee,$fec07017,$b18966ff,$0000002c
+ dc.l $51c8fff6,$41eefeb4,$43eefea8,$b18966ff
+ dc.l $00000018,$b18966ff,$00000010,$b18966ff
+ dc.l $00000008,$42804e75,$70014e75,$acacacac
+ dc.l $acacacac,$acacacac,$acacacac,$acacacac
+ dc.l $acacacac,$acacacac,$acacacac,$acacacac
+ dc.l $acacacac,$acacacac,$acacacac,$acacacac
+ dc.l $acacacac,$acacacac,$acacacac,$7fff0000
+ dc.l $ffffffff,$ffffffff,$7fff0000,$ffffffff
+ dc.l $ffffffff,$7fff0000,$ffffffff,$ffffffff
+ dc.l $7fff0000,$ffffffff,$ffffffff,$7fff0000
+ dc.l $ffffffff,$ffffffff,$7fff0000,$ffffffff
+ dc.l $ffffffff,$7fff0000,$ffffffff,$ffffffff
+ dc.l $7fff0000,$ffffffff,$ffffffff,$00000000
+ dc.l $00000000,$00000000,$2f00203a,$e884487b
+ dc.l $0930ffff,$e880202f,$00044e74,$00042f00
+ dc.l $203ae872,$487b0930,$ffffe86a,$202f0004
+ dc.l $4e740004,$00000000,$00000000,$00000000
diff --git a/arch/m68k/ifpsp060/ilsp.doc b/arch/m68k/ifpsp060/ilsp.doc
new file mode 100644
index 00000000000..f6fae6d900a
--- /dev/null
+++ b/arch/m68k/ifpsp060/ilsp.doc
@@ -0,0 +1,150 @@
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+MOTOROLA MICROPROCESSOR & MEMORY TECHNOLOGY GROUP
+M68000 Hi-Performance Microprocessor Division
+M68060 Software Package
+Production Release P1.00 -- October 10, 1994
+
+M68060 Software Package Copyright © 1993, 1994 Motorola Inc. All rights reserved.
+
+THE SOFTWARE is provided on an "AS IS" basis and without warranty.
+To the maximum extent permitted by applicable law,
+MOTOROLA DISCLAIMS ALL WARRANTIES WHETHER EXPRESS OR IMPLIED,
+INCLUDING IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
+and any warranty against infringement with regard to the SOFTWARE
+(INCLUDING ANY MODIFIED VERSIONS THEREOF) and any accompanying written materials.
+
+To the maximum extent permitted by applicable law,
+IN NO EVENT SHALL MOTOROLA BE LIABLE FOR ANY DAMAGES WHATSOEVER
+(INCLUDING WITHOUT LIMITATION, DAMAGES FOR LOSS OF BUSINESS PROFITS,
+BUSINESS INTERRUPTION, LOSS OF BUSINESS INFORMATION, OR OTHER PECUNIARY LOSS)
+ARISING OF THE USE OR INABILITY TO USE THE SOFTWARE.
+Motorola assumes no responsibility for the maintenance and support of the SOFTWARE.
+
+You are hereby granted a copyright license to use, modify, and distribute the SOFTWARE
+so long as this entire notice is retained without alteration in any modified and/or
+redistributed versions, and that such modified versions are clearly identified as such.
+No licenses are granted by implication, estoppel or otherwise under any patents
+or trademarks of Motorola, Inc.
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+68060 INTEGER SOFTWARE PACKAGE (Library version)
+-------------------------------------------------
+
+The file ilsp.s contains the "Library version" of the
+68060 Integer Software Package. Routines included in this
+module can be used to emulate 64-bit divide and multiply,
+and the "cmp2" instruction. These instructions are not
+implemented in hardware on the 68060 and normally take
+exception vector #61 "Unimplemented Integer Instruction".
+
+By re-compiling a program that uses these instructions so that it
+makes subroutine calls in place of the unimplemented instructions,
+the program can avoid the overhead associated with taking the
+exception.
+
+Release file format:
+--------------------
+The file ilsp.sa is essentially a hexadecimal image of the
+release package. This is the ONLY format which will be supported.
+The hex image was created by assembling the source code and
+then converting the resulting binary output image into an
+ASCII text file. The hexadecimal numbers are listed
+using the Motorola Assembly Syntax assembler directive "dc.l"
+(define constant longword). The file can be converted to other
+assembly syntaxes by using any word processor with a global
+search and replace function.
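+
+For illustration, a line of ilsp.sa such as
+
+	dc.l	$60ff0000,$01fe0000
+
+would, after such a search and replace, read as follows in the
+".long" syntax used by the isp.sa image elsewhere in this package:
+
+	.long	0x60ff0000,0x01fe0000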
+
+To assist in assembling and linking this module with other modules,
+the installer should add a symbolic label to the top of the file.
+This will allow calling routines to access the entry points
+of this package.
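+
+A minimal sketch of such a label (the ".global" directive spelling is
+an assumption and varies by assembler; the name matches the entry-point
+table at the end of this file):
+
+	.global	_060ILSP_TOP
+_060ILSP_TOP:
+	dc.l	$60ff0000,$01fe0000,$60ff0000,$02080000
+
+with the remaining "dc.l" lines of ilsp.sa following unchanged.
+References such as "bsr.l _060ILSP_TOP+0x18" then resolve at link time.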
+
+The source code ilsp.s has also been included but only for
+documentation purposes.
+
+Release file structure:
+-----------------------
+The file ilsp.sa contains an "Entry-Point" section and a
+code section. The ILSP has no "Call-Out" section. The first section
+is the "Entry-Point" section. In order to access a function in the
+package, a program must "bsr" or "jsr" to the location listed
+below in "68060ILSP Entry Points" that corresponds to the desired
+function. A branch instruction located at the selected entry point
+within the package will then enter the correct emulation code routine.
+
+The entry point addresses at the beginning of the package will remain
+fixed so that a program calling the routines will not have to be
+re-compiled with every new 68060ILSP release.
+
+For example, to use a 64-bit multiply instruction,
+do a "bsr" or "jsr" to the entry point defined in
+the "68060ILSP entry points" table below. A compiler-generated
+code sequence for an unsigned multiply could look like:
+
+# mulu.l <ea>,Dh:Dl
+# mulu.l _multiplier,%d1:%d0
+
+ subq.l &0x8,%sp # make room for result on stack
+ pea (%sp) # pass: result addr on stack
+ mov.l %d0,-(%sp) # pass: multiplicand on stack
+ mov.l _multiplier,-(%sp) # pass: multiplier on stack
+	bsr.l	_060ILSP_TOP+0x18	# branch to multiply routine
+ add.l &0xc,%sp # clear arguments from stack
+ mov.l (%sp)+,%d1 # load result[63:32]
+ mov.l (%sp)+,%d0 # load result[31:0]
+
+For a divide:
+
+# divu.l <ea>,Dr:Dq
+# divu.l _divisor,%d1:%d0
+
+ subq.l &0x8,%sp # make room for result on stack
+ pea (%sp) # pass: result addr on stack
+	mov.l	%d0,-(%sp)	# pass: dividend lo on stack
+	mov.l	%d1,-(%sp)	# pass: dividend hi on stack
+	mov.l	_divisor,-(%sp)	# pass: divisor on stack
+	bsr.l	_060ILSP_TOP+0x08	# branch to divide routine
+	add.l	&0x10,%sp	# clear arguments from stack
+ mov.l (%sp)+,%d1 # load remainder
+ mov.l (%sp)+,%d0 # load quotient
+
+The library routines also return the correct condition code
+register value. If this is important, then the caller of the library
+routine must make sure that the value isn't lost while popping
+other items off of the stack.
+
+An example of using the "cmp2" instruction is as follows:
+
+# cmp2.l <ea>,Rn
+# cmp2.l _bounds,%d0
+
+ pea _bounds # pass ptr to bounds
+ mov.l %d0,-(%sp) # pass Rn
+	bsr.l	_060ILSP_TOP+0x48	# branch to "cmp2" routine
+ mov.w %cc,_tmp # save off condition codes
+ addq.l &0x8,%sp # clear arguments from stack
+
+Exception reporting:
+--------------------
+If the instruction being emulated is a divide and the source
+operand is a zero, then the library routine, as its last
+instruction, executes an implemented divide using a zero
+source operand so that an "Integer Divide-by-Zero" exception
+will be taken. Although the exception stack frame will not
+point to the correct instruction, the user will at least be able
+to record that such an event occurred if desired.
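+
+(For illustration only -- a sketch of the technique, not necessarily the
+literal instruction used by ilsp.s:
+
+	divs.w	&0x0,%d0	# implemented divide; takes the trap
+
+where the trap taken is exception vector #5, "Integer Divide-by-Zero".)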
+
+68060ILSP entry points:
+-----------------------
+_060ILSP_TOP:
+0x000: _060LSP__idivs64_
+0x008: _060LSP__idivu64_
+
+0x010: _060LSP__imuls64_
+0x018: _060LSP__imulu64_
+
+0x020: _060LSP__cmp2_Ab_
+0x028: _060LSP__cmp2_Aw_
+0x030: _060LSP__cmp2_Al_
+0x038: _060LSP__cmp2_Db_
+0x040: _060LSP__cmp2_Dw_
+0x048: _060LSP__cmp2_Dl_
diff --git a/arch/m68k/ifpsp060/ilsp.sa b/arch/m68k/ifpsp060/ilsp.sa
new file mode 100644
index 00000000000..2757d502b01
--- /dev/null
+++ b/arch/m68k/ifpsp060/ilsp.sa
@@ -0,0 +1,101 @@
+ dc.l $60ff0000,$01fe0000,$60ff0000,$02080000
+ dc.l $60ff0000,$04900000,$60ff0000,$04080000
+ dc.l $60ff0000,$051e0000,$60ff0000,$053c0000
+ dc.l $60ff0000,$055a0000,$60ff0000,$05740000
+ dc.l $60ff0000,$05940000,$60ff0000,$05b40000
+ dc.l $51fc51fc,$51fc51fc,$51fc51fc,$51fc51fc
+ dc.l $51fc51fc,$51fc51fc,$51fc51fc,$51fc51fc
+ dc.l $51fc51fc,$51fc51fc,$51fc51fc,$51fc51fc
+ dc.l $51fc51fc,$51fc51fc,$51fc51fc,$51fc51fc
+ dc.l $51fc51fc,$51fc51fc,$51fc51fc,$51fc51fc
+ dc.l $51fc51fc,$51fc51fc,$51fc51fc,$51fc51fc
+ dc.l $51fc51fc,$51fc51fc,$51fc51fc,$51fc51fc
+ dc.l $51fc51fc,$51fc51fc,$51fc51fc,$51fc51fc
+ dc.l $51fc51fc,$51fc51fc,$51fc51fc,$51fc51fc
+ dc.l $51fc51fc,$51fc51fc,$51fc51fc,$51fc51fc
+ dc.l $51fc51fc,$51fc51fc,$51fc51fc,$51fc51fc
+ dc.l $51fc51fc,$51fc51fc,$51fc51fc,$51fc51fc
+ dc.l $51fc51fc,$51fc51fc,$51fc51fc,$51fc51fc
+ dc.l $51fc51fc,$51fc51fc,$51fc51fc,$51fc51fc
+ dc.l $51fc51fc,$51fc51fc,$51fc51fc,$51fc51fc
+ dc.l $51fc51fc,$51fc51fc,$51fc51fc,$51fc51fc
+ dc.l $51fc51fc,$51fc51fc,$51fc51fc,$51fc51fc
+ dc.l $51fc51fc,$51fc51fc,$51fc51fc,$51fc51fc
+ dc.l $51fc51fc,$51fc51fc,$51fc51fc,$51fc51fc
+ dc.l $51fc51fc,$51fc51fc,$51fc51fc,$51fc51fc
+ dc.l $51fc51fc,$51fc51fc,$51fc51fc,$51fc51fc
+ dc.l $51fc51fc,$51fc51fc,$51fc51fc,$51fc51fc
+ dc.l $51fc51fc,$51fc51fc,$51fc51fc,$51fc51fc
+ dc.l $51fc51fc,$51fc51fc,$51fc51fc,$51fc51fc
+ dc.l $51fc51fc,$51fc51fc,$51fc51fc,$51fc51fc
+ dc.l $51fc51fc,$51fc51fc,$51fc51fc,$51fc51fc
+ dc.l $51fc51fc,$51fc51fc,$51fc51fc,$51fc51fc
+ dc.l $4e56fff0,$48e73f00,$42eefff0,$50eeffff
+ dc.l $60104e56,$fff048e7,$3f0042ee,$fff051ee
+ dc.l $ffff2e2e,$00086700,$00ae2a2e,$000c2c2e
+ dc.l $00104a2e,$ffff671a,$4a875dee,$fffe6a02
+ dc.l $44874a85,$5deefffd,$6a0844fc,$00004086
+ dc.l $40854a85,$66164a86,$67000046,$be866306
+ dc.l $cb466000,$00124c47,$6005600a,$be85634c
+ dc.l $61ff0000,$00864a2e,$ffff6724,$4a2efffd
+ dc.l $67024485,$102efffe,$b12efffd,$670c0c86
+ dc.l $80000000,$62264486,$60060806,$001f661c
+ dc.l $026e0010,$fff044ee,$fff04a86,$48f60060
+ dc.l $01610014,$4cdf00fc,$4e5e4e75,$2a2e000c
+ dc.l $2c2e0010,$026e001c,$fff0006e,$0002fff0
+ dc.l $44eefff0,$60d62dae,$000c0161,$00142dae
+ dc.l $00100162,$00140004,$44eefff0,$4cdf00fc
+ dc.l $4e5e80fc,$00004e75,$0c870000,$ffff621e
+ dc.l $42814845,$48463a06,$8ac73205,$48463a06
+ dc.l $8ac74841,$32054245,$48452c01,$4e7542ae
+ dc.l $fff8422e,$fffc4281,$0807001f,$660e52ae
+ dc.l $fff8e38f,$e38ee395,$6000ffee,$26072405
+ dc.l $48424843,$b4436606,$323cffff,$600a2205
+ dc.l $82c30281,$0000ffff,$2f064246,$48462607
+ dc.l $2401c4c7,$4843c6c1,$28059883,$48443004
+ dc.l $38064a40,$6600000a,$b4846304,$538160de
+ dc.l $2f052c01,$48462a07,$61ff0000,$006a2405
+ dc.l $26062a1f,$2c1f9c83,$9b8264ff,$0000001a
+ dc.l $53814282,$26074843,$4243dc83,$db822607
+ dc.l $42434843,$da834a2e,$fffc6616,$3d41fff4
+ dc.l $42814845,$48463a06,$424650ee,$fffc6000
+ dc.l $ff6c3d41,$fff63c05,$48464845,$2e2efff8
+ dc.l $670a5387,$e28de296,$51cffffa,$2a062c2e
+ dc.l $fff44e75,$24062606,$28054843,$4844ccc5
+ dc.l $cac3c4c4,$c6c44284,$4846dc45,$d744dc42
+ dc.l $d7444846,$42454242,$48454842,$da82da83
+ dc.l $4e754e56,$fffc48e7,$380042ee,$fffc202e
+ dc.l $00086700,$005a222e,$000c6700,$00522400
+ dc.l $26002801,$48434844,$c0c1c2c3,$c4c4c6c4
+ dc.l $42844840,$d041d784,$d042d784,$48404241
+ dc.l $42424841,$4842d282,$d283382e,$fffc0204
+ dc.l $00104a81,$6a040004,$000844c4,$c34048f6
+ dc.l $00030161,$00104cdf,$001c4e5e,$4e754280
+ dc.l $4281382e,$fffc0204,$00100004,$000444c4
+ dc.l $60da4e56,$fffc48e7,$3c0042ee,$fffc202e
+ dc.l $000867da,$222e000c,$67d44205,$4a806c06
+ dc.l $44800005,$00014a81,$6c064481,$0a050001
+ dc.l $24002600,$28014843,$4844c0c1,$c2c3c4c4
+ dc.l $c6c44284,$4840d041,$d784d042,$d7844840
+ dc.l $42414242,$48414842,$d282d283,$4a056708
+ dc.l $46804681,$5280d384,$382efffc,$02040010
+ dc.l $4a816a04,$00040008,$44c4c340,$48f60003
+ dc.l $01610010,$4cdf003c,$4e5e4e75,$42804281
+ dc.l $382efffc,$02040010,$00040004,$44c460da
+ dc.l $4e56fffc,$48e73800,$42eefffc,$242e0008
+ dc.l $10360161,$000c1236,$0162000c,$000149c0
+ dc.l $49c16000,$00b84e56,$fffc48e7,$380042ee
+ dc.l $fffc242e,$00083036,$0161000c,$32360162
+ dc.l $000c0002,$48c048c1,$60000092,$4e56fffc
+ dc.l $48e73800,$42eefffc,$242e0008,$20360161
+ dc.l $000c2236,$0162000c,$00046000,$00704e56
+ dc.l $fffc48e7,$380042ee,$fffc242e,$00081036
+ dc.l $0161000c,$12360162,$000c0001,$49c049c1
+ dc.l $49c26000,$00484e56,$fffc48e7,$380042ee
+ dc.l $fffc242e,$00083036,$0161000c,$32360162
+ dc.l $000c0002,$48c048c1,$48c26000,$00204e56
+ dc.l $fffc48e7,$380042ee,$fffc242e,$00082036
+ dc.l $0161000c,$22360162,$000c0004,$948042c3
+ dc.l $02030004,$9280b282,$42c48604,$02030005
+ dc.l $382efffc,$0204001a,$880344c4,$4cdf001c
+ dc.l $4e5e4e75,$00000000,$00000000,$00000000
diff --git a/arch/m68k/ifpsp060/iskeleton.S b/arch/m68k/ifpsp060/iskeleton.S
new file mode 100644
index 00000000000..803a6ecdda8
--- /dev/null
+++ b/arch/m68k/ifpsp060/iskeleton.S
@@ -0,0 +1,349 @@
+|~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+|MOTOROLA MICROPROCESSOR & MEMORY TECHNOLOGY GROUP
+|M68000 Hi-Performance Microprocessor Division
+|M68060 Software Package
+|Production Release P1.00 -- October 10, 1994
+|
+|M68060 Software Package Copyright © 1993, 1994 Motorola Inc. All rights reserved.
+|
+|THE SOFTWARE is provided on an "AS IS" basis and without warranty.
+|To the maximum extent permitted by applicable law,
+|MOTOROLA DISCLAIMS ALL WARRANTIES WHETHER EXPRESS OR IMPLIED,
+|INCLUDING IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
+|and any warranty against infringement with regard to the SOFTWARE
+|(INCLUDING ANY MODIFIED VERSIONS THEREOF) and any accompanying written materials.
+|
+|To the maximum extent permitted by applicable law,
+|IN NO EVENT SHALL MOTOROLA BE LIABLE FOR ANY DAMAGES WHATSOEVER
+|(INCLUDING WITHOUT LIMITATION, DAMAGES FOR LOSS OF BUSINESS PROFITS,
+|BUSINESS INTERRUPTION, LOSS OF BUSINESS INFORMATION, OR OTHER PECUNIARY LOSS)
+|ARISING OF THE USE OR INABILITY TO USE THE SOFTWARE.
+|Motorola assumes no responsibility for the maintenance and support of the SOFTWARE.
+|
+|You are hereby granted a copyright license to use, modify, and distribute the SOFTWARE
+|so long as this entire notice is retained without alteration in any modified and/or
+|redistributed versions, and that such modified versions are clearly identified as such.
+|No licenses are granted by implication, estoppel or otherwise under any patents
+|or trademarks of Motorola, Inc.
+|~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+| iskeleton.s
+|
+| This file contains:
+| (1) example "Call-out"s
+| (2) example package entry code
+| (3) example "Call-out" table
+|
+
+#include <linux/linkage.h>
+#include <asm/entry.h>
+#include <asm/offsets.h>
+
+
+|################################
+| (1) EXAMPLE CALL-OUTS #
+| #
+| _060_isp_done() #
+| _060_real_chk() #
+| _060_real_divbyzero() #
+| #
+| _060_real_cas() #
+| _060_real_cas2() #
+| _060_real_lock_page() #
+| _060_real_unlock_page() #
+|################################
+
+|
+| _060_isp_done():
+|
+| This is an example main exit point for the Unimplemented Integer
+| Instruction exception handler. For a normal exit, the
+| _isp_unimp() branches to here so that the operating system
+| can do any clean-up desired. The stack frame is the
+| Unimplemented Integer Instruction stack frame with
+| the PC pointing to the instruction following the instruction
+| just emulated.
+| To simply continue execution at the next instruction, just
+| do an "rte".
+|
+| Linux/68k: If returning to user space, check for pending work
+| (signal delivery, rescheduling).
+
+ .global _060_isp_done
+_060_isp_done:
+ btst #0x5,%sp@ | supervisor bit set in saved SR?
+ beq .Lnotkern
+ rte
+.Lnotkern:
+ SAVE_ALL_INT
+ GET_CURRENT(%d0)
+ tstb %curptr@(TASK_NEEDRESCHED)
+ jne ret_from_exception | deliver signals,
+ | reschedule etc..
+ RESTORE_ALL
+
+|
+| _060_real_chk():
+|
+| This is an alternate exit point for the Unimplemented Integer
+| Instruction exception handler. If the instruction was a "chk2"
+| and the operand was out of bounds, then _isp_unimp() creates
+| a CHK exception stack frame from the Unimplemented Integer Instruction
+| stack frame and branches to this routine.
+|
+| Linux/68k: commented out test for tracing
+
+ .global _060_real_chk
+_060_real_chk:
+| tst.b (%sp) | is tracing enabled?
+| bpls real_chk_end | no
+
+|
+| CHK FRAME TRACE FRAME
+| ***************** *****************
+| * Current PC * * Current PC *
+| ***************** *****************
+| * 0x2 * 0x018 * * 0x2 * 0x024 *
+| ***************** *****************
+| * Next * * Next *
+| * PC * * PC *
+| ***************** *****************
+| * SR * * SR *
+| ***************** *****************
+|
+| move.b #0x24,0x7(%sp) | set trace vecno
+| bral _060_real_trace
+
+real_chk_end:
+ bral trap | jump to trap handler
+
+|
+| _060_real_divbyzero:
+|
+| This is an alternate exit point for the Unimplemented Integer
+| Instruction exception handler _isp_unimp(). If the instruction is a 64-bit
+| integer divide where the source operand is a zero, then _isp_unimp()
+| creates a Divide-by-zero exception stack frame from the Unimplemented
+| Integer Instruction stack frame and branches to this routine.
+|
+| Remember that a trace exception may be pending. The code below performs
+| no action associated with the divide-by-zero exception itself. If tracing
+| is enabled, then it creates a Trace exception stack frame from the
+| divide-by-zero exception stack frame and branches to the _real_trace()
+| entry point.
+|
+| Linux/68k: commented out test for tracing
+
+ .global _060_real_divbyzero
+_060_real_divbyzero:
+| tst.b (%sp) | is tracing enabled?
+| bpls real_divbyzero_end | no
+
+|
+| DIVBYZERO FRAME TRACE FRAME
+| ***************** *****************
+| * Current PC * * Current PC *
+| ***************** *****************
+| * 0x2 * 0x014 * * 0x2 * 0x024 *
+| ***************** *****************
+| * Next * * Next *
+| * PC * * PC *
+| ***************** *****************
+| * SR * * SR *
+| ***************** *****************
+|
+| move.b #0x24,0x7(%sp) | set trace vecno
+| bral _060_real_trace
+
+real_divbyzero_end:
+ bral trap | jump to trap handler
+
+|##########################
+
+|
+| _060_real_cas():
+|
+| Entry point for the selected cas emulation code implementation.
+| If the implementation provided by the 68060ISP is sufficient,
+| then this routine simply re-enters the package through _isp_cas.
+|
+ .global _060_real_cas
+_060_real_cas:
+ bral _I_CALL_TOP+0x80+0x08
+
+|
+| _060_real_cas2():
+|
+| Entry point for the selected cas2 emulation code implementation.
+| If the implementation provided by the 68060ISP is sufficient,
+| then this routine simply re-enters the package through _isp_cas2.
+|
+ .global _060_real_cas2
+_060_real_cas2:
+ bral _I_CALL_TOP+0x80+0x10
+
+|
+| _060_real_lock_page():
+|
+| Entry point for the operating system's routine to "lock" a page
+| from being paged out. This routine is needed by the cas/cas2
+| algorithms so that no page faults occur within the "core" code
+| region. Note: the routine must lock two pages if the operand
+| spans two pages.
+| NOTE: THE ROUTINE SHOULD RETURN AN FSLW VALUE IN D0 ON FAILURE
+| SO THAT THE 060SP CAN CREATE A PROPER ACCESS ERROR FRAME.
+| Arguments:
+| a0 = operand address
+| d0 = `xxxxxxff -> supervisor; `xxxxxx00 -> user
+| d1 = `xxxxxxff -> longword; `xxxxxx00 -> word
+| Expected outputs:
+| d0 = 0 -> success; non-zero -> failure
+|
+| Linux/m68k: Make sure the page is properly paged in, so we use
+| plpaw and handle any exception here. The kernel must not be
+| preempted until _060_unlock_page(), so that the page stays mapped.
+|
+ .global _060_real_lock_page
+_060_real_lock_page:
+ move.l %d2,-(%sp)
+ | load sfc/dfc
+ tst.b %d0
+ jne 1f
+ moveq #1,%d0
+ jra 2f
+1: moveq #5,%d0
+2: movec.l %dfc,%d2
+ movec.l %d0,%dfc
+ movec.l %d0,%sfc
+
+ clr.l %d0
+ | prefetch address
+ .chip 68060
+ move.l %a0,%a1
+1: plpaw (%a1)
+ addq.w #1,%a0
+ tst.b %d1
+ jeq 2f
+ addq.w #2,%a0
+2: plpaw (%a0)
+3: .chip 68k
+
+ | restore sfc/dfc
+ movec.l %d2,%dfc
+ movec.l %d2,%sfc
+ move.l (%sp)+,%d2
+ rts
+
+.section __ex_table,"a"
+ .align 4
+ .long 1b,11f
+ .long 2b,21f
+.previous
+.section .fixup,"ax"
+ .even
+11: move.l #0x020003c0,%d0
+ or.l %d2,%d0
+ swap %d0
+ jra 3b
+21: move.l #0x02000bc0,%d0
+ or.l %d2,%d0
+ swap %d0
+ jra 3b
+.previous
+
+|
+| _060_real_unlock_page():
+|
+| Entry point for the operating system's routine to "unlock" a
+| page that has been "locked" previously with _060_real_lock_page.
+| Note: the routine must unlock two pages if the operand spans
+| two pages.
+| Arguments:
+| a0 = operand address
+| d0 = `xxxxxxff -> supervisor; `xxxxxx00 -> user
+| d1 = `xxxxxxff -> longword; `xxxxxx00 -> word
+|
+| Linux/m68k: perhaps reenable preemption here...
+
+ .global _060_real_unlock_page
+_060_real_unlock_page:
+ clr.l %d0
+ rts
+
+|###########################################################################
+
+|#################################
+| (2) EXAMPLE PACKAGE ENTRY CODE #
+|#################################
+
+ .global _060_isp_unimp
+_060_isp_unimp:
+ bral _I_CALL_TOP+0x80+0x00
+
+ .global _060_isp_cas
+_060_isp_cas:
+ bral _I_CALL_TOP+0x80+0x08
+
+ .global _060_isp_cas2
+_060_isp_cas2:
+ bral _I_CALL_TOP+0x80+0x10
+
+ .global _060_isp_cas_finish
+_060_isp_cas_finish:
+ bra.l _I_CALL_TOP+0x80+0x18
+
+ .global _060_isp_cas2_finish
+_060_isp_cas2_finish:
+ bral _I_CALL_TOP+0x80+0x20
+
+ .global _060_isp_cas_inrange
+_060_isp_cas_inrange:
+ bral _I_CALL_TOP+0x80+0x28
+
+ .global _060_isp_cas_terminate
+_060_isp_cas_terminate:
+ bral _I_CALL_TOP+0x80+0x30
+
+ .global _060_isp_cas_restart
+_060_isp_cas_restart:
+ bral _I_CALL_TOP+0x80+0x38
+
+|###########################################################################
+
+|###############################
+| (3) EXAMPLE CALL-OUT SECTION #
+|###############################
+
+| The size of this section MUST be 128 bytes!!!
+
+_I_CALL_TOP:
+ .long _060_real_chk - _I_CALL_TOP
+ .long _060_real_divbyzero - _I_CALL_TOP
+ .long _060_real_trace - _I_CALL_TOP
+ .long _060_real_access - _I_CALL_TOP
+ .long _060_isp_done - _I_CALL_TOP
+
+ .long _060_real_cas - _I_CALL_TOP
+ .long _060_real_cas2 - _I_CALL_TOP
+ .long _060_real_lock_page - _I_CALL_TOP
+ .long _060_real_unlock_page - _I_CALL_TOP
+
+ .long 0x00000000, 0x00000000, 0x00000000, 0x00000000
+ .long 0x00000000, 0x00000000, 0x00000000
+
+ .long _060_imem_read - _I_CALL_TOP
+ .long _060_dmem_read - _I_CALL_TOP
+ .long _060_dmem_write - _I_CALL_TOP
+ .long _060_imem_read_word - _I_CALL_TOP
+ .long _060_imem_read_long - _I_CALL_TOP
+ .long _060_dmem_read_byte - _I_CALL_TOP
+ .long _060_dmem_read_word - _I_CALL_TOP
+ .long _060_dmem_read_long - _I_CALL_TOP
+ .long _060_dmem_write_byte - _I_CALL_TOP
+ .long _060_dmem_write_word - _I_CALL_TOP
+ .long _060_dmem_write_long - _I_CALL_TOP
+
+ .long 0x00000000
+ .long 0x00000000, 0x00000000, 0x00000000, 0x00000000
+
+|###########################################################################
+
+| 060 INTEGER KERNEL PACKAGE MUST GO HERE!!!
+#include "isp.sa"
diff --git a/arch/m68k/ifpsp060/isp.doc b/arch/m68k/ifpsp060/isp.doc
new file mode 100644
index 00000000000..5a90fded3f0
--- /dev/null
+++ b/arch/m68k/ifpsp060/isp.doc
@@ -0,0 +1,218 @@
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+MOTOROLA MICROPROCESSOR & MEMORY TECHNOLOGY GROUP
+M68000 Hi-Performance Microprocessor Division
+M68060 Software Package
+Production Release P1.00 -- October 10, 1994
+
+M68060 Software Package Copyright © 1993, 1994 Motorola Inc. All rights reserved.
+
+THE SOFTWARE is provided on an "AS IS" basis and without warranty.
+To the maximum extent permitted by applicable law,
+MOTOROLA DISCLAIMS ALL WARRANTIES WHETHER EXPRESS OR IMPLIED,
+INCLUDING IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
+and any warranty against infringement with regard to the SOFTWARE
+(INCLUDING ANY MODIFIED VERSIONS THEREOF) and any accompanying written materials.
+
+To the maximum extent permitted by applicable law,
+IN NO EVENT SHALL MOTOROLA BE LIABLE FOR ANY DAMAGES WHATSOEVER
+(INCLUDING WITHOUT LIMITATION, DAMAGES FOR LOSS OF BUSINESS PROFITS,
+BUSINESS INTERRUPTION, LOSS OF BUSINESS INFORMATION, OR OTHER PECUNIARY LOSS)
+ARISING OF THE USE OR INABILITY TO USE THE SOFTWARE.
+Motorola assumes no responsibility for the maintenance and support of the SOFTWARE.
+
+You are hereby granted a copyright license to use, modify, and distribute the SOFTWARE
+so long as this entire notice is retained without alteration in any modified and/or
+redistributed versions, and that such modified versions are clearly identified as such.
+No licenses are granted by implication, estoppel or otherwise under any patents
+or trademarks of Motorola, Inc.
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+68060 INTEGER SOFTWARE PACKAGE (Kernel version)
+------------------------------------------------
+
+The file isp.sa contains the 68060 Integer Software Package.
+This package is essentially an exception handler that can be
+integrated into an operating system to handle the "Unimplemented
+Integer Instruction" exception vector #61.
+This exception is taken whenever one of the integer instructions
+not implemented in hardware on the 68060 is encountered. The
+isp.sa image provides full emulation support for these instructions.
+
+The unimplemented integer instructions are:
+ 64-bit divide
+ 64-bit multiply
+ movep
+ cmp2
+ chk2
+ cas (w/ a misaligned effective address)
+ cas2
+
+Release file format:
+--------------------
+The file isp.sa is essentially a hexadecimal image of the
+release package. This is the ONLY format which will be supported.
+The hex image was created by assembling the source code and
+then converting the resulting binary output image into an
+ASCII text file. The hexadecimal numbers are listed
+using the Motorola Assembly Syntax assembler directive "dc.l"
+(define constant longword). The file can be converted to other
+assembly syntaxes by using any word processor with a global
+search and replace function.
+
+To assist in assembling and linking this module with other modules,
+the installer should add a symbolic label to the top of the file.
+This will allow calling routines to access the entry points
+of this package.
+
+The source code isp.s has also been included but only for
+documentation purposes.
+
+Release file structure:
+-----------------------
+
+(top of module)
+ -----------------
+ | | - 128 byte-sized section
+ (1) | Call-Out | - 4 bytes per entry (user fills these in)
+ | | - example routines in iskeleton.s
+ -----------------
+ | | - 8 bytes per entry
+ (2) | Entry Point | - user does a "bra" or "jmp" to this address
+ | |
+ -----------------
+ | | - code section
+ (3) ~ ~
+ | |
+ -----------------
+(bottom of module)
+
+The first section of this module is the "Call-out" section. This section
+is NOT INCLUDED in isp.sa (an example "Call-out" section is provided at
+the end of the file iskeleton.s). The purpose of this section is to allow
+the ISP routines to reference external functions that must be provided
+by the host operating system. This section MUST be exactly 128 bytes in
+size. There are 32 fields, each 4 bytes in size. Each field corresponds
+to a function required by the ISP (these functions and their location are
+listed in "68060ISP call-outs" below). Each field entry should contain
+the address of the corresponding function RELATIVE to the starting address
+of the "call-out" section. The "Call-out" section must sit adjacent to the
+isp.sa image in memory.
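+
+In sketch form, the example arrangement in iskeleton.s reduces to:
+
+_I_CALL_TOP:
+	.long	_060_real_chk - _I_CALL_TOP		| field 0x000
+	.long	_060_real_divbyzero - _I_CALL_TOP	| field 0x004
+	| ... remaining call-out and reserved fields ...
+#include "isp.sa"		| code image sits immediately after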
+
+The second section, the "Entry-point" section, is used by external routines
+to access the functions within the ISP. Since the isp.sa hex file contains
+no symbol names, this section contains function entry points that are fixed
+with respect to the top of the package. The currently defined entry-points
+are listed in section "68060 ISP entry points" below. A calling routine
+would simply execute a "bra" or "jmp" that jumped to the selected function
+entry-point.
+
+For example, if the 68060 hardware took an "Unimplemented Integer Instruction"
+exception (vector #61), the operating system should execute something
+similar to:
+
+ bra _060ISP_TOP+128+0
+
+(_060ISP_TOP is the starting address of the "Call-out" section; the "Call-out"
+section is 128 bytes long; and the Unimplemented Integer ISP handler entry
+point is located 0 bytes from the top of the "Entry-point" section.)
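+
+A minimal sketch of installing that handler (an assumption about the
+host system: the vector base address has already been read from the
+VBR into %a1):
+
+	lea	_060ISP_TOP+128+0,%a0	# unimplemented-integer entry point
+	mov.l	%a0,0xf4(%a1)		# vector #61 = offset 0x3d * 4 = 0xf4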
+
+The third section is the code section. After entering through an "Entry-point",
+the entry code jumps to the appropriate emulation code within the code section.
+
+68060ISP call-outs: (details in iskeleton.s)
+--------------------
+0x000: _060_real_chk
+0x004: _060_real_divbyzero
+0x008: _060_real_trace
+0x00c: _060_real_access
+0x010: _060_isp_done
+
+0x014: _060_real_cas
+0x018: _060_real_cas2
+0x01c: _060_real_lock_page
+0x020: _060_real_unlock_page
+
+0x024: (Motorola reserved)
+0x028: (Motorola reserved)
+0x02c: (Motorola reserved)
+0x030: (Motorola reserved)
+0x034: (Motorola reserved)
+0x038: (Motorola reserved)
+0x03c: (Motorola reserved)
+
+0x040: _060_imem_read
+0x044: _060_dmem_read
+0x048: _060_dmem_write
+0x04c: _060_imem_read_word
+0x050: _060_imem_read_long
+0x054: _060_dmem_read_byte
+0x058: _060_dmem_read_word
+0x05c: _060_dmem_read_long
+0x060: _060_dmem_write_byte
+0x064: _060_dmem_write_word
+0x068: _060_dmem_write_long
+
+0x06c: (Motorola reserved)
+0x070: (Motorola reserved)
+0x074: (Motorola reserved)
+0x078: (Motorola reserved)
+0x07c: (Motorola reserved)
+
+68060ISP entry points:
+-----------------------
+0x000: _060_isp_unimp
+
+0x008: _060_isp_cas
+0x010: _060_isp_cas2
+0x018: _060_isp_cas_finish
+0x020: _060_isp_cas2_finish
+0x028: _060_isp_cas_inrange
+0x030: _060_isp_cas_terminate
+0x038: _060_isp_cas_restart
+
+Integrating cas/cas2:
+---------------------
+The instructions "cas2" and "cas" (when used with a misaligned effective
+address) take the Unimplemented Integer Instruction exception. When the
+060ISP is installed properly, these instructions will enter through the
+_060_isp_unimp() entry point of the ISP.
+
+After the 060ISP decodes the instruction type and fetches the appropriate
+data registers, and BEFORE the actual emulated transfers occur, the
+package calls either the "Call-out" _060_real_cas() or _060_real_cas2().
+If the emulation code provided by the 060ISP is sufficient for the
+host system (see the isp.s source code), then the system integrator
+should make these "Call-out"s point directly back into the package
+through the "Entry-point"s _060_isp_cas() or _060_isp_cas2().
+
+One other necessary action by the integrator is to supply the routines
+_060_real_lock_page() and _060_real_unlock_page(). These functions are
+defined further in iskeleton.s and the 68060 Software Package Specification.
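+
+As a point of reference, the simplest possible form of these two
+"Call-out"s (a sketch, suitable only when the operand pages can never
+be paged out) just reports success, exactly as the example
+_060_real_unlock_page in iskeleton.s does:
+
+	clr.l	%d0		| d0 = 0 -> success
+	rts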
+
+If the "core" emulation routines of either "cas" or "cas2" perform some
+actions which are too system-specific, then the system integrator must
+supply new emulation code. This new emulation code should reside within
+the functions _060_real_cas() or _060_real_cas2(). When this new emulation
+code has completed, then it should re-enter the 060ISP package through the
+"Entry-point" _060_isp_cas_finish() or _060_isp_cas2_finish().
+To see what the register state is upon entering _060_real_cas() or
+_060_real_cas2() and what it should be upon return to the package through
+_060_isp_cas_finish() or _060_isp_cas2_finish(), please refer to the
+source code in isp.s.
+
+Miscellaneous:
+--------------
+
+_060_isp_unimp:
+----------------
+- documented in section 2.2 of the 68060 Software Package Specification.
+- Basic flow:
+ exception taken ---> enter _060_isp_unimp --|
+ |
+ |
+	may exit through _060_real_trace      <----|
+ or |
+ may exit through _060_real_chk <----|
+ or |
+ may exit through _060_real_divbyzero <----|
+ or |
+ may exit through _060_isp_done <----|
diff --git a/arch/m68k/ifpsp060/isp.sa b/arch/m68k/ifpsp060/isp.sa
new file mode 100644
index 00000000000..2f88d2a7d15
--- /dev/null
+++ b/arch/m68k/ifpsp060/isp.sa
@@ -0,0 +1,392 @@
+ .long 0x60ff0000,0x02360000,0x60ff0000,0x16260000
+ .long 0x60ff0000,0x12dc0000,0x60ff0000,0x11ea0000
+ .long 0x60ff0000,0x10de0000,0x60ff0000,0x12a40000
+ .long 0x60ff0000,0x12560000,0x60ff0000,0x122a0000
+ .long 0x51fc51fc,0x51fc51fc,0x51fc51fc,0x51fc51fc
+ .long 0x51fc51fc,0x51fc51fc,0x51fc51fc,0x51fc51fc
+ .long 0x51fc51fc,0x51fc51fc,0x51fc51fc,0x51fc51fc
+ .long 0x51fc51fc,0x51fc51fc,0x51fc51fc,0x51fc51fc
+ .long 0x2f00203a,0xfefc487b,0x0930ffff,0xfef8202f
+ .long 0x00044e74,0x00042f00,0x203afeea,0x487b0930
+ .long 0xfffffee2,0x202f0004,0x4e740004,0x2f00203a
+ .long 0xfed8487b,0x0930ffff,0xfecc202f,0x00044e74
+ .long 0x00042f00,0x203afec6,0x487b0930,0xfffffeb6
+ .long 0x202f0004,0x4e740004,0x2f00203a,0xfeb4487b
+ .long 0x0930ffff,0xfea0202f,0x00044e74,0x00042f00
+ .long 0x203afea2,0x487b0930,0xfffffe8a,0x202f0004
+ .long 0x4e740004,0x2f00203a,0xfe90487b,0x0930ffff
+ .long 0xfe74202f,0x00044e74,0x00042f00,0x203afe7e
+ .long 0x487b0930,0xfffffe5e,0x202f0004,0x4e740004
+ .long 0x2f00203a,0xfe6c487b,0x0930ffff,0xfe48202f
+ .long 0x00044e74,0x00042f00,0x203afe76,0x487b0930
+ .long 0xfffffe32,0x202f0004,0x4e740004,0x2f00203a
+ .long 0xfe64487b,0x0930ffff,0xfe1c202f,0x00044e74
+ .long 0x00042f00,0x203afe52,0x487b0930,0xfffffe06
+ .long 0x202f0004,0x4e740004,0x2f00203a,0xfe40487b
+ .long 0x0930ffff,0xfdf0202f,0x00044e74,0x00042f00
+ .long 0x203afe2e,0x487b0930,0xfffffdda,0x202f0004
+ .long 0x4e740004,0x2f00203a,0xfe1c487b,0x0930ffff
+ .long 0xfdc4202f,0x00044e74,0x00042f00,0x203afe0a
+ .long 0x487b0930,0xfffffdae,0x202f0004,0x4e740004
+ .long 0x2f00203a,0xfdf8487b,0x0930ffff,0xfd98202f
+ .long 0x00044e74,0x00042f00,0x203afde6,0x487b0930
+ .long 0xfffffd82,0x202f0004,0x4e740004,0x2f00203a
+ .long 0xfdd4487b,0x0930ffff,0xfd6c202f,0x00044e74
+ .long 0x00042f00,0x203afdc2,0x487b0930,0xfffffd56
+ .long 0x202f0004,0x4e740004,0x4e56ffa0,0x48ee3fff
+ .long 0xffc02d56,0xfff8082e,0x00050004,0x66084e68
+ .long 0x2d48fffc,0x600841ee,0x000c2d48,0xfffc422e
+ .long 0xffaa3d6e,0x0004ffa8,0x2d6e0006,0xffa4206e
+ .long 0xffa458ae,0xffa461ff,0xffffff26,0x2d40ffa0
+ .long 0x0800001e,0x67680800,0x00166628,0x61ff0000
+ .long 0x0cb0082e,0x00050004,0x670000ac,0x082e0002
+ .long 0xffaa6700,0x00a2082e,0x00070004,0x66000186
+ .long 0x600001b0,0x61ff0000,0x0a28082e,0x0002ffaa
+ .long 0x660e082e,0x0005ffaa,0x6600010a,0x60000078
+ .long 0x082e0005,0x000467ea,0x082e0005,0xffaa6600
+ .long 0x01264a2e,0x00046b00,0x014c6000,0x01760800
+ .long 0x0018670a,0x61ff0000,0x07ae6000,0x004a0800
+ .long 0x001b6730,0x48400c00,0x00fc670a,0x61ff0000
+ .long 0x0e926000,0x0032206e,0xffa454ae,0xffa461ff
+ .long 0xfffffe68,0x4a816600,0x019861ff,0x00000d20
+ .long 0x60000014,0x61ff0000,0x08c40c2e,0x0010ffaa
+ .long 0x66000004,0x605c1d6e,0xffa90005,0x082e0005
+ .long 0x00046606,0x206efffc,0x4e604cee,0x3fffffc0
+ .long 0x082e0007,0x00046612,0x2d6effa4,0x00062cae
+ .long 0xfff84e5e,0x60ffffff,0xfd622d6e,0xfff8fffc
+ .long 0x3d6e0004,0x00002d6e,0x00060008,0x2d6effa4
+ .long 0x00023d7c,0x20240006,0x598e4e5e,0x60ffffff
+ .long 0xfd0e1d6e,0xffa90005,0x4cee3fff,0xffc03cae
+ .long 0x00042d6e,0x00060008,0x2d6effa4,0x00023d7c
+ .long 0x20180006,0x2c6efff8,0xdffc0000,0x006060ff
+ .long 0xfffffcb0,0x1d6effa9,0x00054cee,0x3fffffc0
+ .long 0x3cae0004,0x2d6e0006,0x00082d6e,0xffa40002
+ .long 0x3d7c2014,0x00062c6e,0xfff8dffc,0x00000060
+ .long 0x60ffffff,0xfc941d6e,0xffa90005,0x4cee3fff
+ .long 0xffc02d6e,0x0006000c,0x3d7c2014,0x000a2d6e
+ .long 0xffa40006,0x2c6efff8,0xdffc0000,0x006460ff
+ .long 0xfffffc66,0x1d6effa9,0x00054cee,0x3fffffc0
+ .long 0x2d6e0006,0x000c3d7c,0x2024000a,0x2d6effa4
+ .long 0x00062c6e,0xfff8dffc,0x00000064,0x60ffffff
+ .long 0xfc4e1d6e,0xffa90005,0x4cee3fff,0xffc03d7c
+ .long 0x00f4000e,0x2d6effa4,0x000a3d6e,0x00040008
+ .long 0x2c6efff8,0xdffc0000,0x006860ff,0xfffffc4c
+ .long 0x2c882d40,0xfffc4fee,0xffc04cdf,0x7fff2f2f
+ .long 0x000c2f6f,0x00040010,0x2f6f000c,0x00042f6f
+ .long 0x0008000c,0x2f5f0004,0x3f7c4008,0x00066028
+ .long 0x4cee3fff,0xffc04e5e,0x514f2eaf,0x00083f6f
+ .long 0x000c0004,0x3f7c4008,0x00062f6f,0x00020008
+ .long 0x2f7c0942,0x8001000c,0x08170005,0x670608ef
+ .long 0x0002000d,0x60ffffff,0xfbcc0c2e,0x0040ffaa
+ .long 0x660c4280,0x102effab,0x2daeffac,0x0ce04e75
+ .long 0x2040302e,0xffa03200,0x0240003f,0x02810000
+ .long 0x0007303b,0x020a4efb,0x00064afc,0x00400000
+ .long 0x00000000,0x00000000,0x00000000,0x00000000
+ .long 0x00000000,0x00000000,0x00000000,0x00000080
+ .long 0x0086008c,0x00920098,0x009e00a4,0x00aa00b0
+ .long 0x00ce00ec,0x010a0128,0x01460164,0x01820196
+ .long 0x01b401d2,0x01f0020e,0x022c024a,0x0268027c
+ .long 0x029a02b8,0x02d602f4,0x03120330,0x034e036c
+ .long 0x036c036c,0x036c036c,0x036c036c,0x036c03d6
+ .long 0x03f0040a,0x042a03ca,0x00000000,0x0000206e
+ .long 0xffe04e75,0x206effe4,0x4e75206e,0xffe84e75
+ .long 0x206effec,0x4e75206e,0xfff04e75,0x206efff4
+ .long 0x4e75206e,0xfff84e75,0x206efffc,0x4e752008
+ .long 0x206effe0,0xd0882d40,0xffe02d48,0xffac1d7c
+ .long 0x0000ffab,0x1d7c0040,0xffaa4e75,0x2008206e
+ .long 0xffe4d088,0x2d40ffe4,0x2d48ffac,0x1d7c0001
+ .long 0xffab1d7c,0x0040ffaa,0x4e752008,0x206effe8
+ .long 0xd0882d40,0xffe82d48,0xffac1d7c,0x0002ffab
+ .long 0x1d7c0040,0xffaa4e75,0x2008206e,0xffecd088
+ .long 0x2d40ffec,0x2d48ffac,0x1d7c0003,0xffab1d7c
+ .long 0x0040ffaa,0x4e752008,0x206efff0,0xd0882d40
+ .long 0xfff02d48,0xffac1d7c,0x0004ffab,0x1d7c0040
+ .long 0xffaa4e75,0x2008206e,0xfff4d088,0x2d40fff4
+ .long 0x2d48ffac,0x1d7c0005,0xffab1d7c,0x0040ffaa
+ .long 0x4e752008,0x206efff8,0xd0882d40,0xfff82d48
+ .long 0xffac1d7c,0x0006ffab,0x1d7c0040,0xffaa4e75
+ .long 0x1d7c0004,0xffaa2008,0x206efffc,0xd0882d40
+ .long 0xfffc4e75,0x202effe0,0x2d40ffac,0x90882d40
+ .long 0xffe02040,0x1d7c0000,0xffab1d7c,0x0040ffaa
+ .long 0x4e75202e,0xffe42d40,0xffac9088,0x2d40ffe4
+ .long 0x20401d7c,0x0001ffab,0x1d7c0040,0xffaa4e75
+ .long 0x202effe8,0x2d40ffac,0x90882d40,0xffe82040
+ .long 0x1d7c0002,0xffab1d7c,0x0040ffaa,0x4e75202e
+ .long 0xffec2d40,0xffac9088,0x2d40ffec,0x20401d7c
+ .long 0x0003ffab,0x1d7c0040,0xffaa4e75,0x202efff0
+ .long 0x2d40ffac,0x90882d40,0xfff02040,0x1d7c0004
+ .long 0xffab1d7c,0x0040ffaa,0x4e75202e,0xfff42d40
+ .long 0xffac9088,0x2d40fff4,0x20401d7c,0x0005ffab
+ .long 0x1d7c0040,0xffaa4e75,0x202efff8,0x2d40ffac
+ .long 0x90882d40,0xfff82040,0x1d7c0006,0xffab1d7c
+ .long 0x0040ffaa,0x4e751d7c,0x0008ffaa,0x202efffc
+ .long 0x90882d40,0xfffc2040,0x4e75206e,0xffa454ae
+ .long 0xffa461ff,0xfffff9d4,0x4a8166ff,0xfffffd04
+ .long 0x3040d1ee,0xffe04e75,0x206effa4,0x54aeffa4
+ .long 0x61ffffff,0xf9b64a81,0x66ffffff,0xfce63040
+ .long 0xd1eeffe4,0x4e75206e,0xffa454ae,0xffa461ff
+ .long 0xfffff998,0x4a8166ff,0xfffffcc8,0x3040d1ee
+ .long 0xffe84e75,0x206effa4,0x54aeffa4,0x61ffffff
+ .long 0xf97a4a81,0x66ffffff,0xfcaa3040,0xd1eeffec
+ .long 0x4e75206e,0xffa454ae,0xffa461ff,0xfffff95c
+ .long 0x4a8166ff,0xfffffc8c,0x3040d1ee,0xfff04e75
+ .long 0x206effa4,0x54aeffa4,0x61ffffff,0xf93e4a81
+ .long 0x66ffffff,0xfc6e3040,0xd1eefff4,0x4e75206e
+ .long 0xffa454ae,0xffa461ff,0xfffff920,0x4a8166ff
+ .long 0xfffffc50,0x3040d1ee,0xfff84e75,0x206effa4
+ .long 0x54aeffa4,0x61ffffff,0xf9024a81,0x66ffffff
+ .long 0xfc323040,0xd1eefffc,0x4e752f01,0x206effa4
+ .long 0x54aeffa4,0x61ffffff,0xf8e24a81,0x66ffffff
+ .long 0xfc12221f,0x207614e0,0x08000008,0x670e48e7
+ .long 0x3c002a00,0x260860ff,0x000000ec,0x2f022200
+ .long 0xe9590241,0x000f2236,0x14c00800,0x000b6602
+ .long 0x48c12400,0xef5a0282,0x00000003,0xe5a949c0
+ .long 0xd081d1c0,0x241f4e75,0x1d7c0080,0xffaa206e
+ .long 0xffa44e75,0x206effa4,0x54aeffa4,0x61ffffff
+ .long 0xf87a4a81,0x66ffffff,0xfbaa3040,0x4e75206e
+ .long 0xffa458ae,0xffa461ff,0xfffff876,0x4a8166ff
+ .long 0xfffffb90,0x20404e75,0x206effa4,0x54aeffa4
+ .long 0x61ffffff,0xf8464a81,0x66ffffff,0xfb763040
+ .long 0xd1eeffa4,0x55884e75,0x206effa4,0x54aeffa4
+ .long 0x61ffffff,0xf8264a81,0x66ffffff,0xfb56206e
+ .long 0xffa45588,0x08000008,0x670e48e7,0x3c002a00
+ .long 0x260860ff,0x00000030,0x2f022200,0xe9590241
+ .long 0x000f2236,0x14c00800,0x000b6602,0x48c12400
+ .long 0xef5a0282,0x00000003,0xe5a949c0,0xd081d1c0
+ .long 0x241f4e75,0x08050006,0x67044282,0x6016e9c5
+ .long 0x24042436,0x24c00805,0x000b6602,0x48c2e9c5
+ .long 0x0542e1aa,0x08050007,0x67024283,0xe9c50682
+ .long 0x0c000002,0x6d346718,0x206effa4,0x58aeffa4
+ .long 0x61ffffff,0xf7ac4a81,0x66ffffff,0xfac66018
+ .long 0x206effa4,0x54aeffa4,0x61ffffff,0xf77e4a81
+ .long 0x66ffffff,0xfaae48c0,0xd680e9c5,0x07826700
+ .long 0x006a0c00,0x00026d34,0x6718206e,0xffa458ae
+ .long 0xffa461ff,0xfffff76a,0x4a8166ff,0xfffffa84
+ .long 0x601c206e,0xffa454ae,0xffa461ff,0xfffff73c
+ .long 0x4a8166ff,0xfffffa6c,0x48c06002,0x42802800
+ .long 0x08050002,0x67122043,0x61ffffff,0xf7764a81
+ .long 0x6624d082,0xd0846016,0xd6822043,0x61ffffff
+ .long 0xf7624a81,0x6610d084,0x6004d682,0x20032040
+ .long 0x4cdf003c,0x4e752043,0x203c0101,0x000160ff
+ .long 0xfffff9f0,0x322effa0,0x10010240,0x00072076
+ .long 0x04e0d0ee,0xffa20801,0x00076700,0x008c3001
+ .long 0xef580240,0x00072036,0x04c00801,0x00066752
+ .long 0x24002448,0xe19a2002,0x61ffffff,0xf71c4a81
+ .long 0x660000fc,0x544a204a,0xe19a2002,0x61ffffff
+ .long 0xf7084a81,0x660000e8,0x544a204a,0xe19a2002
+ .long 0x61ffffff,0xf6f44a81,0x660000d4,0x544a204a
+ .long 0xe19a2002,0x61ffffff,0xf6e04a81,0x660000c0
+ .long 0x4e752400,0x2448e048,0x61ffffff,0xf6cc4a81
+ .long 0x660000ac,0x544a204a,0x200261ff,0xfffff6ba
+ .long 0x4a816600,0x009a4e75,0x08010006,0x675c2448
+ .long 0x61ffffff,0xf6624a81,0x66000092,0x2400544a
+ .long 0x204a61ff,0xfffff650,0x4a816600,0x0080e14a
+ .long 0x1400544a,0x204a61ff,0xfffff63c,0x4a816600
+ .long 0x006ce18a,0x1400544a,0x204a61ff,0xfffff628
+ .long 0x4a816600,0x0058e18a,0x1400122e,0xffa0e209
+ .long 0x02410007,0x2d8214c0,0x4e752448,0x61ffffff
+ .long 0xf6064a81,0x66000036,0x2400544a,0x204a61ff
+ .long 0xfffff5f4,0x4a816600,0x0024e14a,0x1400122e
+ .long 0xffa0e209,0x02410007,0x3d8214c2,0x4e75204a
+ .long 0x203c00a1,0x000160ff,0xfffff8a8,0x204a203c
+ .long 0x01210001,0x60ffffff,0xf89a61ff,0xfffff914
+ .long 0x102effa2,0xe9180240,0x000f2436,0x04c00c2e
+ .long 0x0002ffa0,0x6d506728,0x244861ff,0xfffff5c4
+ .long 0x4a816600,0x009e2600,0x588a204a,0x61ffffff
+ .long 0xf5b24a81,0x6600008c,0x22002003,0x60000048
+ .long 0x244861ff,0xfffff59c,0x4a816600,0x00763200
+ .long 0x484048c0,0x48c1082e,0x0007ffa2,0x66000028
+ .long 0x48c26000,0x00222448,0x61ffffff,0xf5604a81
+ .long 0x6600005e,0x1200e048,0x49c049c1,0x082e0007
+ .long 0xffa26602,0x49c29480,0x42c30203,0x00049280
+ .long 0xb28242c4,0x86040203,0x0005382e,0xffa80204
+ .long 0x001a8803,0x3d44ffa8,0x082e0003,0xffa26602
+ .long 0x4e750804,0x00006602,0x4e751d7c,0x0010ffaa
+ .long 0x4e75204a,0x203c0101,0x000160ff,0xfffff7c4
+ .long 0x204a203c,0x01410001,0x60ffffff,0xf7b6102e
+ .long 0xffa10200,0x00386600,0x0208102e,0xffa10240
+ .long 0x00072e36,0x04c06700,0x00c0102e,0xffa3122e
+ .long 0xffa20240,0x0007e809,0x02410007,0x3d40ffb2
+ .long 0x3d41ffb4,0x2a3604c0,0x2c3614c0,0x082e0003
+ .long 0xffa2671a,0x4a875dee,0xffb06a02,0x44874a85
+ .long 0x5deeffb1,0x6a0844fc,0x00004086,0x40854a85
+ .long 0x66164a86,0x67000048,0xbe866306,0xcb466000
+ .long 0x00124c47,0x6005600a,0xbe85634e,0x61ff0000
+ .long 0x0068082e,0x0003ffa2,0x67244a2e,0xffb16702
+ .long 0x4485102e,0xffb0b12e,0xffb1670c,0x0c868000
+ .long 0x00006226,0x44866006,0x0806001f,0x661c44ee
+ .long 0xffa84a86,0x42eeffa8,0x302effb2,0x322effb4
+ .long 0x2d8504c0,0x2d8614c0,0x4e7508ee,0x0001ffa9
+ .long 0x08ae0000,0xffa94e75,0x022e001e,0xffa9002e
+ .long 0x0020ffaa,0x4e750c87,0x0000ffff,0x621e4281
+ .long 0x48454846,0x3a068ac7,0x32054846,0x3a068ac7
+ .long 0x48413205,0x42454845,0x2c014e75,0x42aeffbc
+ .long 0x422effb6,0x42810807,0x001f660e,0x52aeffbc
+ .long 0xe38fe38e,0xe3956000,0xffee2607,0x24054842
+ .long 0x4843b443,0x6606323c,0xffff600a,0x220582c3
+ .long 0x02810000,0xffff2f06,0x42464846,0x26072401
+ .long 0xc4c74843,0xc6c12805,0x98834844,0x30043806
+ .long 0x4a406600,0x000ab484,0x63045381,0x60de2f05
+ .long 0x2c014846,0x2a0761ff,0x0000006a,0x24052606
+ .long 0x2a1f2c1f,0x9c839b82,0x64ff0000,0x001a5381
+ .long 0x42822607,0x48434243,0xdc83db82,0x26074243
+ .long 0x4843da83,0x4a2effb6,0x66163d41,0xffb84281
+ .long 0x48454846,0x3a064246,0x50eeffb6,0x6000ff6c
+ .long 0x3d41ffba,0x3c054846,0x48452e2e,0xffbc670a
+ .long 0x5387e28d,0xe29651cf,0xfffa2a06,0x2c2effb8
+ .long 0x4e752406,0x26062805,0x48434844,0xccc5cac3
+ .long 0xc4c4c6c4,0x42844846,0xdc45d744,0xdc42d744
+ .long 0x48464245,0x42424845,0x4842da82,0xda834e75
+ .long 0x700461ff,0xfffff61c,0x0c2e0080,0xffaa6712
+ .long 0x244861ff,0xfffff2dc,0x4a81661e,0x2e006000
+ .long 0xfde658ae,0xffa461ff,0xfffff286,0x4a8166ff
+ .long 0xfffff5a0,0x2e006000,0xfdce61ff,0xfffff5ce
+ .long 0x204a203c,0x01010001,0x60ffffff,0xf556102e
+ .long 0xffa10c00,0x00076e00,0x00b40240,0x00072636
+ .long 0x04c0342e,0xffa24241,0x1202e95a,0x02420007
+ .long 0x283624c0,0x4a846700,0x00884a83,0x67000082
+ .long 0x422effb0,0x082e0003,0xffa26718,0x4a836c08
+ .long 0x4483002e,0x0001ffb0,0x4a846c08,0x44840a2e
+ .long 0x0001ffb0,0x2a032c03,0x2e044846,0x4847c6c4
+ .long 0xc8c6cac7,0xccc74287,0x4843d644,0xdd87d645
+ .long 0xdd874843,0x42444245,0x48444845,0xd885d886
+ .long 0x4a2effb0,0x67084683,0x46845283,0xd9872d83
+ .long 0x24c044fc,0x00002d84,0x14c042c7,0x02070008
+ .long 0x1c2effa9,0x02060010,0x8c071d46,0xffa94e75
+ .long 0x42b624c0,0x42b614c0,0x7e0460e4,0x700461ff
+ .long 0xfffff510,0x0c2e0080,0xffaa6714,0x244861ff
+ .long 0xfffff1d0,0x4a816600,0x00202600,0x6000ff34
+ .long 0x58aeffa4,0x61ffffff,0xf1784a81,0x66ffffff
+ .long 0xf4922600,0x6000ff1c,0x61ffffff,0xf4c0204a
+ .long 0x203c0101,0x000160ff,0xfffff448,0x2d40ffb4
+ .long 0x2200e958,0x0240000f,0x227604c0,0x2d49ffb0
+ .long 0x2001ec49,0x02410007,0x2a3614c0,0x02400007
+ .long 0x263604c0,0x3d40ffba,0x302effa2,0x2200e958
+ .long 0x0240000f,0x207604c0,0x2d48ffbc,0x2001ec49
+ .long 0x02410007,0x283614c0,0x02400007,0x243604c0
+ .long 0x3d40ffb8,0x082e0001,0xffa056c7,0x082e0005
+ .long 0x000456c6,0x24482649,0x22072006,0x61ffffff
+ .long 0xf05c204a,0x4a8066ff,0x000001c8,0x22072006
+ .long 0x204b61ff,0xfffff046,0x204b4a80,0x660a204a
+ .long 0x224b60ff,0xfffff020,0x2f002207,0x2006204a
+ .long 0x61ffffff,0xf03e201f,0x204b60ff,0x00000194
+ .long 0x082e0001,0xffa06648,0x44eeffa8,0xb0426602
+ .long 0xb24342ee,0xffa84a04,0x6610362e,0xffba3d81
+ .long 0x34c2342e,0xffb83d80,0x24c2082e,0x00050004
+ .long 0x56c22002,0x51c1206e,0xffbc61ff,0xffffeff4
+ .long 0x200251c1,0x206effb0,0x61ffffff,0xefe64e75
+ .long 0x44eeffa8,0xb0826602,0xb28342ee,0xffa84a04
+ .long 0x6610362e,0xffba2d81,0x34c0342e,0xffb82d80
+ .long 0x24c0082e,0x00050004,0x56c22002,0x50c1206e
+ .long 0xffbc61ff,0xffffefac,0x200250c1,0x206effb0
+ .long 0x61ffffff,0xef9e4e75,0x202effb4,0x6000feae
+ .long 0x082e0001,0xffa06610,0x700261ff,0xfffff364
+ .long 0x2d48ffb4,0x51c7600e,0x700461ff,0xfffff354
+ .long 0x2d48ffb4,0x50c7302e,0xffa22200,0xec480240
+ .long 0x00072436,0x04c00241,0x00072836,0x14c03d41
+ .long 0xffb8082e,0x00050004,0x56c62448,0x22072006
+ .long 0x61ffffff,0xef284a80,0x66000096,0x204a60ff
+ .long 0xffffeeee,0x082e0001,0xffa0662c,0x44eeffa8
+ .long 0xb04442ee,0xffa84a01,0x6608362e,0xffb83d80
+ .long 0x34c2206e,0xffb451c1,0x082e0005,0x000456c0
+ .long 0x61ffffff,0xeefe4e75,0x44eeffa8,0xb08442ee
+ .long 0xffa84a01,0x6608362e,0xffb82d80,0x34c0206e
+ .long 0xffb450c1,0x082e0005,0x000456c0,0x61ffffff
+ .long 0xeed24e75,0x4e7b6000,0x4e7b6001,0x0c2e00fc
+ .long 0xffa167ff,0xffffff24,0x206effb4,0x082e0001
+ .long 0xffa056c7,0x6000ff40,0x4e7b6000,0x4e7b6001
+ .long 0x24482f00,0x61ffffff,0xf264201f,0x588f518f
+ .long 0x518e721a,0x41ef0008,0x43ef0000,0x22d851c9
+ .long 0xfffc3d7c,0x4008000a,0x2d4a000c,0x2d400010
+ .long 0x4cee3fff,0xffc04e5e,0x60ffffff,0xedf84280
+ .long 0x43fb0170,0x000005ae,0xb3c86d0e,0x43fb0170
+ .long 0x00000010,0xb1c96d02,0x4e7570ff,0x4e754a06
+ .long 0x66047001,0x60027005,0x4a076700,0x01e42448
+ .long 0x26492848,0x2a49568c,0x568d220a,0x40c7007c
+ .long 0x07004e7a,0x60004e7b,0x00004e7b,0x0001f58a
+ .long 0xf58cf58b,0xf58df46a,0xf46cf46b,0xf46d2441
+ .long 0x56812841,0xf5caf5cc,0x247c8000,0x0000267c
+ .long 0xa0000000,0x287c0000,0x00002008,0x02000003
+ .long 0x671c0c00,0x00026700,0x00966000,0x010251fc
+ .long 0x4e7ba008,0x0e911000,0x0e900000,0x6002600e
+ .long 0xb082661c,0xb2836618,0x0e915800,0x6002600e
+ .long 0x4e7bb008,0x0e904800,0x4e7bc008,0x6034600e
+ .long 0x4e7bb008,0x0e900800,0x4e7bc008,0x6012600e
+ .long 0x4e714e71,0x4e714e71,0x4e714e71,0x4e7160b0
+ .long 0x4e7b6000,0x4e7b6001,0x46c751c4,0x60ffffff
+ .long 0xfd424e7b,0x60004e7b,0x600146c7,0x50c460ff
+ .long 0xfffffd30,0x51fc51fc,0x51fc51fc,0x51fc51fc
+ .long 0x4e7ba008,0x0e911000,0x0e900000,0x6002600e
+ .long 0xb082662c,0xb2836628,0x0e915800,0x6002600e
+ .long 0x48440e58,0x48004e7b,0xb0084844,0x6002600e
+ .long 0x0e504800,0x4e7bc008,0x6000ffa8,0x4e71600e
+ .long 0x48400e58,0x08004e7b,0xb0084840,0x6002600e
+ .long 0x0e500800,0x4e7bc008,0x6000ff76,0x4e71600e
+ .long 0x4e714e71,0x4e714e71,0x4e714e71,0x4e716090
+ .long 0x4e7ba008,0x0e911000,0x0e900000,0x6002600e
+ .long 0xb082663c,0xb2836638,0x0e915800,0x6002600e
+ .long 0xe19c0e18,0x48004844,0x0e584800,0x6002600e
+ .long 0xe19c4e7b,0xb0080e10,0x48006004,0x4e71600e
+ .long 0x4e7bc008,0x6000ff2c,0x4e714e71,0x4e71600e
+ .long 0xe1980e18,0x08004840,0x0e580800,0x6002600e
+ .long 0xe1984e7b,0xb0080e10,0x08006004,0x4e71600e
+ .long 0x4e7bc008,0x6000feea,0x4e714e71,0x4e71600c
+ .long 0x4e714e71,0x4e714e71,0x4e714e71,0x6000ff72
+ .long 0x24482649,0x28482a49,0x528c528d,0x220a40c7
+ .long 0x007c0700,0x4e7a6000,0x4e7b0000,0x4e7b0001
+ .long 0xf58af58c,0xf58bf58d,0xf46af46c,0xf46bf46d
+ .long 0x24415681,0x2841f5ca,0xf5cc247c,0x80000000
+ .long 0x267ca000,0x0000287c,0x00000000,0x20080800
+ .long 0x00006600,0x009a6016,0x51fc51fc,0x51fc51fc
+ .long 0x4e7ba008,0x0e511000,0x0e500000,0x6002600e
+ .long 0xb042661c,0xb2436618,0x0e515800,0x6002600e
+ .long 0x4e7bb008,0x0e504800,0x4e7bc008,0x6034600e
+ .long 0x4e7bb008,0x0e500800,0x4e7bc008,0x6012600e
+ .long 0x4e714e71,0x4e714e71,0x4e714e71,0x4e7160b0
+ .long 0x4e7b6000,0x4e7b6001,0x46c751c4,0x60ffffff
+ .long 0xfb624e7b,0x60004e7b,0x600146c7,0x50c460ff
+ .long 0xfffffb50,0x51fc51fc,0x51fc51fc,0x51fc51fc
+ .long 0x4e7ba008,0x0e511000,0x0e500000,0x6002600e
+ .long 0xb042662c,0xb2436628,0x0e515800,0x6002600e
+ .long 0xe09c0e18,0x48004e7b,0xb008e19c,0x6002600e
+ .long 0x0e104800,0x4e7bc008,0x6000ffa8,0x4e71600e
+ .long 0xe0980e18,0x08004e7b,0xb008e198,0x6002600e
+ .long 0x0e100800,0x4e7bc008,0x6000ff76,0x4e71600e
+ .long 0x4e714e71,0x4e714e71,0x4e714e71,0x4e716090
+ .long 0x4a066604,0x70016002,0x70054a07,0x660000c6
+ .long 0x22482448,0x528a2602,0xe04a40c7,0x007c0700
+ .long 0x4e7a6000,0x4e7b0000,0x4e7b0001,0xf589f58a
+ .long 0xf469f46a,0x227c8000,0x0000247c,0xa0000000
+ .long 0x267c0000,0x00006016,0x51fc51fc,0x51fc51fc
+ .long 0x4e7b9008,0x0e500000,0xb0446624,0x6002600e
+ .long 0x0e182800,0x4e7ba008,0x0e103800,0x6002600e
+ .long 0x4e7bb008,0x604c4e71,0x4e714e71,0x4e71600e
+ .long 0xe0980e18,0x08004e7b,0xa008e198,0x6002600e
+ .long 0x0e100800,0x4e7bb008,0x60164e71,0x4e71600e
+ .long 0x4e714e71,0x4e714e71,0x4e714e71,0x4e7160a0
+ .long 0x4e7b6000,0x4e7b6001,0x46c751c1,0x60ffffff
+ .long 0xfb164e7b,0x60004e7b,0x600146c7,0x50c160ff
+ .long 0xfffffb04,0x22482448,0x568a2208,0x08010000
+ .long 0x660000c2,0x26024842,0x40c7007c,0x07004e7a
+ .long 0x60004e7b,0x00004e7b,0x0001f589,0xf58af469
+ .long 0xf46a227c,0x80000000,0x247ca000,0x0000267c
+ .long 0x00000000,0x601851fc,0x51fc51fc,0x51fc51fc
+ .long 0x4e7b9008,0x0e900000,0xb0846624,0x6002600e
+ .long 0x0e582800,0x4e7ba008,0x0e503800,0x6002600e
+ .long 0x4e7bb008,0x604c4e71,0x4e714e71,0x4e71600e
+ .long 0x48400e58,0x08004840,0x4e7ba008,0x6002600e
+ .long 0x0e500800,0x4e7bb008,0x60164e71,0x4e71600e
+ .long 0x4e714e71,0x4e714e71,0x4e714e71,0x4e7160a0
+ .long 0x4e7b6000,0x4e7b6001,0x46c751c1,0x60ffffff
+ .long 0xfa464e7b,0x60004e7b,0x600146c7,0x50c160ff
+ .long 0xfffffa34,0x2a02e08a,0x26024842,0x40c7007c
+ .long 0x07004e7a,0x60004e7b,0x00004e7b,0x0001f589
+ .long 0xf58af469,0xf46a227c,0x80000000,0x247ca000
+ .long 0x0000267c,0x00000000,0x601451fc,0x51fc51fc
+ .long 0x4e7b9008,0x0e900000,0xb0846624,0x6002600e
+ .long 0x0e182800,0x0e583800,0x4e7ba008,0x6002600e
+ .long 0x0e105800,0x4e7bb008,0x6000ff88,0x4e71600e
+ .long 0xe1980e18,0x08004840,0x0e580800,0x6002600e
+ .long 0xe1984e7b,0xa0080e10,0x08006004,0x4e71600e
+ .long 0x4e7bb008,0x6000ff4a,0x4e714e71,0x4e71600e
+ .long 0x4e714e71,0x4e714e71,0x4e714e71,0x4e716090
diff --git a/arch/m68k/ifpsp060/itest.sa b/arch/m68k/ifpsp060/itest.sa
new file mode 100644
index 00000000000..7b15eaf6380
--- /dev/null
+++ b/arch/m68k/ifpsp060/itest.sa
@@ -0,0 +1,1281 @@
+ dc.l $60ff0000,$005c5465,$7374696e,$67203638
+ dc.l $30363020,$49535020,$73746172,$7465643a
+ dc.l $0a007061,$73736564,$0a002066,$61696c65
+ dc.l $640a0000,$4a80660e,$487affe8,$61ff0000
+ dc.l $4f9a588f,$4e752f01,$61ff0000,$4fa4588f
+ dc.l $487affd8,$61ff0000,$4f82588f,$4e754e56
+ dc.l $ff6048e7,$3f3c487a,$ff9e61ff,$00004f6c
+ dc.l $588f42ae,$ff78487b,$01700000,$00ea61ff
+ dc.l $00004f58,$588f61ff,$000000f0,$61ffffff
+ dc.l $ffa642ae,$ff78487b,$01700000,$0af661ff
+ dc.l $00004f38,$588f61ff,$00000af8,$61ffffff
+ dc.l $ff8642ae,$ff78487b,$01700000,$179c61ff
+ dc.l $00004f18,$588f61ff,$0000179c,$61ffffff
+ dc.l $ff6642ae,$ff78487b,$01700000,$038661ff
+ dc.l $00004ef8,$588f61ff,$00000380,$61ffffff
+ dc.l $ff4642ae,$ff78487b,$01700000,$202c61ff
+ dc.l $00004ed8,$588f2d7c,$00000002,$ff7461ff
+ dc.l $0000202c,$61ffffff,$ff1e42ae,$ff78487b
+ dc.l $01700000,$0d7c61ff,$00004eb0,$588f61ff
+ dc.l $00000d74,$61ffffff,$fefe42ae,$ff78487b
+ dc.l $01700000,$0f8e61ff,$00004e90,$588f61ff
+ dc.l $00000f88,$61ffffff,$fede4cdf,$3cfc4e5e
+ dc.l $4e750936,$342d6269,$74206d75,$6c746970
+ dc.l $6c792e2e,$2e0051fc,$52aeff78,$4cfb3fff
+ dc.l $01700000,$4e184281,$243c9999,$9999263c
+ dc.l $88888888,$3d7c0004,$ff7c44fc,$000048ee
+ dc.l $7fffff80,$4c013402,$42eeff7e,$48ee7fff
+ dc.l $ffc042ae,$ff8842ae,$ff8c61ff,$00004da6
+ dc.l $4a0066ff,$00004dcc,$52aeff78,$4cfb3fff
+ dc.l $01700000,$4dc8223c,$77777777,$243c9999
+ dc.l $99997600,$3d7c0004,$ff7c44fc,$000048ee
+ dc.l $7fffff80,$4c013402,$42eeff7e,$48ee7fff
+ dc.l $ffc042ae,$ff8842ae,$ff8c61ff,$00004d56
+ dc.l $4a0066ff,$00004d7c,$52aeff78,$4cfb3fff
+ dc.l $01700000,$4d787210,$243c6666,$66663d7c
+ dc.l $0000ff7c,$44fc0000,$48ee7fff,$ff804c01
+ dc.l $240242ee,$ff7e48ee,$7fffffc0,$2d7c0000
+ dc.l $0006ff88,$61ff0000,$4d0c4a00,$66ff0000
+ dc.l $4d3252ae,$ff784cfb,$3fff0170,$00004d2e
+ dc.l $223c5555,$55557400,$76033d7c,$0000ff7c
+ dc.l $44fc0000,$48ee7fff,$ff804c01,$340242ee
+ dc.l $ff7e48ee,$7fffffc0,$2d7c0000,$0000ff88
+ dc.l $2d7cffff,$ffffff8c,$61ff0000,$4cb84a00
+ dc.l $66ff0000,$4cde52ae,$ff784cfb,$3fff0170
+ dc.l $00004cda,$223c4000,$00007400,$76043d7c
+ dc.l $0000ff7c,$44fc0000,$48ee7fff,$ff804c01
+ dc.l $340242ee,$ff7e48ee,$7fffffc0,$2d7c0000
+ dc.l $0001ff88,$2d7c0000,$0000ff8c,$61ff0000
+ dc.l $4c644a00,$66ff0000,$4c8a52ae,$ff784cfb
+ dc.l $3fff0170,$00004c86,$72ff7400,$76ff3d7c
+ dc.l $0008ff7c,$44fc0000,$48ee7fff,$ff804c01
+ dc.l $340242ee,$ff7e48ee,$7fffffc0,$2d7cffff
+ dc.l $fffeff88,$2d7c0000,$0001ff8c,$61ff0000
+ dc.l $4c144a00,$66ff0000,$4c3a52ae,$ff784cfb
+ dc.l $3fff0170,$00004c36,$223c8000,$00007400
+ dc.l $76ff3d7c,$0000ff7c,$44fc0000,$48ee7fff
+ dc.l $ff804c01,$3c0242ee,$ff7e48ee,$7fffffc0
+ dc.l $2d7c0000,$0000ff88,$2d7c8000,$0000ff8c
+ dc.l $61ff0000,$4bc04a00,$66ff0000,$4be652ae
+ dc.l $ff784cfb,$3fff0170,$00004be2,$223c8000
+ dc.l $00007400,$76013d7c,$0008ff7c,$44fc0000
+ dc.l $48ee7fff,$ff804c01,$3c0242ee,$ff7e48ee
+ dc.l $7fffffc0,$2d7cffff,$ffffff88,$2d7c8000
+ dc.l $0000ff8c,$61ff0000,$4b6c4a00,$66ff0000
+ dc.l $4b9252ae,$ff784cfb,$3fff0170,$00004b8e
+ dc.l $72017400,$263c8000,$00003d7c,$0008ff7c
+ dc.l $44fc0000,$48ee7fff,$ff804c01,$3c0242ee
+ dc.l $ff7e48ee,$7fffffc0,$2d7cffff,$ffffff88
+ dc.l $2d7c8000,$0000ff8c,$61ff0000,$4b184a00
+ dc.l $66ff0000,$4b3e222e,$ff784280,$4e75096d
+ dc.l $6f766570,$2e2e2e00,$52aeff78,$4cfb3fff
+ dc.l $01700000,$4b2841ee,$ff60303c,$aaaa4228
+ dc.l $00004228,$00023d7c,$001fff7c,$44fc001f
+ dc.l $48ee7fff,$ff800188,$000042ee,$ff7e48ee
+ dc.l $7fffffc0,$12280002,$e1491228,$0000b041
+ dc.l $66ff0000,$4ade61ff,$00004aaa,$4a0066ff
+ dc.l $00004ad0,$52aeff78,$4cfb3fff,$01700000
+ dc.l $4acc41ee,$ff64303c,$aaaa42a8,$fffc4290
+ dc.l $42a80004,$3d7c001f,$ff7c44fc,$001f48ee
+ dc.l $7fffff80,$01880000,$42eeff7e,$48ee7fff
+ dc.l $ffc04aa8,$fffc66ff,$00004a88,$4aa80004
+ dc.l $66ff0000,$4a7e0c90,$aa00aa00,$66ff0000
+ dc.l $4a7261ff,$00004a3e,$4a0066ff,$00004a64
+ dc.l $52aeff78,$4cfb3fff,$01700000,$4a6041ee
+ dc.l $ff60303c,$aaaa4228,$00004228,$00023d7c
+ dc.l $0000ff7c,$44fc0000,$48ee7fff,$ff800188
+ dc.l $000042ee,$ff7e48ee,$7fffffc0,$12280002
+ dc.l $e1491228,$0000b041,$66ff0000,$4a1661ff
+ dc.l $000049e2,$4a0066ff,$00004a08,$52aeff78
+ dc.l $4cfb3fff,$01700000,$4a0441ee,$ff60117c
+ dc.l $00aa0000,$117c00aa,$00023d7c,$001fff7c
+ dc.l $44fc001f,$48ee7fff,$ff800108,$000042ee
+ dc.l $ff7e48ee,$7fffffc0,$3d7caaaa,$ff82323c
+ dc.l $aaaab041,$66ff0000,$49ba61ff,$00004986
+ dc.l $4a0066ff,$000049ac,$52aeff78,$4cfb3fff
+ dc.l $01700000,$49a841ee,$ff60203c,$aaaaaaaa
+ dc.l $42280000,$42280002,$42280004,$42280006
+ dc.l $3d7c001f,$ff7c44fc,$001f48ee,$7fffff80
+ dc.l $01c80000,$42eeff7e,$48ee7fff,$ffc01228
+ dc.l $0006e189,$12280004,$e1891228,$0002e189
+ dc.l $12280000,$b08166ff,$00004948,$61ff0000
+ dc.l $49144a00,$66ff0000,$493a52ae,$ff784cfb
+ dc.l $3fff0170,$00004936,$41eeff64,$203caaaa
+ dc.l $aaaa42a8,$fffc4290,$42a80004,$42a80008
+ dc.l $3d7c001f,$ff7c44fc,$001f48ee,$7fffff80
+ dc.l $01c80000,$42eeff7e,$48ee7fff,$ffc04aa8
+ dc.l $fffc66ff,$000048ec,$4aa80008,$66ff0000
+ dc.l $48e20c90,$aa00aa00,$66ff0000,$48d60ca8
+ dc.l $aa00aa00,$000466ff,$000048c8,$61ff0000
+ dc.l $48944a00,$66ff0000,$48ba52ae,$ff784cfb
+ dc.l $3fff0170,$000048b6,$41eeff60,$117c00aa
+ dc.l $0000117c,$00aa0002,$117c00aa,$0004117c
+ dc.l $00aa0006,$3d7c001f,$ff7c44fc,$001f48ee
+ dc.l $7fffff80,$01480000,$42eeff7e,$48ee7fff
+ dc.l $ffc02d7c,$aaaaaaaa,$ff80223c,$aaaaaaaa
+ dc.l $b08166ff,$0000485c,$61ff0000,$48284a00
+ dc.l $66ff0000,$484e52ae,$ff784cfb,$3fff0170
+ dc.l $0000484a,$41eeff60,$3e3caaaa,$42280000
+ dc.l $42280002,$3d7c001f,$ff7c44fc,$001f48ee
+ dc.l $7fffff80,$0f880000,$42eeff7e,$48ee7fff
+ dc.l $ffc01228,$0002e149,$12280000,$be4166ff
+ dc.l $00004800,$61ff0000,$47cc4a00,$66ff0000
+ dc.l $47f252ae,$ff784cfb,$3fff0170,$000047ee
+ dc.l $41eeff60,$117c00aa,$0000117c,$00aa0002
+ dc.l $3d7c001f,$ff7c44fc,$001f48ee,$7fffff80
+ dc.l $0f080000,$42eeff7e,$48ee7fff,$ffc03d7c
+ dc.l $aaaaff9e,$323caaaa,$be4166ff,$000047a4
+ dc.l $61ff0000,$47704a00,$66ff0000,$479652ae
+ dc.l $ff784cfb,$3fff0170,$00004792,$41eeff60
+ dc.l $303caaaa,$42280000,$42280002,$3d7c001f
+ dc.l $ff7c44fc,$001f48ee,$7fffff80,$01880000
+ dc.l $42eeff7e,$48ee7fff,$ffc01228,$0002e149
+ dc.l $12280000,$b04166ff,$00004748,$61ff0000
+ dc.l $47144a00,$66ff0000,$473a52ae,$ff784cfb
+ dc.l $3fff0170,$00004736,$41eeff60,$303caaaa
+ dc.l $42280008,$4228000a,$3d7c001f,$ff7c44fc
+ dc.l $001f48ee,$7fffff80,$01880008,$42eeff7e
+ dc.l $48ee7fff,$ffc01228,$000ae149,$12280008
+ dc.l $b04166ff,$000046ec,$61ff0000,$46b84a00
+ dc.l $66ff0000,$46de52ae,$ff784cfb,$3fff0170
+ dc.l $000046da,$41eeff60,$117c00aa,$0008117c
+ dc.l $00aa000a,$3d7c001f,$ff7c44fc,$001f48ee
+ dc.l $7fffff80,$01080008,$42eeff7e,$48ee7fff
+ dc.l $ffc03d7c,$aaaaff82,$323caaaa,$b04166ff
+ dc.l $00004690,$61ff0000,$465c4a00,$66ff0000
+ dc.l $468252ae,$ff784cfb,$3fff0170,$0000467e
+ dc.l $41eeff60,$203caaaa,$aaaa4228,$00084228
+ dc.l $000a4228,$000c4228,$000e3d7c,$001fff7c
+ dc.l $44fc001f,$48ee7fff,$ff8001c8,$000842ee
+ dc.l $ff7e48ee,$7fffffc0,$1228000e,$e1891228
+ dc.l $000ce189,$1228000a,$e1891228,$0008b081
+ dc.l $66ff0000,$461e61ff,$000045ea,$4a0066ff
+ dc.l $00004610,$52aeff78,$4cfb3fff,$01700000
+ dc.l $460c41ee,$ff60117c,$00aa0008,$117c00aa
+ dc.l $000a117c,$00aa000c,$117c00aa,$000e3d7c
+ dc.l $001fff7c,$44fc001f,$48ee7fff,$ff800148
+ dc.l $000842ee,$ff7e48ee,$7fffffc0,$2d7caaaa
+ dc.l $aaaaff80,$223caaaa,$aaaab081,$66ff0000
+ dc.l $45b261ff,$0000457e,$4a0066ff,$000045a4
+ dc.l $52aeff78,$4cfb3fff,$01700000,$45a041ee
+ dc.l $ff68303c,$aaaa4228,$fff84228,$fffa3d7c
+ dc.l $001fff7c,$44fc001f,$48ee7fff,$ff800188
+ dc.l $fff842ee,$ff7e48ee,$7fffffc0,$1228fffa
+ dc.l $e1491228,$fff8b041,$66ff0000,$455661ff
+ dc.l $00004522,$4a0066ff,$00004548,$52aeff78
+ dc.l $4cfb3fff,$01700000,$454441ee,$ff68117c
+ dc.l $00aafff8,$117c00aa,$fffa3d7c,$001fff7c
+ dc.l $44fc001f,$48ee7fff,$ff800108,$fff842ee
+ dc.l $ff7e48ee,$7fffffc0,$3d7caaaa,$ff82323c
+ dc.l $aaaab041,$66ff0000,$44fa61ff,$000044c6
+ dc.l $4a0066ff,$000044ec,$52aeff78,$4cfb3fff
+ dc.l $01700000,$44e841ee,$ff68203c,$aaaaaaaa
+ dc.l $4228fff8,$4228fffa,$4228fffc,$42280000
+ dc.l $3d7c001f,$ff7c44fc,$001f48ee,$7fffff80
+ dc.l $01c8fff8,$42eeff7e,$48ee7fff,$ffc01228
+ dc.l $fffee189,$1228fffc,$e1891228,$fffae189
+ dc.l $1228fff8,$b08166ff,$00004488,$61ff0000
+ dc.l $44544a00,$66ff0000,$447a52ae,$ff784cfb
+ dc.l $3fff0170,$00004476,$41eeff68,$117c00aa
+ dc.l $fff8117c,$00aafffa,$117c00aa,$fffc117c
+ dc.l $00aa0000,$3d7c001f,$ff7c44fc,$001f48ee
+ dc.l $7fffff80,$0148fff8,$42eeff7e,$48ee7fff
+ dc.l $ffc02d7c,$aaaaaaaa,$ff80223c,$aaaaaaaa
+ dc.l $b08166ff,$0000441c,$61ff0000,$43e84a00
+ dc.l $66ff0000,$440e222e,$ff784280,$4e750936
+ dc.l $342d6269,$74206469,$76696465,$2e2e2e00
+ dc.l $52aeff78,$52aeff78,$4cfb3fff,$01700000
+ dc.l $43ec7201,$74007600,$3d7c0014,$ff7c44fc
+ dc.l $001f48ee,$7fffff80,$4c413402,$42eeff7e
+ dc.l $48ee7fff,$ffc061ff,$0000438a,$4a0066ff
+ dc.l $000043b0,$52aeff78,$4cfb3fff,$01700000
+ dc.l $43ac223c,$44444444,$7400263c,$55555555
+ dc.l $3d7c0010,$ff7c44fc,$001f48ee,$7fffff80
+ dc.l $4c413402,$42eeff7e,$48ee7fff,$ffc02d7c
+ dc.l $11111111,$ff882d7c,$00000001,$ff8c61ff
+ dc.l $00004332,$4a0066ff,$00004358,$52aeff78
+ dc.l $4cfb3fff,$01700000,$4354223c,$55555555
+ dc.l $7400263c,$44444444,$3d7c0014,$ff7c44fc
+ dc.l $001f48ee,$7fffff80,$4c413402,$42eeff7e
+ dc.l $48ee7fff,$ffc02d7c,$44444444,$ff882d7c
+ dc.l $00000000,$ff8c61ff,$000042da,$4a0066ff
+ dc.l $00004300,$52aeff78,$4cfb3fff,$01700000
+ dc.l $42fc223c,$11111111,$243c4444,$4444263c
+ dc.l $44444444,$3d7c001e,$ff7c44fc,$001d48ee
+ dc.l $7fffff80,$4c413402,$42eeff7e,$48ee7fff
+ dc.l $ffc061ff,$0000428e,$4a0066ff,$000042b4
+ dc.l $52aeff78,$4cfb3fff,$01700000,$42b072fe
+ dc.l $74017602,$3d7c001e,$ff7c44fc,$001d48ee
+ dc.l $7fffff80,$4c413c02,$42eeff7e,$48ee7fff
+ dc.l $ffc061ff,$0000424e,$4a0066ff,$00004274
+ dc.l $52aeff78,$4cfb3fff,$01700000,$427072fe
+ dc.l $74017600,$3d7c0018,$ff7c44fc,$001d48ee
+ dc.l $7fffff80,$4c413c02,$42eeff7e,$48ee7fff
+ dc.l $ffc02d7c,$00000000,$ff882d7c,$80000000
+ dc.l $ff8c61ff,$000041fe,$4a0066ff,$00004224
+ dc.l $52aeff78,$4cfb3fff,$01700000,$42207202
+ dc.l $74017600,$3d7c001e,$ff7c44fc,$001d48ee
+ dc.l $7fffff80,$4c413c02,$42eeff7e,$48ee7fff
+ dc.l $ffc061ff,$000041be,$4a0066ff,$000041e4
+ dc.l $52aeff78,$4cfb3fff,$01700000,$41e072ff
+ dc.l $74fe76ff,$3d7c0008,$ff7c44fc,$000048ee
+ dc.l $7fffff80,$4c413402,$42eeff7e,$48ee7fff
+ dc.l $ffc061ff,$0000417e,$4a0066ff,$000041a4
+ dc.l $52aeff78,$4cfb3fff,$01700000,$41a072ff
+ dc.l $74fe76ff,$3d7c0008,$ff7c44fc,$000048ee
+ dc.l $7fffff80,$4c7c2402,$ffffffff,$42eeff7e
+ dc.l $48ee7fff,$ffc02d7c,$ffffffff,$ff8861ff
+ dc.l $00004132,$4a0066ff,$00004158,$52aeff78
+ dc.l $4cfb3fff,$01700000,$4154223c,$0000ffff
+ dc.l $7401263c,$55555555,$3d7c0000,$ff7c44fc
+ dc.l $000048ee,$7fffff80,$4c413402,$42eeff7e
+ dc.l $48ee7fff,$ffc02d7c,$0000aaab,$ff882d7c
+ dc.l $00015556,$ff8c61ff,$000040da,$4a0066ff
+ dc.l $00004100,$222eff78,$42804e75,$09636173
+ dc.l $2e2e2e00,$52aeff78,$4cfb3fff,$01700000
+ dc.l $40ec41ee,$ff6130bc,$aaaa323c,$aaaa343c
+ dc.l $bbbb3d7c,$0014ff7c,$44fc0010,$48ee7fff
+ dc.l $ff800cd0,$008142ee,$ff7e3610,$3d7cbbbb
+ dc.l $ff8e48ee,$7fffffc0,$61ff0000,$40784a00
+ dc.l $66ff0000,$409e52ae,$ff784cfb,$3fff0170
+ dc.l $0000409a,$41eeff61,$30bceeee,$323caaaa
+ dc.l $343cbbbb,$3d7c0000,$ff7c44fc,$000048ee
+ dc.l $7fffff80,$0cd00081,$42eeff7e,$36103d7c
+ dc.l $eeeeff86,$3d7ceeee,$ff8e48ee,$7fffffc0
+ dc.l $61ff0000,$40204a00,$66ff0000,$404652ae
+ dc.l $ff784cfb,$3fff0170,$00004042,$41eeff62
+ dc.l $20bcaaaa,$aaaa223c,$aaaaaaaa,$243cbbbb
+ dc.l $bbbb3d7c,$0004ff7c,$44fc0000,$48ee7fff
+ dc.l $ff800ed0,$008142ee,$ff7e2610,$2d7cbbbb
+ dc.l $bbbbff8c,$48ee7fff,$ffc061ff,$00003fc6
+ dc.l $4a0066ff,$00003fec,$52aeff78,$4cfb3fff
+ dc.l $01700000,$3fe841ee,$ff6220bc,$eeeeeeee
+ dc.l $223caaaa,$aaaa243c,$bbbbbbbb,$3d7c0000
+ dc.l $ff7c44fc,$000048ee,$7fffff80,$0ed00081
+ dc.l $42eeff7e,$26102d7c,$eeeeeeee,$ff842d7c
+ dc.l $eeeeeeee,$ff8c48ee,$7fffffc0,$61ff0000
+ dc.l $3f644a00,$66ff0000,$3f8a52ae,$ff784cfb
+ dc.l $3fff0170,$00003f86,$41eeff61,$20bcaaaa
+ dc.l $aaaa223c,$aaaaaaaa,$243cbbbb,$bbbb3d7c
+ dc.l $0004ff7c,$44fc0000,$48ee7fff,$ff800ed0
+ dc.l $008142ee,$ff7e2610,$2d7cbbbb,$bbbbff8c
+ dc.l $48ee7fff,$ffc061ff,$00003f0a,$4a0066ff
+ dc.l $00003f30,$52aeff78,$4cfb3fff,$01700000
+ dc.l $3f2c41ee,$ff6120bc,$7fffffff,$223c8000
+ dc.l $0000243c,$bbbbbbbb,$3d7c001b,$ff7c44fc
+ dc.l $001048ee,$7fffff80,$0ed00081,$42eeff7e
+ dc.l $26102d7c,$7fffffff,$ff842d7c,$7fffffff
+ dc.l $ff8c48ee,$7fffffc0,$61ff0000,$3ea84a00
+ dc.l $66ff0000,$3ece222e,$ff784280,$4e750963
+ dc.l $6173322e,$2e2e0000,$52aeff78,$4cfb3fff
+ dc.l $01700000,$3eb841ee,$ff6043ee,$ff6420bc
+ dc.l $aaaaaaaa,$22bcbbbb,$bbbb223c,$aaaaaaaa
+ dc.l $243cbbbb,$bbbb263c,$cccccccc,$283cdddd
+ dc.l $dddd3d7c,$0014ff7c,$44fc0010,$48ee7fff
+ dc.l $ff800efc,$80c19102,$42eeff7e,$2a102c11
+ dc.l $2d7ccccc,$ccccff94,$2d7cdddd,$ddddff98
+ dc.l $48ee7fff,$ffc061ff,$00003e1a,$4a0066ff
+ dc.l $00003e40,$52aeff78,$4cfb3fff,$01700000
+ dc.l $3e3c41ee,$ff6143ee,$ff6520bc,$aaaaaaaa
+ dc.l $22bcbbbb,$bbbb223c,$aaaaaaaa,$243cbbbb
+ dc.l $bbbb263c,$cccccccc,$283cdddd,$dddd3d7c
+ dc.l $0014ff7c,$44fc0010,$48ee7fff,$ff800efc
+ dc.l $80c19102,$42eeff7e,$2a102c11,$2d7ccccc
+ dc.l $ccccff94,$2d7cdddd,$ddddff98,$48ee7fff
+ dc.l $ffc061ff,$00003d9e,$4a0066ff,$00003dc4
+ dc.l $52aeff78,$4cfb3fff,$01700000,$3dc041ee
+ dc.l $ff6243ee,$ff6620bc,$aaaaaaaa,$22bcbbbb
+ dc.l $bbbb223c,$aaaaaaaa,$243cbbbb,$bbbb263c
+ dc.l $cccccccc,$283cdddd,$dddd3d7c,$0014ff7c
+ dc.l $44fc0010,$48ee7fff,$ff800efc,$80c19102
+ dc.l $42eeff7e,$2a102c11,$2d7ccccc,$ccccff94
+ dc.l $2d7cdddd,$ddddff98,$48ee7fff,$ffc061ff
+ dc.l $00003d22,$4a0066ff,$00003d48,$52aeff78
+ dc.l $4cfb3fff,$01700000,$3d4441ee,$ff6043ee
+ dc.l $ff6420bc,$eeeeeeee,$22bcbbbb,$bbbb223c
+ dc.l $aaaaaaaa,$243cbbbb,$bbbb263c,$cccccccc
+ dc.l $283cdddd,$dddd3d7c,$0000ff7c,$44fc0000
+ dc.l $48ee7fff,$ff800efc,$80c19102,$42eeff7e
+ dc.l $2a102c11,$2d7ceeee,$eeeeff84,$2d7cbbbb
+ dc.l $bbbbff88,$2d7ceeee,$eeeeff94,$2d7cbbbb
+ dc.l $bbbbff98,$48ee7fff,$ffc061ff,$00003c96
+ dc.l $4a0066ff,$00003cbc,$52aeff78,$4cfb3fff
+ dc.l $01700000,$3cb841ee,$ff6143ee,$ff6520bc
+ dc.l $eeeeeeee,$22bcbbbb,$bbbb223c,$aaaaaaaa
+ dc.l $243cbbbb,$bbbb263c,$cccccccc,$283cdddd
+ dc.l $dddd3d7c,$0000ff7c,$44fc0000,$48ee7fff
+ dc.l $ff800efc,$80c19102,$42eeff7e,$2a102c11
+ dc.l $2d7ceeee,$eeeeff84,$2d7cbbbb,$bbbbff88
+ dc.l $2d7ceeee,$eeeeff94,$2d7cbbbb,$bbbbff98
+ dc.l $48ee7fff,$ffc061ff,$00003c0a,$4a0066ff
+ dc.l $00003c30,$52aeff78,$4cfb3fff,$01700000
+ dc.l $3c2c41ee,$ff6243ee,$ff6620bc,$eeeeeeee
+ dc.l $22bcbbbb,$bbbb223c,$aaaaaaaa,$243cbbbb
+ dc.l $bbbb263c,$cccccccc,$283cdddd,$dddd3d7c
+ dc.l $0000ff7c,$44fc0000,$48ee7fff,$ff800efc
+ dc.l $80c19102,$42eeff7e,$2a102c11,$2d7ceeee
+ dc.l $eeeeff84,$2d7cbbbb,$bbbbff88,$2d7ceeee
+ dc.l $eeeeff94,$2d7cbbbb,$bbbbff98,$48ee7fff
+ dc.l $ffc061ff,$00003b7e,$4a0066ff,$00003ba4
+ dc.l $52aeff78,$4cfb3fff,$01700000,$3ba041ee
+ dc.l $ff6043ee,$ff6420bc,$aaaaaaaa,$22bceeee
+ dc.l $eeee223c,$aaaaaaaa,$243cbbbb,$bbbb263c
+ dc.l $cccccccc,$283cdddd,$dddd3d7c,$0000ff7c
+ dc.l $44fc0000,$48ee7fff,$ff800efc,$80c19102
+ dc.l $42eeff7e,$2a102c11,$2d7caaaa,$aaaaff84
+ dc.l $2d7ceeee,$eeeeff88,$2d7caaaa,$aaaaff94
+ dc.l $2d7ceeee,$eeeeff98,$48ee7fff,$ffc061ff
+ dc.l $00003af2,$4a0066ff,$00003b18,$52aeff78
+ dc.l $4cfb3fff,$01700000,$3b1441ee,$ff6143ee
+ dc.l $ff6520bc,$aaaaaaaa,$22bceeee,$eeee223c
+ dc.l $aaaaaaaa,$243cbbbb,$bbbb263c,$cccccccc
+ dc.l $283cdddd,$dddd3d7c,$0000ff7c,$44fc0000
+ dc.l $48ee7fff,$ff800efc,$80c19102,$42eeff7e
+ dc.l $2a102c11,$2d7caaaa,$aaaaff84,$2d7ceeee
+ dc.l $eeeeff88,$2d7caaaa,$aaaaff94,$2d7ceeee
+ dc.l $eeeeff98,$48ee7fff,$ffc061ff,$00003a66
+ dc.l $4a0066ff,$00003a8c,$52aeff78,$4cfb3fff
+ dc.l $01700000,$3a8841ee,$ff6243ee,$ff6620bc
+ dc.l $aaaaaaaa,$22bc7fff,$ffff223c,$aaaaaaaa
+ dc.l $243c8000,$0000263c,$cccccccc,$283cdddd
+ dc.l $dddd3d7c,$000bff7c,$44fc0000,$48ee7fff
+ dc.l $ff800efc,$80c19102,$42eeff7e,$2a102c11
+ dc.l $2d7caaaa,$aaaaff84,$2d7c7fff,$ffffff88
+ dc.l $2d7caaaa,$aaaaff94,$2d7c7fff,$ffffff98
+ dc.l $48ee7fff,$ffc061ff,$000039da,$4a0066ff
+ dc.l $00003a00,$52aeff78,$4cfb3fff,$01700000
+ dc.l $39fc41ee,$ff6043ee,$ff6430bc,$aaaa32bc
+ dc.l $bbbb323c,$aaaa343c,$bbbb363c,$cccc383c
+ dc.l $dddd3d7c,$0014ff7c,$44fc0010,$48ee7fff
+ dc.l $ff800cfc,$80c19102,$42eeff7e,$3a103c11
+ dc.l $3d7ccccc,$ff963d7c,$ddddff9a,$48ee7fff
+ dc.l $ffc061ff,$0000396e,$4a0066ff,$00003994
+ dc.l $52aeff78,$4cfb3fff,$01700000,$399041ee
+ dc.l $ff6143ee,$ff6530bc,$aaaa32bc,$bbbb323c
+ dc.l $aaaa343c,$bbbb363c,$cccc383c,$dddd3d7c
+ dc.l $0004ff7c,$44fc0000,$48ee7fff,$ff800cfc
+ dc.l $80c19102,$42eeff7e,$3a103c11,$3d7ccccc
+ dc.l $ff963d7c,$ddddff9a,$48ee7fff,$ffc061ff
+ dc.l $00003902,$4a0066ff,$00003928,$52aeff78
+ dc.l $4cfb3fff,$01700000,$392441ee,$ff6043ee
+ dc.l $ff6430bc,$eeee32bc,$bbbb323c,$aaaa343c
+ dc.l $bbbb363c,$cccc383c,$dddd3d7c,$0000ff7c
+ dc.l $44fc0000,$48ee7fff,$ff800cfc,$80c19102
+ dc.l $42eeff7e,$3a103c11,$3d7ceeee,$ff863d7c
+ dc.l $bbbbff8a,$3d7ceeee,$ff963d7c,$bbbbff9a
+ dc.l $48ee7fff,$ffc061ff,$0000388a,$4a0066ff
+ dc.l $000038b0,$52aeff78,$4cfb3fff,$01700000
+ dc.l $38ac41ee,$ff6143ee,$ff6530bc,$eeee32bc
+ dc.l $bbbb323c,$aaaa343c,$bbbb363c,$cccc383c
+ dc.l $dddd3d7c,$0000ff7c,$44fc0000,$48ee7fff
+ dc.l $ff800cfc,$80c19102,$42eeff7e,$3a103c11
+ dc.l $3d7ceeee,$ff863d7c,$bbbbff8a,$3d7ceeee
+ dc.l $ff963d7c,$bbbbff9a,$48ee7fff,$ffc061ff
+ dc.l $00003812,$4a0066ff,$00003838,$52aeff78
+ dc.l $4cfb3fff,$01700000,$383441ee,$ff6043ee
+ dc.l $ff6430bc,$aaaa32bc,$eeee323c,$aaaa343c
+ dc.l $bbbb363c,$cccc383c,$dddd3d7c,$0000ff7c
+ dc.l $44fc0000,$48ee7fff,$ff800cfc,$80c19102
+ dc.l $42eeff7e,$3a103c11,$3d7caaaa,$ff863d7c
+ dc.l $eeeeff8a,$3d7caaaa,$ff963d7c,$eeeeff9a
+ dc.l $48ee7fff,$ffc061ff,$0000379a,$4a0066ff
+ dc.l $000037c0,$52aeff78,$4cfb3fff,$01700000
+ dc.l $37bc41ee,$ff6143ee,$ff6530bc,$aaaa32bc
+ dc.l $7fff323c,$aaaa343c,$8000363c,$cccc383c
+ dc.l $dddd3d7c,$001bff7c,$44fc0010,$48ee7fff
+ dc.l $ff800cfc,$80c19102,$42eeff7e,$3a103c11
+ dc.l $3d7caaaa,$ff863d7c,$7fffff8a,$3d7caaaa
+ dc.l $ff963d7c,$7fffff9a,$48ee7fff,$ffc061ff
+ dc.l $00003722,$4a0066ff,$00003748,$222eff78
+ dc.l $42804e75,$09636d70,$322c6368,$6b322e2e
+ dc.l $2e0051fc,$52aeff78,$4cfb3fff,$01700000
+ dc.l $372c3d7c,$2040ff60,$223c1111,$11203d7c
+ dc.l $0004ff7c,$44fc0000,$48ee7fff,$ff8000ee
+ dc.l $1000ff60,$42eeff7e,$48ee7fff,$ffc061ff
+ dc.l $000036c2,$4a0066ff,$000036e8,$52aeff78
+ dc.l $4cfb3fff,$01700000,$36e43d7c,$2040ff60
+ dc.l $227c0000,$00403d7c,$0004ff7c,$44fc0000
+ dc.l $48ee7fff,$ff8000ee,$9000ff60,$42eeff7e
+ dc.l $48ee7fff,$ffc061ff,$0000367a,$4a0066ff
+ dc.l $000036a0,$52aeff78,$4cfb3fff,$01700000
+ dc.l $369c3d7c,$2040ff60,$223c1111,$11303d7c
+ dc.l $0000ff7c,$44fc0000,$48ee7fff,$ff8000ee
+ dc.l $1800ff60,$42eeff7e,$48ee7fff,$ffc061ff
+ dc.l $00003632,$4a0066ff,$00003658,$52aeff78
+ dc.l $4cfb3fff,$01700000,$36543d7c,$2040ff60
+ dc.l $227c0000,$00103d7c,$0001ff7c,$44fc0000
+ dc.l $48ee7fff,$ff8000ee,$9000ff60,$42eeff7e
+ dc.l $48ee7fff,$ffc061ff,$000035ea,$4a0066ff
+ dc.l $00003610,$52aeff78,$4cfb3fff,$01700000
+ dc.l $360c3d7c,$2040ff60,$223c1111,$11503d7c
+ dc.l $0001ff7c,$44fc0000,$48ee7fff,$ff8000ee
+ dc.l $1000ff60,$42eeff7e,$48ee7fff,$ffc061ff
+ dc.l $000035a2,$4a0066ff,$000035c8,$52aeff78
+ dc.l $4cfb3fff,$01700000,$35c43d7c,$2040ff60
+ dc.l $227c0000,$00903d7c,$0001ff7c,$44fc0000
+ dc.l $48ee7fff,$ff8000ee,$9000ff60,$42eeff7e
+ dc.l $48ee7fff,$ffc061ff,$0000355a,$4a0066ff
+ dc.l $00003580,$52aeff78,$4cfb3fff,$01700000
+ dc.l $357c2d7c,$2000a000,$ff60223c,$11112000
+ dc.l $3d7c0004,$ff7c44fc,$000048ee,$7fffff80
+ dc.l $02ee1000,$ff6042ee,$ff7e48ee,$7fffffc0
+ dc.l $61ff0000,$35104a00,$66ff0000,$353652ae
+ dc.l $ff784cfb,$3fff0170,$00003532,$2d7c2000
+ dc.l $a000ff60,$227cffff,$a0003d7c,$0004ff7c
+ dc.l $44fc0000,$48ee7fff,$ff8002ee,$9000ff60
+ dc.l $42eeff7e,$48ee7fff,$ffc061ff,$000034c6
+ dc.l $4a0066ff,$000034ec,$52aeff78,$4cfb3fff
+ dc.l $01700000,$34e82d7c,$2000a000,$ff60223c
+ dc.l $11113000,$3d7c0000,$ff7c44fc,$000048ee
+ dc.l $7fffff80,$02ee1800,$ff6042ee,$ff7e48ee
+ dc.l $7fffffc0,$61ff0000,$347c4a00,$66ff0000
+ dc.l $34a252ae,$ff784cfb,$3fff0170,$0000349e
+ dc.l $2d7c2000,$a000ff60,$227cffff,$90003d7c
+ dc.l $0000ff7c,$44fc0000,$48ee7fff,$ff8002ee
+ dc.l $9000ff60,$42eeff7e,$48ee7fff,$ffc061ff
+ dc.l $00003432,$4a0066ff,$00003458,$52aeff78
+ dc.l $4cfb3fff,$01700000,$34542d7c,$2000a000
+ dc.l $ff60223c,$11111000,$3d7c0001,$ff7c44fc
+ dc.l $000048ee,$7fffff80,$02ee1000,$ff6042ee
+ dc.l $ff7e48ee,$7fffffc0,$61ff0000,$33e84a00
+ dc.l $66ff0000,$340e52ae,$ff784cfb,$3fff0170
+ dc.l $0000340a,$2d7c2000,$a000ff60,$227cffff
+ dc.l $b0003d7c,$0001ff7c,$44fc0000,$48ee7fff
+ dc.l $ff8002ee,$9000ff60,$42eeff7e,$48ee7fff
+ dc.l $ffc061ff,$0000339e,$4a0066ff,$000033c4
+ dc.l $52aeff78,$4cfb3fff,$01700000,$33c02d7c
+ dc.l $a0000000,$ff602d7c,$c0000000,$ff64223c
+ dc.l $a0000000,$3d7c000c,$ff7c44fc,$000848ee
+ dc.l $7fffff80,$04ee1000,$ff6042ee,$ff7e48ee
+ dc.l $7fffffc0,$61ff0000,$334c4a00,$66ff0000
+ dc.l $337252ae,$ff784cfb,$3fff0170,$0000336e
+ dc.l $2d7ca000,$0000ff60,$2d7cc000,$0000ff64
+ dc.l $227cc000,$00003d7c,$000cff7c,$44fc0008
+ dc.l $48ee7fff,$ff8004ee,$9000ff60,$42eeff7e
+ dc.l $48ee7fff,$ffc061ff,$000032fa,$4a0066ff
+ dc.l $00003320,$52aeff78,$4cfb3fff,$01700000
+ dc.l $331c2d7c,$a0000000,$ff602d7c,$c0000000
+ dc.l $ff64223c,$b0000000,$3d7c0008,$ff7c44fc
+ dc.l $000848ee,$7fffff80,$04ee1800,$ff6042ee
+ dc.l $ff7e48ee,$7fffffc0,$61ff0000,$32a84a00
+ dc.l $66ff0000,$32ce52ae,$ff784cfb,$3fff0170
+ dc.l $000032ca,$2d7ca000,$0000ff60,$2d7cc000
+ dc.l $0000ff64,$227c1000,$00003d7c,$0009ff7c
+ dc.l $44fc0008,$48ee7fff,$ff8004ee,$9000ff60
+ dc.l $42eeff7e,$48ee7fff,$ffc061ff,$00003256
+ dc.l $4a0066ff,$0000327c,$52aeff78,$4cfb3fff
+ dc.l $01700000,$32782d7c,$a0000000,$ff602d7c
+ dc.l $c0000000,$ff64223c,$90000000,$3d7c0009
+ dc.l $ff7c44fc,$000848ee,$7fffff80,$04ee1000
+ dc.l $ff6042ee,$ff7e48ee,$7fffffc0,$61ff0000
+ dc.l $32044a00,$66ff0000,$322a52ae,$ff784cfb
+ dc.l $3fff0170,$00003226,$2d7ca000,$0000ff60
+ dc.l $2d7cc000,$0000ff64,$227cd000,$00003d7c
+ dc.l $0009ff7c,$44fc0008,$48ee7fff,$ff8004ee
+ dc.l $9000ff60,$42eeff7e,$48ee7fff,$ffc061ff
+ dc.l $000031b2,$4a0066ff,$000031d8,$52aeff78
+ dc.l $4cfb3fff,$01700000,$31d43d7c,$a040ff60
+ dc.l $223c1111,$11a03d7c,$0004ff7c,$44fc0000
+ dc.l $48ee7fff,$ff8000ee,$1000ff60,$42eeff7e
+ dc.l $48ee7fff,$ffc061ff,$0000316a,$4a0066ff
+ dc.l $00003190,$52aeff78,$4cfb3fff,$01700000
+ dc.l $318c3d7c,$a040ff60,$227c0000,$00403d7c
+ dc.l $0004ff7c,$44fc0000,$48ee7fff,$ff8000ee
+ dc.l $9800ff60,$42eeff7e,$48ee7fff,$ffc061ff
+ dc.l $00003122,$4a0066ff,$00003148,$52aeff78
+ dc.l $4cfb3fff,$01700000,$31443d7c,$a040ff60
+ dc.l $223c1111,$11b03d7c,$0000ff7c,$44fc0000
+ dc.l $48ee7fff,$ff8000ee,$1000ff60,$42eeff7e
+ dc.l $48ee7fff,$ffc061ff,$000030da,$4a0066ff
+ dc.l $00003100,$52aeff78,$4cfb3fff,$01700000
+ dc.l $30fc3d7c,$a040ff60,$227c0000,$00103d7c
+ dc.l $0000ff7c,$44fc0000,$48ee7fff,$ff8000ee
+ dc.l $9000ff60,$42eeff7e,$48ee7fff,$ffc061ff
+ dc.l $00003092,$4a0066ff,$000030b8,$52aeff78
+ dc.l $4cfb3fff,$01700000,$30b43d7c,$a040ff60
+ dc.l $223c1111,$11903d7c,$0001ff7c,$44fc0000
+ dc.l $48ee7fff,$ff8000ee,$1000ff60,$42eeff7e
+ dc.l $48ee7fff,$ffc061ff,$0000304a,$4a0066ff
+ dc.l $00003070,$52aeff78,$4cfb3fff,$01700000
+ dc.l $306c3d7c,$a040ff60,$227c0000,$00503d7c
+ dc.l $0001ff7c,$44fc0000,$48ee7fff,$ff8000ee
+ dc.l $9000ff60,$42eeff7e,$48ee7fff,$ffc061ff
+ dc.l $00003002,$4a0066ff,$00003028,$52aeff78
+ dc.l $4cfb3fff,$01700000,$30243d7c,$a0c0ff60
+ dc.l $223c1111,$11a03d7c,$0004ff7c,$44fc0000
+ dc.l $48ee7fff,$ff8000ee,$1000ff60,$42eeff7e
+ dc.l $48ee7fff,$ffc061ff,$00002fba,$4a0066ff
+ dc.l $00002fe0,$52aeff78,$4cfb3fff,$01700000
+ dc.l $2fdc3d7c,$a0c0ff60,$227cffff,$ffc03d7c
+ dc.l $0004ff7c,$44fc0000,$48ee7fff,$ff8000ee
+ dc.l $9000ff60,$42eeff7e,$48ee7fff,$ffc061ff
+ dc.l $00002f72,$4a0066ff,$00002f98,$52aeff78
+ dc.l $4cfb3fff,$01700000,$2f943d7c,$a0c0ff60
+ dc.l $223c1111,$11b03d7c,$0000ff7c,$44fc0000
+ dc.l $48ee7fff,$ff8000ee,$1800ff60,$42eeff7e
+ dc.l $48ee7fff,$ffc061ff,$00002f2a,$4a0066ff
+ dc.l $00002f50,$52aeff78,$4cfb3fff,$01700000
+ dc.l $2f4c3d7c,$a0c0ff60,$227c1111,$11903d7c
+ dc.l $0001ff7c,$44fc0000,$48ee7fff,$ff8000ee
+ dc.l $9000ff60,$42eeff7e,$48ee7fff,$ffc061ff
+ dc.l $00002ee2,$4a0066ff,$00002f08,$52aeff78
+ dc.l $4cfb3fff,$01700000,$2f043d7c,$a0c0ff60
+ dc.l $223c1111,$11d03d7c,$0001ff7c,$44fc0000
+ dc.l $48ee7fff,$ff8000ee,$1000ff60,$42eeff7e
+ dc.l $48ee7fff,$ffc061ff,$00002e9a,$4a0066ff
+ dc.l $00002ec0,$52aeff78,$4cfb3fff,$01700000
+ dc.l $2ebc3d7c,$a0c0ff60,$227c0000,$00503d7c
+ dc.l $001bff7c,$44fc001f,$48ee7fff,$ff8000ee
+ dc.l $9000ff60,$42eeff7e,$48ee7fff,$ffc061ff
+ dc.l $00002e52,$4a0066ff,$00002e78,$222eff78
+ dc.l $42804e75,$09456666,$65637469,$76652061
+ dc.l $64647265,$73736573,$2e2e2e00,$52aeff78
+ dc.l $4cfb3fff,$01700000,$2e544282,$760241ee
+ dc.l $ff743d7c,$0000ff7c,$44fc0000,$48eeffff
+ dc.l $ff804c10,$340242ee,$ff7e48ee,$ffffffc0
+ dc.l $2d7c0000,$0004ff8c,$61ff0000,$2de84a00
+ dc.l $66ff0000,$2e0e52ae,$ff784cfb,$3fff0170
+ dc.l $00002e0a,$42827602,$41eeff74,$3d7c0000
+ dc.l $ff7c44fc,$000048ee,$ffffff80,$4c183402
+ dc.l $42eeff7e,$48eeffff,$ffc02d7c,$00000004
+ dc.l $ff8c41ee,$ff782d48,$ffa061ff,$00002d96
+ dc.l $4a0066ff,$00002dbc,$52aeff78,$4cfb3fff
+ dc.l $01700000,$2db84282,$760241ee,$ff783d7c
+ dc.l $0000ff7c,$44fc0000,$48eeffff,$ff804c20
+ dc.l $340242ee,$ff7e48ee,$ffffffc0,$2d7c0000
+ dc.l $0004ff8c,$41eeff74,$2d48ffa0,$61ff0000
+ dc.l $2d444a00,$66ff0000,$2d6a52ae,$ff784cfb
+ dc.l $3fff0170,$00002d66,$42827602,$41ee0f74
+ dc.l $3d7c0000,$ff7c44fc,$000048ee,$ffffff80
+ dc.l $4c283402,$f00042ee,$ff7e48ee,$ffffffc0
+ dc.l $2d7c0000,$0004ff8c,$61ff0000,$2cf84a00
+ dc.l $66ff0000,$2d1e52ae,$ff784cfb,$3fff0170
+ dc.l $00002d1a,$42827602,$41eeef74,$3d7c0000
+ dc.l $ff7c44fc,$000048ee,$ffffff80,$4c283402
+ dc.l $100042ee,$ff7e48ee,$ffffffc0,$2d7c0000
+ dc.l $0004ff8c,$61ff0000,$2cac4a00,$66ff0000
+ dc.l $2cd252ae,$ff7852ae,$ff7852ae,$ff784cfb
+ dc.l $3fff0170,$00002cc6,$42827602,$3d7c0000
+ dc.l $ff7c44fc,$000048ee,$ffffff80,$4c3c3402
+ dc.l $00000002,$42eeff7e,$48eeffff,$ffc02d7c
+ dc.l $00000004,$ff8c61ff,$00002c5a,$4a0066ff
+ dc.l $00002c80,$52aeff78,$60040000,$00024cfb
+ dc.l $3fff0170,$00002c76,$42827602,$3d7c0000
+ dc.l $ff7c44fc,$000048ee,$ffffff80,$4c3a3402
+ dc.l $ffda42ee,$ff7e48ee,$ffffffc0,$2d7c0000
+ dc.l $0004ff8c,$61ff0000,$2c0c4a00,$66ff0000
+ dc.l $2c3252ae,$ff784cfb,$3fff0170,$00002c2e
+ dc.l $42827602,$43eeff78,$3d7c0000,$ff7c44fc
+ dc.l $000048ee,$ffffff80,$4c213402,$42eeff7e
+ dc.l $48eeffff,$ffc02d7c,$00000004,$ff8c41ee
+ dc.l $ff742d48,$ffa461ff,$00002bba,$4a0066ff
+ dc.l $00002be0,$52aeff78,$4cfb3fff,$01700000
+ dc.l $2bdc4282,$760245ee,$ff783d7c,$0000ff7c
+ dc.l $44fc0000,$48eeffff,$ff804c22,$340242ee
+ dc.l $ff7e48ee,$ffffffc0,$2d7c0000,$0004ff8c
+ dc.l $41eeff74,$2d48ffa8,$61ff0000,$2b684a00
+ dc.l $66ff0000,$2b8e52ae,$ff784cfb,$3fff0170
+ dc.l $00002b8a,$42827602,$47eeff78,$3d7c0000
+ dc.l $ff7c44fc,$000048ee,$ffffff80,$4c233402
+ dc.l $42eeff7e,$48eeffff,$ffc02d7c,$00000004
+ dc.l $ff8c41ee,$ff742d48,$ffac61ff,$00002b16
+ dc.l $4a0066ff,$00002b3c,$52aeff78,$4cfb3fff
+ dc.l $01700000,$2b384282,$760249ee,$ff783d7c
+ dc.l $0000ff7c,$44fc0000,$48eeffff,$ff804c24
+ dc.l $340242ee,$ff7e48ee,$ffffffc0,$2d7c0000
+ dc.l $0004ff8c,$41eeff74,$2d48ffb0,$61ff0000
+ dc.l $2ac44a00,$66ff0000,$2aea52ae,$ff784cfb
+ dc.l $3fff0170,$00002ae6,$42827602,$4beeff78
+ dc.l $3d7c0000,$ff7c44fc,$000048ee,$ffffff80
+ dc.l $4c253402,$42eeff7e,$48eeffff,$ffc02d7c
+ dc.l $00000004,$ff8c41ee,$ff742d48,$ffb461ff
+ dc.l $00002a72,$4a0066ff,$00002a98,$52aeff78
+ dc.l $4cfb3fff,$01700000,$2a94224e,$42827602
+ dc.l $4de9ff78,$337c0000,$ff7c44fc,$000048e9
+ dc.l $ffffff80,$4c263402,$42e9ff7e,$48e9ffff
+ dc.l $ffc0237c,$00000004,$ff8c41e9,$ff742348
+ dc.l $ffb82c49,$61ff0000,$2a1c4a00,$66ff0000
+ dc.l $2a4252ae,$ff784cfb,$3fff0170,$00002a3e
+ dc.l $42827602,$204f4fee,$ff783d7c,$0000ff7c
+ dc.l $44fc0000,$48eeffff,$ff804c27,$340242ee
+ dc.l $ff7e48ee,$ffffffc0,$2d7c0000,$0004ff8c
+ dc.l $43eeff74,$2d49ffbc,$2e4861ff,$000029c6
+ dc.l $4a0066ff,$000029ec,$52aeff78,$4cfb3fff
+ dc.l $01700000,$29e84282,$760241ee,$ff7478f0
+ dc.l $3d7c0000,$ff7c44fc,$000048ee,$ffffff80
+ dc.l $4c303402,$401042ee,$ff7e48ee,$ffffffc0
+ dc.l $2d7c0000,$0004ff8c,$61ff0000,$29784a00
+ dc.l $66ff0000,$299e52ae,$ff784cfb,$3fff0170
+ dc.l $0000299a,$42827602,$41eeff74,$78f83d7c
+ dc.l $0000ff7c,$44fc0000,$48eeffff,$ff804c30
+ dc.l $34024210,$42eeff7e,$48eeffff,$ffc02d7c
+ dc.l $00000004,$ff8c61ff,$0000292a,$4a0066ff
+ dc.l $00002950,$52aeff78,$4cfb3fff,$01700000
+ dc.l $294c4282,$760241ee,$ff7478fc,$3d7c0000
+ dc.l $ff7c44fc,$000048ee,$ffffff80,$4c303402
+ dc.l $441042ee,$ff7e48ee,$ffffffc0,$2d7c0000
+ dc.l $0004ff8c,$61ff0000,$28dc4a00,$66ff0000
+ dc.l $290252ae,$ff784cfb,$3fff0170,$000028fe
+ dc.l $42827602,$41eeff74,$78fe3d7c,$0000ff7c
+ dc.l $44fc0000,$48eeffff,$ff804c30,$34024610
+ dc.l $42eeff7e,$48eeffff,$ffc02d7c,$00000004
+ dc.l $ff8c61ff,$0000288e,$4a0066ff,$000028b4
+ dc.l $52aeff78,$4cfb3fff,$01700000,$28b04282
+ dc.l $760241ee,$ff7478f0,$3d7c0000,$ff7c44fc
+ dc.l $000048ee,$ffffff80,$4c303402,$481042ee
+ dc.l $ff7e48ee,$ffffffc0,$2d7c0000,$0004ff8c
+ dc.l $61ff0000,$28404a00,$66ff0000,$286652ae
+ dc.l $ff784cfb,$3fff0170,$00002862,$42827602
+ dc.l $41eeff74,$78f83d7c,$0000ff7c,$44fc0000
+ dc.l $48eeffff,$ff804c30,$34024a10,$42eeff7e
+ dc.l $48eeffff,$ffc02d7c,$00000004,$ff8c61ff
+ dc.l $000027f2,$4a0066ff,$00002818,$52aeff78
+ dc.l $4cfb3fff,$01700000,$28144282,$760241ee
+ dc.l $ff7478fc,$3d7c0000,$ff7c44fc,$000048ee
+ dc.l $ffffff80,$4c303402,$4c1042ee,$ff7e48ee
+ dc.l $ffffffc0,$2d7c0000,$0004ff8c,$61ff0000
+ dc.l $27a44a00,$66ff0000,$27ca52ae,$ff784cfb
+ dc.l $3fff0170,$000027c6,$42827602,$41eeff74
+ dc.l $78fe3d7c,$0000ff7c,$44fc0000,$48eeffff
+ dc.l $ff804c30,$34024e10,$42eeff7e,$48eeffff
+ dc.l $ffc02d7c,$00000004,$ff8c61ff,$00002756
+ dc.l $4a0066ff,$0000277c,$52aeff78,$4cfb3fff
+ dc.l $01700000,$27784282,$760241ee,$ff74287c
+ dc.l $fffffffe,$3d7c0000,$ff7c44fc,$000048ee
+ dc.l $ffffff80,$4c303402,$ce1042ee,$ff7e48ee
+ dc.l $ffffffc0,$2d7c0000,$0004ff8c,$61ff0000
+ dc.l $27044a00,$66ff0000,$272a52ae,$ff784cfb
+ dc.l $3fff0170,$00002726,$42827602,$41eeff74
+ dc.l $287c0000,$00023d7c,$0000ff7c,$44fc0000
+ dc.l $48eeffff,$ff804c30,$3402cef0,$42eeff7e
+ dc.l $48eeffff,$ffc02d7c,$00000004,$ff8c61ff
+ dc.l $000026b2,$4a0066ff,$000026d8,$52aeff78
+ dc.l $4cfb3fff,$01700000,$26d44282,$760243ee
+ dc.l $ff7478f0,$3d7c0000,$ff7c44fc,$000048ee
+ dc.l $ffffff80,$4c313402,$401042ee,$ff7e48ee
+ dc.l $ffffffc0,$2d7c0000,$0004ff8c,$61ff0000
+ dc.l $26644a00,$66ff0000,$268a52ae,$ff784cfb
+ dc.l $3fff0170,$00002686,$42827602,$45eeff74
+ dc.l $78f03d7c,$0000ff7c,$44fc0000,$48eeffff
+ dc.l $ff804c32,$34024010,$42eeff7e,$48eeffff
+ dc.l $ffc02d7c,$00000004,$ff8c61ff,$00002616
+ dc.l $4a0066ff,$0000263c,$52aeff78,$4cfb3fff
+ dc.l $01700000,$26384282,$760247ee,$ff7478f0
+ dc.l $3d7c0000,$ff7c44fc,$000048ee,$ffffff80
+ dc.l $4c333402,$401042ee,$ff7e48ee,$ffffffc0
+ dc.l $2d7c0000,$0004ff8c,$61ff0000,$25c84a00
+ dc.l $66ff0000,$25ee52ae,$ff784cfb,$3fff0170
+ dc.l $000025ea,$42827602,$49eeff74,$78f03d7c
+ dc.l $0000ff7c,$44fc0000,$48eeffff,$ff804c34
+ dc.l $34024010,$42eeff7e,$48eeffff,$ffc02d7c
+ dc.l $00000004,$ff8c61ff,$0000257a,$4a0066ff
+ dc.l $000025a0,$52aeff78,$4cfb3fff,$01700000
+ dc.l $259c4282,$76024bee,$ff7478f0,$3d7c0000
+ dc.l $ff7c44fc,$000048ee,$ffffff80,$4c353402
+ dc.l $401042ee,$ff7e48ee,$ffffffc0,$2d7c0000
+ dc.l $0004ff8c,$61ff0000,$252c4a00,$66ff0000
+ dc.l $255252ae,$ff784cfb,$3fff0170,$0000254e
+ dc.l $224e4282,$76024de9,$ff7478f0,$337c0000
+ dc.l $ff7c44fc,$000048e9,$ffffff80,$4c363402
+ dc.l $401042e9,$ff7e48e9,$ffffffc0,$237c0000
+ dc.l $0004ff8c,$2c4961ff,$000024da,$4a0066ff
+ dc.l $00002500,$52aeff78,$4cfb3fff,$01700000
+ dc.l $24fc4282,$7602204f,$4feeff74,$78f03d7c
+ dc.l $0000ff7c,$44fc0000,$48eeffff,$ff804c37
+ dc.l $34024010,$42eeff7e,$48eeffff,$ffc02d7c
+ dc.l $00000004,$ff8c2e48,$61ff0000,$24884a00
+ dc.l $66ff0000,$24ae52ae,$ff784cfb,$3fff0170
+ dc.l $000024aa,$42827602,$43eeff74,$3d7c0000
+ dc.l $ff7c44fc,$000048ee,$ffffff80,$4c113402
+ dc.l $42eeff7e,$48eeffff,$ffc02d7c,$00000004
+ dc.l $ff8c61ff,$0000243e,$4a0066ff,$00002464
+ dc.l $52aeff78,$4cfb3fff,$01700000,$24604282
+ dc.l $760245ee,$ff743d7c,$0000ff7c,$44fc0000
+ dc.l $48eeffff,$ff804c12,$340242ee,$ff7e48ee
+ dc.l $ffffffc0,$2d7c0000,$0004ff8c,$61ff0000
+ dc.l $23f44a00,$66ff0000,$241a52ae,$ff784cfb
+ dc.l $3fff0170,$00002416,$42827602,$47eeff74
+ dc.l $3d7c0000,$ff7c44fc,$000048ee,$ffffff80
+ dc.l $4c133402,$42eeff7e,$48eeffff,$ffc02d7c
+ dc.l $00000004,$ff8c61ff,$000023aa,$4a0066ff
+ dc.l $000023d0,$52aeff78,$4cfb3fff,$01700000
+ dc.l $23cc4282,$760249ee,$ff743d7c,$0000ff7c
+ dc.l $44fc0000,$48eeffff,$ff804c14,$340242ee
+ dc.l $ff7e48ee,$ffffffc0,$2d7c0000,$0004ff8c
+ dc.l $61ff0000,$23604a00,$66ff0000,$238652ae
+ dc.l $ff784cfb,$3fff0170,$00002382,$42827602
+ dc.l $4beeff74,$3d7c0000,$ff7c44fc,$000048ee
+ dc.l $ffffff80,$4c153402,$42eeff7e,$48eeffff
+ dc.l $ffc02d7c,$00000004,$ff8c61ff,$00002316
+ dc.l $4a0066ff,$0000233c,$52aeff78,$4cfb3fff
+ dc.l $01700000,$2338224e,$42827602,$4de9ff74
+ dc.l $337c0000,$ff7c44fc,$000048e9,$ffffff80
+ dc.l $4c163402,$42e9ff7e,$48e9ffff,$ffc0237c
+ dc.l $00000004,$ff8c2c49,$61ff0000,$22c84a00
+ dc.l $66ff0000,$22ee52ae,$ff784cfb,$3fff0170
+ dc.l $000022ea,$42827602,$204f4fee,$ff743d7c
+ dc.l $0000ff7c,$44fc0000,$48eeffff,$ff804c17
+ dc.l $340242ee,$ff7e48ee,$ffffffc0,$2d7c0000
+ dc.l $0004ff8c,$2e4861ff,$0000227a,$4a0066ff
+ dc.l $000022a0,$52aeff78,$4cfb3fff,$01700000
+ dc.l $229c4282,$760243ee,$ff743d7c,$0000ff7c
+ dc.l $44fc0000,$48eeffff,$ff804c19,$340242ee
+ dc.l $ff7e48ee,$ffffffc0,$2d7c0000,$0004ff8c
+ dc.l $41eeff78,$2d48ffa4,$61ff0000,$22284a00
+ dc.l $66ff0000,$224e52ae,$ff784cfb,$3fff0170
+ dc.l $0000224a,$42827602,$45eeff74,$3d7c0000
+ dc.l $ff7c44fc,$000048ee,$ffffff80,$4c1a3402
+ dc.l $42eeff7e,$48eeffff,$ffc02d7c,$00000004
+ dc.l $ff8c41ee,$ff782d48,$ffa861ff,$000021d6
+ dc.l $4a0066ff,$000021fc,$52aeff78,$4cfb3fff
+ dc.l $01700000,$21f84282,$760247ee,$ff743d7c
+ dc.l $0000ff7c,$44fc0000,$48eeffff,$ff804c1b
+ dc.l $340242ee,$ff7e48ee,$ffffffc0,$2d7c0000
+ dc.l $0004ff8c,$41eeff78,$2d48ffac,$61ff0000
+ dc.l $21844a00,$66ff0000,$21aa52ae,$ff784cfb
+ dc.l $3fff0170,$000021a6,$42827602,$49eeff74
+ dc.l $3d7c0000,$ff7c44fc,$000048ee,$ffffff80
+ dc.l $4c1c3402,$42eeff7e,$48eeffff,$ffc02d7c
+ dc.l $00000004,$ff8c41ee,$ff782d48,$ffb061ff
+ dc.l $00002132,$4a0066ff,$00002158,$52aeff78
+ dc.l $4cfb3fff,$01700000,$21544282,$76024bee
+ dc.l $ff743d7c,$0000ff7c,$44fc0000,$48eeffff
+ dc.l $ff804c1d,$340242ee,$ff7e48ee,$ffffffc0
+ dc.l $2d7c0000,$0004ff8c,$41eeff78,$2d48ffb4
+ dc.l $61ff0000,$20e04a00,$66ff0000,$210652ae
+ dc.l $ff784cfb,$3fff0170,$00002102,$224e4282
+ dc.l $76024de9,$ff74337c,$0000ff7c,$44fc0000
+ dc.l $48e9ffff,$ff804c1e,$340242e9,$ff7e48e9
+ dc.l $ffffffc0,$237c0000,$0004ff8c,$41e9ff78
+ dc.l $2348ffb8,$2c4961ff,$0000208a,$4a0066ff
+ dc.l $000020b0,$52aeff78,$4cfb3fff,$01700000
+ dc.l $20ac4282,$7602204f,$4feeff74,$3d7c0000
+ dc.l $ff7c44fc,$000048ee,$ffffff80,$4c1f3402
+ dc.l $42eeff7e,$48eeffff,$ffc02d7c,$00000004
+ dc.l $ff8c43ee,$ff782d49,$ffbc2e48,$61ff0000
+ dc.l $20344a00,$66ff0000,$205a52ae,$ff784cfb
+ dc.l $3fff0170,$00002056,$42827602,$43eeef74
+ dc.l $3d7c0000,$ff7c44fc,$000048ee,$ffffff80
+ dc.l $4c293402,$100042ee,$ff7e48ee,$ffffffc0
+ dc.l $2d7c0000,$0004ff8c,$61ff0000,$1fe84a00
+ dc.l $66ff0000,$200e52ae,$ff784cfb,$3fff0170
+ dc.l $0000200a,$42827602,$45eeef74,$3d7c0000
+ dc.l $ff7c44fc,$000048ee,$ffffff80,$4c2a3402
+ dc.l $100042ee,$ff7e48ee,$ffffffc0,$2d7c0000
+ dc.l $0004ff8c,$61ff0000,$1f9c4a00,$66ff0000
+ dc.l $1fc252ae,$ff784cfb,$3fff0170,$00001fbe
+ dc.l $42827602,$47eeef74,$3d7c0000,$ff7c44fc
+ dc.l $000048ee,$ffffff80,$4c2b3402,$100042ee
+ dc.l $ff7e48ee,$ffffffc0,$2d7c0000,$0004ff8c
+ dc.l $61ff0000,$1f504a00,$66ff0000,$1f7652ae
+ dc.l $ff784cfb,$3fff0170,$00001f72,$42827602
+ dc.l $49eeef74,$3d7c0000,$ff7c44fc,$000048ee
+ dc.l $ffffff80,$4c2c3402,$100042ee,$ff7e48ee
+ dc.l $ffffffc0,$2d7c0000,$0004ff8c,$61ff0000
+ dc.l $1f044a00,$66ff0000,$1f2a52ae,$ff784cfb
+ dc.l $3fff0170,$00001f26,$42827602,$4beeef74
+ dc.l $3d7c0000,$ff7c44fc,$000048ee,$ffffff80
+ dc.l $4c2d3402,$100042ee,$ff7e48ee,$ffffffc0
+ dc.l $2d7c0000,$0004ff8c,$61ff0000,$1eb84a00
+ dc.l $66ff0000,$1ede52ae,$ff784cfb,$3fff0170
+ dc.l $00001eda,$224e4282,$76024de9,$ef74337c
+ dc.l $0000ff7c,$44fc0000,$48e9ffff,$ff804c2e
+ dc.l $34021000,$42e9ff7e,$48e9ffff,$ffc0237c
+ dc.l $00000004,$ff8c2c49,$61ff0000,$1e684a00
+ dc.l $66ff0000,$1e8e52ae,$ff784cfb,$3fff0170
+ dc.l $00001e8a,$42827602,$204f4fee,$ef743d7c
+ dc.l $0000ff7c,$44fc0000,$48eeffff,$ff804c2f
+ dc.l $34021000,$42eeff7e,$48eeffff,$ffc02d7c
+ dc.l $00000004,$ff8c2e48,$61ff0000,$1e184a00
+ dc.l $66ff0000,$1e3e52ae,$ff784cfb,$3fff0170
+ dc.l $00001e3a,$42827602,$41ee0f74,$3d7c0000
+ dc.l $ff7c44fc,$000048ee,$ffffff80,$4c283402
+ dc.l $f00042ee,$ff7e48ee,$ffffffc0,$2d7c0000
+ dc.l $0004ff8c,$61ff0000,$1dcc4a00,$66ff0000
+ dc.l $1df252ae,$ff786004,$00000002,$4cfb3fff
+ dc.l $01700000,$1de84282,$76023d7c,$0000ff7c
+ dc.l $44fc0000,$48eeffff,$ff804c3a,$3402ffda
+ dc.l $42eeff7e,$48eeffff,$ffc02d7c,$00000004
+ dc.l $ff8c61ff,$00001d7e,$4a0066ff,$00001da4
+ dc.l $52aeff78,$4cfb3fff,$01700000,$1da04282
+ dc.l $760247ee,$ff7478f0,$3d7c0000,$ff7c44fc
+ dc.l $000048ee,$ffffff80,$4c333402,$401042ee
+ dc.l $ff7e48ee,$ffffffc0,$2d7c0000,$0004ff8c
+ dc.l $61ff0000,$1d304a00,$66ff0000,$1d5652ae
+ dc.l $ff784cfb,$3fff0170,$00001d52,$42827602
+ dc.l $47eeff74,$78f83d7c,$0000ff7c,$44fc0000
+ dc.l $48eeffff,$ff804c33,$34024210,$42eeff7e
+ dc.l $48eeffff,$ffc02d7c,$00000004,$ff8c61ff
+ dc.l $00001ce2,$4a0066ff,$00001d08,$52aeff78
+ dc.l $4cfb3fff,$01700000,$1d044282,$760247ee
+ dc.l $ff7478fc,$3d7c0000,$ff7c44fc,$000048ee
+ dc.l $ffffff80,$4c333402,$441042ee,$ff7e48ee
+ dc.l $ffffffc0,$2d7c0000,$0004ff8c,$61ff0000
+ dc.l $1c944a00,$66ff0000,$1cba52ae,$ff784cfb
+ dc.l $3fff0170,$00001cb6,$42827602,$47eeff74
+ dc.l $78fe3d7c,$0000ff7c,$44fc0000,$48eeffff
+ dc.l $ff804c33,$34024610,$42eeff7e,$48eeffff
+ dc.l $ffc02d7c,$00000004,$ff8c61ff,$00001c46
+ dc.l $4a0066ff,$00001c6c,$52aeff78,$4cfb3fff
+ dc.l $01700000,$1c684282,$760247ee,$ff7478f0
+ dc.l $3d7c0000,$ff7c44fc,$000048ee,$ffffff80
+ dc.l $4c333402,$481042ee,$ff7e48ee,$ffffffc0
+ dc.l $2d7c0000,$0004ff8c,$61ff0000,$1bf84a00
+ dc.l $66ff0000,$1c1e52ae,$ff784cfb,$3fff0170
+ dc.l $00001c1a,$42827602,$47eeff74,$78f83d7c
+ dc.l $0000ff7c,$44fc0000,$48eeffff,$ff804c33
+ dc.l $34024a10,$42eeff7e,$48eeffff,$ffc02d7c
+ dc.l $00000004,$ff8c61ff,$00001baa,$4a0066ff
+ dc.l $00001bd0,$52aeff78,$4cfb3fff,$01700000
+ dc.l $1bcc4282,$760247ee,$ff7478fc,$3d7c0000
+ dc.l $ff7c44fc,$000048ee,$ffffff80,$4c333402
+ dc.l $4c1042ee,$ff7e48ee,$ffffffc0,$2d7c0000
+ dc.l $0004ff8c,$61ff0000,$1b5c4a00,$66ff0000
+ dc.l $1b8252ae,$ff784cfb,$3fff0170,$00001b7e
+ dc.l $42827602,$47eeff74,$78fe3d7c,$0000ff7c
+ dc.l $44fc0000,$48eeffff,$ff804c33,$34024e10
+ dc.l $42eeff7e,$48eeffff,$ffc02d7c,$00000004
+ dc.l $ff8c61ff,$00001b0e,$4a0066ff,$00001b34
+ dc.l $52aeff78,$4cfb3fff,$01700000,$1b304282
+ dc.l $760247ee,$ff74287c,$00000002,$3d7c0000
+ dc.l $ff7c44fc,$000048ee,$ffffff80,$4c333402
+ dc.l $cef042ee,$ff7e48ee,$ffffffc0,$2d7c0000
+ dc.l $0004ff8c,$61ff0000,$1abc4a00,$66ff0000
+ dc.l $1ae252ae,$ff784cfb,$3fff0170,$00001ade
+ dc.l $42827602,$47eeff74,$287c0000,$00023d7c
+ dc.l $0000ff7c,$44fc0000,$48eeffff,$ff804c33
+ dc.l $34020750,$42eeff7e,$48eeffff,$ffc02d7c
+ dc.l $00000004,$ff8c61ff,$00001a6a,$4a0066ff
+ dc.l $00001a90,$52aeff78,$4cfb3fff,$01700000
+ dc.l $1a8c4282,$760247ee,$ff74284b,$d9fc0000
+ dc.l $00103d7c,$0000ff7c,$44fc0000,$48eeffff
+ dc.l $ff804c30,$3402c9a0,$fff042ee,$ff7e48ee
+ dc.l $ffffffc0,$2d7c0000,$0004ff8c,$61ff0000
+ dc.l $1a144a00,$66ff0000,$1a3a52ae,$ff784cfb
+ dc.l $3fff0170,$00001a36,$42827602,$47eeff74
+ dc.l $287c0000,$00023d7c,$0000ff7c,$44fc0000
+ dc.l $48eeffff,$ff804c33,$3402cef0,$42eeff7e
+ dc.l $48eeffff,$ffc02d7c,$00000004,$ff8c61ff
+ dc.l $000019c2,$4a0066ff,$000019e8,$52aeff78
+ dc.l $60040000,$00024cfb,$3fff0170,$000019de
+ dc.l $42827602,$47eeff74,$78f03d7c,$0000ff7c
+ dc.l $44fc0000,$48eeffff,$ff804c3b,$340240e4
+ dc.l $42eeff7e,$48eeffff,$ffc02d7c,$00000004
+ dc.l $ff8c61ff,$0000196e,$4a0066ff,$00001994
+ dc.l $52aeff78,$60040000,$00024cfb,$3fff0170
+ dc.l $0000198a,$42827602,$41eeff74,$78f83d7c
+ dc.l $0000ff7c,$44fc0000,$48ee7fff,$ff804c3b
+ dc.l $340242e4,$42eeff7e,$48ee7fff,$ffc02d7c
+ dc.l $00000004,$ff8c61ff,$0000191a,$4a0066ff
+ dc.l $00001940,$52aeff78,$60040000,$00024cfb
+ dc.l $3fff0170,$00001936,$42827602,$41eeff74
+ dc.l $78fc3d7c,$0000ff7c,$44fc0000,$48ee7fff
+ dc.l $ff804c3b,$340244e4,$42eeff7e,$48ee7fff
+ dc.l $ffc02d7c,$00000004,$ff8c61ff,$000018c6
+ dc.l $4a0066ff,$000018ec,$52aeff78,$60040000
+ dc.l $00024cfb,$3fff0170,$000018e2,$42827602
+ dc.l $41eeff74,$78fe3d7c,$0000ff7c,$44fc0000
+ dc.l $48ee7fff,$ff804c3b,$340246e4,$42eeff7e
+ dc.l $48ee7fff,$ffc02d7c,$00000004,$ff8c61ff
+ dc.l $00001872,$4a0066ff,$00001898,$52aeff78
+ dc.l $60040000,$00024cfb,$3fff0170,$0000188e
+ dc.l $42827602,$41eeff74,$78f03d7c,$0000ff7c
+ dc.l $44fc0000,$48ee7fff,$ff804c3b,$340248e4
+ dc.l $42eeff7e,$48ee7fff,$ffc02d7c,$00000004
+ dc.l $ff8c61ff,$0000181e,$4a0066ff,$00001844
+ dc.l $52aeff78,$60040000,$00024cfb,$3fff0170
+ dc.l $0000183a,$42827602,$41eeff74,$78f83d7c
+ dc.l $0000ff7c,$44fc0000,$48ee7fff,$ff804c3b
+ dc.l $34024ae4,$42eeff7e,$48ee7fff,$ffc02d7c
+ dc.l $00000004,$ff8c61ff,$000017ca,$4a0066ff
+ dc.l $000017f0,$52aeff78,$60040000,$00024cfb
+ dc.l $3fff0170,$000017e6,$42827602,$41eeff74
+ dc.l $78fc3d7c,$0000ff7c,$44fc0000,$48ee7fff
+ dc.l $ff804c3b,$34024ce4,$42eeff7e,$48ee7fff
+ dc.l $ffc02d7c,$00000004,$ff8c61ff,$00001776
+ dc.l $4a0066ff,$0000179c,$52aeff78,$60040000
+ dc.l $00024cfb,$3fff0170,$00001792,$42827602
+ dc.l $41eeff74,$78fe3d7c,$0000ff7c,$44fc0000
+ dc.l $48ee7fff,$ff804c3b,$34024ee4,$42eeff7e
+ dc.l $48ee7fff,$ffc02d7c,$00000004,$ff8c61ff
+ dc.l $00001722,$4a0066ff,$00001748,$52aeff78
+ dc.l $60040000,$00024cfb,$3fff0170,$0000173e
+ dc.l $42827602,$47eeff74,$287cffff,$fffe3d7c
+ dc.l $0000ff7c,$44fc0000,$48eeffff,$ff804c3b
+ dc.l $3402cee0,$42eeff7e,$48eeffff,$ffc02d7c
+ dc.l $00000004,$ff8c61ff,$000016ca,$4a0066ff
+ dc.l $000016f0,$52aeff78,$60040000,$00024cfb
+ dc.l $3fff0170,$000016e6,$42827602,$47eeff74
+ dc.l $287c0000,$00023d7c,$0000ff7c,$44fc0000
+ dc.l $48eeffff,$ff804c3b,$34020760,$ffd042ee
+ dc.l $ff7e48ee,$ffffffc0,$2d7c0000,$0004ff8c
+ dc.l $61ff0000,$16704a00,$66ff0000,$169652ae
+ dc.l $ff7852ae,$ff784cfb,$3fff0170,$0000168e
+ dc.l $42827602,$47f9ffff,$ff74287c,$00000002
+ dc.l $3d7c0000,$ff7c44fc,$000048ee,$ffffff80
+ dc.l $4c3b3402,$cf300000,$000a42ee,$ff7e48ee
+ dc.l $ffffffc0,$2d7c0000,$0004ff8c,$60040000
+ dc.l $000261ff,$0000160e,$4a0066ff,$00001634
+ dc.l $52aeff78,$60040000,$00024cfb,$3fff0170
+ dc.l $0000162a,$42827602,$43eeff74,$78f03d7c
+ dc.l $0000ff7c,$44fc0000,$48eeffff,$ff804c3b
+ dc.l $340240e4,$42eeff7e,$48eeffff,$ffc02d7c
+ dc.l $00000004,$ff8c61ff,$000015ba,$4a0066ff
+ dc.l $000015e0,$52aeff78,$60040000,$00024cfb
+ dc.l $3fff0170,$000015d6,$42827602,$41eeff74
+ dc.l $78f83d7c,$0000ff7c,$44fc0000,$48eeffff
+ dc.l $ff804c3b,$340242e4,$42eeff7e,$48eeffff
+ dc.l $ffc02d7c,$00000004,$ff8c61ff,$00001566
+ dc.l $4a0066ff,$0000158c,$52aeff78,$60040000
+ dc.l $00024cfb,$3fff0170,$00001582,$42827602
+ dc.l $41eeff74,$78fc3d7c,$0000ff7c,$44fc0000
+ dc.l $48eeffff,$ff804c3b,$340244e4,$42eeff7e
+ dc.l $48eeffff,$ffc02d7c,$00000004,$ff8c61ff
+ dc.l $00001512,$4a0066ff,$00001538,$52aeff78
+ dc.l $60040000,$00024cfb,$3fff0170,$0000152e
+ dc.l $42827602,$41eeff74,$78fe3d7c,$0000ff7c
+ dc.l $44fc0000,$48eeffff,$ff804c3b,$340246e4
+ dc.l $42eeff7e,$48eeffff,$ffc02d7c,$00000004
+ dc.l $ff8c61ff,$000014be,$4a0066ff,$000014e4
+ dc.l $52aeff78,$60040000,$00024cfb,$3fff0170
+ dc.l $000014da,$42827602,$41eeff74,$78f03d7c
+ dc.l $0000ff7c,$44fc0000,$48eeffff,$ff804c3b
+ dc.l $340248e4,$42eeff7e,$48eeffff,$ffc02d7c
+ dc.l $00000004,$ff8c61ff,$0000146a,$4a0066ff
+ dc.l $00001490,$52aeff78,$60040000,$00024cfb
+ dc.l $3fff0170,$00001486,$42827602,$41eeff74
+ dc.l $78f83d7c,$0000ff7c,$44fc0000,$48eeffff
+ dc.l $ff804c3b,$34024ae4,$42eeff7e,$48eeffff
+ dc.l $ffc02d7c,$00000004,$ff8c61ff,$00001416
+ dc.l $4a0066ff,$0000143c,$52aeff78,$60040000
+ dc.l $00024cfb,$3fff0170,$00001432,$42827602
+ dc.l $41eeff74,$78fc3d7c,$0000ff7c,$44fc0000
+ dc.l $48eeffff,$ff804c3b,$34024ce4,$42eeff7e
+ dc.l $48eeffff,$ffc02d7c,$00000004,$ff8c61ff
+ dc.l $000013c2,$4a0066ff,$000013e8,$52aeff78
+ dc.l $60040000,$00024cfb,$3fff0170,$000013de
+ dc.l $42827602,$41eeff74,$78fe3d7c,$0000ff7c
+ dc.l $44fc0000,$48eeffff,$ff804c3b,$34024ee4
+ dc.l $42eeff7e,$48eeffff,$ffc02d7c,$00000004
+ dc.l $ff8c61ff,$0000136e,$4a0066ff,$00001394
+ dc.l $52aeff78,$4cfb3fff,$01700000,$13904282
+ dc.l $760241ee,$ff7478fe,$3d7c0000,$ff7c44fc
+ dc.l $000048ee,$ffffff80,$4c3b3402,$4e2642ee
+ dc.l $ff7e48ee,$ffffffc0,$2d7c0000,$0004ff8c
+ dc.l $60040000,$000261ff,$0000131a,$4a0066ff
+ dc.l $00001340,$52aeff78,$4cfb3fff,$01700000
+ dc.l $133c4282,$760247ee,$ef7449ee,$ff70288b
+ dc.l $78f03d7c,$0000ff7c,$44fc0000,$48eeffff
+ dc.l $ff804c34,$34024122,$00101000,$42eeff7e
+ dc.l $48eeffff,$ffc02d7c,$00000004,$ff8c61ff
+ dc.l $000012c2,$4a0066ff,$000012e8,$52aeff78
+ dc.l $4cfb3fff,$01700000,$12e44282,$760247ee
+ dc.l $ef7449ee,$ff70288b,$78f83d7c,$0000ff7c
+ dc.l $44fc0000,$48eeffff,$ff804c34,$34024322
+ dc.l $00101000,$42eeff7e,$48eeffff,$ffc02d7c
+ dc.l $00000004,$ff8c61ff,$0000126a,$4a0066ff
+ dc.l $00001290,$52aeff78,$4cfb3fff,$01700000
+ dc.l $128c4282,$760247ee,$ef7449ee,$ff70288b
+ dc.l $78fc3d7c,$0000ff7c,$44fc0000,$48eeffff
+ dc.l $ff804c34,$34024522,$00101000,$42eeff7e
+ dc.l $48eeffff,$ffc02d7c,$00000004,$ff8c61ff
+ dc.l $00001212,$4a0066ff,$00001238,$52aeff78
+ dc.l $4cfb3fff,$01700000,$12344282,$760247ee
+ dc.l $ef7449ee,$ff70288b,$78fe3d7c,$0000ff7c
+ dc.l $44fc0000,$48eeffff,$ff804c34,$34024722
+ dc.l $00101000,$42eeff7e,$48eeffff,$ffc02d7c
+ dc.l $00000004,$ff8c61ff,$000011ba,$4a0066ff
+ dc.l $000011e0,$52aeff78,$4cfb3fff,$01700000
+ dc.l $11dc4282,$760247ee,$ef7449ee,$ff70288b
+ dc.l $78f03d7c,$0000ff7c,$44fc0000,$48eeffff
+ dc.l $ff804c34,$34024922,$00101000,$42eeff7e
+ dc.l $48eeffff,$ffc02d7c,$00000004,$ff8c61ff
+ dc.l $00001162,$4a0066ff,$00001188,$52aeff78
+ dc.l $4cfb3fff,$01700000,$11844282,$760247ee
+ dc.l $ef7449ee,$ff70288b,$78f83d7c,$0000ff7c
+ dc.l $44fc0000,$48eeffff,$ff804c34,$34024b22
+ dc.l $00101000,$42eeff7e,$48eeffff,$ffc02d7c
+ dc.l $00000004,$ff8c61ff,$0000110a,$4a0066ff
+ dc.l $00001130,$52aeff78,$4cfb3fff,$01700000
+ dc.l $112c4282,$760247ee,$ef7449ee,$ff70288b
+ dc.l $78fc3d7c,$0000ff7c,$44fc0000,$48eeffff
+ dc.l $ff804c34,$34024d22,$00101000,$42eeff7e
+ dc.l $48eeffff,$ffc02d7c,$00000004,$ff8c61ff
+ dc.l $000010b2,$4a0066ff,$000010d8,$52aeff78
+ dc.l $4cfb3fff,$01700000,$10d44282,$760247ee
+ dc.l $ef7449ee,$ff70288b,$78fe3d7c,$0000ff7c
+ dc.l $44fc0000,$48eeffff,$ff804c34,$34024f22
+ dc.l $00101000,$42eeff7e,$48eeffff,$ffc02d7c
+ dc.l $00000004,$ff8c61ff,$0000105a,$4a0066ff
+ dc.l $00001080,$52aeff78,$4cfb3fff,$01700000
+ dc.l $107c4282,$760247ee,$ef7449ee,$ff70288b
+ dc.l $78fe3d7c,$0000ff7c,$44fc0000,$48eeffff
+ dc.l $ff804c34,$34024f33,$00000010,$00001000
+ dc.l $42eeff7e,$48eeffff,$ffc02d7c,$00000004
+ dc.l $ff8c61ff,$00000ffe,$4a0066ff,$00001024
+ dc.l $52aeff78,$4cfb3fff,$01700000,$10204282
+ dc.l $760247ee,$ef7449ee,$ff70288b,$78fe3d7c
+ dc.l $0000ff7c,$44fc0000,$48eeffff,$ff804c34
+ dc.l $34020753,$00001000,$42eeff7e,$48eeffff
+ dc.l $ffc02d7c,$00000004,$ff8c61ff,$00000fa6
+ dc.l $4a0066ff,$00000fcc,$52aeff78,$4cfb3fff
+ dc.l $01700000,$0fc84282,$760247ee,$ef7449ee
+ dc.l $ff70288b,$78fe3d7c,$0000ff7c,$44fc0000
+ dc.l $48eeffff,$ff804c34,$34020753,$00001000
+ dc.l $42eeff7e,$48eeffff,$ffc02d7c,$00000004
+ dc.l $ff8c61ff,$00000f4e,$4a0066ff,$00000f74
+ dc.l $52aeff78,$4cfb3fff,$01700000,$0f704282
+ dc.l $760247ee,$ef7449ee,$ff70288b,$78f0d88c
+ dc.l $3d7c0000,$ff7c44fc,$000048ee,$ffffff80
+ dc.l $4c303402,$49b30000,$00100000,$100042ee
+ dc.l $ff7e48ee,$ffffffc0,$2d7c0000,$0004ff8c
+ dc.l $61ff0000,$0ef04a00,$66ff0000,$0f1652ae
+ dc.l $ff7852ae,$ff784cfb,$3fff0170,$00000f0e
+ dc.l $224e4282,$760247e9,$0f7449e9,$ff70288b
+ dc.l $2c7cffff,$fffe337c,$0000ff7c,$44fc0000
+ dc.l $48e9ffff,$ff804c34,$3402ef22,$0010f000
+ dc.l $42e9ff7e,$48e9ffff,$ffc0237c,$00000004
+ dc.l $ff8c2c49,$61ff0000,$0e8c4a00,$66ff0000
+ dc.l $0eb252ae,$ff784cfb,$3fff0170,$00000eae
+ dc.l $224e4282,$760247e9,$0f7449e9,$ff70288b
+ dc.l $2c7c0000,$0002337c,$0000ff7c,$44fc0000
+ dc.l $48e9ffff,$ff804c34,$3402ef22,$fff0f000
+ dc.l $42e9ff7e,$48e9ffff,$ffc0237c,$00000004
+ dc.l $ff8c2c49,$61ff0000,$0e2c4a00,$66ff0000
+ dc.l $0e5252ae,$ff784cfb,$3fff0170,$00000e4e
+ dc.l $42827602,$47eeff54,$49eeff70,$288b99fc
+ dc.l $00000010,$78103d7c,$0000ff7c,$44fc0000
+ dc.l $48eeffff,$ff804c34,$34024126,$00100010
+ dc.l $42eeff7e,$48eeffff,$ffc02d7c,$00000004
+ dc.l $ff8c61ff,$00000dce,$4a0066ff,$00000df4
+ dc.l $52aeff78,$4cfb3fff,$01700000,$0df04282
+ dc.l $760247ee,$ff5449ee,$ff70288b,$99fc0000
+ dc.l $00107808,$3d7c0000,$ff7c44fc,$000048ee
+ dc.l $ffffff80,$4c343402,$43260010,$001042ee
+ dc.l $ff7e48ee,$ffffffc0,$2d7c0000,$0004ff8c
+ dc.l $61ff0000,$0d704a00,$66ff0000,$0d9652ae
+ dc.l $ff784cfb,$3fff0170,$00000d92,$42827602
+ dc.l $47eeff54,$49eeff70,$288b99fc,$00000010
+ dc.l $78043d7c,$0000ff7c,$44fc0000,$48eeffff
+ dc.l $ff804c34,$34024526,$00100010,$42eeff7e
+ dc.l $48eeffff,$ffc02d7c,$00000004,$ff8c61ff
+ dc.l $00000d12,$4a0066ff,$00000d38,$52aeff78
+ dc.l $4cfb3fff,$01700000,$0d344282,$760247ee
+ dc.l $ff5449ee,$ff70288b,$99fc0000,$00107802
+ dc.l $3d7c0000,$ff7c44fc,$000048ee,$ffffff80
+ dc.l $4c343402,$47260010,$001042ee,$ff7e48ee
+ dc.l $ffffffc0,$2d7c0000,$0004ff8c,$61ff0000
+ dc.l $0cb44a00,$66ff0000,$0cda52ae,$ff784cfb
+ dc.l $3fff0170,$00000cd6,$42827602,$47eeff54
+ dc.l $49eeff70,$288b99fc,$00000010,$78103d7c
+ dc.l $0000ff7c,$44fc0000,$48eeffff,$ff804c34
+ dc.l $34024926,$00100010,$42eeff7e,$48eeffff
+ dc.l $ffc02d7c,$00000004,$ff8c61ff,$00000c56
+ dc.l $4a0066ff,$00000c7c,$52aeff78,$4cfb3fff
+ dc.l $01700000,$0c784282,$760247ee,$ff5449ee
+ dc.l $ff70288b,$99fc0000,$00107808,$3d7c0000
+ dc.l $ff7c44fc,$000048ee,$ffffff80,$4c343402
+ dc.l $43260010,$001042ee,$ff7e48ee,$ffffffc0
+ dc.l $2d7c0000,$0004ff8c,$61ff0000,$0bf84a00
+ dc.l $66ff0000,$0c1e52ae,$ff784cfb,$3fff0170
+ dc.l $00000c1a,$42827602,$47eeff54,$49eeff70
+ dc.l $288b99fc,$00000010,$78043d7c,$0000ff7c
+ dc.l $44fc0000,$48eeffff,$ff804c34,$34024d26
+ dc.l $00100010,$42eeff7e,$48eeffff,$ffc02d7c
+ dc.l $00000004,$ff8c61ff,$00000b9a,$4a0066ff
+ dc.l $00000bc0,$52aeff78,$4cfb3fff,$01700000
+ dc.l $0bbc4282,$760247ee,$ff5449ee,$ff70288b
+ dc.l $99fc0000,$00107802,$3d7c0000,$ff7c44fc
+ dc.l $000048ee,$ffffff80,$4c343402,$4f260010
+ dc.l $001042ee,$ff7e48ee,$ffffffc0,$2d7c0000
+ dc.l $0004ff8c,$61ff0000,$0b3c4a00,$66ff0000
+ dc.l $0b6252ae,$ff784cfb,$3fff0170,$00000b5e
+ dc.l $42827602,$47eeff54,$49eeff70,$288b99fc
+ dc.l $00000010,$78023d7c,$0000ff7c,$44fc0000
+ dc.l $48eeffff,$ff804c34,$34024f37,$00000010
+ dc.l $00000010,$42eeff7e,$48eeffff,$ffc02d7c
+ dc.l $00000004,$ff8c61ff,$00000ada,$4a0066ff
+ dc.l $00000b00,$52aeff78,$4cfb3fff,$01700000
+ dc.l $0afc4282,$760247ee,$ff5449ee,$ff70288b
+ dc.l $78023d7c,$0000ff7c,$44fc0000,$48eeffff
+ dc.l $ff804c34,$34020753,$00000020,$42eeff7e
+ dc.l $48eeffff,$ffc02d7c,$00000004,$ff8c61ff
+ dc.l $00000a82,$4a0066ff,$00000aa8,$52aeff78
+ dc.l $4cfb3fff,$01700000,$0aa4204f,$42827602
+ dc.l $47eeff54,$4feeff70,$2e8b7820,$3d7c0000
+ dc.l $ff7c44fc,$000048ee,$ffffff80,$4c373402
+ dc.l $491542ee,$ff7e48ee,$ffffffc0,$2d7c0000
+ dc.l $0004ff8c,$2e4861ff,$00000a2a,$4a0066ff
+ dc.l $00000a50,$52aeff78,$52aeff78,$4cfb3fff
+ dc.l $01700000,$0a48224e,$42827602,$47e9ff74
+ dc.l $4de9ff70,$2c8bddfc,$00000010,$2a7cffff
+ dc.l $fffe337c,$0000ff7c,$44fc0000,$48e9ffff
+ dc.l $ff804c36,$3402df27,$fff00000,$001042e9
+ dc.l $ff7e48e9,$ffffffc0,$237c0000,$0004ff8c
+ dc.l $2c4961ff,$000009be,$4a0066ff,$000009e4
+ dc.l $222eff78,$42804e75,$52aeff78,$4cfb3fff
+ dc.l $01700000,$09d84282,$760247fa,$ef7449fa
+ dc.l $ff70288b,$78f03d7c,$0000ff7c,$44fc0000
+ dc.l $48eeffff,$ff804c3b,$34024122,$ff801000
+ dc.l $42eeff7e,$48eeffff,$ffc02d7c,$00000004
+ dc.l $ff8c61ff,$0000095e,$4a0066ff,$00000984
+ dc.l $52aeff78,$4cfb3fff,$01700000,$09804282
+ dc.l $760247fa,$ef7449fa,$ff70288b,$78f83d7c
+ dc.l $0000ff7c,$44fc0000,$48eeffff,$ff804c3b
+ dc.l $34024322,$ff801000,$42eeff7e,$48eeffff
+ dc.l $ffc02d7c,$00000004,$ff8c61ff,$00000906
+ dc.l $4a0066ff,$0000092c,$52aeff78,$4cfb3fff
+ dc.l $01700000,$09284282,$760247fa,$ef7449fa
+ dc.l $ff70288b,$78fc3d7c,$0000ff7c,$44fc0000
+ dc.l $48eeffff,$ff804c3b,$34024522,$ff801000
+ dc.l $42eeff7e,$48eeffff,$ffc02d7c,$00000004
+ dc.l $ff8c61ff,$000008ae,$4a0066ff,$000008d4
+ dc.l $52aeff78,$4cfb3fff,$01700000,$08d04282
+ dc.l $760247fa,$ef7449fa,$ff70288b,$78fe3d7c
+ dc.l $0000ff7c,$44fc0000,$48eeffff,$ff804c3b
+ dc.l $34024722,$ff801000,$42eeff7e,$48eeffff
+ dc.l $ffc02d7c,$00000004,$ff8c61ff,$00000856
+ dc.l $4a0066ff,$0000087c,$52aeff78,$4cfb3fff
+ dc.l $01700000,$08784282,$760247fa,$ef7449fa
+ dc.l $ff70288b,$78f03d7c,$0000ff7c,$44fc0000
+ dc.l $48eeffff,$ff804c3b,$34024922,$ff801000
+ dc.l $42eeff7e,$48eeffff,$ffc02d7c,$00000004
+ dc.l $ff8c61ff,$000007fe,$4a0066ff,$00000824
+ dc.l $52aeff78,$4cfb3fff,$01700000,$08204282
+ dc.l $760247fa,$ef7449fa,$ff70288b,$78f83d7c
+ dc.l $0000ff7c,$44fc0000,$48eeffff,$ff804c3b
+ dc.l $34024b22,$ff801000,$42eeff7e,$48eeffff
+ dc.l $ffc02d7c,$00000004,$ff8c61ff,$000007a6
+ dc.l $4a0066ff,$000007cc,$52aeff78,$4cfb3fff
+ dc.l $01700000,$07c84282,$760247fa,$ef7449fa
+ dc.l $ff70288b,$78fc3d7c,$0000ff7c,$44fc0000
+ dc.l $48eeffff,$ff804c3b,$34024d22,$ff801000
+ dc.l $42eeff7e,$48eeffff,$ffc02d7c,$00000004
+ dc.l $ff8c61ff,$0000074e,$4a0066ff,$00000774
+ dc.l $52aeff78,$4cfb3fff,$01700000,$07704282
+ dc.l $760247fa,$ef7449fa,$ff70288b,$78fe3d7c
+ dc.l $0000ff7c,$44fc0000,$48eeffff,$ff804c3b
+ dc.l $34024f22,$ff801000,$42eeff7e,$48eeffff
+ dc.l $ffc02d7c,$00000004,$ff8c61ff,$000006f6
+ dc.l $4a0066ff,$0000071c,$52aeff78,$4cfb3fff
+ dc.l $01700000,$07184282,$760247fa,$ef7449fa
+ dc.l $ff70288b,$78fe3d7c,$0000ff7c,$44fc0000
+ dc.l $48eeffff,$ff804c3b,$34024f33,$ffffff80
+ dc.l $00001000,$42eeff7e,$48eeffff,$ffc02d7c
+ dc.l $00000004,$ff8c61ff,$0000069a,$4a0066ff
+ dc.l $000006c0,$52aeff78,$4cfb3fff,$01700000
+ dc.l $06bc4282,$760247fa,$ef7449fa,$ff70288b
+ dc.l $78fe3d7c,$0000ff7c,$44fc0000,$48eeffff
+ dc.l $ff804c3b,$34020773,$ffffff70,$00001000
+ dc.l $42eeff7e,$48eeffff,$ffc02d7c,$00000004
+ dc.l $ff8c61ff,$0000063e,$4a0066ff,$00000664
+ dc.l $52aeff78,$4cfb3fff,$01700000,$06604282
+ dc.l $760247fa,$ef7449fa,$ff70288b,$280c3d7c
+ dc.l $0000ff7c,$44fc0000,$48eeffff,$ff804c30
+ dc.l $34024993,$00001000,$42eeff7e,$48eeffff
+ dc.l $ffc02d7c,$00000004,$ff8c61ff,$000005e6
+ dc.l $4a0066ff,$0000060c,$52aeff78,$4cfb3fff
+ dc.l $01700000,$06084282,$760247fa,$ef7449fa
+ dc.l $ff70288b,$78f0d88c,$3d7c0000,$ff7c44fc
+ dc.l $000048ee,$ffffff80,$4c303402,$49b30000
+ dc.l $00100000,$100042ee,$ff7e48ee,$ffffffc0
+ dc.l $2d7c0000,$0004ff8c,$61ff0000,$05884a00
+ dc.l $66ff0000,$05ae52ae,$ff784282,$760247fa
+ dc.l $ff7449fa,$ff70288b,$78f03d7c,$0000ff7c
+ dc.l $44fc0000,$48eeffff,$ff804c30,$340201f1
+ dc.l $ffffff70,$42eeff7e,$48eeffff,$ffc02d7c
+ dc.l $00000004,$ff8c61ff,$0000053a,$4a0066ff
+ dc.l $00000560,$52aeff78,$4cfb3fff,$01700000
+ dc.l $055c4282,$760247fa,$0f7449fa,$ff70288b
+ dc.l $2c7c0000,$00023d7c,$0000ff7c,$44fc0000
+ dc.l $48eeffff,$ff804c3b,$3402ef22,$ff60f000
+ dc.l $42eeff7e,$48eeffff,$ffc02d7c,$00000004
+ dc.l $ff8c61ff,$000004de,$4a0066ff,$00000504
+ dc.l $52aeff78,$4cfb3fff,$01700000,$0500204f
+ dc.l $42827602,$47fa0f74,$49faff70,$288b2e7c
+ dc.l $00000002,$3d7c0000,$ff7c44fc,$000048ee
+ dc.l $ffffff80,$4c3b3402,$ff22ff60,$f00042ee
+ dc.l $ff7e48ee,$ffffffc0,$2d7c0000,$0004ff8c
+ dc.l $2e4861ff,$0000047e,$4a0066ff,$000004a4
+ dc.l $52aeff78,$4cfb3fff,$01700000,$04a04282
+ dc.l $760247fa,$ff5449fa,$ff70288b,$99fc0000
+ dc.l $00107810,$3d7c0000,$ff7c44fc,$000048ee
+ dc.l $ffffff80,$4c3b3402,$4126ff70,$001042ee
+ dc.l $ff7e48ee,$ffffffc0,$2d7c0000,$0004ff8c
+ dc.l $61ff0000,$04204a00,$66ff0000,$044652ae
+ dc.l $ff784cfb,$3fff0170,$00000442,$42827602
+ dc.l $47faff54,$49faff70,$288b99fc,$00000010
+ dc.l $78083d7c,$0000ff7c,$44fc0000,$48eeffff
+ dc.l $ff804c3b,$34024326,$ff700010,$42eeff7e
+ dc.l $48eeffff,$ffc02d7c,$00000004,$ff8c61ff
+ dc.l $000003c2,$4a0066ff,$000003e8,$52aeff78
+ dc.l $4cfb3fff,$01700000,$03e44282,$760247fa
+ dc.l $ff5449fa,$ff70288b,$99fc0000,$00107804
+ dc.l $3d7c0000,$ff7c44fc,$000048ee,$ffffff80
+ dc.l $4c3b3402,$4526ff70,$001042ee,$ff7e48ee
+ dc.l $ffffffc0,$2d7c0000,$0004ff8c,$61ff0000
+ dc.l $03644a00,$66ff0000,$038a52ae,$ff784cfb
+ dc.l $3fff0170,$00000386,$42827602,$47faff54
+ dc.l $49faff70,$288b99fc,$00000010,$78023d7c
+ dc.l $0000ff7c,$44fc0000,$48eeffff,$ff804c3b
+ dc.l $34024726,$ff700010,$42eeff7e,$48eeffff
+ dc.l $ffc02d7c,$00000004,$ff8c61ff,$00000306
+ dc.l $4a0066ff,$0000032c,$52aeff78,$4cfb3fff
+ dc.l $01700000,$03284282,$760247fa,$ff5449fa
+ dc.l $ff70288b,$99fc0000,$00107810,$3d7c0000
+ dc.l $ff7c44fc,$000048ee,$ffffff80,$4c3b3402
+ dc.l $4926ff70,$001042ee,$ff7e48ee,$ffffffc0
+ dc.l $2d7c0000,$0004ff8c,$61ff0000,$02a84a00
+ dc.l $66ff0000,$02ce52ae,$ff784cfb,$3fff0170
+ dc.l $000002ca,$42827602,$47faff54,$49faff70
+ dc.l $288b99fc,$00000010,$78083d7c,$0000ff7c
+ dc.l $44fc0000,$48eeffff,$ff804c3b,$34024326
+ dc.l $ff700010,$42eeff7e,$48eeffff,$ffc02d7c
+ dc.l $00000004,$ff8c61ff,$0000024a,$4a0066ff
+ dc.l $00000270,$52aeff78,$4cfb3fff,$01700000
+ dc.l $026c4282,$760247fa,$ff5449fa,$ff70288b
+ dc.l $99fc0000,$00107804,$3d7c0000,$ff7c44fc
+ dc.l $000048ee,$ffffff80,$4c3b3402,$4d26ff70
+ dc.l $001042ee,$ff7e48ee,$ffffffc0,$2d7c0000
+ dc.l $0004ff8c,$61ff0000,$01ec4a00,$66ff0000
+ dc.l $021252ae,$ff784cfb,$3fff0170,$0000020e
+ dc.l $42827602,$47faff54,$49faff70,$288b99fc
+ dc.l $00000010,$78023d7c,$0000ff7c,$44fc0000
+ dc.l $48eeffff,$ff804c3b,$34024f26,$ff700010
+ dc.l $42eeff7e,$48eeffff,$ffc02d7c,$00000004
+ dc.l $ff8c61ff,$0000018e,$4a0066ff,$000001b4
+ dc.l $52aeff78,$4cfb3fff,$01700000,$01b04282
+ dc.l $760247fa,$ff5449fa,$ff70288b,$99fc0000
+ dc.l $00107802,$3d7c0000,$ff7c44fc,$000048ee
+ dc.l $ffffff80,$4c3b3402,$4f37ffff,$ff700000
+ dc.l $001042ee,$ff7e48ee,$ffffffc0,$2d7c0000
+ dc.l $0004ff8c,$61ff0000,$012c4a00,$66ff0000
+ dc.l $015252ae,$ff784cfb,$3fff0170,$0000014e
+ dc.l $42827602,$47faff54,$49faff70,$288b7802
+ dc.l $3d7c0000,$ff7c44fc,$000048ee,$ffffff80
+ dc.l $4c3b3402,$0773ffff,$ff700000,$002042ee
+ dc.l $ff7e48ee,$ffffffc0,$2d7c0000,$0004ff8c
+ dc.l $61ff0000,$00d04a00,$66ff0000,$00f652ae
+ dc.l $ff784cfb,$3fff0170,$000000f2,$42827602
+ dc.l $47faff54,$49faff70,$288b7804,$3d7c0000
+ dc.l $ff7c44fc,$000048ee,$ffffff80,$4c303402
+ dc.l $4fb5ffff,$ff7042ee,$ff7e48ee,$ffffffc0
+ dc.l $2d7c0000,$0004ff8c,$61ff0000,$00784a00
+ dc.l $66ff0000,$009e52ae,$ff784cfb,$3fff0170
+ dc.l $0000009a,$204f4282,$760247fa,$ff744dfa
+ dc.l $ff702c8b,$ddfc0000,$00102e7c,$fffffffe
+ dc.l $3d7c0000,$ff7c44fc,$000048ee,$ffffff80
+ dc.l $4c3b3402,$ff27ff70,$00000010,$42eeff7e
+ dc.l $48eeffff,$ffc02d7c,$00000004,$ff8c2e48
+ dc.l $61ff0000,$00104a00,$66ff0000,$00364280
+ dc.l $4e7541ee,$ff8043ee,$ffc0700e,$b18966ff
+ dc.l $0000001c,$51c8fff6,$302eff7c,$322eff7e
+ dc.l $b04166ff,$00000008,$42804e75,$70014e75
+ dc.l $222eff78,$70014e75,$acacacac,$acacacac
+ dc.l $acacacac,$acacacac,$acacacac,$acacacac
+ dc.l $acacacac,$acacacac,$acacacac,$acacacac
+ dc.l $acacacac,$acacacac,$acacacac,$acacacac
+ dc.l $acacacac,$acacacac,$2f00203a,$afa4487b
+ dc.l $0930ffff,$afa0202f,$00044e74,$00042f00
+ dc.l $203aaf92,$487b0930,$ffffaf8a,$202f0004
+ dc.l $4e740004,$00000000,$00000000,$00000000
diff --git a/arch/m68k/ifpsp060/os.S b/arch/m68k/ifpsp060/os.S
new file mode 100644
index 00000000000..aa4df87a6c4
--- /dev/null
+++ b/arch/m68k/ifpsp060/os.S
@@ -0,0 +1,396 @@
+|~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+|MOTOROLA MICROPROCESSOR & MEMORY TECHNOLOGY GROUP
+|M68000 Hi-Performance Microprocessor Division
+|M68060 Software Package
+|Production Release P1.00 -- October 10, 1994
+|
+|M68060 Software Package Copyright © 1993, 1994 Motorola Inc. All rights reserved.
+|
+|THE SOFTWARE is provided on an "AS IS" basis and without warranty.
+|To the maximum extent permitted by applicable law,
+|MOTOROLA DISCLAIMS ALL WARRANTIES WHETHER EXPRESS OR IMPLIED,
+|INCLUDING IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
+|and any warranty against infringement with regard to the SOFTWARE
+|(INCLUDING ANY MODIFIED VERSIONS THEREOF) and any accompanying written materials.
+|
+|To the maximum extent permitted by applicable law,
+|IN NO EVENT SHALL MOTOROLA BE LIABLE FOR ANY DAMAGES WHATSOEVER
+|(INCLUDING WITHOUT LIMITATION, DAMAGES FOR LOSS OF BUSINESS PROFITS,
+|BUSINESS INTERRUPTION, LOSS OF BUSINESS INFORMATION, OR OTHER PECUNIARY LOSS)
+|ARISING OF THE USE OR INABILITY TO USE THE SOFTWARE.
+|Motorola assumes no responsibility for the maintenance and support of the SOFTWARE.
+|
+|You are hereby granted a copyright license to use, modify, and distribute the SOFTWARE
+|so long as this entire notice is retained without alteration in any modified and/or
+|redistributed versions, and that such modified versions are clearly identified as such.
+|No licenses are granted by implication, estoppel or otherwise under any patents
+|or trademarks of Motorola, Inc.
+|~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+| os.s
+|
+| This file contains:
+| - example "Call-Out"s required by both the ISP and FPSP.
+|
+
+#include <linux/linkage.h>
+
+|################################
+| EXAMPLE CALL-OUTS #
+| #
+| _060_dmem_write() #
+| _060_dmem_read() #
+| _060_imem_read() #
+| _060_dmem_read_byte() #
+| _060_dmem_read_word() #
+| _060_dmem_read_long() #
+| _060_imem_read_word() #
+| _060_imem_read_long() #
+| _060_dmem_write_byte() #
+| _060_dmem_write_word() #
+| _060_dmem_write_long() #
+| #
+| _060_real_trace() #
+| _060_real_access() #
+|################################
+
+|
+| Each I/O routine checks whether the memory read/write is to/from user
+| or supervisor space. The examples below use simple "move"
+| instructions for supervisor mode accesses and call _copyin()/_copyout()
+| for user mode accesses.
+| When installing the 060SP, the _copyin()/_copyout() equivalents for a
+| given operating system should be substituted.
+|
+| The addresses passed in by the 060SP are guaranteed to reside on the
+| stack. As a result, a Unix process is allowed to sleep if it takes a
+| page fault during a _copyout.
+|
+| Linux/68k: The _060_[id]mem_{read,write}_{byte,word,long} functions
+| (i.e. all accesses of known length <= 4) are implemented with single
+| "movs" instructions instead of (more expensive) copy{in,out} calls
+| when operating on user space.
+
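+|
+| As a minimal sketch (not part of the original package) of the
+| substitution described above: on a kernel without the single-move
+| optimization, the user-mode path of _060_dmem_write could simply hand
+| the whole transfer to the OS copy routine. "os_copyout" below is a
+| hypothetical stand-in for that kernel's _copyout() equivalent, assumed
+| here to take (src, dst, count) on the stack and return status in %d0.
+|
+#if 0
+user_write_sketch:
+	move.l	%d0,-(%sp)		| push byte count
+	move.l	%a1,-(%sp)		| push user destination address
+	move.l	%a0,-(%sp)		| push supervisor source address
+	jsr	os_copyout		| hypothetical OS copy-out routine
+	lea	0xc(%sp),%sp		| pop the three arguments
+	move.l	%d0,%d1			| call-outs report status in %d1
+	rts
+#endif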
+|
+| _060_dmem_write():
+|
+| Writes to data memory while in supervisor mode.
+|
+| INPUTS:
+| a0 - supervisor source address
+| a1 - user destination address
+| d0 - number of bytes to write
+| 0x4(%a6),bit5 - 1 = supervisor mode, 0 = user mode
+| OUTPUTS:
+| d1 - 0 = success, !0 = failure
+|
+ .global _060_dmem_write
+_060_dmem_write:
+ subq.l #1,%d0
+ btst #0x5,0x4(%a6) | check for supervisor state
+ beqs user_write
+super_write:
+ move.b (%a0)+,(%a1)+ | copy 1 byte
+ dbra %d0,super_write | quit if --ctr < 0
+ clr.l %d1 | return success
+ rts
+user_write:
+ move.b (%a0)+,%d1 | copy 1 byte
+copyoutae:
+ movs.b %d1,(%a1)+ | store 1 byte to user space (write uses DFC)
+ dbra %d0,user_write | quit if --ctr < 0
+ clr.l %d1 | return success
+ rts
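+
+|
+| Added commentary (not from the original sources): the supervisor test
+| "btst #0x5,0x4(%a6)" examines bit 5 of the byte at offset 4 off the
+| frame pointer, presumably the upper byte of the saved status register;
+| bit 5 of that byte corresponds to the S (supervisor) bit, bit 13 of
+| the full SR. For example, SR = 0x2000 (supervisor mode) leaves 0x20 in
+| that byte, so bit 5 reads as set. On the user path, the "movs"
+| instruction performs its access through the function code registers
+| (SFC for reads, DFC for writes), which the surrounding handlers are
+| assumed to have pointed at user data space.
+|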
+
+|
+| _060_imem_read(), _060_dmem_read():
+|
+| Reads from data/instruction memory while in supervisor mode.
+|
+| INPUTS:
+| a0 - user source address
+| a1 - supervisor destination address
+| d0 - number of bytes to read
+| 0x4(%a6),bit5 - 1 = supervisor mode, 0 = user mode
+| OUTPUTS:
+| d1 - 0 = success, !0 = failure
+|
+ .global _060_imem_read
+ .global _060_dmem_read
+_060_imem_read:
+_060_dmem_read:
+ subq.l #1,%d0
+ btst #0x5,0x4(%a6) | check for supervisor state
+ beqs user_read
+super_read:
+ move.b (%a0)+,(%a1)+ | copy 1 byte
+ dbra %d0,super_read | quit if --ctr < 0
+ clr.l %d1 | return success
+ rts
+user_read:
+copyinae:
+ movs.b (%a0)+,%d1 | fetch 1 byte from user space (read uses SFC)
+ move.b %d1,(%a1)+ | copy 1 byte
+ dbra %d0,user_read | quit if --ctr < 0
+ clr.l %d1 | return success
+ rts
+
+|
+| _060_dmem_read_byte():
+|
+| Read a data byte from user memory.
+|
+| INPUTS:
+| a0 - user source address
+| 0x4(%a6),bit5 - 1 = supervisor mode, 0 = user mode
+| OUTPUTS:
+| d0 - data byte in d0
+| d1 - 0 = success, !0 = failure
+|
+ .global _060_dmem_read_byte
+_060_dmem_read_byte:
+ clr.l %d0 | clear whole longword
+ clr.l %d1 | assume success
+ btst #0x5,0x4(%a6) | check for supervisor state
+ bnes dmrbs | supervisor
+dmrbuae:movs.b (%a0),%d0 | fetch user byte
+ rts
+dmrbs: move.b (%a0),%d0 | fetch super byte
+ rts
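+
+|
+| Illustrative use of _060_dmem_read_byte (060SP context; "usrc" and
+| "fail" are hypothetical symbols):
+|
+|	lea	usrc,%a0	| user source address
+|	bsr	_060_dmem_read_byte
+|	tst.l	%d1		| check the status first
+|	bnes	fail		| !0 = the "movs" faulted
+|	| on success the byte is in the low 8 bits of %d0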
+
+|
+| _060_dmem_read_word():
+|
+| Read a data word from user memory.
+|
+| INPUTS:
+| a0 - user source address
+| 0x4(%a6),bit5 - 1 = supervisor mode, 0 = user mode
+| OUTPUTS:
+| d0 - data word in d0
+| d1 - 0 = success, !0 = failure
+|
+| _060_imem_read_word():
+|
+| Read an instruction word from user memory.
+|
+| INPUTS:
+| a0 - user source address
+| 0x4(%a6),bit5 - 1 = supervisor mode, 0 = user mode
+| OUTPUTS:
+| d0 - instruction word in d0
+| d1 - 0 = success, !0 = failure
+|
+ .global _060_dmem_read_word
+ .global _060_imem_read_word
+_060_dmem_read_word:
+_060_imem_read_word:
+ clr.l %d1 | assume success
+ clr.l %d0 | clear whole longword
+ btst #0x5,0x4(%a6) | check for supervisor state
+ bnes dmrws | supervisor
+dmrwuae:movs.w (%a0), %d0 | fetch user word
+ rts
+dmrws: move.w (%a0), %d0 | fetch super word
+ rts
+
+|
+| _060_dmem_read_long():
+|
+| Read a data longword from user memory.
+|
+| INPUTS:
+| a0 - user source address
+| 0x4(%a6),bit5 - 1 = supervisor mode, 0 = user mode
+| OUTPUTS:
+| d0 - data longword in d0
+| d1 - 0 = success, !0 = failure
+|
+| _060_imem_read_long():
+|
+| Read an instruction longword from user memory.
+|
+| INPUTS:
+| a0 - user source address
+| 0x4(%a6),bit5 - 1 = supervisor mode, 0 = user mode
+| OUTPUTS:
+| d0 - instruction longword in d0
+| d1 - 0 = success, !0 = failure
+|
+ .global _060_dmem_read_long
+ .global _060_imem_read_long
+_060_dmem_read_long:
+_060_imem_read_long:
+ clr.l %d1 | assume success
+ btst #0x5,0x4(%a6) | check for supervisor state
+ bnes dmrls | supervisor
+dmrluae:movs.l (%a0),%d0 | fetch user longword
+ rts
+dmrls: move.l (%a0),%d0 | fetch super longword
+ rts
+
+|
+| _060_dmem_write_byte():
+|
+| Write a data byte to user memory.
+|
+| INPUTS:
+| a0 - user destination address
+| d0 - data byte in d0
+| 0x4(%a6),bit5 - 1 = supervisor mode, 0 = user mode
+| OUTPUTS:
+| d1 - 0 = success, !0 = failure
+|
+ .global _060_dmem_write_byte
+_060_dmem_write_byte:
+ clr.l %d1 | assume success
+ btst #0x5,0x4(%a6) | check for supervisor state
+ bnes dmwbs | supervisor
+dmwbuae:movs.b %d0,(%a0) | store user byte
+ rts
+dmwbs: move.b %d0,(%a0) | store super byte
+ rts
+
+|
+| _060_dmem_write_word():
+|
+| Write a data word to user memory.
+|
+| INPUTS:
+| a0 - user destination address
+| d0 - data word in d0
+| 0x4(%a6),bit5 - 1 = supervisor mode, 0 = user mode
+| OUTPUTS:
+| d1 - 0 = success, !0 = failure
+|
+ .global _060_dmem_write_word
+_060_dmem_write_word:
+ clr.l %d1 | assume success
+ btst #0x5,0x4(%a6) | check for supervisor state
+ bnes dmwws | supervisor
+dmwwu:
+dmwwuae:movs.w %d0,(%a0) | store user word
+ bras dmwwr
+dmwws: move.w %d0,(%a0) | store super word
+dmwwr: clr.l %d1 | return success
+ rts
+
+|
+| _060_dmem_write_long():
+|
+| Write a data longword to user memory.
+|
+| INPUTS:
+| a0 - user destination address
+| d0 - data longword in d0
+| 0x4(%a6),bit5 - 1 = supervisor mode, 0 = user mode
+| OUTPUTS:
+| d1 - 0 = success, !0 = failure
+|
+ .global _060_dmem_write_long
+_060_dmem_write_long:
+ clr.l %d1 | assume success
+ btst #0x5,0x4(%a6) | check for supervisor state
+ bnes dmwls | supervisor
+dmwluae:movs.l %d0,(%a0) | store user longword
+ rts
+dmwls: move.l %d0,(%a0) | store super longword
+ rts
+
+
+#if 0
+|###############################################
+
+|
+| Use these routines if your kernel doesn't have _copyout/_copyin equivalents.
+| Assumes that D0/D1/A0/A1 are scratch registers. The _copyin/_copyout
+| below assume that the SFC/DFC have been set previously.
+|
+| Linux/68k: these are basically non-inlined versions of
+| memcpy_{to,from}fs, but without the long-transfer optimization.
+| Note: this assumes the SFC/DFC registers already point to user data
+| space. That should be correct, but are there any exceptions?
+
+|
+| int _copyout(supervisor_addr, user_addr, nbytes)
+|
+ .global _copyout
+_copyout:
+ move.l 4(%sp),%a0 | source
+ move.l 8(%sp),%a1 | destination
+ move.l 12(%sp),%d0 | count
+ subq.l #1,%d0
+moreout:
+ move.b (%a0)+,%d1 | fetch supervisor byte
+copyoutae:
+ movs.b %d1,(%a1)+ | store user byte
+ dbra %d0,moreout | are we through yet?
+ moveq #0,%d0 | return success
+ rts
+
+|
+| int _copyin(user_addr, supervisor_addr, nbytes)
+|
+ .global _copyin
+_copyin:
+ move.l 4(%sp),%a0 | source
+ move.l 8(%sp),%a1 | destination
+ move.l 12(%sp),%d0 | count
+ subq.l #1,%d0
+morein:
+copyinae:
+ movs.b (%a0)+,%d1 | fetch user byte
+ move.b %d1,(%a1)+ | write supervisor byte
+ dbra %d0,morein | are we through yet?
+ moveq #0,%d0 | return success
+ rts
+#endif
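+
+|
+| Illustrative C-convention call of _copyout, meaningful only when the
+| "#if 0" block above is enabled ("uptr" and "ksrc" are hypothetical):
+|
+|	move.l	#12,-(%sp)	| nbytes
+|	move.l	uptr,-(%sp)	| user_addr (pointer held in a variable)
+|	pea	ksrc		| supervisor_addr (kernel buffer)
+|	jsr	_copyout
+|	lea	12(%sp),%sp	| pop the three arguments
+|	tst.l	%d0		| 0 = success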
+
+|###########################################################################
+
+|
+| _060_real_trace():
+|
+| This is the exit point for the 060FPSP when an instruction is being traced
+| and there are no other higher priority exceptions pending for this instruction
+| or they have already been processed.
+|
+| Motorola's sample code simply executed an "rte"; the routine below
+| instead branches to the kernel's trace exception handler, "trap".
+|
+ .global _060_real_trace
+_060_real_trace:
+ bral trap
+
+|
+| _060_real_access():
+|
+| This is the exit point for the 060FPSP when an access error exception
+| is encountered. The routine below should point to the operating system
+| handler for access error exceptions. The exception stack frame is an
+| 8-word access error frame.
+|
+| Motorola's sample routine simply executed an "rte" instruction, which
+| is most likely the incorrect thing to do and could put the system
+| into an infinite loop; the routine below instead branches to the
+| kernel's bus error handler, "buserr".
+|
+ .global _060_real_access
+_060_real_access:
+ bral buserr
+
+
+
+| Exception handling for "movs" accesses to illegal memory: each
+| __ex_table entry below pairs the address of a user-space "movs"
+| above with the fixup at "1:", which returns -1 in %d1 to signal
+| failure to the 060SP.
+ .section .fixup,#alloc,#execinstr
+ .even
+1: moveq #-1,%d1
+ rts
+.section __ex_table,#alloc
+ .align 4
+ .long dmrbuae,1b
+ .long dmrwuae,1b
+ .long dmrluae,1b
+ .long dmwbuae,1b
+ .long dmwwuae,1b
+ .long dmwluae,1b
+ .long copyoutae,1b
+ .long copyinae,1b
+ .text
diff --git a/arch/m68k/ifpsp060/pfpsp.sa b/arch/m68k/ifpsp060/pfpsp.sa
new file mode 100644
index 00000000000..d276b27f1f6
--- /dev/null
+++ b/arch/m68k/ifpsp060/pfpsp.sa
@@ -0,0 +1,1730 @@
+ dc.l $60ff0000,$17400000,$60ff0000,$15f40000
+ dc.l $60ff0000,$02b60000,$60ff0000,$04700000
+ dc.l $60ff0000,$1b100000,$60ff0000,$19aa0000
+ dc.l $60ff0000,$1b5a0000,$60ff0000,$062e0000
+ dc.l $60ff0000,$102c0000,$51fc51fc,$51fc51fc
+ dc.l $51fc51fc,$51fc51fc,$51fc51fc,$51fc51fc
+ dc.l $51fc51fc,$51fc51fc,$51fc51fc,$51fc51fc
+ dc.l $51fc51fc,$51fc51fc,$51fc51fc,$51fc51fc
+ dc.l $2f00203a,$ff2c487b,$0930ffff,$fef8202f
+ dc.l $00044e74,$00042f00,$203afef2,$487b0930
+ dc.l $fffffee2,$202f0004,$4e740004,$2f00203a
+ dc.l $fee0487b,$0930ffff,$fecc202f,$00044e74
+ dc.l $00042f00,$203afed2,$487b0930,$fffffeb6
+ dc.l $202f0004,$4e740004,$2f00203a,$fea4487b
+ dc.l $0930ffff,$fea0202f,$00044e74,$00042f00
+ dc.l $203afe96,$487b0930,$fffffe8a,$202f0004
+ dc.l $4e740004,$2f00203a,$fe7c487b,$0930ffff
+ dc.l $fe74202f,$00044e74,$00042f00,$203afe76
+ dc.l $487b0930,$fffffe5e,$202f0004,$4e740004
+ dc.l $2f00203a,$fe68487b,$0930ffff,$fe48202f
+ dc.l $00044e74,$00042f00,$203afe56,$487b0930
+ dc.l $fffffe32,$202f0004,$4e740004,$2f00203a
+ dc.l $fe44487b,$0930ffff,$fe1c202f,$00044e74
+ dc.l $00042f00,$203afe32,$487b0930,$fffffe06
+ dc.l $202f0004,$4e740004,$2f00203a,$fe20487b
+ dc.l $0930ffff,$fdf0202f,$00044e74,$00042f00
+ dc.l $203afe1e,$487b0930,$fffffdda,$202f0004
+ dc.l $4e740004,$2f00203a,$fe0c487b,$0930ffff
+ dc.l $fdc4202f,$00044e74,$00042f00,$203afdfa
+ dc.l $487b0930,$fffffdae,$202f0004,$4e740004
+ dc.l $2f00203a,$fde8487b,$0930ffff,$fd98202f
+ dc.l $00044e74,$00042f00,$203afdd6,$487b0930
+ dc.l $fffffd82,$202f0004,$4e740004,$2f00203a
+ dc.l $fdc4487b,$0930ffff,$fd6c202f,$00044e74
+ dc.l $00042f00,$203afdb2,$487b0930,$fffffd56
+ dc.l $202f0004,$4e740004,$2f00203a,$fda0487b
+ dc.l $0930ffff,$fd40202f,$00044e74,$00042f00
+ dc.l $203afd8e,$487b0930,$fffffd2a,$202f0004
+ dc.l $4e740004,$2f00203a,$fd7c487b,$0930ffff
+ dc.l $fd14202f,$00044e74,$00042f00,$203afd6a
+ dc.l $487b0930,$fffffcfe,$202f0004,$4e740004
+ dc.l $40c62d38,$d3d64634,$3d6f90ae,$b1e75cc7
+ dc.l $40000000,$c90fdaa2,$2168c235,$00000000
+ dc.l $3fff0000,$c90fdaa2,$2168c235,$00000000
+ dc.l $3fe45f30,$6dc9c883,$4e56ff40,$f32eff6c
+ dc.l $48ee0303,$ff9cf22e,$bc00ff60,$f22ef0c0
+ dc.l $ffdc2d6e,$ff68ff44,$206eff44,$58aeff44
+ dc.l $61ffffff,$ff042d40,$ff40082e,$0005ff42
+ dc.l $66000116,$41eeff6c,$61ff0000,$051c41ee
+ dc.l $ff6c61ff,$00002aec,$1d40ff4e,$082e0005
+ dc.l $ff436726,$e9ee0183,$ff4261ff,$00005cac
+ dc.l $41eeff78,$61ff0000,$2aca0c00,$00066606
+ dc.l $61ff0000,$2a2e1d40,$ff4f4280,$102eff63
+ dc.l $122eff43,$0241007f,$02ae00ff,$01ffff64
+ dc.l $f23c9000,$00000000,$f23c8800,$00000000
+ dc.l $41eeff6c,$43eeff78,$223b1530,$00001974
+ dc.l $4ebb1930,$0000196c,$e9ee0183,$ff4261ff
+ dc.l $00005cd8,$082e0004,$ff626622,$082e0001
+ dc.l $ff626644,$f22ed0c0,$ffdcf22e,$9c00ff60
+ dc.l $4cee0303,$ff9c4e5e,$60ffffff,$fcc6f22e
+ dc.l $f040ff6c,$3d7ce005,$ff6ef22e,$d0c0ffdc
+ dc.l $f22e9c00,$ff604cee,$0303ff9c,$f36eff6c
+ dc.l $4e5e60ff,$fffffcb2,$f22ef040,$ff6c1d7c
+ dc.l $00c4000b,$3d7ce001,$ff6ef22e,$d0c0ffdc
+ dc.l $f22e9c00,$ff604cee,$0303ff9c,$f36eff6c
+ dc.l $4e5e60ff,$fffffcae,$1d7c0000,$ff4e4280
+ dc.l $102eff63,$02aeffff,$00ffff64,$f23c9000
+ dc.l $00000000,$f23c8800,$00000000,$41eeff6c
+ dc.l $61ff0000,$2e0c082e,$0004ff62,$6600ff70
+ dc.l $082e0001,$ff626600,$ff90f22e,$d0c0ffdc
+ dc.l $f22e9c00,$ff604cee,$0303ff9c,$4e5e0817
+ dc.l $000767ff,$fffffc0c,$f22fa400,$00083f7c
+ dc.l $20240006,$60ffffff,$fcec4e56,$ff40f32e
+ dc.l $ff6c48ee,$0303ff9c,$f22ebc00,$ff60f22e
+ dc.l $f0c0ffdc,$2d6eff68,$ff44206e,$ff4458ae
+ dc.l $ff4461ff,$fffffd42,$2d40ff40,$082e0005
+ dc.l $ff426600,$013241ee,$ff6c61ff,$0000035a
+ dc.l $41eeff6c,$61ff0000,$292a1d40,$ff4e082e
+ dc.l $0005ff43,$672e082e,$0004ff43,$6626e9ee
+ dc.l $0183ff42,$61ff0000,$5ae241ee,$ff7861ff
+ dc.l $00002900,$0c000006,$660661ff,$00002864
+ dc.l $1d40ff4f,$4280102e,$ff63122e,$ff430241
+ dc.l $007f02ae,$00ff01ff,$ff64f23c,$90000000
+ dc.l $0000f23c,$88000000,$000041ee,$ff6c43ee
+ dc.l $ff78223b,$15300000,$17aa4ebb,$19300000
+ dc.l $17a2e9ee,$0183ff42,$61ff0000,$5b0e082e
+ dc.l $0003ff62,$6622082e,$0001ff62,$664ef22e
+ dc.l $d0c0ffdc,$f22e9c00,$ff604cee,$0303ff9c
+ dc.l $4e5e60ff,$fffffafc,$082e0003,$ff666700
+ dc.l $ffd6f22e,$f040ff6c,$3d7ce003,$ff6ef22e
+ dc.l $d0c0ffdc,$f22e9c00,$ff604cee,$0303ff9c
+ dc.l $f36eff6c,$4e5e60ff,$fffffaf4,$082e0001
+ dc.l $ff666700,$ffaaf22e,$f040ff6c,$1d7c00c4
+ dc.l $000b3d7c,$e001ff6e,$f22ed0c0,$ffdcf22e
+ dc.l $9c00ff60,$4cee0303,$ff9cf36e,$ff6c4e5e
+ dc.l $60ffffff,$fad01d7c,$0000ff4e,$4280102e
+ dc.l $ff6302ae,$ffff00ff,$ff64f23c,$90000000
+ dc.l $0000f23c,$88000000,$000041ee,$ff6c61ff
+ dc.l $00002c2e,$082e0003,$ff626600,$ff66082e
+ dc.l $0001ff62,$6600ff90,$f22ed0c0,$ffdcf22e
+ dc.l $9c00ff60,$4cee0303,$ff9c4e5e,$08170007
+ dc.l $67ffffff,$fa2ef22f,$a4000008,$3f7c2024
+ dc.l $000660ff,$fffffb0e,$4e56ff40,$f32eff6c
+ dc.l $48ee0303,$ff9cf22e,$bc00ff60,$f22ef0c0
+ dc.l $ffdc082e,$00050004,$66084e68,$2d48ffd8
+ dc.l $600841ee,$00102d48,$ffd82d6e,$ff68ff44
+ dc.l $206eff44,$58aeff44,$61ffffff,$fb4c2d40
+ dc.l $ff40422e,$ff4a082e,$0005ff42,$66000208
+ dc.l $e9ee0006,$ff420c00,$00136700,$049e02ae
+ dc.l $00ff00ff,$ff64f23c,$90000000,$0000f23c
+ dc.l $88000000,$000041ee,$ff6c61ff,$0000013a
+ dc.l $41eeff6c,$61ff0000,$270a0c00,$00066606
+ dc.l $61ff0000,$266e1d40,$ff4ee9ee,$0183ff42
+ dc.l $082e0005,$ff436728,$0c2e003a,$ff436720
+ dc.l $61ff0000,$58b641ee,$ff7861ff,$000026d4
+ dc.l $0c000006,$660661ff,$00002638,$1d40ff4f
+ dc.l $4280102e,$ff63e9ee,$1047ff43,$41eeff6c
+ dc.l $43eeff78,$223b1d30,$00001598,$4ebb1930
+ dc.l $00001590,$102eff62,$6634102e,$ff430200
+ dc.l $00380c00,$0038670c,$e9ee0183,$ff4261ff
+ dc.l $000058e8,$f22ed0c0,$ffdcf22e,$9c00ff60
+ dc.l $4cee0303,$ff9c4e5e,$60ffffff,$f8e6c02e
+ dc.l $ff66edc0,$06086614,$082e0004,$ff6667ba
+ dc.l $082e0001,$ff6267b2,$60000066,$04800000
+ dc.l $00180c00,$00066614,$082e0003,$ff666600
+ dc.l $004a082e,$0004ff66,$66000046,$2f0061ff
+ dc.l $000007e0,$201f3d7b,$0222ff6e,$f22ed0c0
+ dc.l $ffdcf22e,$9c00ff60,$4cee0303,$ff9cf36e
+ dc.l $ff6c4e5e,$60ffffff,$f87ae000,$e006e004
+ dc.l $e005e003,$e002e001,$e001303c,$000460bc
+ dc.l $303c0003,$60b6e9ee,$0006ff42,$0c000011
+ dc.l $67080c00,$00156750,$4e753028,$00000240
+ dc.l $7fff0c40,$3f806708,$0c40407f,$672c4e75
+ dc.l $02a87fff,$ffff0004,$671861ff,$000024cc
+ dc.l $44400640,$3f810268,$80000000,$81680000
+ dc.l $4e750268,$80000000,$4e750228,$007f0004
+ dc.l $00687fff,$00004e75,$30280000,$02407fff
+ dc.l $0c403c00,$67080c40,$43ff67de,$4e7502a8
+ dc.l $7fffffff,$00046606,$4aa80008,$67c461ff
+ dc.l $00002478,$44400640,$3c010268,$80000000
+ dc.l $81680000,$4e75e9ee,$00c3ff42,$0c000003
+ dc.l $670004a2,$0c000007,$6700049a,$02aeffff
+ dc.l $00ffff64,$f23c9000,$00000000,$f23c8800
+ dc.l $00000000,$302eff6c,$02407fff,$671041ee
+ dc.l $ff6c61ff,$0000246c,$1d40ff4e,$60061d7c
+ dc.l $0004ff4e,$4280102e,$ff6341ee,$ff6c2d56
+ dc.l $ffd461ff,$0000292a,$102eff62,$66000086
+ dc.l $2caeffd4,$082e0005,$00046626,$206effd8
+ dc.l $4e60f22e,$d0c0ffdc,$f22e9c00,$ff604cee
+ dc.l $0303ff9c,$4e5e0817,$0007667a,$60ffffff
+ dc.l $f7220c2e,$0008ff4a,$66d8f22e,$f080ff6c
+ dc.l $f22ed0c0,$ffdcf22e,$9c00ff60,$4cee0303
+ dc.l $ff9c2c56,$2f6f00c4,$00b82f6f,$00c800bc
+ dc.l $2f6f002c,$00c42f6f,$003000c8,$2f6f0034
+ dc.l $00ccdffc,$000000b8,$08170007,$662860ff
+ dc.l $fffff6d0,$c02eff66,$edc00608,$662a082e
+ dc.l $0004ff66,$6700ff6a,$082e0001,$ff626700
+ dc.l $ff606000,$01663f7c,$20240006,$f22fa400
+ dc.l $000860ff,$fffff78e,$04800000,$0018303b
+ dc.l $020a4efb,$00064afc,$00080000,$0000003a
+ dc.l $00640094,$00000140,$0000f22e,$d0c0ffdc
+ dc.l $f22e9c00,$ff604cee,$0303ff9c,$3d7c30d8
+ dc.l $000a3d7c,$e006ff6e,$f36eff6c,$4e5e60ff
+ dc.l $fffff6d4,$f22ed0c0,$ffdcf22e,$9c00ff60
+ dc.l $4cee0303,$ff9c3d7c,$30d0000a,$3d7ce004
+ dc.l $ff6ef36e,$ff6c4e5e,$60ffffff,$f694f22e
+ dc.l $f040ff6c,$f22ed0c0,$ffdcf22e,$9c00ff60
+ dc.l $4cee0303,$ff9c3d7c,$30d4000a,$3d7ce005
+ dc.l $ff6ef36e,$ff6c4e5e,$60ffffff,$f60c2cae
+ dc.l $ffd4082e,$00050004,$66000038,$206effd8
+ dc.l $4e60f22e,$f040ff6c,$f22ed0c0,$ffdcf22e
+ dc.l $9c00ff60,$4cee0303,$ff9c3d7c,$30cc000a
+ dc.l $3d7ce003,$ff6ef36e,$ff6c4e5e,$60ffffff
+ dc.l $f5de0c2e,$0008ff4a,$66c8f22e,$f080ff6c
+ dc.l $f22ef040,$ff78f22e,$d0c0ffdc,$f22e9c00
+ dc.l $ff604cee,$0303ff9c,$3d7c30cc,$000a3d7c
+ dc.l $e003ff7a,$f36eff78,$2c562f6f,$00c400b8
+ dc.l $2f6f00c8,$00bc2f6f,$00cc00c0,$2f6f002c
+ dc.l $00c42f6f,$003000c8,$2f6f0034,$00ccdffc
+ dc.l $000000b8,$60ffffff,$f576f22e,$f040ff6c
+ dc.l $f22ed0c0,$ffdcf22e,$9c00ff60,$4cee0303
+ dc.l $ff9c3d7c,$30c4000a,$3d7ce001,$ff6ef36e
+ dc.l $ff6c4e5e,$60ffffff,$f55c02ae,$00ff00ff
+ dc.l $ff64f23c,$90000000,$0000f23c,$88000000
+ dc.l $000061ff,$00005548,$41eeff6c,$61ff0000
+ dc.l $22721d40,$ff4ee9ee,$0183ff42,$082e0005
+ dc.l $ff436728,$0c2e003a,$ff436720,$61ff0000
+ dc.l $542a41ee,$ff7861ff,$00002248,$0c000006
+ dc.l $660661ff,$000021ac,$1d40ff4f,$4280102e
+ dc.l $ff63e9ee,$1047ff43,$41eeff6c,$43eeff78
+ dc.l $223b1d30,$0000110c,$4ebb1930,$00001104
+ dc.l $102eff62,$6600008a,$102eff43,$02000038
+ dc.l $0c000038,$670ce9ee,$0183ff42,$61ff0000
+ dc.l $545a082e,$00050004,$6600002a,$206effd8
+ dc.l $4e60f22e,$d0c0ffdc,$f22e9c00,$ff604cee
+ dc.l $0303ff9c,$4e5e0817,$00076600,$012660ff
+ dc.l $fffff440,$082e0002,$ff4a67d6,$f22ed0c0
+ dc.l $ffdcf22e,$9c00ff60,$4cee0303,$ff9c4e5e
+ dc.l $2f6f0004,$00102f6f,$0000000c,$dffc0000
+ dc.l $000c0817,$00076600,$00ea60ff,$fffff404
+ dc.l $c02eff66,$edc00608,$6618082e,$0004ff66
+ dc.l $6700ff66,$082e0001,$ff626700,$ff5c6000
+ dc.l $006e0480,$00000018,$0c000006,$6d14082e
+ dc.l $0003ff66,$66000060,$082e0004,$ff666600
+ dc.l $004e082e,$00050004,$66000054,$206effd8
+ dc.l $4e603d7b,$022aff6e,$f22ed0c0,$ffdcf22e
+ dc.l $9c00ff60,$4cee0303,$ff9cf36e,$ff6c4e5e
+ dc.l $08170007,$6600006c,$60ffffff,$f386e000
+ dc.l $e006e004,$e005e003,$e002e001,$e001303c
+ dc.l $00036000,$ffae303c,$00046000,$ffa6082e
+ dc.l $0002ff4a,$67ac3d7b,$02d6ff6e,$f22ed0c0
+ dc.l $ffdcf22e,$9c00ff60,$4cee0303,$ff9cf36e
+ dc.l $ff6c4e5e,$2f6f0004,$00102f6f,$0000000c
+ dc.l $dffc0000,$000c0817,$00076606,$60ffffff
+ dc.l $f3223f7c,$20240006,$f22fa400,$000860ff
+ dc.l $fffff402,$02aeffff,$00ffff64,$f23c9000
+ dc.l $00000000,$f23c8800,$00000000,$e9ee0183
+ dc.l $ff4261ff,$000051b4,$41eeff6c,$61ff0000
+ dc.l $20620c00,$00066606,$61ff0000,$1fc61d40
+ dc.l $ff4e4280,$102eff63,$41eeff6c,$2d56ffd4
+ dc.l $61ff0000,$248c102e,$ff626600,$00842cae
+ dc.l $ffd4082e,$00050004,$6628206e,$ffd84e60
+ dc.l $f22ed0c0,$ffdcf22e,$9c00ff60,$4cee0303
+ dc.l $ff9c4e5e,$08170007,$6600ff68,$60ffffff
+ dc.l $f282082e,$0003ff4a,$67d6f22e,$d0c0ffdc
+ dc.l $f22e9c00,$ff604cee,$0303ff9c,$2c562f6f
+ dc.l $00c400b8,$2f6f00c8,$00bc2f6f,$003800c4
+ dc.l $2f6f003c,$00c82f6f,$004000cc,$dffc0000
+ dc.l $00b80817,$00076600,$ff1a60ff,$fffff234
+ dc.l $c02eff66,$edc00608,$6700ff74,$2caeffd4
+ dc.l $0c00001a,$6e0000e8,$67000072,$082e0005
+ dc.l $0004660a,$206effd8,$4e606000,$fb8e0c2e
+ dc.l $0008ff4a,$6600fb84,$f22ed0c0,$ffdcf22e
+ dc.l $9c00ff60,$4cee0303,$ff9c3d7c,$30d8000a
+ dc.l $3d7ce006,$ff6ef36e,$ff6c2c56,$2f6f00c4
+ dc.l $00b82f6f,$00c800bc,$2f6f00cc,$00c02f6f
+ dc.l $003800c4,$2f6f003c,$00c82f6f,$004000cc
+ dc.l $dffc0000,$00b860ff,$fffff22c,$082e0005
+ dc.l $00046600,$000c206e,$ffd84e60,$6000fb46
+ dc.l $0c2e0008,$ff4a6600,$fb3cf22e,$d0c0ffdc
+ dc.l $f22e9c00,$ff604cee,$0303ff9c,$3d7c30d0
+ dc.l $000a3d7c,$e004ff6e,$f36eff6c,$2c562f6f
+ dc.l $00c400b8,$2f6f00c8,$00bc2f6f,$00cc00c0
+ dc.l $2f6f0038,$00c42f6f,$003c00c8,$2f6f0040
+ dc.l $00ccdffc,$000000b8,$60ffffff,$f1a4082e
+ dc.l $00050004,$6600000c,$206effd8,$4e606000
+ dc.l $fbda0c2e,$0008ff4a,$6600fbd0,$f22ed0c0
+ dc.l $ffdcf22e,$9c00ff60,$4cee0303,$ff9c3d7c
+ dc.l $30c4000a,$3d7ce001,$ff6ef36e,$ff6c2c56
+ dc.l $2f6f00c4,$00b82f6f,$00c800bc,$2f6f00cc
+ dc.l $00c02f6f,$003800c4,$2f6f003c,$00c82f6f
+ dc.l $004000cc,$dffc0000,$00b860ff,$fffff106
+ dc.l $e9ee00c3,$ff420c00,$00016708,$0c000005
+ dc.l $67344e75,$302eff6c,$02407fff,$67260c40
+ dc.l $3f806e20,$44400640,$3f81222e,$ff70e0a9
+ dc.l $08c1001f,$2d41ff70,$026e8000,$ff6c006e
+ dc.l $3f80ff6c,$4e75302e,$ff6c0240,$7fff673a
+ dc.l $0c403c00,$6e344a2e,$ff6c5bee,$ff6e3d40
+ dc.l $ff6c4280,$41eeff6c,$323c3c01,$61ff0000
+ dc.l $1a66303c,$3c004a2e,$ff6e6704,$08c0000f
+ dc.l $08ee0007,$ff703d40,$ff6c4e75,$082e0005
+ dc.l $000467ff,$fffff176,$2d680000,$ff782d68
+ dc.l $0004ff7c,$2d680008,$ff804281,$4e752f00
+ dc.l $4e7a0808,$08000001,$66000460,$201f4e56
+ dc.l $ff4048ee,$0303ff9c,$f22ebc00,$ff60f22e
+ dc.l $f0c0ffdc,$2d6e0006,$ff44206e,$ff4458ae
+ dc.l $ff4461ff,$fffff152,$2d40ff40,$4a406b00
+ dc.l $020e02ae,$00ff00ff,$ff640800,$000a6618
+ dc.l $206eff44,$43eeff6c,$700c61ff,$fffff0d2
+ dc.l $4a816600,$04926048,$206eff44,$43eeff6c
+ dc.l $700c61ff,$fffff0ba,$4a816600,$047ae9ee
+ dc.l $004fff6c,$0c407fff,$6726102e,$ff6f0200
+ dc.l $000f660c,$4aaeff70,$66064aae,$ff746710
+ dc.l $41eeff6c,$61ff0000,$501af22e,$f080ff6c
+ dc.l $06ae0000,$000cff44,$41eeff6c,$61ff0000
+ dc.l $1cd21d40,$ff4e0c00,$0006660a,$61ff0000
+ dc.l $1c321d40,$ff4e422e,$ff53082e,$0005ff43
+ dc.l $6748082e,$0004ff43,$662ce9ee,$0183ff42
+ dc.l $61ff0000,$4e7641ee,$ff7861ff,$00001c94
+ dc.l $1d40ff4f,$0c000006,$662061ff,$00001bf4
+ dc.l $1d40ff4f,$6014082e,$0003ff43,$670c50ee
+ dc.l $ff53082e,$0001ff43,$67c04280,$102eff63
+ dc.l $122eff43,$0241007f,$f23c9000,$00000000
+ dc.l $f23c8800,$00000000,$41eeff6c,$43eeff78
+ dc.l $223b1530,$00000b2c,$4ebb1930,$00000b24
+ dc.l $102eff62,$66404a2e,$ff53660c,$e9ee0183
+ dc.l $ff4261ff,$00004e84,$2d6e0006,$ff682d6e
+ dc.l $ff440006,$f22ed0c0,$ffdcf22e,$9c00ff60
+ dc.l $4cee0303,$ff9c4e5e,$08170007,$66000096
+ dc.l $60ffffff,$ee6ec02e,$ff66edc0,$06086612
+ dc.l $082e0004,$ff6667ae,$082e0001,$ff6267ac
+ dc.l $60340480,$00000018,$0c000006,$6610082e
+ dc.l $0004ff66,$6620082e,$0003ff66,$66203d7b
+ dc.l $0206ff6e,$601ee002,$e006e004,$e005e003
+ dc.l $e002e001,$e0013d7c,$e005ff6e,$60063d7c
+ dc.l $e003ff6e,$2d6e0006,$ff682d6e,$ff440006
+ dc.l $f22ed0c0,$ffdcf22e,$9c00ff60,$4cee0303
+ dc.l $ff9cf36e,$ff6c4e5e,$08170007,$660660ff
+ dc.l $ffffede0,$2f173f6f,$00080004,$3f7c2024
+ dc.l $0006f22f,$a4000008,$60ffffff,$eeb80800
+ dc.l $000e6700,$01c2082e,$00050004,$66164e68
+ dc.l $2d48ffd8,$61ff0000,$0bce206e,$ffd84e60
+ dc.l $600001aa,$422eff4a,$41ee000c,$2d48ffd8
+ dc.l $61ff0000,$0bb20c2e,$0008ff4a,$67000086
+ dc.l $0c2e0004,$ff4a6600,$0184082e,$00070004
+ dc.l $66363dae,$00040804,$2daeff44,$08063dbc
+ dc.l $00f0080a,$41f60804,$2d480004,$f22ed0c0
+ dc.l $ffdcf22e,$9c00ff60,$4cee0303,$ff9c4e5e
+ dc.l $2e5f60ff,$ffffed3c,$3dae0004,$08002dae
+ dc.l $ff440802,$3dbc2024,$08062dae,$00060808
+ dc.l $41f60800,$2d480004,$f22ed0c0,$ffdcf22e
+ dc.l $9c00ff60,$4cee0303,$ff9c4e5e,$2e5f60ff
+ dc.l $ffffedf2,$1d41000a,$1d40000b,$f22ed0c0
+ dc.l $ffdcf22e,$9c00ff60,$4cee0303,$ff9c2f16
+ dc.l $2f002f01,$2f2eff44,$4280102e,$000b4480
+ dc.l $082e0007,$0004671c,$3dae0004,$08002dae
+ dc.l $00060808,$2d9f0802,$3dbc2024,$08064876
+ dc.l $08006014,$3dae0004,$08042d9f,$08063dbc
+ dc.l $00f0080a,$48760804,$4281122e,$000a4a01
+ dc.l $6a0cf236,$f080080c,$06800000,$000ce309
+ dc.l $6a0cf236,$f040080c,$06800000,$000ce309
+ dc.l $6a0cf236,$f020080c,$06800000,$000ce309
+ dc.l $6a0cf236,$f010080c,$06800000,$000ce309
+ dc.l $6a0cf236,$f008080c,$06800000,$000ce309
+ dc.l $6a0cf236,$f004080c,$06800000,$000ce309
+ dc.l $6a0cf236,$f002080c,$06800000,$000ce309
+ dc.l $6a06f236,$f001080c,$222f0004,$202f0008
+ dc.l $2c6f000c,$2e5f0817,$000767ff,$ffffec04
+ dc.l $60ffffff,$ecf061ff,$00001244,$f22ed0c0
+ dc.l $ffdcf22e,$9c00ff60,$4cee0303,$ff9c082e
+ dc.l $00070004,$660e2d6e,$ff440006,$4e5e60ff
+ dc.l $ffffebd0,$2c563f6f,$00c400c0,$2f6f00c6
+ dc.l $00c82f6f,$000400c2,$3f7c2024,$00c6dffc
+ dc.l $000000c0,$60ffffff,$ec9c201f,$4e56ff40
+ dc.l $48ee0303,$ff9c2d6e,$0006ff44,$206eff44
+ dc.l $58aeff44,$61ffffff,$ed002d40,$ff404a40
+ dc.l $6b047010,$60260800,$000e6610,$e9c014c3
+ dc.l $700c0c01,$00076614,$58806010,$428061ff
+ dc.l $00000ce6,$202eff44,$90ae0006,$3d40000a
+ dc.l $4cee0303,$ff9c4e5e,$518f2f00,$3f6f000c
+ dc.l $00042f6f,$000e0006,$4280302f,$00122f6f
+ dc.l $00060010,$d1af0006,$3f7c402c,$000a201f
+ dc.l $60ffffff,$ebe44e7a,$08080800,$0001660c
+ dc.l $f22e9c00,$ff60f22e,$d0c0ffdc,$4cee0303
+ dc.l $ff9c4e5e,$514f2eaf,$00083f6f,$000c0004
+ dc.l $3f7c4008,$00062f6f,$00020008,$2f7c0942
+ dc.l $8001000c,$08170005,$670608ef,$0002000d
+ dc.l $60ffffff,$ebd64fee,$ff404e7a,$18080801
+ dc.l $0001660c,$f22ed0c0,$ffdcf22f,$9c000020
+ dc.l $2c562f6f,$00c400bc,$3f6f00c8,$00c03f7c
+ dc.l $400800c2,$2f4800c4,$3f4000c8,$3f7c0001
+ dc.l $00ca4cef,$0303005c,$defc00bc,$60a64e56
+ dc.l $ff40f32e,$ff6c48ee,$0303ff9c,$f22ebc00
+ dc.l $ff60f22e,$f0c0ffdc,$2d6eff68,$ff44206e
+ dc.l $ff4458ae,$ff4461ff,$ffffebce,$2d40ff40
+ dc.l $0800000d,$662841ee,$ff6c61ff,$fffff1ea
+ dc.l $f22ed0c0,$ffdcf22e,$9c00ff60,$4cee0303
+ dc.l $ff9cf36e,$ff6c4e5e,$60ffffff,$ea94322e
+ dc.l $ff6c0241,$7fff0c41,$7fff661a,$4aaeff74
+ dc.l $660c222e,$ff700281,$7fffffff,$67082d6e
+ dc.l $ff70ff54,$6012223c,$7fffffff,$4a2eff6c
+ dc.l $6a025281,$2d41ff54,$e9c004c3,$122eff41
+ dc.l $307b0206,$4efb8802,$006c0000,$0000ff98
+ dc.l $003e0000,$00100000,$102eff54,$0c010007
+ dc.l $6f16206e,$000c61ff,$ffffeb86,$4a8166ff
+ dc.l $00005436,$6000ff6a,$02410007,$61ff0000
+ dc.l $478e6000,$ff5c302e,$ff540c01,$00076f16
+ dc.l $206e000c,$61ffffff,$eb6e4a81,$66ff0000
+ dc.l $54166000,$ff3c0241,$000761ff,$00004724
+ dc.l $6000ff2e,$202eff54,$0c010007,$6f16206e
+ dc.l $000c61ff,$ffffeb56,$4a8166ff,$000053f6
+ dc.l $6000ff0e,$02410007,$61ff0000,$46ba6000
+ dc.l $ff004e56,$ff40f32e,$ff6c48ee,$0303ff9c
+ dc.l $f22ebc00,$ff60f22e,$f0c0ffdc,$2d6eff68
+ dc.l $ff44206e,$ff4458ae,$ff4461ff,$ffffea8a
+ dc.l $2d40ff40,$0800000d,$6600002a,$41eeff6c
+ dc.l $61ffffff,$f0a4f22e,$d0c0ffdc,$f22e9c00
+ dc.l $ff604cee,$0303ff9c,$f36eff6c,$4e5e60ff
+ dc.l $ffffe964,$e9c004c3,$122eff41,$307b0206
+ dc.l $4efb8802,$007400a6,$015a0000,$00420104
+ dc.l $00100000,$102eff70,$08c00006,$0c010007
+ dc.l $6f16206e,$000c61ff,$ffffea76,$4a8166ff
+ dc.l $00005326,$6000ffa0,$02410007,$61ff0000
+ dc.l $467e6000,$ff92302e,$ff7008c0,$000e0c01
+ dc.l $00076f16,$206e000c,$61ffffff,$ea5a4a81
+ dc.l $66ff0000,$53026000,$ff6e0241,$000761ff
+ dc.l $00004610,$6000ff60,$202eff70,$08c0001e
+ dc.l $0c010007,$6f16206e,$000c61ff,$ffffea3e
+ dc.l $4a8166ff,$000052de,$6000ff3c,$02410007
+ dc.l $61ff0000,$45a26000,$ff2e0c01,$00076f2e
+ dc.l $202eff6c,$02808000,$00000080,$7fc00000
+ dc.l $222eff70,$e0898081,$206e000c,$61ffffff
+ dc.l $e9fc4a81,$66ff0000,$529c6000,$fefa202e
+ dc.l $ff6c0280,$80000000,$00807fc0,$00002f01
+ dc.l $222eff70,$e0898081,$221f0241,$000761ff
+ dc.l $00004544,$6000fed0,$202eff6c,$02808000
+ dc.l $00000080,$7ff80000,$222eff70,$2d40ff84
+ dc.l $700be0a9,$83aeff84,$222eff70,$02810000
+ dc.l $07ffe0b9,$2d41ff88,$222eff74,$e0a983ae
+ dc.l $ff8841ee,$ff84226e,$000c7008,$61ffffff
+ dc.l $e8cc4a81,$66ff0000,$522a6000,$fe7a422e
+ dc.l $ff4a3d6e,$ff6cff84,$426eff86,$202eff70
+ dc.l $08c0001e,$2d40ff88,$2d6eff74,$ff8c082e
+ dc.l $00050004,$66384e68,$2d48ffd8,$2d56ffd4
+ dc.l $61ff0000,$02c22248,$2d48000c,$206effd8
+ dc.l $4e602cae,$ffd441ee,$ff84700c,$61ffffff
+ dc.l $e86c4a81,$66ff0000,$51d86000,$fe1a2d56
+ dc.l $ffd461ff,$00000290,$22482d48,$000c2cae
+ dc.l $ffd40c2e,$0008ff4a,$66ccf22e,$d0c0ffdc
+ dc.l $f22e9c00,$ff604cee,$0303ff9c,$f36eff6c
+ dc.l $2c6effd4,$2f6f00c4,$00b82f6f,$00c800bc
+ dc.l $2f6f00cc,$00c02f6f,$004400c4,$2f6f0048
+ dc.l $00c82f6f,$004c00cc,$dffc0000,$00b860ff
+ dc.l $ffffe734,$4e56ff40,$f32eff6c,$48ee0303
+ dc.l $ff9cf22e,$bc00ff60,$f22ef0c0,$ffdc2d6e
+ dc.l $ff68ff44,$206eff44,$58aeff44,$61ffffff
+ dc.l $e7f82d40,$ff400800,$000d6600,$0106e9c0
+ dc.l $04c36622,$0c6e401e,$ff6c661a,$f23c9000
+ dc.l $00000000,$f22e4000,$ff70f22e,$6800ff6c
+ dc.l $3d7ce001,$ff6e41ee,$ff6c61ff,$ffffedea
+ dc.l $02ae00ff,$01ffff64,$f23c9000,$00000000
+ dc.l $f23c8800,$00000000,$e9ee1006,$ff420c01
+ dc.l $00176700,$009641ee,$ff6c61ff,$00001394
+ dc.l $1d40ff4e,$082e0005,$ff43672e,$082e0004
+ dc.l $ff436626,$e9ee0183,$ff4261ff,$0000454c
+ dc.l $41eeff78,$61ff0000,$136a0c00,$00066606
+ dc.l $61ff0000,$12ce1d40,$ff4f4280,$102eff63
+ dc.l $122eff43,$0241007f,$41eeff6c,$43eeff78
+ dc.l $223b1530,$0000022c,$4ebb1930,$00000224
+ dc.l $e9ee0183,$ff4261ff,$00004590,$f22ed0c0
+ dc.l $ffdcf22e,$9c00ff60,$4cee0303,$ff9cf36e
+ dc.l $ff6c4e5e,$60ffffff,$e5cc4280,$102eff63
+ dc.l $122eff43,$02810000,$007f61ff,$00000396
+ dc.l $60be1d7c,$0000ff4e,$4280102e,$ff6302ae
+ dc.l $ffff00ff,$ff6441ee,$ff6c61ff,$00001722
+ dc.l $60aa4e56,$ff40f32e,$ff6c48ee,$0303ff9c
+ dc.l $f22ebc00,$ff60f22e,$f0c0ffdc,$2d6eff68
+ dc.l $ff44206e,$ff4458ae,$ff4461ff,$ffffe69a
+ dc.l $2d40ff40,$41eeff6c,$61ffffff,$ecbcf22e
+ dc.l $d0c0ffdc,$f22e9c00,$ff604cee,$0303ff9c
+ dc.l $f36eff6c,$4e5e60ff,$ffffe592,$0c6f402c
+ dc.l $000667ff,$ffffe5b2,$60ffffff,$e5962040
+ dc.l $102eff41,$22000240,$00380281,$00000007
+ dc.l $0c000018,$67240c00,$0020672c,$80410c00
+ dc.l $003c6706,$206e000c,$4e751d7c,$0080ff4a
+ dc.l $41f60162,$ff680004,$4e752008,$61ff0000
+ dc.l $42ca206e,$000c4e75,$200861ff,$0000430c
+ dc.l $206e000c,$0c00000c,$67024e75,$51882d48
+ dc.l $000c4e75,$102eff41,$22000240,$00380281
+ dc.l $00000007,$0c000018,$670e0c00,$00206700
+ dc.l $0076206e,$000c4e75,$323b120e,$206e000c
+ dc.l $4efb1006,$4afc0008,$0010001a,$0024002c
+ dc.l $0034003c,$0044004e,$06ae0000,$000cffa4
+ dc.l $4e7506ae,$0000000c,$ffa84e75,$d5fc0000
+ dc.l $000c4e75,$d7fc0000,$000c4e75,$d9fc0000
+ dc.l $000c4e75,$dbfc0000,$000c4e75,$06ae0000
+ dc.l $000cffd4,$4e751d7c,$0004ff4a,$06ae0000
+ dc.l $000cffd8,$4e75323b,$1214206e,$000c5188
+ dc.l $51ae000c,$4efb1006,$4afc0008,$00100016
+ dc.l $001c0020,$00240028,$002c0032,$2d48ffa4
+ dc.l $4e752d48,$ffa84e75,$24484e75,$26484e75
+ dc.l $28484e75,$2a484e75,$2d48ffd4,$4e752d48
+ dc.l $ffd81d7c,$0008ff4a,$4e754afc,$006d0000
+ dc.l $20700000,$2a660000,$00000000,$2b0a0000
+ dc.l $3db20000,$00000000,$00000000,$00000000
+ dc.l $00000000,$00000000,$00000000,$00000000
+ dc.l $00000000,$00000000,$00000000,$00000000
+ dc.l $00000000,$00000000,$00000000,$00000000
+ dc.l $00000000,$00000000,$00000000,$00000000
+ dc.l $2bb00000,$00000000,$27460000,$00000000
+ dc.l $00000000,$00000000,$00000000,$00000000
+ dc.l $233c0000,$00000000,$36220000,$1c7c0000
+ dc.l $32f20000,$00000000,$00000000,$2fb00000
+ dc.l $39ea0000,$00000000,$00000000,$00000000
+ dc.l $00000000,$00000000,$00000000,$00000000
+ dc.l $00000000,$00000000,$00000000,$00000000
+ dc.l $00000000,$00000000,$00000000,$00000000
+ dc.l $2e4e0000,$00000000,$29f40000,$00000000
+ dc.l $00000000,$00000000,$00000000,$00000000
+ dc.l $205e0000,$3da00000,$00000000,$00000000
+ dc.l $20680000,$3daa0000,$00000000,$00000000
+ dc.l $00000000,$00000000,$00000000,$00000000
+ dc.l $00000000,$00000000,$00000000,$00000000
+ dc.l $00000000,$00000000,$00000000,$00000000
+ dc.l $00000000,$00000000,$00000000,$00000000
+ dc.l $2b9e0000,$00000000,$27340000,$00000000
+ dc.l $2ba80000,$00000000,$273e0000,$00000000
+ dc.l $232a0000,$00000000,$36100000,$1c6a0000
+ dc.l $23340000,$00000000,$361a0000,$1c740000
+ dc.l $39d80000,$00000000,$00000000,$00000000
+ dc.l $39e260fe,$122eff43,$02410070,$e80961ff
+ dc.l $00003ed2,$02800000,$00ff2f00,$103b0920
+ dc.l $01482f00,$61ff0000,$0340201f,$221f6700
+ dc.l $0134082e,$0005ff42,$670000b8,$082e0004
+ dc.l $ff426600,$001a123b,$1120021e,$082e0005
+ dc.l $0004670a,$0c2e0008,$ff4a6602,$4e752248
+ dc.l $9fc041d7,$4a016a0c,$20eeffdc,$20eeffe0
+ dc.l $20eeffe4,$e3096a0c,$20eeffe8,$20eeffec
+ dc.l $20eefff0,$e3096a0a,$f210f020,$d1fc0000
+ dc.l $000ce309,$6a0af210,$f010d1fc,$0000000c
+ dc.l $e3096a0a,$f210f008,$d1fc0000,$000ce309
+ dc.l $6a0af210,$f004d1fc,$0000000c,$e3096a0a
+ dc.l $f210f002,$d1fc0000,$000ce309,$6a0af210
+ dc.l $f001d1fc,$0000000c,$2d49ff54,$41d72f00
+ dc.l $61ffffff,$e248201f,$dfc04a81,$6600071e
+ dc.l $4e752d48,$ff549fc0,$43d72f01,$2f0061ff
+ dc.l $ffffe214,$201f4a81,$6600070e,$221f41d7
+ dc.l $4a016a0c,$2d58ffdc,$2d58ffe0,$2d58ffe4
+ dc.l $e3096a0c,$2d58ffe8,$2d58ffec,$2d58fff0
+ dc.l $e3096a04,$f218d020,$e3096a04,$f218d010
+ dc.l $e3096a04,$f218d008,$e3096a04,$f218d004
+ dc.l $e3096a04,$f218d002,$e3096a04,$f218d001
+ dc.l $dfc04e75,$4e75000c,$0c180c18,$18240c18
+ dc.l $18241824,$24300c18,$18241824,$24301824
+ dc.l $24302430,$303c0c18,$18241824,$24301824
+ dc.l $24302430,$303c1824,$24302430,$303c2430
+ dc.l $303c303c,$3c480c18,$18241824,$24301824
+ dc.l $24302430,$303c1824,$24302430,$303c2430
+ dc.l $303c303c,$3c481824,$24302430,$303c2430
+ dc.l $303c303c,$3c482430,$303c303c,$3c48303c
+ dc.l $3c483c48,$48540c18,$18241824,$24301824
+ dc.l $24302430,$303c1824,$24302430,$303c2430
+ dc.l $303c303c,$3c481824,$24302430,$303c2430
+ dc.l $303c303c,$3c482430,$303c303c,$3c48303c
+ dc.l $3c483c48,$48541824,$24302430,$303c2430
+ dc.l $303c303c,$3c482430,$303c303c,$3c48303c
+ dc.l $3c483c48,$48542430,$303c303c,$3c48303c
+ dc.l $3c483c48,$4854303c,$3c483c48,$48543c48
+ dc.l $48544854,$54600080,$40c020a0,$60e01090
+ dc.l $50d030b0,$70f00888,$48c828a8,$68e81898
+ dc.l $58d838b8,$78f80484,$44c424a4,$64e41494
+ dc.l $54d434b4,$74f40c8c,$4ccc2cac,$6cec1c9c
+ dc.l $5cdc3cbc,$7cfc0282,$42c222a2,$62e21292
+ dc.l $52d232b2,$72f20a8a,$4aca2aaa,$6aea1a9a
+ dc.l $5ada3aba,$7afa0686,$46c626a6,$66e61696
+ dc.l $56d636b6,$76f60e8e,$4ece2eae,$6eee1e9e
+ dc.l $5ede3ebe,$7efe0181,$41c121a1,$61e11191
+ dc.l $51d131b1,$71f10989,$49c929a9,$69e91999
+ dc.l $59d939b9,$79f90585,$45c525a5,$65e51595
+ dc.l $55d535b5,$75f50d8d,$4dcd2dad,$6ded1d9d
+ dc.l $5ddd3dbd,$7dfd0383,$43c323a3,$63e31393
+ dc.l $53d333b3,$73f30b8b,$4bcb2bab,$6beb1b9b
+ dc.l $5bdb3bbb,$7bfb0787,$47c727a7,$67e71797
+ dc.l $57d737b7,$77f70f8f,$4fcf2faf,$6fef1f9f
+ dc.l $5fdf3fbf,$7fff2040,$302eff40,$32000240
+ dc.l $003f0281,$00000007,$303b020a,$4efb0006
+ dc.l $4afc0040,$00000000,$00000000,$00000000
+ dc.l $00000000,$00000000,$00000000,$00000000
+ dc.l $00000000,$00800086,$008c0090,$00940098
+ dc.l $009c00a0,$00a600b6,$00c600d2,$00de00ea
+ dc.l $00f60102,$01180126,$0134013e,$01480152
+ dc.l $015c0166,$017a0198,$01b601d2,$01ee020a
+ dc.l $02260242,$02600260,$02600260,$02600260
+ dc.l $02600260,$02c002da,$02f40314,$00000000
+ dc.l $00000000,$206effa4,$4e75206e,$ffa84e75
+ dc.l $204a4e75,$204b4e75,$204c4e75,$204d4e75
+ dc.l $20564e75,$206effd8,$4e75202e,$ffa42200
+ dc.l $d2882d41,$ffa42040,$4e75202e,$ffa82200
+ dc.l $d2882d41,$ffa82040,$4e75200a,$2200d288
+ dc.l $24412040,$4e75200b,$2200d288,$26412040
+ dc.l $4e75200c,$2200d288,$28412040,$4e75200d
+ dc.l $2200d288,$2a412040,$4e752016,$2200d288
+ dc.l $2c812040,$4e751d7c,$0004ff4a,$202effd8
+ dc.l $2200d288,$2d41ffd8,$20404e75,$202effa4
+ dc.l $90882d40,$ffa42040,$4e75202e,$ffa89088
+ dc.l $2d40ffa8,$20404e75,$200a9088,$24402040
+ dc.l $4e75200b,$90882640,$20404e75,$200c9088
+ dc.l $28402040,$4e75200d,$90882a40,$20404e75
+ dc.l $20169088,$2c802040,$4e751d7c,$0008ff4a
+ dc.l $202effd8,$90882d40,$ffd82040,$4e75206e
+ dc.l $ff4454ae,$ff4461ff,$ffffde38,$4a8166ff
+ dc.l $fffff1b6,$3040d1ee,$ffa44e75,$206eff44
+ dc.l $54aeff44,$61ffffff,$de1a4a81,$66ffffff
+ dc.l $f1983040,$d1eeffa8,$4e75206e,$ff4454ae
+ dc.l $ff4461ff,$ffffddfc,$4a8166ff,$fffff17a
+ dc.l $3040d1ca,$4e75206e,$ff4454ae,$ff4461ff
+ dc.l $ffffdde0,$4a8166ff,$fffff15e,$3040d1cb
+ dc.l $4e75206e,$ff4454ae,$ff4461ff,$ffffddc4
+ dc.l $4a8166ff,$fffff142,$3040d1cc,$4e75206e
+ dc.l $ff4454ae,$ff4461ff,$ffffdda8,$4a8166ff
+ dc.l $fffff126,$3040d1cd,$4e75206e,$ff4454ae
+ dc.l $ff4461ff,$ffffdd8c,$4a8166ff,$fffff10a
+ dc.l $3040d1d6,$4e75206e,$ff4454ae,$ff4461ff
+ dc.l $ffffdd70,$4a8166ff,$fffff0ee,$3040d1ee
+ dc.l $ffd84e75,$508161ff,$000038fa,$2f00206e
+ dc.l $ff4454ae,$ff4461ff,$ffffdd48,$4a8166ff
+ dc.l $fffff0c6,$205f0800,$00086600,$00e62d40
+ dc.l $ff542200,$e9590241,$000f61ff,$000038c6
+ dc.l $2f02242e,$ff540802,$000b6602,$48c02202
+ dc.l $ef590281,$00000003,$e3a849c2,$d082d1c0
+ dc.l $241f4e75,$206eff44,$54aeff44,$61ffffff
+ dc.l $dcf24a81,$66ffffff,$f0703040,$4e75206e
+ dc.l $ff4458ae,$ff4461ff,$ffffdcee,$4a8166ff
+ dc.l $fffff056,$20404e75,$206eff44,$54aeff44
+ dc.l $61ffffff,$dcbe4a81,$66ffffff,$f03c3040
+ dc.l $d1eeff44,$55884e75,$206eff44,$54aeff44
+ dc.l $61ffffff,$dc9e4a81,$66ffffff,$f01c206e
+ dc.l $ff445588,$08000008,$66000038,$2d40ff54
+ dc.l $2200e959,$0241000f,$61ff0000,$38182f02
+ dc.l $242eff54,$0802000b,$660248c0,$2202ef59
+ dc.l $02810000,$0003e3a8,$49c2d082,$d1c0241f
+ dc.l $4e750800,$0006670c,$48e73c00,$2a002608
+ dc.l $42826028,$2d40ff54,$e9c01404,$61ff0000
+ dc.l $37d448e7,$3c002400,$2a2eff54,$26080805
+ dc.l $000b6602,$48c2e9c5,$0542e1aa,$08050007
+ dc.l $67024283,$e9c50682,$0c000002,$6d346718
+ dc.l $206eff44,$58aeff44,$61ffffff,$dc0c4a81
+ dc.l $66ff0000,$00b06018,$206eff44,$54aeff44
+ dc.l $61ffffff,$dbde4a81,$66ff0000,$009848c0
+ dc.l $d680e9c5,$07826700,$006e0c00,$00026d34
+ dc.l $6718206e,$ff4458ae,$ff4461ff,$ffffdbca
+ dc.l $4a8166ff,$0000006e,$601c206e,$ff4454ae
+ dc.l $ff4461ff,$ffffdb9c,$4a8166ff,$00000056
+ dc.l $48c06002,$42802800,$08050002,$67142043
+ dc.l $61ffffff,$dbd64a81,$66000028,$d082d084
+ dc.l $6018d682,$204361ff,$ffffdbc0,$4a816600
+ dc.l $0012d084,$6004d682,$20032040,$4cdf003c
+ dc.l $4e752043,$4cdf003c,$303c0101,$60ffffff
+ dc.l $ef184cdf,$003c60ff,$ffffeebe,$61ff0000
+ dc.l $44ea303c,$00e1600a,$61ff0000,$44de303c
+ dc.l $0161206e,$ff5460ff,$ffffeeee,$102eff42
+ dc.l $0c00009c,$670000b2,$0c000098,$67000074
+ dc.l $0c000094,$6736206e,$ff4458ae,$ff4461ff
+ dc.l $ffffdb06,$4a8166ff,$ffffee6e,$2d40ff64
+ dc.l $206eff44,$58aeff44,$61ffffff,$daec4a81
+ dc.l $66ffffff,$ee542d40,$ff684e75,$206eff44
+ dc.l $58aeff44,$61ffffff,$dad04a81,$66ffffff
+ dc.l $ee382d40,$ff60206e,$ff4458ae,$ff4461ff
+ dc.l $ffffdab6,$4a8166ff,$ffffee1e,$2d40ff68
+ dc.l $4e75206e,$ff4458ae,$ff4461ff,$ffffda9a
+ dc.l $4a8166ff,$ffffee02,$2d40ff60,$206eff44
+ dc.l $58aeff44,$61ffffff,$da804a81,$66ffffff
+ dc.l $ede82d40,$ff644e75,$206eff44,$58aeff44
+ dc.l $61ffffff,$da644a81,$66ffffff,$edcc2d40
+ dc.l $ff60206e,$ff4458ae,$ff4461ff,$ffffda4a
+ dc.l $4a8166ff,$ffffedb2,$2d40ff64,$206eff44
+ dc.l $58aeff44,$61ffffff,$da304a81,$66ffffff
+ dc.l $ed982d40,$ff684e75,$2d680004,$ff882d69
+ dc.l $0004ff94,$2d680008,$ff8c2d69,$0008ff98
+ dc.l $30280000,$32290000,$3d40ff84,$3d41ff90
+ dc.l $02407fff,$02417fff,$3d40ff54,$3d41ff56
+ dc.l $b0416cff,$0000005c,$61ff0000,$015a2f00
+ dc.l $0c2e0004,$ff4e6610,$41eeff84,$61ff0000
+ dc.l $04fa4440,$3d40ff54,$302eff56,$04400042
+ dc.l $b06eff54,$6c1a302e,$ff54d06f,$0002322e
+ dc.l $ff840241,$80008041,$3d40ff84,$201f4e75
+ dc.l $026e8000,$ff8408ee,$0000ff85,$201f4e75
+ dc.l $61ff0000,$00562f00,$0c2e0004,$ff4f6610
+ dc.l $41eeff90,$61ff0000,$04a24440,$3d40ff56
+ dc.l $302eff54,$04400042,$b06eff56,$6c1a302e
+ dc.l $ff56d06f,$0002322e,$ff900241,$80008041
+ dc.l $3d40ff90,$201f4e75,$026e8000,$ff9008ee
+ dc.l $0000ff91,$201f4e75,$322eff84,$30010281
+ dc.l $00007fff,$02408000,$00403fff,$3d40ff84
+ dc.l $0c2e0004,$ff4e670a,$203c0000,$3fff9081
+ dc.l $4e7541ee,$ff8461ff,$00000430,$44802200
+ dc.l $60e60c2e,$0004ff4e,$673a322e,$ff840281
+ dc.l $00007fff,$026e8000,$ff840801,$00006712
+ dc.l $006e3fff,$ff84203c,$00003fff,$9081e280
+ dc.l $4e75006e,$3ffeff84,$203c0000,$3ffe9081
+ dc.l $e2804e75,$41eeff84,$61ff0000,$03de0800
+ dc.l $00006710,$006e3fff,$ff840680,$00003fff
+ dc.l $e2804e75,$006e3ffe,$ff840680,$00003ffe
+ dc.l $e2804e75,$322eff90,$30010281,$00007fff
+ dc.l $02408000,$00403fff,$3d40ff90,$0c2e0004
+ dc.l $ff4f670a,$203c0000,$3fff9081,$4e7541ee
+ dc.l $ff9061ff,$00000384,$44802200,$60e60c2e
+ dc.l $0005ff4f,$67320c2e,$0003ff4f,$673e0c2e
+ dc.l $0003ff4e,$671408ee,$0006ff70,$00ae0100
+ dc.l $4080ff64,$41eeff6c,$604200ae,$01000000
+ dc.l $ff6441ee,$ff6c6034,$00ae0100,$4080ff64
+ dc.l $08ee0006,$ff7c41ee,$ff786020,$41eeff78
+ dc.l $0c2e0005,$ff4e66ff,$0000000c,$00ae0000
+ dc.l $4080ff64,$00ae0100,$0000ff64,$08280007
+ dc.l $00006708,$00ae0800,$0000ff64,$f210d080
+ dc.l $4e7500ae,$01002080,$ff64f23b,$d0800170
+ dc.l $00000008,$4e757fff,$0000ffff,$ffffffff
+ dc.l $ffff0000,$3f813c01,$e408323b,$02f63001
+ dc.l $90680000,$0c400042,$6a164280,$082e0001
+ dc.l $ff666704,$08c0001d,$61ff0000,$001a4e75
+ dc.l $203c2000,$00003141,$000042a8,$000442a8
+ dc.l $00084e75,$2d680008,$ff542d40,$ff582001
+ dc.l $92680000,$6f100c41,$00206d10,$0c410040
+ dc.l $6d506000,$009a202e,$ff584e75,$2f023140
+ dc.l $00007020,$90410c41,$001d6d08,$142eff58
+ dc.l $852eff57,$e9e82020,$0004e9e8,$18000004
+ dc.l $e9ee0800,$ff542142,$00042141,$0008e8c0
+ dc.l $009e6704,$08c0001d,$0280e000,$0000241f
+ dc.l $4e752f02,$31400000,$04410020,$70209041
+ dc.l $142eff58,$852eff57,$e9e82020,$0004e9e8
+ dc.l $18000004,$e8c1009e,$660ce8ee,$081fff54
+ dc.l $66042001,$60062001,$08c0001d,$42a80004
+ dc.l $21420008,$0280e000,$0000241f,$4e753140
+ dc.l $00000c41,$00416d12,$672442a8,$000442a8
+ dc.l $0008203c,$20000000,$4e752028,$00042200
+ dc.l $0280c000,$00000281,$3fffffff,$60122028
+ dc.l $00040280,$80000000,$e2880281,$7fffffff
+ dc.l $66164aa8,$00086610,$4a2eff58,$660a42a8
+ dc.l $000442a8,$00084e75,$08c0001d,$42a80004
+ dc.l $42a80008,$4e7561ff,$00000110,$4a806700
+ dc.l $00fa006e,$0208ff66,$327b1206,$4efb9802
+ dc.l $004000ea,$00240008,$4a280002,$6b0000dc
+ dc.l $70ff4841,$0c010004,$6700003e,$6e000094
+ dc.l $60000064,$4a280002,$6a0000c0,$70ff4841
+ dc.l $0c010004,$67000022,$6e000078,$60000048
+ dc.l $e3806400,$00a64841,$0c010004,$6700000a
+ dc.l $6e000060,$60000030,$06a80000,$01000004
+ dc.l $640ce4e8,$0004e4e8,$00065268,$00004a80
+ dc.l $66060268,$fe000006,$02a8ffff,$ff000004
+ dc.l $42a80008,$4e7552a8,$0008641a,$52a80004
+ dc.l $6414e4e8,$0004e4e8,$0006e4e8,$0008e4e8
+ dc.l $000a5268,$00004a80,$66060228,$00fe000b
+ dc.l $4e7506a8,$00000800,$0008641a,$52a80004
+ dc.l $6414e4e8,$0004e4e8,$0006e4e8,$0008e4e8
+ dc.l $000a5268,$00004a80,$66060268,$f000000a
+ dc.l $02a8ffff,$f8000008,$4e754841,$0c010004
+ dc.l $6700ff86,$6eea4e75,$48414a01,$66044841
+ dc.l $4e7548e7,$30000c01,$00046622,$e9e83602
+ dc.l $0004741e,$e5ab2428,$00040282,$0000003f
+ dc.l $66284aa8,$00086622,$4a80661e,$6020e9e8
+ dc.l $35420008,$741ee5ab,$24280008,$02820000
+ dc.l $01ff6606,$4a806602,$600408c3,$001d2003
+ dc.l $4cdf000c,$48414e75,$2f022f03,$20280004
+ dc.l $22280008,$edc02000,$671ae5a8,$e9c13022
+ dc.l $8083e5a9,$21400004,$21410008,$2002261f
+ dc.l $241f4e75,$edc12000,$e5a90682,$00000020
+ dc.l $21410004,$42a80008,$2002261f,$241f4e75
+ dc.l $ede80000,$0004660e,$ede80000,$00086700
+ dc.l $00740640,$00204281,$32280000,$02417fff
+ dc.l $b0416e1c,$92403028,$00000240,$80008240
+ dc.l $31410000,$61ffffff,$ff82103c,$00004e75
+ dc.l $0c010020,$6e20e9e8,$08400004,$21400004
+ dc.l $20280008,$e3a82140,$00080268,$80000000
+ dc.l $103c0004,$4e750441,$00202028,$0008e3a8
+ dc.l $21400004,$42a80008,$02688000,$0000103c
+ dc.l $00044e75,$02688000,$0000103c,$00014e75
+ dc.l $30280000,$02407fff,$0c407fff,$67480828
+ dc.l $00070004,$6706103c,$00004e75,$4a406618
+ dc.l $4aa80004,$660c4aa8,$00086606,$103c0001
+ dc.l $4e75103c,$00044e75,$4aa80004,$66124aa8
+ dc.l $0008660c,$02688000,$0000103c,$00014e75
+ dc.l $103c0006,$4e754aa8,$00086612,$20280004
+ dc.l $02807fff,$ffff6606,$103c0002,$4e750828
+ dc.l $00060004,$6706103c,$00034e75,$103c0005
+ dc.l $4e752028,$00002200,$02807ff0,$0000670e
+ dc.l $0c807ff0,$00006728,$103c0000,$4e750281
+ dc.l $000fffff,$66ff0000,$00144aa8,$000466ff
+ dc.l $0000000a,$103c0001,$4e75103c,$00044e75
+ dc.l $0281000f,$ffff66ff,$00000014,$4aa80004
+ dc.l $66ff0000,$000a103c,$00024e75,$08010013
+ dc.l $66ff0000,$000a103c,$00054e75,$103c0003
+ dc.l $4e752028,$00002200,$02807f80,$0000670e
+ dc.l $0c807f80,$0000671e,$103c0000,$4e750281
+ dc.l $007fffff,$66ff0000,$000a103c,$00014e75
+ dc.l $103c0004,$4e750281,$007fffff,$66ff0000
+ dc.l $000a103c,$00024e75,$08010016,$66ff0000
+ dc.l $000a103c,$00054e75,$103c0003,$4e752f01
+ dc.l $08280007,$000056e8,$00023228,$00000241
+ dc.l $7fff9240,$31410000,$2f08202f,$00040240
+ dc.l $00c0e848,$61ffffff,$fae22057,$322f0006
+ dc.l $024100c0,$e8494841,$322f0006,$02410030
+ dc.l $e84961ff,$fffffc22,$205f08a8,$00070000
+ dc.l $4a280002,$670a08e8,$00070000,$42280002
+ dc.l $42804aa8,$0004660a,$4aa80008,$660408c0
+ dc.l $0002082e,$0001ff66,$670608ee,$0005ff67
+ dc.l $588f4e75,$2f010828,$00070000,$56e80002
+ dc.l $32280000,$02417fff,$92403141,$00002f08
+ dc.l $428061ff,$fffffa64,$2057323c,$00044841
+ dc.l $322f0006,$02410030,$e84961ff,$fffffbaa
+ dc.l $205f08a8,$00070000,$4a280002,$670a08e8
+ dc.l $00070000,$42280002,$42804aa8,$0004660a
+ dc.l $4aa80008,$660408c0,$0002082e,$0001ff66
+ dc.l $670608ee,$0005ff67,$588f4e75,$02410010
+ dc.l $e8088200,$3001e309,$600e0241,$00108200
+ dc.l $48408200,$3001e309,$103b0008,$41fb1620
+ dc.l $4e750200,$00020200,$00020200,$00020000
+ dc.l $00000a08,$0a080a08,$0a080a08,$0a087fff
+ dc.l $00000000,$00000000,$00000000,$00007ffe
+ dc.l $0000ffff,$ffffffff,$ffff0000,$00007ffe
+ dc.l $0000ffff,$ffffffff,$ffff0000,$00007fff
+ dc.l $00000000,$00000000,$00000000,$00007fff
+ dc.l $00000000,$00000000,$00000000,$0000407e
+ dc.l $0000ffff,$ff000000,$00000000,$0000407e
+ dc.l $0000ffff,$ff000000,$00000000,$00007fff
+ dc.l $00000000,$00000000,$00000000,$00007fff
+ dc.l $00000000,$00000000,$00000000,$000043fe
+ dc.l $0000ffff,$ffffffff,$f8000000,$000043fe
+ dc.l $0000ffff,$ffffffff,$f8000000,$00007fff
+ dc.l $00000000,$00000000,$00000000,$00000000
+ dc.l $00000000,$00000000,$00000000,$00000000
+ dc.l $00000000,$00000000,$00000000,$00000000
+ dc.l $00000000,$00000000,$00000000,$00000000
+ dc.l $00000000,$00000000,$00000000,$0000ffff
+ dc.l $00000000,$00000000,$00000000,$0000fffe
+ dc.l $0000ffff,$ffffffff,$ffff0000,$0000ffff
+ dc.l $00000000,$00000000,$00000000,$0000fffe
+ dc.l $0000ffff,$ffffffff,$ffff0000,$0000ffff
+ dc.l $00000000,$00000000,$00000000,$0000c07e
+ dc.l $0000ffff,$ff000000,$00000000,$0000ffff
+ dc.l $00000000,$00000000,$00000000,$0000c07e
+ dc.l $0000ffff,$ff000000,$00000000,$0000ffff
+ dc.l $00000000,$00000000,$00000000,$0000c3fe
+ dc.l $0000ffff,$ffffffff,$f8000000,$0000ffff
+ dc.l $00000000,$00000000,$00000000,$0000c3fe
+ dc.l $0000ffff,$ffffffff,$f8000000,$0000e9ee
+ dc.l $10c3ff42,$327b120a,$4efb9806,$4afc0008
+ dc.l $00e001e0,$01480620,$0078041a,$00100620
+ dc.l $4a2eff4e,$664cf228,$d0800000,$f2009000
+ dc.l $f2007800,$f23c9000,$00000000,$f201a800
+ dc.l $836eff66,$122eff41,$02010038,$6714206e
+ dc.l $000c61ff,$ffffcfaa,$4a8166ff,$0000385a
+ dc.l $4e75122e,$ff410241,$000761ff,$00002bb0
+ dc.l $4e752228,$00000281,$80000000,$00810080
+ dc.l $0000f201,$440060a4,$4a2eff4e,$664cf228
+ dc.l $d0800000,$f2009000,$f2007000,$f23c9000
+ dc.l $00000000,$f201a800,$836eff66,$122eff41
+ dc.l $02010038,$6714206e,$000c61ff,$ffffcf58
+ dc.l $4a8166ff,$00003800,$4e75122e,$ff410241
+ dc.l $000761ff,$00002b0c,$4e752228,$00000281
+ dc.l $80000000,$00810080,$0000f201,$440060a4
+ dc.l $4a2eff4e,$664cf228,$d0800000,$f2009000
+ dc.l $f2006000,$f23c9000,$00000000,$f201a800
+ dc.l $836eff66,$122eff41,$02010038,$6714206e
+ dc.l $000c61ff,$ffffcf06,$4a8166ff,$000037a6
+ dc.l $4e75122e,$ff410241,$000761ff,$00002a68
+ dc.l $4e752228,$00000281,$80000000,$00810080
+ dc.l $0000f201,$440060a4,$3d680000,$ff84426e
+ dc.l $ff862d68,$0004ff88,$2d680008,$ff8cf228
+ dc.l $d0800000,$61ffffff,$e83e2248,$41eeff84
+ dc.l $700c0c2e,$0008ff4a,$672661ff,$ffffcdee
+ dc.l $4a816600,$00524a2e,$ff4e6602,$4e7508ee
+ dc.l $0003ff66,$102eff62,$0200000a,$66164e75
+ dc.l $61ffffff,$dc4a4a81,$6600002c,$4a2eff4e
+ dc.l $66dc4e75,$41eeff84,$61ffffff,$f90e4440
+ dc.l $02407fff,$026e8000,$ff84816e,$ff84f22e
+ dc.l $d040ff84,$4e752cae,$ffd460ff,$00003702
+ dc.l $02000030,$00000040,$2d40ff5c,$30280000
+ dc.l $02407fff,$0c40407e,$6e0000e6,$67000152
+ dc.l $0c403f81,$6d000058,$f228d080,$0000f22e
+ dc.l $9000ff5c,$f23c8800,$00000000,$f2006400
+ dc.l $f23c9000,$00000000,$f201a800,$836eff66
+ dc.l $122eff41,$02010038,$6714206e,$000c61ff
+ dc.l $ffffcdda,$4a8166ff,$0000367a,$4e75122e
+ dc.l $ff410241,$000761ff,$0000293c,$4e7508ee
+ dc.l $0003ff66,$3d680000,$ff842d68,$0004ff88
+ dc.l $2d680008,$ff8c2f08,$42800c2e,$0004ff4e
+ dc.l $660a41ee,$ff8461ff,$fffff840,$41eeff84
+ dc.l $222eff5c,$61ffffff,$fa5841ee,$ff8461ff
+ dc.l $0000034c,$122eff41,$02010038,$6714206e
+ dc.l $000c61ff,$ffffcd66,$4a8166ff,$00003606
+ dc.l $600e122e,$ff410241,$000761ff,$000028c8
+ dc.l $122eff62,$0201000a,$660000b8,$588f4e75
+ dc.l $4a280007,$660e4aa8,$00086608,$006e1048
+ dc.l $ff666006,$006e1248,$ff662f08,$4a280000
+ dc.l $5bc1202e,$ff5c61ff,$fffffae4,$f210d080
+ dc.l $f2006400,$122eff41,$02010038,$6714206e
+ dc.l $000c61ff,$ffffccf6,$4a8166ff,$00003596
+ dc.l $600e122e,$ff410241,$000761ff,$00002858
+ dc.l $122eff62,$0201000a,$6600007c,$588f4e75
+ dc.l $32280000,$02418000,$00413fff,$3d41ff84
+ dc.l $2d680004,$ff882d68,$0008ff8c,$f22e9000
+ dc.l $ff5cf22e,$4800ff84,$f23c9000,$00000000
+ dc.l $f2000018,$f23c5838,$0002f294,$fe7c6000
+ dc.l $ff50205f,$3d680000,$ff842d68,$0004ff88
+ dc.l $2d680008,$ff8c0c2e,$0004ff4e,$662c41ee
+ dc.l $ff8461ff,$fffff714,$44800240,$7fffefee
+ dc.l $004fff84,$6014205f,$3d680000,$ff842d68
+ dc.l $0004ff88,$2d680008,$ff8c08ae,$0007ff84
+ dc.l $56eeff86,$41eeff84,$122eff5f,$e8090241
+ dc.l $000c4841,$122eff5f,$e8090241,$00034280
+ dc.l $61ffffff,$f5544a2e,$ff866706,$08ee0007
+ dc.l $ff84f22e,$d040ff84,$4e750200,$00300000
+ dc.l $00802d40,$ff5c3028,$00000240,$7fff0c40
+ dc.l $43fe6e00,$00c86700,$01200c40,$3c016d00
+ dc.l $0046f228,$d0800000,$f22e9000,$ff5cf23c
+ dc.l $88000000,$0000f22e,$7400ff54,$f23c9000
+ dc.l $00000000,$f200a800,$816eff66,$226e000c
+ dc.l $41eeff54,$700861ff,$ffffcaf2,$4a8166ff
+ dc.l $00003450,$4e7508ee,$0003ff66,$3d680000
+ dc.l $ff842d68,$0004ff88,$2d680008,$ff8c2f08
+ dc.l $42800c2e,$0004ff4e,$660a41ee,$ff8461ff
+ dc.l $fffff618,$41eeff84,$222eff5c,$61ffffff
+ dc.l $f83041ee,$ff8461ff,$000000d2,$2d40ff54
+ dc.l $2d41ff58,$226e000c,$41eeff54,$700861ff
+ dc.l $ffffca8a,$4a8166ff,$000033e8,$122eff62
+ dc.l $0201000a,$6600fe9c,$588f4e75,$3028000a
+ dc.l $024007ff,$6608006e,$1048ff66,$6006006e
+ dc.l $1248ff66,$2f084a28,$00005bc1,$202eff5c
+ dc.l $61ffffff,$f8caf210,$d080f22e,$7400ff54
+ dc.l $226e000c,$41eeff54,$700861ff,$ffffca2e
+ dc.l $4a8166ff,$0000338c,$122eff62,$0201000a
+ dc.l $6600fe74,$588f4e75,$32280000,$02418000
+ dc.l $00413fff,$3d41ff84,$2d680004,$ff882d68
+ dc.l $0008ff8c,$f22e9000,$ff5cf22e,$4800ff84
+ dc.l $f23c9000,$00000000,$f2000018,$f23c5838
+ dc.l $0002f294,$feae6000,$ff644280,$30280000
+ dc.l $04403fff,$064003ff,$4a280004,$6b025340
+ dc.l $4840e988,$4a280000,$6a0408c0,$001f2228
+ dc.l $0004e9c1,$10548081,$2d40ff54,$22280004
+ dc.l $7015e1a9,$2d41ff58,$22280008,$e9c10015
+ dc.l $222eff58,$8280202e,$ff544e75,$42803028
+ dc.l $00000440,$3fff0640,$007f4a28,$00046b02
+ dc.l $53404840,$ef884a28,$00006a04,$08c0001f
+ dc.l $22280004,$02817fff,$ff00e089,$80814e75
+ dc.l $61ffffff,$e3822f08,$102eff4e,$66000082
+ dc.l $082e0004,$ff426712,$122eff43,$e8090241
+ dc.l $000761ff,$000024de,$6004102e,$ff43ebc0
+ dc.l $06472f00,$41eeff6c,$61ff0000,$2b2002ae
+ dc.l $cffff00f,$ff84201f,$4a2eff87,$66164aae
+ dc.l $ff886610,$4aaeff8c,$660a4a80,$6606026e
+ dc.l $f000ff84,$41eeff84,$225f700c,$0c2e0008
+ dc.l $ff4a670e,$61ffffff,$c8d44a81,$6600fb38
+ dc.l $4e7561ff,$ffffd748,$4a816600,$fb2a4e75
+ dc.l $0c000004,$6700ff7a,$41eeff6c,$426eff6e
+ dc.l $0c000005,$670260c0,$006e4080,$ff6608ee
+ dc.l $0006ff70,$60b251fc,$51fc51fc,$51fc51fc
+ dc.l $ffffc001,$ffffff81,$fffffc01,$00004000
+ dc.l $0000007f,$000003ff,$02000030,$00000040
+ dc.l $60080200,$00300000,$00802d40,$ff5c4241
+ dc.l $122eff4f,$e709822e,$ff4e6600,$02e43d69
+ dc.l $0000ff90,$2d690004,$ff942d69,$0008ff98
+ dc.l $3d680000,$ff842d68,$0004ff88,$2d680008
+ dc.l $ff8c61ff,$ffffef24,$2f0061ff,$ffffefc8
+ dc.l $d197322e,$ff5eec09,$201fb0bb,$14846700
+ dc.l $011e6d00,$0062b0bb,$14846700,$021a6e00
+ dc.l $014af22e,$d080ff90,$f22e9000,$ff5cf23c
+ dc.l $88000000,$0000f22e,$4823ff84,$f201a800
+ dc.l $f23c9000,$00000000,$83aeff64,$f22ef080
+ dc.l $ff842f02,$322eff84,$24010281,$00007fff
+ dc.l $02428000,$92808242,$3d41ff84,$241ff22e
+ dc.l $d080ff84,$4e75f22e,$d080ff90,$f22e9000
+ dc.l $ff5cf23c,$88000000,$0000f22e,$4823ff84
+ dc.l $f201a800,$f23c9000,$00000000,$83aeff64
+ dc.l $00ae0000,$1048ff64,$122eff62,$02010013
+ dc.l $661c082e,$0003ff64,$56c1202e,$ff5c61ff
+ dc.l $fffff5dc,$812eff64,$f210d080,$4e75222e
+ dc.l $ff5c0201,$00c06634,$f22ef080,$ff842f02
+ dc.l $322eff84,$34010281,$00007fff,$92800481
+ dc.l $00006000,$02417fff,$02428000,$82423d41
+ dc.l $ff84241f,$f22ed040,$ff8460a6,$f22ed080
+ dc.l $ff90222e,$ff5c0201,$0030f201,$9000f22e
+ dc.l $4823ff84,$f23c9000,$00000000,$60aaf22e
+ dc.l $d080ff90,$f22e9000,$ff5cf23c,$88000000
+ dc.l $0000f22e,$4823ff84,$f201a800,$f23c9000
+ dc.l $00000000,$83aeff64,$f2000098,$f23c58b8
+ dc.l $0002f293,$ff3c6000,$fee408ee,$0003ff66
+ dc.l $f22ed080,$ff90f23c,$90000000,$0010f23c
+ dc.l $88000000,$0000f22e,$4823ff84,$f201a800
+ dc.l $f23c9000,$00000000,$83aeff64,$122eff62
+ dc.l $0201000b,$6620f22e,$f080ff84,$41eeff84
+ dc.l $222eff5c,$61ffffff,$f3e8812e,$ff64f22e
+ dc.l $d080ff84,$4e75f22e,$d040ff90,$222eff5c
+ dc.l $020100c0,$6652f22e,$9000ff5c,$f23c8800
+ dc.l $00000000,$f22e48a3,$ff84f23c,$90000000
+ dc.l $0000f22e,$f040ff84,$2f02322e,$ff842401
+ dc.l $02810000,$7fff0242,$80009280,$06810000
+ dc.l $60000241,$7fff8242,$3d41ff84,$241ff22e
+ dc.l $d040ff84,$6000ff80,$222eff5c,$02010030
+ dc.l $f2019000,$60a6f22e,$d080ff90,$f22e9000
+ dc.l $ff5cf23c,$88000000,$0000f22e,$4823ff84
+ dc.l $f201a800,$f23c9000,$00000000,$83aeff64
+ dc.l $f2000098,$f23c58b8,$0002f292,$fde0f294
+ dc.l $fefaf22e,$d040ff90,$222eff5c,$020100c0
+ dc.l $00010010,$f2019000,$f23c8800,$00000000
+ dc.l $f22e48a3,$ff84f23c,$90000000,$0000f200
+ dc.l $0498f23c,$58b80002,$f293fda2,$6000febc
+ dc.l $323b120a,$4efb1006,$4afc0030,$fd120072
+ dc.l $00cc006c,$fd120066,$00000000,$00720072
+ dc.l $0060006c,$00720066,$00000000,$009e0060
+ dc.l $009e006c,$009e0066,$00000000,$006c006c
+ dc.l $006c006c,$006c0066,$00000000,$fd120072
+ dc.l $00cc006c,$fd120066,$00000000,$00660066
+ dc.l $00660066,$00660066,$00000000,$60ffffff
+ dc.l $ed6460ff,$ffffecda,$60ffffff,$ecd41028
+ dc.l $00001229,$0000b101,$6a10f23c,$44008000
+ dc.l $00001d7c,$000cff64,$4e75f23c,$44000000
+ dc.l $00001d7c,$0004ff64,$4e75f229,$d0800000
+ dc.l $10280000,$12290000,$b1016a10,$f2000018
+ dc.l $f200001a,$1d7c000a,$ff644e75,$f2000018
+ dc.l $1d7c0002,$ff644e75,$f228d080,$00001028
+ dc.l $00001229,$0000b101,$6ae260d0,$02000030
+ dc.l $00000040,$60080200,$00300000,$00802d40
+ dc.l $ff5c122e,$ff4e6600,$02620200,$00c06600
+ dc.l $007c4a28,$00006a06,$08ee0003,$ff64f228
+ dc.l $d0800000,$4e750200,$00c06600,$006008ee
+ dc.l $0003ff66,$4a280000,$6a0608ee,$0003ff64
+ dc.l $f228d080,$0000082e,$0003ff62,$66024e75
+ dc.l $3d680000,$ff842d68,$0004ff88,$2d680008
+ dc.l $ff8c41ee,$ff8461ff,$ffffef60,$44400640
+ dc.l $6000322e,$ff840241,$80000240,$7fff8041
+ dc.l $3d40ff84,$f22ed040,$ff844e75,$0c000040
+ dc.l $667e3d68,$0000ff84,$2d680004,$ff882d68
+ dc.l $0008ff8c,$61ffffff,$eac20c80,$0000007f
+ dc.l $6c000092,$0c80ffff,$ff816700,$01786d00
+ dc.l $00f4f23c,$88000000,$0000f22e,$9000ff5c
+ dc.l $f22e4800,$ff84f201,$a800f23c,$90000000
+ dc.l $000083ae,$ff642f02,$f22ef080,$ff84322e
+ dc.l $ff843401,$02810000,$7fff9280,$02428000
+ dc.l $84413d42,$ff84241f,$f22ed080,$ff844e75
+ dc.l $3d680000,$ff842d68,$0004ff88,$2d680008
+ dc.l $ff8c61ff,$ffffea44,$0c800000,$03ff6c00
+ dc.l $00140c80,$fffffc01,$670000fa,$6d000076
+ dc.l $6000ff80,$08ee0003,$ff664a2e,$ff846a06
+ dc.l $08ee0003,$ff64122e,$ff620201,$000b661a
+ dc.l $41eeff84,$222eff5c,$61ffffff,$f084812e
+ dc.l $ff64f22e,$d080ff84,$4e752d6e,$ff88ff94
+ dc.l $2d6eff8c,$ff98322e,$ff842f02,$34010281
+ dc.l $00007fff,$92800242,$80000681,$00006000
+ dc.l $02417fff,$84413d42,$ff90f22e,$d040ff90
+ dc.l $241f60ac,$f23c8800,$00000000,$f22e9000
+ dc.l $ff5cf22e,$4800ff84,$f23c9000,$00000000
+ dc.l $f201a800,$83aeff64,$00ae0000,$1048ff64
+ dc.l $122eff62,$02010013,$661c082e,$0003ff64
+ dc.l $56c1202e,$ff5c61ff,$fffff0f4,$812eff64
+ dc.l $f210d080,$4e752f02,$322eff84,$24010281
+ dc.l $00007fff,$02428000,$92800481,$00006000
+ dc.l $02417fff,$82423d41,$ff84241f,$f22ed040
+ dc.l $ff8460b6,$f23c8800,$00000000,$f22e9000
+ dc.l $ff5cf22e,$4800ff84,$f201a800,$f23c9000
+ dc.l $00000000,$83aeff64,$f2000098,$f23c58b8
+ dc.l $0002f293,$ff746000,$fe7e0c01,$00046700
+ dc.l $fdb60c01,$000567ff,$ffffe9ee,$0c010003
+ dc.l $67ffffff,$e9f8f228,$48000000,$f200a800
+ dc.l $e1981d40,$ff644e75,$51fc51fc,$51fc51fc
+ dc.l $00003fff,$0000007e,$000003fe,$ffffc001
+ dc.l $ffffff81,$fffffc01,$02000030,$00000040
+ dc.l $60080200,$00300000,$00802d40,$ff5c4241
+ dc.l $122eff4f,$e709822e,$ff4e6600,$02d63d69
+ dc.l $0000ff90,$2d690004,$ff942d69,$0008ff98
+ dc.l $3d680000,$ff842d68,$0004ff88,$2d680008
+ dc.l $ff8c61ff,$ffffe864,$2f0061ff,$ffffe908
+ dc.l $4497d197,$322eff5e,$ec09201f,$b0bb148e
+ dc.l $6f000074,$b0bb1520,$ff7a6700,$020c6e00
+ dc.l $013cf22e,$d080ff90,$f22e9000,$ff5cf23c
+ dc.l $88000000,$0000f22e,$4820ff84,$f201a800
+ dc.l $f23c9000,$00000000,$83aeff64,$f22ef080
+ dc.l $ff842f02,$322eff84,$24010281,$00007fff
+ dc.l $02428000,$92808242,$3d41ff84,$241ff22e
+ dc.l $d080ff84,$4e750000,$7fff0000,$407f0000
+ dc.l $43ff201f,$60c62f00,$f22ed080,$ff90f22e
+ dc.l $9000ff5c,$f23c8800,$00000000,$f22e4820
+ dc.l $ff84f200,$a800f23c,$90000000,$000081ae
+ dc.l $ff64f227,$e0013017,$dffc0000,$000c0280
+ dc.l $00007fff,$9097b0bb,$14ae6db6,$201f00ae
+ dc.l $00001048,$ff64122e,$ff620201,$0013661c
+ dc.l $082e0003,$ff6456c1,$202eff5c,$61ffffff
+ dc.l $eeee812e,$ff64f210,$d0804e75,$222eff5c
+ dc.l $020100c0,$6634f22e,$f080ff84,$2f02322e
+ dc.l $ff843401,$02810000,$7fff9280,$04810000
+ dc.l $60000241,$7fff0242,$80008242,$3d41ff84
+ dc.l $241ff22e,$d040ff84,$60a6f22e,$d080ff90
+ dc.l $222eff5c,$02010030,$f2019000,$f22e4820
+ dc.l $ff84f23c,$90000000,$000060aa,$08ee0003
+ dc.l $ff66f22e,$d080ff90,$f23c9000,$00000010
+ dc.l $f23c8800,$00000000,$f22e4820,$ff84f201
+ dc.l $a800f23c,$90000000,$000083ae,$ff64122e
+ dc.l $ff620201,$000b6620,$f22ef080,$ff8441ee
+ dc.l $ff84222e,$ff5c61ff,$ffffed36,$812eff64
+ dc.l $f22ed080,$ff844e75,$f22ed040,$ff90222e
+ dc.l $ff5c0201,$00c06652,$f22e9000,$ff5cf23c
+ dc.l $88000000,$0000f22e,$48a0ff84,$f23c9000
+ dc.l $00000000,$f22ef040,$ff842f02,$322eff84
+ dc.l $24010281,$00007fff,$02428000,$92800681
+ dc.l $00006000,$02417fff,$82423d41,$ff84241f
+ dc.l $f22ed040,$ff846000,$ff80222e,$ff5c0201
+ dc.l $0030f201,$900060a6,$f22ed080,$ff90f22e
+ dc.l $9000ff5c,$f23c8800,$00000000,$f22e4820
+ dc.l $ff84f201,$a800f23c,$90000000,$000083ae
+ dc.l $ff64f200,$0098f23c,$58b80001,$f292fdee
+ dc.l $f294fefa,$f22ed040,$ff90222e,$ff5c0201
+ dc.l $00c00001,$0010f201,$9000f23c,$88000000
+ dc.l $0000f22e,$48a0ff84,$f23c9000,$00000000
+ dc.l $f2000498,$f23c58b8,$0001f293,$fdb06000
+ dc.l $febc323b,$120a4efb,$10064afc,$0030fd20
+ dc.l $009e0072,$0060fd20,$00660000,$00000072
+ dc.l $006c0072,$00600072,$00660000,$000000d0
+ dc.l $00d0006c,$006000d0,$00660000,$00000060
+ dc.l $00600060,$00600060,$00660000,$0000fd20
+ dc.l $009e0072,$0060fd20,$00660000,$00000066
+ dc.l $00660066,$00660066,$00660000,$000060ff
+ dc.l $ffffe62e,$60ffffff,$e62860ff,$ffffe6a6
+ dc.l $10280000,$12290000,$b1016a10,$f23c4400
+ dc.l $80000000,$1d7c000c,$ff644e75,$f23c4400
+ dc.l $00000000,$1d7c0004,$ff644e75,$006e0410
+ dc.l $ff661028,$00001229,$0000b101,$6a10f23c
+ dc.l $4400ff80,$00001d7c,$000aff64,$4e75f23c
+ dc.l $44007f80,$00001d7c,$0002ff64,$4e751029
+ dc.l $00001228,$0000b101,$6a16f229,$d0800000
+ dc.l $f2000018,$f200001a,$1d7c000a,$ff644e75
+ dc.l $f229d080,$0000f200,$00181d7c,$0002ff64
+ dc.l $4e750200,$00300000,$00406008,$02000030
+ dc.l $00000080,$2d40ff5c,$122eff4e,$66000276
+ dc.l $020000c0,$66000090,$2d680004,$ff882d68
+ dc.l $0008ff8c,$30280000,$0a408000,$6a061d7c
+ dc.l $0008ff64,$3d40ff84,$f22ed080,$ff844e75
+ dc.l $020000c0,$666008ee,$0003ff66,$2d680004
+ dc.l $ff882d68,$0008ff8c,$30280000,$0a408000
+ dc.l $6a061d7c,$0008ff64,$3d40ff84,$f22ed080
+ dc.l $ff84082e,$0003ff62,$66024e75,$41eeff84
+ dc.l $61ffffff,$e8764440,$06406000,$322eff84
+ dc.l $02418000,$02407fff,$80413d40,$ff84f22e
+ dc.l $d040ff84,$4e750c00,$0040667e,$3d680000
+ dc.l $ff842d68,$0004ff88,$2d680008,$ff8c61ff
+ dc.l $ffffe3d8,$0c800000,$007f6c00,$00900c80
+ dc.l $ffffff81,$67000178,$6d0000f4,$f23c8800
+ dc.l $00000000,$f22e9000,$ff5cf22e,$481aff84
+ dc.l $f201a800,$f23c9000,$00000000,$83aeff64
+ dc.l $2f02f22e,$f080ff84,$322eff84,$34010281
+ dc.l $00007fff,$92800242,$80008441,$3d42ff84
+ dc.l $241ff22e,$d080ff84,$4e753d68,$0000ff84
+ dc.l $2d680004,$ff882d68,$0008ff8c,$61ffffff
+ dc.l $e35a0c80,$000003ff,$6c120c80,$fffffc01
+ dc.l $670000fc,$6d000078,$6000ff82,$08ee0003
+ dc.l $ff660a2e,$0080ff84,$6a0608ee,$0003ff64
+ dc.l $122eff62,$0201000b,$661a41ee,$ff84222e
+ dc.l $ff5c61ff,$ffffe99a,$812eff64,$f22ed080
+ dc.l $ff844e75,$2d6eff88,$ff942d6e,$ff8cff98
+ dc.l $322eff84,$2f022401,$02810000,$7fff0242
+ dc.l $80009280,$06810000,$60000241,$7fff8242
+ dc.l $3d41ff90,$f22ed040,$ff90241f,$60acf23c
+ dc.l $88000000,$0000f22e,$9000ff5c,$f22e481a
+ dc.l $ff84f23c,$90000000,$0000f201,$a80083ae
+ dc.l $ff6400ae,$00001048,$ff64122e,$ff620201
+ dc.l $0013661c,$082e0003,$ff6456c1,$202eff5c
+ dc.l $61ffffff,$ea0a812e,$ff64f210,$d0804e75
+ dc.l $2f02322e,$ff842401,$02810000,$7fff0242
+ dc.l $80009280,$04810000,$60000241,$7fff8242
+ dc.l $3d41ff84,$f22ed040,$ff84241f,$60b6f23c
+ dc.l $88000000,$0000f22e,$9000ff5c,$f22e481a
+ dc.l $ff84f201,$a800f23c,$90000000,$000083ae
+ dc.l $ff64f200,$0098f23c,$58b80002,$f293ff74
+ dc.l $6000fe7e,$0c010004,$6700fdb6,$0c010005
+ dc.l $67ffffff,$e3040c01,$000367ff,$ffffe30e
+ dc.l $f228481a,$0000f200,$a800e198,$1d40ff64
+ dc.l $4e75122e,$ff4e6610,$4a280000,$6b024e75
+ dc.l $1d7c0008,$ff644e75,$0c010001,$67400c01
+ dc.l $00026724,$0c010005,$67ffffff,$e2bc0c01
+ dc.l $000367ff,$ffffe2c6,$4a280000,$6b024e75
+ dc.l $1d7c0008,$ff644e75,$4a280000,$6b081d7c
+ dc.l $0002ff64,$4e751d7c,$000aff64,$4e754a28
+ dc.l $00006b08,$1d7c0004,$ff644e75,$1d7c000c
+ dc.l $ff644e75,$122eff4e,$66280200,$0030f200
+ dc.l $9000f23c,$88000000,$0000f228,$48010000
+ dc.l $f23c9000,$00000000,$f200a800,$81aeff64
+ dc.l $4e750c01,$0001672e,$0c010002,$674e0c01
+ dc.l $00046710,$0c010005,$67ffffff,$e22c60ff
+ dc.l $ffffe23a,$3d680000,$ff841d7c,$0080ff88
+ dc.l $41eeff84,$60a44a28,$00006b10,$f23c4400
+ dc.l $00000000,$1d7c0004,$ff644e75,$f23c4400
+ dc.l $80000000,$1d7c000c,$ff644e75,$f228d080
+ dc.l $00004a28,$00006b08,$1d7c0002,$ff644e75
+ dc.l $1d7c000a,$ff644e75,$122eff4e,$6618f23c
+ dc.l $88000000,$0000f228,$48030000,$f200a800
+ dc.l $81aeff64,$4e750c01,$0001672e,$0c010002
+ dc.l $674e0c01,$00046710,$0c010005,$67ffffff
+ dc.l $e19860ff,$ffffe1a6,$3d680000,$ff841d7c
+ dc.l $0080ff88,$41eeff84,$60b44a28,$00006b10
+ dc.l $f23c4400,$00000000,$1d7c0004,$ff644e75
+ dc.l $f23c4400,$80000000,$1d7c000c,$ff644e75
+ dc.l $f228d080,$00004a28,$00006b08,$1d7c0002
+ dc.l $ff644e75,$1d7c000a,$ff644e75,$02000030
+ dc.l $00000040,$60080200,$00300000,$00802d40
+ dc.l $ff5c122e,$ff4e6600,$025c0200,$00c0667e
+ dc.l $2d680004,$ff882d68,$0008ff8c,$32280000
+ dc.l $0881000f,$3d41ff84,$f22ed080,$ff844e75
+ dc.l $020000c0,$665808ee,$0003ff66,$2d680004
+ dc.l $ff882d68,$0008ff8c,$30280000,$0880000f
+ dc.l $3d40ff84,$f22ed080,$ff84082e,$0003ff62
+ dc.l $66024e75,$41eeff84,$61ffffff,$e41e4440
+ dc.l $06406000,$322eff84,$02418000,$02407fff
+ dc.l $80413d40,$ff84f22e,$d040ff84,$4e750c00
+ dc.l $0040667e,$3d680000,$ff842d68,$0004ff88
+ dc.l $2d680008,$ff8c61ff,$ffffdf80,$0c800000
+ dc.l $007f6c00,$00900c80,$ffffff81,$67000170
+ dc.l $6d0000ec,$f23c8800,$00000000,$f22e9000
+ dc.l $ff5cf22e,$4818ff84,$f201a800,$f23c9000
+ dc.l $00000000,$83aeff64,$2f02f22e,$f080ff84
+ dc.l $322eff84,$24010281,$00007fff,$92800242
+ dc.l $80008441,$3d42ff84,$241ff22e,$d080ff84
+ dc.l $4e753d68,$0000ff84,$2d680004,$ff882d68
+ dc.l $0008ff8c,$61ffffff,$df020c80,$000003ff
+ dc.l $6c120c80,$fffffc01,$670000f4,$6d000070
+ dc.l $6000ff82,$08ee0003,$ff6608ae,$0007ff84
+ dc.l $122eff62,$0201000b,$661a41ee,$ff84222e
+ dc.l $ff5c61ff,$ffffe54a,$812eff64,$f22ed080
+ dc.l $ff844e75,$2d6eff88,$ff942d6e,$ff8cff98
+ dc.l $322eff84,$2f022401,$02810000,$7fff0242
+ dc.l $80009280,$06810000,$60000241,$7fff8242
+ dc.l $3d41ff90,$f22ed040,$ff90241f,$60acf23c
+ dc.l $88000000,$0000f22e,$9000ff5c,$f22e4818
+ dc.l $ff84f23c,$90000000,$0000f201,$a80083ae
+ dc.l $ff6400ae,$00001048,$ff64122e,$ff620201
+ dc.l $0013661c,$082e0003,$ff6456c1,$202eff5c
+ dc.l $61ffffff,$e5ba812e,$ff64f210,$d0804e75
+ dc.l $2f02322e,$ff842401,$02810000,$7fff0242
+ dc.l $80009280,$04810000,$60000241,$7fff8242
+ dc.l $3d41ff84,$f22ed040,$ff84241f,$60b6f23c
+ dc.l $88000000,$0000f22e,$9000ff5c,$f22e4818
+ dc.l $ff84f201,$a800f23c,$90000000,$000083ae
+ dc.l $ff64f200,$0098f23c,$58b80002,$f293ff74
+ dc.l $6000fe86,$0c010004,$6700fdc6,$0c010005
+ dc.l $67ffffff,$deb40c01,$000367ff,$ffffdebe
+ dc.l $f2284818,$00000c01,$00026708,$1d7c0004
+ dc.l $ff644e75,$1d7c0002,$ff644e75,$4241122e
+ dc.l $ff4fe709,$822eff4e,$6618f229,$d0800000
+ dc.l $f2284838,$0000f200,$a800e198,$1d40ff64
+ dc.l $4e75323b,$120a4efb,$10064afc,$0030ffdc
+ dc.l $ffdcffdc,$006000f8,$006e0000,$0000ffdc
+ dc.l $ffdcffdc,$0060007c,$006e0000,$0000ffdc
+ dc.l $ffdcffdc,$0060007c,$006e0000,$00000060
+ dc.l $00600060,$00600060,$006e0000,$00000114
+ dc.l $009c009c,$006000bc,$006e0000,$0000006e
+ dc.l $006e006e,$006e006e,$006e0000,$000061ff
+ dc.l $ffffddde,$022e00f7,$ff644e75,$61ffffff
+ dc.l $ddd0022e,$00f7ff64,$4e753d68,$0000ff84
+ dc.l $20280004,$08c0001f,$2d40ff88,$2d680008
+ dc.l $ff8c41ee,$ff846000,$ff422d69,$0000ff84
+ dc.l $20290004,$08c0001f,$2d40ff88,$2d690008
+ dc.l $ff8c43ee,$ff846000,$ff223d69,$0000ff90
+ dc.l $3d680000,$ff842029,$000408c0,$001f2d40
+ dc.l $ff942028,$000408c0,$001f2d40,$ff882d69
+ dc.l $0008ff98,$2d680008,$ff8c43ee,$ff9041ee
+ dc.l $ff846000,$fee61028,$00001229,$0000b101
+ dc.l $6b00ff78,$4a006b02,$4e751d7c,$0008ff64
+ dc.l $4e751028,$00001229,$0000b101,$6b00ff7c
+ dc.l $4a006a02,$4e751d7c,$0008ff64,$4e752d40
+ dc.l $ff5c4241,$122eff4f,$e709822e,$ff4e6600
+ dc.l $02a03d69,$0000ff90,$2d690004,$ff942d69
+ dc.l $0008ff98,$3d680000,$ff842d68,$0004ff88
+ dc.l $2d680008,$ff8c61ff,$ffffdbf0,$2f0061ff
+ dc.l $ffffdc94,$d09f0c80,$ffffc001,$670000f8
+ dc.l $6d000064,$0c800000,$40006700,$01da6e00
+ dc.l $0122f22e,$d080ff90,$f22e9000,$ff5cf23c
+ dc.l $88000000,$0000f22e,$4827ff84,$f201a800
+ dc.l $f23c9000,$00000000,$83aeff64,$f22ef080
+ dc.l $ff842f02,$322eff84,$24010281,$00007fff
+ dc.l $02428000,$92808242,$3d41ff84,$241ff22e
+ dc.l $d080ff84,$4e75f22e,$d080ff90,$f22e9000
+ dc.l $ff5cf23c,$88000000,$0000f22e,$4827ff84
+ dc.l $f201a800,$f23c9000,$00000000,$83aeff64
+ dc.l $00ae0000,$1048ff64,$122eff62,$02010013
+ dc.l $6620082e,$0003ff64,$56c1202e,$ff5c0200
+ dc.l $003061ff,$ffffe2a8,$812eff64,$f210d080
+ dc.l $4e75f22e,$f080ff84,$2f02322e,$ff842401
+ dc.l $02810000,$7fff9280,$04810000,$60000241
+ dc.l $7fff0242,$80008242,$3d41ff84,$241ff22e
+ dc.l $d040ff84,$60acf22e,$d080ff90,$f22e9000
+ dc.l $ff5cf23c,$88000000,$0000f22e,$4827ff84
+ dc.l $f201a800,$f23c9000,$00000000,$83aeff64
+ dc.l $f2000098,$f23c58b8,$0002f293,$ff646000
+ dc.l $ff0c08ee,$0003ff66,$f22ed080,$ff90f23c
+ dc.l $90000000,$0010f23c,$88000000,$0000f22e
+ dc.l $4827ff84,$f201a800,$f23c9000,$00000000
+ dc.l $83aeff64,$122eff62,$0201000b,$6620f22e
+ dc.l $f080ff84,$41eeff84,$222eff5c,$61ffffff
+ dc.l $e166812e,$ff64f22e,$d080ff84,$4e75f22e
+ dc.l $d040ff90,$f22e9000,$ff5cf23c,$88000000
+ dc.l $0000f22e,$48a7ff84,$f23c9000,$00000000
+ dc.l $f22ef040,$ff842f02,$322eff84,$24010281
+ dc.l $00007fff,$02428000,$92800681,$00006000
+ dc.l $02417fff,$82423d41,$ff84241f,$f22ed040
+ dc.l $ff846000,$ff8af22e,$d080ff90,$f22e9000
+ dc.l $ff5cf23c,$88000000,$0000f22e,$4827ff84
+ dc.l $f201a800,$f23c9000,$00000000,$83aeff64
+ dc.l $f2000098,$f23c58b8,$0002f292,$fe20f294
+ dc.l $ff12f22e,$d040ff90,$222eff5c,$020100c0
+ dc.l $00010010,$f2019000,$f23c8800,$00000000
+ dc.l $f22e48a7,$ff84f23c,$90000000,$0000f200
+ dc.l $0498f23c,$58b80002,$f293fde2,$6000fed4
+ dc.l $323b120a,$4efb1006,$4afc0030,$fd560072
+ dc.l $0078006c,$fd560066,$00000000,$00720072
+ dc.l $0060006c,$00720066,$00000000,$007e0060
+ dc.l $007e006c,$007e0066,$00000000,$006c006c
+ dc.l $006c006c,$006c0066,$00000000,$fd560072
+ dc.l $0078006c,$fd560066,$00000000,$00660066
+ dc.l $00660066,$00660066,$00000000,$60ffffff
+ dc.l $da7460ff,$ffffd9ea,$60ffffff,$d9e460ff
+ dc.l $ffffed0e,$60ffffff,$ed6260ff,$ffffed2e
+ dc.l $2d40ff5c,$4241122e,$ff4fe709,$822eff4e
+ dc.l $6600027c,$3d690000,$ff902d69,$0004ff94
+ dc.l $2d690008,$ff983d68,$0000ff84,$2d680004
+ dc.l $ff882d68,$0008ff8c,$61ffffff,$d8ae2f00
+ dc.l $61ffffff,$d9524497,$d197322e,$ff5eec09
+ dc.l $201f0c80,$ffffc001,$6f000064,$0c800000
+ dc.l $3fff6700,$01b66e00,$0100f22e,$d080ff90
+ dc.l $f22e9000,$ff5cf23c,$88000000,$0000f22e
+ dc.l $4824ff84,$f201a800,$f23c9000,$00000000
+ dc.l $83aeff64,$f22ef080,$ff842f02,$322eff84
+ dc.l $24010281,$00007fff,$02428000,$92808242
+ dc.l $3d41ff84,$241ff22e,$d080ff84,$4e75f22e
+ dc.l $d080ff90,$f22e9000,$ff5cf23c,$88000000
+ dc.l $0000f22e,$4824ff84,$f201a800,$f23c9000
+ dc.l $00000000,$83aeff64,$f227e001,$3217dffc
+ dc.l $0000000c,$02810000,$7fff9280,$0c810000
+ dc.l $7fff6d90,$006e1048,$ff66122e,$ff620201
+ dc.l $00136620,$082e0003,$ff6456c1,$202eff5c
+ dc.l $02000030,$61ffffff,$df46812e,$ff64f210
+ dc.l $d0804e75,$f22ef080,$ff842f02,$322eff84
+ dc.l $24010281,$00007fff,$02428000,$92800481
+ dc.l $00006000,$02417fff,$82423d41,$ff84241f
+ dc.l $f22ed040,$ff8460ac,$08ee0003,$ff66f22e
+ dc.l $d080ff90,$f23c9000,$00000010,$f23c8800
+ dc.l $00000000,$f22e4824,$ff84f201,$a800f23c
+ dc.l $90000000,$000083ae,$ff64122e,$ff620201
+ dc.l $000b6620,$f22ef080,$ff8441ee,$ff84222e
+ dc.l $ff5c61ff,$ffffde40,$812eff64,$f22ed080
+ dc.l $ff844e75,$f22ed040,$ff90f22e,$9000ff5c
+ dc.l $f23c8800,$00000000,$f22e48a4,$ff84f23c
+ dc.l $90000000,$0000f22e,$f040ff84,$2f02322e
+ dc.l $ff842401,$02810000,$7fff0242,$80009280
+ dc.l $06810000,$60000241,$7fff8242,$3d41ff84
+ dc.l $241ff22e,$d040ff84,$608af22e,$d080ff90
+ dc.l $f22e9000,$ff5cf23c,$88000000,$0000f22e
+ dc.l $4824ff84,$f201a800,$f23c9000,$00000000
+ dc.l $83aeff64,$f2000098,$f23c58b8,$0001f292
+ dc.l $fe44f294,$ff14f22e,$d040ff90,$42810001
+ dc.l $0010f201,$9000f23c,$88000000,$0000f22e
+ dc.l $48a4ff84,$f23c9000,$00000000,$f2000498
+ dc.l $f23c58b8,$0001f293,$fe0c6000,$fedc323b
+ dc.l $120a4efb,$10064afc,$0030fd7a,$00720078
+ dc.l $0060fd7a,$00660000,$00000078,$006c0078
+ dc.l $00600078,$00660000,$0000007e,$007e006c
+ dc.l $0060007e,$00660000,$00000060,$00600060
+ dc.l $00600060,$00660000,$0000fd7a,$00720078
+ dc.l $0060fd7a,$00660000,$00000066,$00660066
+ dc.l $00660066,$00660000,$000060ff,$ffffd6d2
+ dc.l $60ffffff,$d6cc60ff,$ffffd74a,$60ffffff
+ dc.l $f0ce60ff,$fffff09c,$60ffffff,$f0f40200
+ dc.l $00300000,$00406008,$02000030,$00000080
+ dc.l $2d40ff5c,$4241122e,$ff4fe709,$822eff4e
+ dc.l $6600024c,$61ffffff,$d4b2f22e,$d080ff90
+ dc.l $f23c8800,$00000000,$f22e9000,$ff5cf22e
+ dc.l $4822ff84,$f23c9000,$00000000,$f201a800
+ dc.l $83aeff64,$f281003c,$2f02f227,$e001322e
+ dc.l $ff5eec09,$34170282,$00007fff,$9480b4bb
+ dc.l $14246c38,$b4bb142a,$6d0000b8,$67000184
+ dc.l $32170241,$80008242,$3e81f21f,$d080241f
+ dc.l $4e754e75,$00007fff,$0000407f,$000043ff
+ dc.l $00000000,$00003f81,$00003c01,$00ae0000
+ dc.l $1048ff64,$122eff62,$02010013,$6624dffc
+ dc.l $0000000c,$082e0003,$ff6456c1,$202eff5c
+ dc.l $61ffffff,$dc7a812e,$ff64f210,$d080241f
+ dc.l $4e75122e,$ff5c0201,$00c0661a,$32170241
+ dc.l $80000482,$00006000,$02427fff,$82423e81
+ dc.l $f21fd040,$60bef22e,$d080ff90,$222eff5c
+ dc.l $02010030,$f2019000,$f22e4822,$ff84f23c
+ dc.l $90000000,$0000dffc,$0000000c,$f227e001
+ dc.l $60ba08ee,$0003ff66,$dffc0000,$000cf22e
+ dc.l $d080ff90,$f23c9000,$00000010,$f23c8800
+ dc.l $00000000,$f22e4822,$ff84f23c,$90000000
+ dc.l $0000f201,$a80083ae,$ff64122e,$ff620201
+ dc.l $000b6622,$f22ef080,$ff8441ee,$ff84222e
+ dc.l $ff5c61ff,$ffffdaca,$812eff64,$f22ed080
+ dc.l $ff84241f,$4e75f22e,$d040ff90,$222eff5c
+ dc.l $020100c0,$664ef22e,$9000ff5c,$f23c8800
+ dc.l $00000000,$f22e48a2,$ff84f23c,$90000000
+ dc.l $0000f22e,$f040ff84,$322eff84,$24010281
+ dc.l $00007fff,$02428000,$92800681,$00006000
+ dc.l $02417fff,$82423d41,$ff84f22e,$d040ff84
+ dc.l $6000ff82,$222eff5c,$02010030,$f2019000
+ dc.l $60aa222e,$ff5c0201,$00c06700,$fe74222f
+ dc.l $00040c81,$80000000,$6600fe66,$4aaf0008
+ dc.l $6600fe5e,$082e0001,$ff666700,$fe54f22e
+ dc.l $d040ff90,$222eff5c,$020100c0,$00010010
+ dc.l $f2019000,$f23c8800,$00000000,$f22e48a2
+ dc.l $ff84f23c,$90000000,$0000f200,$0018f200
+ dc.l $0498f200,$0438f292,$feca6000,$fe14323b
+ dc.l $120a4efb,$10064afc,$0030fdaa,$00e4011c
+ dc.l $0060fdaa,$00660000,$000000bc,$006c011c
+ dc.l $006000bc,$00660000,$00000130,$0130010c
+ dc.l $00600130,$00660000,$00000060,$00600060
+ dc.l $00600060,$00660000,$0000fdaa,$00e4011c
+ dc.l $0060fdaa,$00660000,$00000066,$00660066
+ dc.l $00660066,$00660000,$000060ff,$ffffd3d2
+ dc.l $60ffffff,$d3cc1028,$00001229,$0000b101
+ dc.l $6b000016,$4a006b2e,$f23c4400,$00000000
+ dc.l $1d7c0004,$ff644e75,$122eff5f,$02010030
+ dc.l $0c010020,$6710f23c,$44000000,$00001d7c
+ dc.l $0004ff64,$4e75f23c,$44008000,$00001d7c
+ dc.l $000cff64,$4e753d68,$0000ff84,$2d680004
+ dc.l $ff882d68,$0008ff8c,$61ffffff,$d27e426e
+ dc.l $ff9042ae,$ff9442ae,$ff986000,$fcce3d69
+ dc.l $0000ff90,$2d690004,$ff942d69,$0008ff98
+ dc.l $61ffffff,$d302426e,$ff8442ae,$ff8842ae
+ dc.l $ff8c6000,$fca61028,$00001229,$0000b300
+ dc.l $6bffffff,$d3a0f228,$d0800000,$4a280000
+ dc.l $6a1c1d7c,$000aff64,$4e75f229,$d0800000
+ dc.l $4a290000,$6a081d7c,$000aff64,$4e751d7c
+ dc.l $0002ff64,$4e750200,$00300000,$00406008
+ dc.l $02000030,$00000080,$2d40ff5c,$4241122e
+ dc.l $ff4fe709,$822eff4e,$6600024c,$61ffffff
+ dc.l $d0eaf22e,$d080ff90,$f23c8800,$00000000
+ dc.l $f22e9000,$ff5cf22e,$4828ff84,$f23c9000
+ dc.l $00000000,$f201a800,$83aeff64,$f281003c
+ dc.l $2f02f227,$e001322e,$ff5eec09,$34170282
+ dc.l $00007fff,$9480b4bb,$14246c38,$b4bb142a
+ dc.l $6d0000b8,$67000184,$32170241,$80008242
+ dc.l $3e81f21f,$d080241f,$4e754e75,$00007fff
+ dc.l $0000407f,$000043ff,$00000000,$00003f81
+ dc.l $00003c01,$00ae0000,$1048ff64,$122eff62
+ dc.l $02010013,$6624dffc,$0000000c,$082e0003
+ dc.l $ff6456c1,$202eff5c,$61ffffff,$d8b2812e
+ dc.l $ff64f210,$d080241f,$4e75122e,$ff5c0201
+ dc.l $00c0661a,$32170241,$80000482,$00006000
+ dc.l $02427fff,$82423e81,$f21fd040,$60bef22e
+ dc.l $d080ff90,$222eff5c,$02010030,$f2019000
+ dc.l $f22e4828,$ff84f23c,$90000000,$0000dffc
+ dc.l $0000000c,$f227e001,$60ba08ee,$0003ff66
+ dc.l $dffc0000,$000cf22e,$d080ff90,$f23c9000
+ dc.l $00000010,$f23c8800,$00000000,$f22e4828
+ dc.l $ff84f23c,$90000000,$0000f201,$a80083ae
+ dc.l $ff64122e,$ff620201,$000b6622,$f22ef080
+ dc.l $ff8441ee,$ff84222e,$ff5c61ff,$ffffd702
+ dc.l $812eff64,$f22ed080,$ff84241f,$4e75f22e
+ dc.l $d040ff90,$222eff5c,$020100c0,$664ef22e
+ dc.l $9000ff5c,$f23c8800,$00000000,$f22e48a8
+ dc.l $ff84f23c,$90000000,$0000f22e,$f040ff84
+ dc.l $322eff84,$24010281,$00007fff,$02428000
+ dc.l $92800681,$00006000,$02417fff,$82423d41
+ dc.l $ff84f22e,$d040ff84,$6000ff82,$222eff5c
+ dc.l $02010030,$f2019000,$60aa222e,$ff5c0201
+ dc.l $00c06700,$fe74222f,$00040c81,$80000000
+ dc.l $6600fe66,$4aaf0008,$6600fe5e,$082e0001
+ dc.l $ff666700,$fe54f22e,$d040ff90,$222eff5c
+ dc.l $020100c0,$00010010,$f2019000,$f23c8800
+ dc.l $00000000,$f22e48a8,$ff84f23c,$90000000
+ dc.l $0000f200,$0018f200,$0498f200,$0438f292
+ dc.l $feca6000,$fe14323b,$120a4efb,$10064afc
+ dc.l $0030fdaa,$00e2011a,$0060fdaa,$00660000
+ dc.l $000000ba,$006c011a,$006000ba,$00660000
+ dc.l $00000130,$0130010a,$00600130,$00660000
+ dc.l $00000060,$00600060,$00600060,$00660000
+ dc.l $0000fdaa,$00e2011a,$0060fdaa,$00660000
+ dc.l $00000066,$00660066,$00660066,$00660000
+ dc.l $000060ff,$ffffd00a,$60ffffff,$d0041028
+ dc.l $00001229,$0000b300,$6a144a00,$6b2ef23c
+ dc.l $44000000,$00001d7c,$0004ff64,$4e75122e
+ dc.l $ff5f0201,$00300c01,$00206710,$f23c4400
+ dc.l $00000000,$1d7c0004,$ff644e75,$f23c4400
+ dc.l $80000000,$1d7c000c,$ff644e75,$3d680000
+ dc.l $ff842d68,$0004ff88,$2d680008,$ff8c61ff
+ dc.l $ffffceb8,$426eff90,$42aeff94,$42aeff98
+ dc.l $6000fcd0,$3d690000,$ff902d69,$0004ff94
+ dc.l $2d690008,$ff9861ff,$ffffcf3c,$426eff84
+ dc.l $42aeff88,$42aeff8c,$6000fca8,$10280000
+ dc.l $12290000,$b3006aff,$ffffcfda,$f228d080
+ dc.l $0000f200,$001af293,$001e1d7c,$000aff64
+ dc.l $4e75f229,$d0800000,$4a290000,$6a081d7c
+ dc.l $000aff64,$4e751d7c,$0002ff64,$4e750200
+ dc.l $00300000,$00406008,$02000030,$00000080
+ dc.l $2d40ff5c,$4241122e,$ff4e6600,$02744a28
+ dc.l $00006bff,$ffffcf7e,$020000c0,$6648f22e
+ dc.l $9000ff5c,$f23c8800,$00000000,$f2104804
+ dc.l $f201a800,$83aeff64,$4e754a28,$00006bff
+ dc.l $ffffcf52,$020000c0,$661c3d68,$0000ff84
+ dc.l $2d680004,$ff882d68,$0008ff8c,$61ffffff
+ dc.l $ce046000,$003e0c00,$00406600,$00843d68
+ dc.l $0000ff84,$2d680004,$ff882d68,$0008ff8c
+ dc.l $61ffffff,$cde00c80,$0000007e,$67000098
+ dc.l $6e00009e,$0c80ffff,$ff806700,$01a46d00
+ dc.l $0120f23c,$88000000,$0000f22e,$9000ff5c
+ dc.l $f22e4804,$ff84f201,$a800f23c,$90000000
+ dc.l $000083ae,$ff642f02,$f22ef080,$ff84322e
+ dc.l $ff842401,$02810000,$7fff9280,$02428000
+ dc.l $84413d42,$ff84241f,$f22ed080,$ff844e75
+ dc.l $3d680000,$ff842d68,$0004ff88,$2d680008
+ dc.l $ff8c61ff,$ffffcd5e,$0c800000,$03fe6700
+ dc.l $00166e1c,$0c80ffff,$fc006700,$01246d00
+ dc.l $00a06000,$ff7e082e,$0000ff85,$6600ff74
+ dc.l $08ee0003,$ff66f23c,$90000000,$0010f23c
+ dc.l $88000000,$0000f22e,$4804ff84,$f201a800
+ dc.l $f23c9000,$00000000,$83aeff64,$122eff62
+ dc.l $0201000b,$6620f22e,$f080ff84,$41eeff84
+ dc.l $222eff5c,$61ffffff,$d338812e,$ff64f22e
+ dc.l $d080ff84,$4e752d6e,$ff88ff94,$2d6eff8c
+ dc.l $ff98322e,$ff842f02,$24010281,$00007fff
+ dc.l $02428000,$92800681,$00006000,$02417fff
+ dc.l $82423d41,$ff90f22e,$d040ff90,$241f60a6
+ dc.l $f23c8800,$00000000,$f22e9000,$ff5cf22e
+ dc.l $4804ff84,$f23c9000,$00000000,$f201a800
+ dc.l $83aeff64,$00ae0000,$1048ff64,$122eff62
+ dc.l $02010013,$661c082e,$0003ff64,$56c1202e
+ dc.l $ff5c61ff,$ffffd3a8,$812eff64,$f210d080
+ dc.l $4e752f02,$322eff84,$24010281,$00007fff
+ dc.l $02428000,$92800481,$00006000,$02417fff
+ dc.l $82423d41,$ff84f22e,$d040ff84,$241f60b6
+ dc.l $082e0000,$ff856600,$ff78f23c,$88000000
+ dc.l $0000f22e,$9000ff5c,$f22e4804,$ff84f201
+ dc.l $a800f23c,$90000000,$000083ae,$ff64f200
+ dc.l $0080f23c,$58b80001,$f293ff6a,$6000fe48
+ dc.l $0c010004,$6700fdb4,$0c010001,$67160c01
+ dc.l $00026736,$0c010005,$67ffffff,$cc8c60ff
+ dc.l $ffffcc9a,$4a280000,$6b10f23c,$44000000
+ dc.l $00001d7c,$0004ff64,$4e75f23c,$44008000
+ dc.l $00001d7c,$000cff64,$4e754a28,$00006bff
+ dc.l $ffffccc2,$f228d080,$00001d7c,$0002ff64
+ dc.l $4e75303b,$12064efb,$00020020,$0026002c
+ dc.l $00300034,$0038003c,$00400044,$004a0050
+ dc.l $00540058,$005c0060,$0064202e,$ff9c4e75
+ dc.l $202effa0,$4e752002,$4e752003,$4e752004
+ dc.l $4e752005,$4e752006,$4e752007,$4e75202e
+ dc.l $ffa44e75,$202effa8,$4e75200a,$4e75200b
+ dc.l $4e75200c,$4e75200d,$4e752016,$4e75202e
+ dc.l $ffd84e75,$323b1206,$4efb1002,$00100016
+ dc.l $001c0020,$00240028,$002c0030,$2d40ff9c
+ dc.l $4e752d40,$ffa04e75,$24004e75,$26004e75
+ dc.l $28004e75,$2a004e75,$2c004e75,$2e004e75
+ dc.l $323b1206,$4efb1002,$00100016,$001c0020
+ dc.l $00240028,$002c0030,$3d40ff9e,$4e753d40
+ dc.l $ffa24e75,$34004e75,$36004e75,$38004e75
+ dc.l $3a004e75,$3c004e75,$3e004e75,$323b1206
+ dc.l $4efb1002,$00100016,$001c0020,$00240028
+ dc.l $002c0030,$1d40ff9f,$4e751d40,$ffa34e75
+ dc.l $14004e75,$16004e75,$18004e75,$1a004e75
+ dc.l $1c004e75,$1e004e75,$323b1206,$4efb1002
+ dc.l $00100016,$001c0020,$00240028,$002c0030
+ dc.l $d1aeffa4,$4e75d1ae,$ffa84e75,$d5c04e75
+ dc.l $d7c04e75,$d9c04e75,$dbc04e75,$d1964e75
+ dc.l $1d7c0004,$ff4a0c00,$00016706,$d1aeffd8
+ dc.l $4e7554ae,$ffd84e75,$323b1206,$4efb1002
+ dc.l $00100016,$001c0020,$00240028,$002c0030
+ dc.l $91aeffa4,$4e7591ae,$ffa84e75,$95c04e75
+ dc.l $97c04e75,$99c04e75,$9bc04e75,$91964e75
+ dc.l $1d7c0008,$ff4a0c00,$00016706,$91aeffd8
+ dc.l $4e7555ae,$ffd84e75,$303b0206,$4efb0002
+ dc.l $00100028,$0040004c,$00580064,$0070007c
+ dc.l $2d6effdc,$ff6c2d6e,$ffe0ff70,$2d6effe4
+ dc.l $ff7441ee,$ff6c4e75,$2d6effe8,$ff6c2d6e
+ dc.l $ffecff70,$2d6efff0,$ff7441ee,$ff6c4e75
+ dc.l $f22ef020,$ff6c41ee,$ff6c4e75,$f22ef010
+ dc.l $ff6c41ee,$ff6c4e75,$f22ef008,$ff6c41ee
+ dc.l $ff6c4e75,$f22ef004,$ff6c41ee,$ff6c4e75
+ dc.l $f22ef002,$ff6c41ee,$ff6c4e75,$f22ef001
+ dc.l $ff6c41ee,$ff6c4e75,$303b0206,$4efb0002
+ dc.l $00100028,$0040004c,$00580064,$0070007c
+ dc.l $2d6effdc,$ff782d6e,$ffe0ff7c,$2d6effe4
+ dc.l $ff8041ee,$ff784e75,$2d6effe8,$ff782d6e
+ dc.l $ffecff7c,$2d6efff0,$ff8041ee,$ff784e75
+ dc.l $f22ef020,$ff7841ee,$ff784e75,$f22ef010
+ dc.l $ff7841ee,$ff784e75,$f22ef008,$ff7841ee
+ dc.l $ff784e75,$f22ef004,$ff7841ee,$ff784e75
+ dc.l $f22ef002,$ff7841ee,$ff784e75,$f22ef001
+ dc.l $ff7841ee,$ff784e75,$303b0206,$4efb0002
+ dc.l $00100018,$0020002a,$0034003e,$00480052
+ dc.l $f22ef080,$ffdc4e75,$f22ef080,$ffe84e75
+ dc.l $f227e001,$f21fd020,$4e75f227,$e001f21f
+ dc.l $d0104e75,$f227e001,$f21fd008,$4e75f227
+ dc.l $e001f21f,$d0044e75,$f227e001,$f21fd002
+ dc.l $4e75f227,$e001f21f,$d0014e75,$700c61ff
+ dc.l $ffffbace,$43eeff6c,$700c61ff,$ffffa0d8
+ dc.l $4a8166ff,$00000a14,$e9ee004f,$ff6c0c40
+ dc.l $7fff6602,$4e75102e,$ff6f0200,$000f660e
+ dc.l $4aaeff70,$66084aae,$ff746602,$4e7541ee
+ dc.l $ff6c61ff,$0000001c,$f22ef080,$ff6c4e75
+ dc.l $00000000,$02030203,$02030302,$03020203
+ dc.l $2d680000,$ff842d68,$0004ff88,$2d680008
+ dc.l $ff8c41ee,$ff8448e7,$3c00f227,$e0017402
+ dc.l $76042810,$42814c3c,$10010000,$000ae9c4
+ dc.l $08c4d280,$580351ca,$ffee0804,$001e6702
+ dc.l $44810481,$00000010,$6c0e4481,$00844000
+ dc.l $00000090,$40000000,$2f017201,$f23c4400
+ dc.l $00000000,$e9d00704,$f2005822,$28301c00
+ dc.l $76007407,$f23c4423,$41200000,$e9c408c4
+ dc.l $f2005822,$580351ca,$ffec5281,$0c810000
+ dc.l $00026fd8,$0810001f,$6704f200,$001a2217
+ dc.l $0c810000,$001b6f00,$00e40810,$001e6674
+ dc.l $42812810,$e9c40704,$66245281,$7a012830
+ dc.l $5c006608,$50815285,$28305c00,$42837407
+ dc.l $e9c408c4,$66085883,$528151ca,$fff42001
+ dc.l $22179280,$6c104481,$28100084,$40000000
+ dc.l $00904000,$000043fb,$01700000,$06664283
+ dc.l $f23c4480,$3f800000,$7403e280,$6406f231
+ dc.l $48a33800,$06830000,$000c4a80,$66ecf200
+ dc.l $04236068,$42817a02,$28305c00,$66085385
+ dc.l $50812830,$5c00761c,$7407e9c4,$08c46608
+ dc.l $59835281,$51cafff4,$20012217,$92806e10
+ dc.l $44812810,$0284bfff,$ffff0290,$bfffffff
+ dc.l $43fb0170,$000005fc,$4283f23c,$44803f80
+ dc.l $00007403,$e2806406,$f23148a3,$38000683
+ dc.l $0000000c,$4a8066ec,$f2000420,$262eff60
+ dc.l $e9c32682,$2810e582,$e9c40002,$d48043fa
+ dc.l $fe501031,$28004283,$efc30682,$f2039000
+ dc.l $e280640a,$43fb0170,$00000644,$6016e280
+ dc.l $640a43fb,$01700000,$06d26008,$43fb0170
+ dc.l $00000590,$20016a08,$44800090,$40000000
+ dc.l $4283f23c,$44803f80,$0000e280,$6406f231
+ dc.l $48a33800,$06830000,$000c4a80,$66ec0810
+ dc.l $001e6706,$f2000420,$6004f200,$0423f200
+ dc.l $a8000880,$00096706,$006e0108,$ff66588f
+ dc.l $f21fd040,$4cdf003c,$f23c9000,$00000000
+ dc.l $f23c8800,$00000000,$4e753ffd,$00009a20
+ dc.l $9a84fbcf,$f7980000,$00003ffd,$00009a20
+ dc.l $9a84fbcf,$f7990000,$00003f80,$00000000
+ dc.l $00000000,$00000000,$00004000,$00000000
+ dc.l $00000000,$00000000,$00004120,$00000000
+ dc.l $00000000,$00000000,$0000459a,$28000000
+ dc.l $00000000,$00000000,$00000000,$00000303
+ dc.l $02020302,$02030203,$030248e7,$3f20f227
+ dc.l $e007f23c,$90000000,$00202d50,$ff582e00
+ dc.l $422eff50,$0c2e0004,$ff4e6600,$00303010
+ dc.l $02407fff,$22280004,$24280008,$5340e38a
+ dc.l $e3914a81,$6cf64a40,$6e0450ee,$ff500240
+ dc.l $7fff3080,$21410004,$21420008,$2d50ff90
+ dc.l $2d680004,$ff942d68,$0008ff98,$02ae7fff
+ dc.l $ffffff90,$4a2eff50,$67082c3c,$ffffecbb
+ dc.l $6038302e,$ff903d7c,$3fffff90,$f22e4800
+ dc.l $ff900440,$3ffff200,$5022f23a,$4428ff1c
+ dc.l $f293000e,$f23a4823,$ff02f206,$6000600a
+ dc.l $f23a4823,$fee6f206,$6000f23c,$88000000
+ dc.l $00004245,$4a876f04,$28076006,$28069887
+ dc.l $52844a84,$6f180c84,$00000011,$6f127811
+ dc.l $4a876f0c,$00ae0000,$2080ff64,$60027801
+ dc.l $4a876e06,$be866d02,$2c072006,$52809084
+ dc.l $48454245,$42424a80,$6c145245,$0c80ffff
+ dc.l $ecd46e08,$06800000,$00187418,$4480f23a
+ dc.l $4480fe98,$e9ee1682,$ff60e349,$d245e349
+ dc.l $4aaeff58,$6c025281,$45fafec0,$16321800
+ dc.l $e98bf203,$9000e88b,$4a03660a,$43fb0170
+ dc.l $00000370,$6016e20b,$640a43fb,$01700000
+ dc.l $03fe6008,$43fb0170,$00000490,$4283e288
+ dc.l $6406f231,$48a33800,$06830000,$000c4a80
+ dc.l $66ecf23c,$88000000,$0000f23c,$90000000
+ dc.l $0010f210,$4800f200,$00184a45,$6608f200
+ dc.l $04206000,$008e4a2e,$ff506700,$0072f227
+ dc.l $e0023617,$02437fff,$00508000,$d6500443
+ dc.l $3fffd669,$00240443,$3fffd669,$00300443
+ dc.l $3fff6b00,$00480257,$80008757,$02507fff
+ dc.l $2f280008,$2f280004,$2f3c3fff,$0000f21f
+ dc.l $d080f21f,$48232f29,$002c2f29,$00282f3c
+ dc.l $3fff0000,$2f290038,$2f290034,$2f3c3fff
+ dc.l $0000f21f,$4823f21f,$48236016,$60fe4a42
+ dc.l $670cf229,$48230024,$f2294823,$0030f200
+ dc.l $0423f200,$a800f22e,$6800ff90,$45eeff90
+ dc.l $08000009,$670e00aa,$00000001,$0008f22e
+ dc.l $4800ff90,$2d6eff60,$ff5402ae,$00000030
+ dc.l $ff6048e7,$c0c02f2e,$ff542f2e,$ff5841ee
+ dc.l $ff90f210,$68004aae,$ff586c06,$00908000
+ dc.l $00002f2e,$ff64f22e,$9000ff60,$f23c8800
+ dc.l $00000000,$f22e4801,$ff90f200,$a800816e
+ dc.l $ff661d57,$ff64588f,$2d5fff58,$2d5fff54
+ dc.l $4cdf0303,$2d6eff58,$ff902d6e,$ff54ff60
+ dc.l $48454a45,$66ff0000,$0086f23a,$4500fcec
+ dc.l $20045380,$4283e288,$6406f231,$49233800
+ dc.l $06830000,$000c4a80,$66ec4a2e,$ff50670a
+ dc.l $f2000018,$60ff0000,$0028f200,$0018f200
+ dc.l $0838f293,$001a5386,$3a3c0001,$f23c9000
+ dc.l $00000020,$f23a4523,$fcc26000,$fda8f23a
+ dc.l $4523fcb8,$f2000838,$f294005c,$f292000c
+ dc.l $f23a4420,$fca65286,$604c5286,$3a3c0001
+ dc.l $f23c9000,$00000020,$6000fd7a,$f23a4500
+ dc.l $fc6a2004,$4283e288,$6406f231,$49233800
+ dc.l $06830000,$000c4a80,$66ecf200,$0018f200
+ dc.l $0838f28e,$0012f23a,$4420fc60,$52865284
+ dc.l $f23a4523,$fc56f23c,$90000000,$0010f200
+ dc.l $082041ee,$ff84f210,$68002428,$00042628
+ dc.l $000842a8,$000442a8,$00082010,$48406714
+ dc.l $04800000,$3ffd4a80,$6e0a4480,$e28ae293
+ dc.l $51c8fffa,$4a826604,$4a836710,$42810683
+ dc.l $00000080,$d5810283,$ffffff80,$20045688
+ dc.l $61ff0000,$02b04a2e,$ff506728,$f200003a
+ dc.l $f281000c,$f2064000,$f2000018,$602e4a87
+ dc.l $6d08f23a,$4400fbe4,$6022f206,$4000f200
+ dc.l $00186018,$f200003a,$f28e000a,$f23a4400
+ dc.l $fb9a6008,$f2064000,$f2000018,$f2294820
+ dc.l $0018f22e,$6800ff90,$242a0004,$262a0008
+ dc.l $3012670e,$04403ffd,$4440e28a,$e29351c8
+ dc.l $fffa4281,$06830000,$0080d581,$0283ffff
+ dc.l $ff807004,$41eeff54,$61ff0000,$0228202e
+ dc.l $ff54720c,$e2a8efee,$010cff84,$e2a8efee
+ dc.l $0404ff84,$4a006708,$00ae0000,$2080ff64
+ dc.l $4280022e,$000fff84,$4aaeff58,$6c027002
+ dc.l $4a866c02,$5280efee,$0002ff84,$f23c8800
+ dc.l $00000000,$f21fd0e0,$4cdf04fc,$4e754002
+ dc.l $0000a000,$00000000,$00004005,$0000c800
+ dc.l $00000000,$0000400c,$00009c40,$00000000
+ dc.l $00004019,$0000bebc,$20000000,$00004034
+ dc.l $00008e1b,$c9bf0400,$00004069,$00009dc5
+ dc.l $ada82b70,$b59e40d3,$0000c278,$1f49ffcf
+ dc.l $a6d541a8,$000093ba,$47c980e9,$8ce04351
+ dc.l $0000aa7e,$ebfb9df9,$de8e46a3,$0000e319
+ dc.l $a0aea60e,$91c74d48,$0000c976,$75868175
+ dc.l $0c175a92,$00009e8b,$3b5dc53d,$5de57525
+ dc.l $0000c460,$52028a20,$979b4002,$0000a000
+ dc.l $00000000,$00004005,$0000c800,$00000000
+ dc.l $0000400c,$00009c40,$00000000,$00004019
+ dc.l $0000bebc,$20000000,$00004034,$00008e1b
+ dc.l $c9bf0400,$00004069,$00009dc5,$ada82b70
+ dc.l $b59e40d3,$0000c278,$1f49ffcf,$a6d641a8
+ dc.l $000093ba,$47c980e9,$8ce04351,$0000aa7e
+ dc.l $ebfb9df9,$de8e46a3,$0000e319,$a0aea60e
+ dc.l $91c74d48,$0000c976,$75868175,$0c185a92
+ dc.l $00009e8b,$3b5dc53d,$5de57525,$0000c460
+ dc.l $52028a20,$979b4002,$0000a000,$00000000
+ dc.l $00004005,$0000c800,$00000000,$0000400c
+ dc.l $00009c40,$00000000,$00004019,$0000bebc
+ dc.l $20000000,$00004034,$00008e1b,$c9bf0400
+ dc.l $00004069,$00009dc5,$ada82b70,$b59d40d3
+ dc.l $0000c278,$1f49ffcf,$a6d541a8,$000093ba
+ dc.l $47c980e9,$8cdf4351,$0000aa7e,$ebfb9df9
+ dc.l $de8d46a3,$0000e319,$a0aea60e,$91c64d48
+ dc.l $0000c976,$75868175,$0c175a92,$00009e8b
+ dc.l $3b5dc53d,$5de47525,$0000c460,$52028a20
+ dc.l $979a48e7,$ff007e01,$53802802,$2a03e9c2
+ dc.l $1003e782,$e9c36003,$e7838486,$e385e394
+ dc.l $4846d346,$d6854e71,$d5844e71,$d3464846
+ dc.l $4a476712,$4847e947,$de4110c7,$48474247
+ dc.l $51c8ffc8,$60124847,$3e014847,$524751c8
+ dc.l $ffba4847,$e94f10c7,$4cdf00ff,$4e757001
+ dc.l $610000d6,$3d7c0121,$000a6000,$007e7002
+ dc.l $610000c6,$3d7c0141,$000a606e,$70046100
+ dc.l $00b83d7c,$0101000a,$60607008,$610000aa
+ dc.l $3d7c0161,$000a6052,$700c6100,$009c3d7c
+ dc.l $0161000a,$60447001,$6100008e,$3d7c00a1
+ dc.l $000a6036,$70026100,$00803d7c,$00c1000a
+ dc.l $60287004,$61000072,$3d7c0081,$000a601a
+ dc.l $70086100,$00643d7c,$00e1000a,$600c700c
+ dc.l $61000056,$3d7c00e1,$000a2d6e,$ff680006
+ dc.l $f22ed0c0,$ffdcf22e,$9c00ff60,$4cee0303
+ dc.l $ff9c4e5e,$2f172f6f,$00080004,$2f6f000c
+ dc.l $00082f7c,$00000001,$000c3f6f,$0006000c
+ dc.l $3f7c4008,$00060817,$00056706,$08ef0002
+ dc.l $000d60ff,$ffff95f4,$122eff41,$02010038
+ dc.l $0c010018,$6700000c,$0c010020,$67000060
+ dc.l $4e75122e,$ff410241,$0007323b,$12064efb
+ dc.l $10020010,$0016001c,$00200024,$0028002c
+ dc.l $003091ae,$ffa44e75,$91aeffa8,$4e7595c0
+ dc.l $4e7597c0,$4e7599c0,$4e759bc0,$4e759196
+ dc.l $4e750c2e,$0030000a,$6612082e,$00050004
+ dc.l $660a4e7a,$880091c0,$4e7b8800,$4e754480
+ dc.l $60a051fc,$00000000,$00000000,$00000000
diff --git a/arch/m68k/ifpsp060/src/README-SRC b/arch/m68k/ifpsp060/src/README-SRC
new file mode 100644
index 00000000000..6be5cff2a6a
--- /dev/null
+++ b/arch/m68k/ifpsp060/src/README-SRC
@@ -0,0 +1,12 @@
+This is the original source code from Motorola for the 68060 processor
+support code, providing emulation for rarely used m68k instructions
+not implemented in the 68060 silicon.
+
+The code provided here will not assemble out of the box using the GNU
+assembler; however, it is included in order to comply with the
+GNU General Public License.
+
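+(For reference, these sources use Motorola's own assembler syntax,
+which differs from GNU as in small but fatal ways: immediates are
+written with "&" rather than "#", and the sources spell mnemonics as
+"mov.b" and "movm.l" where GNU as expects "move.b" and "movem.l".
+A one-line illustration:
+
+	fmov.l	&0x0,%fpcr	| Motorola SP syntax, as used in fplsp.S
+	fmove.l	#0,%fpcr	| GNU as equivalent
+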
+You don't need to assemble these files in order to compile a
+working m68k kernel; the precompiled .sa files in arch/m68k/ifpsp060
+are sufficient, and they were generated from these source files by
+Motorola.
diff --git a/arch/m68k/ifpsp060/src/fplsp.S b/arch/m68k/ifpsp060/src/fplsp.S
new file mode 100644
index 00000000000..fdb79b927ef
--- /dev/null
+++ b/arch/m68k/ifpsp060/src/fplsp.S
@@ -0,0 +1,10980 @@
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+MOTOROLA MICROPROCESSOR & MEMORY TECHNOLOGY GROUP
+M68000 Hi-Performance Microprocessor Division
+M68060 Software Package
+Production Release P1.00 -- October 10, 1994
+
+M68060 Software Package Copyright © 1993, 1994 Motorola Inc. All rights reserved.
+
+THE SOFTWARE is provided on an "AS IS" basis and without warranty.
+To the maximum extent permitted by applicable law,
+MOTOROLA DISCLAIMS ALL WARRANTIES WHETHER EXPRESS OR IMPLIED,
+INCLUDING IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
+and any warranty against infringement with regard to the SOFTWARE
+(INCLUDING ANY MODIFIED VERSIONS THEREOF) and any accompanying written materials.
+
+To the maximum extent permitted by applicable law,
+IN NO EVENT SHALL MOTOROLA BE LIABLE FOR ANY DAMAGES WHATSOEVER
+(INCLUDING WITHOUT LIMITATION, DAMAGES FOR LOSS OF BUSINESS PROFITS,
+BUSINESS INTERRUPTION, LOSS OF BUSINESS INFORMATION, OR OTHER PECUNIARY LOSS)
+ARISING OF THE USE OR INABILITY TO USE THE SOFTWARE.
+Motorola assumes no responsibility for the maintenance and support of the SOFTWARE.
+
+You are hereby granted a copyright license to use, modify, and distribute the SOFTWARE
+so long as this entire notice is retained without alteration in any modified and/or
+redistributed versions, and that such modified versions are clearly identified as such.
+No licenses are granted by implication, estoppel or otherwise under any patents
+or trademarks of Motorola, Inc.
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+#
+# lfptop.s:
+# This file is appended to the top of the 060FPLSP package
+# and contains the entry points into the package. The user, in
+# effect, branches to one of the branch table entries located here.
+#
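+# (Each entry below occupies eight bytes -- a six-byte bra.l followed
+# by a two-byte pad word -- so entry N sits at offset N*8 from the top
+# of the package: _facoss_ at +0x00, _facosd_ at +0x08, _facosx_ at
+# +0x10, and so on; offsets shown here purely for illustration.)
+#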
+
+ bra.l _facoss_
+ short 0x0000
+ bra.l _facosd_
+ short 0x0000
+ bra.l _facosx_
+ short 0x0000
+
+ bra.l _fasins_
+ short 0x0000
+ bra.l _fasind_
+ short 0x0000
+ bra.l _fasinx_
+ short 0x0000
+
+ bra.l _fatans_
+ short 0x0000
+ bra.l _fatand_
+ short 0x0000
+ bra.l _fatanx_
+ short 0x0000
+
+ bra.l _fatanhs_
+ short 0x0000
+ bra.l _fatanhd_
+ short 0x0000
+ bra.l _fatanhx_
+ short 0x0000
+
+ bra.l _fcoss_
+ short 0x0000
+ bra.l _fcosd_
+ short 0x0000
+ bra.l _fcosx_
+ short 0x0000
+
+ bra.l _fcoshs_
+ short 0x0000
+ bra.l _fcoshd_
+ short 0x0000
+ bra.l _fcoshx_
+ short 0x0000
+
+ bra.l _fetoxs_
+ short 0x0000
+ bra.l _fetoxd_
+ short 0x0000
+ bra.l _fetoxx_
+ short 0x0000
+
+ bra.l _fetoxm1s_
+ short 0x0000
+ bra.l _fetoxm1d_
+ short 0x0000
+ bra.l _fetoxm1x_
+ short 0x0000
+
+ bra.l _fgetexps_
+ short 0x0000
+ bra.l _fgetexpd_
+ short 0x0000
+ bra.l _fgetexpx_
+ short 0x0000
+
+ bra.l _fgetmans_
+ short 0x0000
+ bra.l _fgetmand_
+ short 0x0000
+ bra.l _fgetmanx_
+ short 0x0000
+
+ bra.l _flog10s_
+ short 0x0000
+ bra.l _flog10d_
+ short 0x0000
+ bra.l _flog10x_
+ short 0x0000
+
+ bra.l _flog2s_
+ short 0x0000
+ bra.l _flog2d_
+ short 0x0000
+ bra.l _flog2x_
+ short 0x0000
+
+ bra.l _flogns_
+ short 0x0000
+ bra.l _flognd_
+ short 0x0000
+ bra.l _flognx_
+ short 0x0000
+
+ bra.l _flognp1s_
+ short 0x0000
+ bra.l _flognp1d_
+ short 0x0000
+ bra.l _flognp1x_
+ short 0x0000
+
+ bra.l _fmods_
+ short 0x0000
+ bra.l _fmodd_
+ short 0x0000
+ bra.l _fmodx_
+ short 0x0000
+
+ bra.l _frems_
+ short 0x0000
+ bra.l _fremd_
+ short 0x0000
+ bra.l _fremx_
+ short 0x0000
+
+ bra.l _fscales_
+ short 0x0000
+ bra.l _fscaled_
+ short 0x0000
+ bra.l _fscalex_
+ short 0x0000
+
+ bra.l _fsins_
+ short 0x0000
+ bra.l _fsind_
+ short 0x0000
+ bra.l _fsinx_
+ short 0x0000
+
+ bra.l _fsincoss_
+ short 0x0000
+ bra.l _fsincosd_
+ short 0x0000
+ bra.l _fsincosx_
+ short 0x0000
+
+ bra.l _fsinhs_
+ short 0x0000
+ bra.l _fsinhd_
+ short 0x0000
+ bra.l _fsinhx_
+ short 0x0000
+
+ bra.l _ftans_
+ short 0x0000
+ bra.l _ftand_
+ short 0x0000
+ bra.l _ftanx_
+ short 0x0000
+
+ bra.l _ftanhs_
+ short 0x0000
+ bra.l _ftanhd_
+ short 0x0000
+ bra.l _ftanhx_
+ short 0x0000
+
+ bra.l _ftentoxs_
+ short 0x0000
+ bra.l _ftentoxd_
+ short 0x0000
+ bra.l _ftentoxx_
+ short 0x0000
+
+ bra.l _ftwotoxs_
+ short 0x0000
+ bra.l _ftwotoxd_
+ short 0x0000
+ bra.l _ftwotoxx_
+ short 0x0000
+
+ bra.l _fabss_
+ short 0x0000
+ bra.l _fabsd_
+ short 0x0000
+ bra.l _fabsx_
+ short 0x0000
+
+ bra.l _fadds_
+ short 0x0000
+ bra.l _faddd_
+ short 0x0000
+ bra.l _faddx_
+ short 0x0000
+
+ bra.l _fdivs_
+ short 0x0000
+ bra.l _fdivd_
+ short 0x0000
+ bra.l _fdivx_
+ short 0x0000
+
+ bra.l _fints_
+ short 0x0000
+ bra.l _fintd_
+ short 0x0000
+ bra.l _fintx_
+ short 0x0000
+
+ bra.l _fintrzs_
+ short 0x0000
+ bra.l _fintrzd_
+ short 0x0000
+ bra.l _fintrzx_
+ short 0x0000
+
+ bra.l _fmuls_
+ short 0x0000
+ bra.l _fmuld_
+ short 0x0000
+ bra.l _fmulx_
+ short 0x0000
+
+ bra.l _fnegs_
+ short 0x0000
+ bra.l _fnegd_
+ short 0x0000
+ bra.l _fnegx_
+ short 0x0000
+
+ bra.l _fsqrts_
+ short 0x0000
+ bra.l _fsqrtd_
+ short 0x0000
+ bra.l _fsqrtx_
+ short 0x0000
+
+ bra.l _fsubs_
+ short 0x0000
+ bra.l _fsubd_
+ short 0x0000
+ bra.l _fsubx_
+ short 0x0000
+
+# leave room for possible future additions
+ align 0x400
+
+#
+# This file contains a set of define statements for constants
+# in order to promote readability within the corecode itself.
+#
+
+set LOCAL_SIZE, 192 # stack frame size(bytes)
+set LV, -LOCAL_SIZE # stack offset
+
+set EXC_SR, 0x4 # stack status register
+set EXC_PC, 0x6 # stack pc
+set EXC_VOFF, 0xa # stacked vector offset
+set EXC_EA, 0xc # stacked <ea>
+
+set EXC_FP, 0x0 # frame pointer
+
+set EXC_AREGS, -68 # offset of all address regs
+set EXC_DREGS, -100 # offset of all data regs
+set EXC_FPREGS, -36 # offset of all fp regs
+
+set EXC_A7, EXC_AREGS+(7*4) # offset of saved a7
+set OLD_A7, EXC_AREGS+(6*4) # extra copy of saved a7
+set EXC_A6, EXC_AREGS+(6*4) # offset of saved a6
+set EXC_A5, EXC_AREGS+(5*4)
+set EXC_A4, EXC_AREGS+(4*4)
+set EXC_A3, EXC_AREGS+(3*4)
+set EXC_A2, EXC_AREGS+(2*4)
+set EXC_A1, EXC_AREGS+(1*4)
+set EXC_A0, EXC_AREGS+(0*4)
+set EXC_D7, EXC_DREGS+(7*4)
+set EXC_D6, EXC_DREGS+(6*4)
+set EXC_D5, EXC_DREGS+(5*4)
+set EXC_D4, EXC_DREGS+(4*4)
+set EXC_D3, EXC_DREGS+(3*4)
+set EXC_D2, EXC_DREGS+(2*4)
+set EXC_D1, EXC_DREGS+(1*4)
+set EXC_D0, EXC_DREGS+(0*4)
+
+set EXC_FP0, EXC_FPREGS+(0*12) # offset of saved fp0
+set EXC_FP1, EXC_FPREGS+(1*12) # offset of saved fp1
+set EXC_FP2, EXC_FPREGS+(2*12) # offset of saved fp2 (not used)
+
+set FP_SCR1, LV+80 # fp scratch 1
+set FP_SCR1_EX, FP_SCR1+0
+set FP_SCR1_SGN, FP_SCR1+2
+set FP_SCR1_HI, FP_SCR1+4
+set FP_SCR1_LO, FP_SCR1+8
+
+set FP_SCR0, LV+68 # fp scratch 0
+set FP_SCR0_EX, FP_SCR0+0
+set FP_SCR0_SGN, FP_SCR0+2
+set FP_SCR0_HI, FP_SCR0+4
+set FP_SCR0_LO, FP_SCR0+8
+
+set FP_DST, LV+56 # fp destination operand
+set FP_DST_EX, FP_DST+0
+set FP_DST_SGN, FP_DST+2
+set FP_DST_HI, FP_DST+4
+set FP_DST_LO, FP_DST+8
+
+set FP_SRC, LV+44 # fp source operand
+set FP_SRC_EX, FP_SRC+0
+set FP_SRC_SGN, FP_SRC+2
+set FP_SRC_HI, FP_SRC+4
+set FP_SRC_LO, FP_SRC+8
+
+set USER_FPIAR, LV+40 # FP instr address register
+
+set USER_FPSR, LV+36 # FP status register
+set FPSR_CC, USER_FPSR+0 # FPSR condition codes
+set FPSR_QBYTE, USER_FPSR+1 # FPSR quotient byte
+set FPSR_EXCEPT, USER_FPSR+2 # FPSR exception status byte
+set FPSR_AEXCEPT, USER_FPSR+3 # FPSR accrued exception byte
+
+set USER_FPCR, LV+32 # FP control register
+set FPCR_ENABLE, USER_FPCR+2 # FPCR exception enable
+set FPCR_MODE, USER_FPCR+3 # FPCR rounding mode control
+
+set L_SCR3, LV+28 # integer scratch 3
+set L_SCR2, LV+24 # integer scratch 2
+set L_SCR1, LV+20 # integer scratch 1
+
+set STORE_FLG, LV+19 # flag: operand store (i.e. not fcmp/ftst)
+
+set EXC_TEMP2, LV+24 # temporary space
+set EXC_TEMP, LV+16 # temporary space
+
+set DTAG, LV+15 # destination operand type
+set STAG, LV+14 # source operand type
+
+set SPCOND_FLG, LV+10 # flag: special case (see below)
+
+set EXC_CC, LV+8 # saved condition codes
+set EXC_EXTWPTR, LV+4 # saved current PC (active)
+set EXC_EXTWORD, LV+2 # saved extension word
+set EXC_CMDREG, LV+2 # saved extension word
+set EXC_OPWORD, LV+0 # saved operation word
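+# (Orientation note: positive offsets from the frame pointer reach the
+# stacked exception frame -- SR at +0x4, PC at +0x6, vector offset at
+# +0xa -- while the 192-byte local area below it holds the saved
+# registers and scratch space defined above.)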
+
+################################
+
+# Helpful macros
+
+set FTEMP, 0 # offsets within an
+set FTEMP_EX, 0 # extended precision
+set FTEMP_SGN, 2 # value saved in memory.
+set FTEMP_HI, 4
+set FTEMP_LO, 8
+set FTEMP_GRS, 12
+
+set LOCAL, 0 # offsets within an
+set LOCAL_EX, 0 # extended precision
+set LOCAL_SGN, 2 # value saved in memory.
+set LOCAL_HI, 4
+set LOCAL_LO, 8
+set LOCAL_GRS, 12
+
+set DST, 0 # offsets within an
+set DST_EX, 0 # extended precision
+set DST_HI, 4 # value saved in memory.
+set DST_LO, 8
+
+set SRC, 0 # offsets within an
+set SRC_EX, 0 # extended precision
+set SRC_HI, 4 # value saved in memory.
+set SRC_LO, 8
+
+set SGL_LO, 0x3f81 # min sgl prec exponent
+set SGL_HI, 0x407e # max sgl prec exponent
+set DBL_LO, 0x3c01 # min dbl prec exponent
+set DBL_HI, 0x43fe # max dbl prec exponent
+set EXT_LO, 0x0 # min ext prec exponent
+set EXT_HI, 0x7ffe # max ext prec exponent
+
+set EXT_BIAS, 0x3fff # extended precision bias
+set SGL_BIAS, 0x007f # single precision bias
+set DBL_BIAS, 0x03ff # double precision bias
+
+set NORM, 0x00 # operand type for STAG/DTAG
+set ZERO, 0x01 # operand type for STAG/DTAG
+set INF, 0x02 # operand type for STAG/DTAG
+set QNAN, 0x03 # operand type for STAG/DTAG
+set DENORM, 0x04 # operand type for STAG/DTAG
+set SNAN, 0x05 # operand type for STAG/DTAG
+set UNNORM, 0x06 # operand type for STAG/DTAG
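+# (In the templates that follow, these tags steer the dispatch: NORM
+# takes the main routine (e.g. ssin), ZERO takes src_zero or ld_pone,
+# INF takes t_operr or src_inf, QNAN takes src_qnan, and DENORM takes
+# the corresponding *d routine such as ssind.)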
+
+##################
+# FPSR/FPCR bits #
+##################
+set neg_bit, 0x3 # negative result
+set z_bit, 0x2 # zero result
+set inf_bit, 0x1 # infinite result
+set nan_bit, 0x0 # NAN result
+
+set q_sn_bit, 0x7 # sign bit of quotient byte
+
+set bsun_bit, 7 # branch on unordered
+set snan_bit, 6 # signalling NAN
+set operr_bit, 5 # operand error
+set ovfl_bit, 4 # overflow
+set unfl_bit, 3 # underflow
+set dz_bit, 2 # divide by zero
+set inex2_bit, 1 # inexact result 2
+set inex1_bit, 0 # inexact result 1
+
+set aiop_bit, 7 # accrued invalid operation bit
+set aovfl_bit, 6 # accrued overflow bit
+set aunfl_bit, 5 # accrued underflow bit
+set adz_bit, 4 # accrued dz bit
+set ainex_bit, 3 # accrued inexact bit
+
+#############################
+# FPSR individual bit masks #
+#############################
+set neg_mask, 0x08000000 # negative bit mask (lw)
+set inf_mask, 0x02000000 # infinity bit mask (lw)
+set z_mask, 0x04000000 # zero bit mask (lw)
+set nan_mask, 0x01000000 # nan bit mask (lw)
+
+set neg_bmask, 0x08 # negative bit mask (byte)
+set inf_bmask, 0x02 # infinity bit mask (byte)
+set z_bmask, 0x04 # zero bit mask (byte)
+set nan_bmask, 0x01 # nan bit mask (byte)
+
+set bsun_mask, 0x00008000 # bsun exception mask
+set snan_mask, 0x00004000 # snan exception mask
+set operr_mask, 0x00002000 # operr exception mask
+set ovfl_mask, 0x00001000 # overflow exception mask
+set unfl_mask, 0x00000800 # underflow exception mask
+set dz_mask, 0x00000400 # dz exception mask
+set inex2_mask, 0x00000200 # inex2 exception mask
+set inex1_mask, 0x00000100 # inex1 exception mask
+
+set aiop_mask, 0x00000080 # accrued illegal operation
+set aovfl_mask, 0x00000040 # accrued overflow
+set aunfl_mask, 0x00000020 # accrued underflow
+set adz_mask, 0x00000010 # accrued divide by zero
+set ainex_mask, 0x00000008 # accrued inexact
+
+######################################
+# FPSR combinations used in the FPSP #
+######################################
+set dzinf_mask, inf_mask+dz_mask+adz_mask
+set opnan_mask, nan_mask+operr_mask+aiop_mask
+set nzi_mask, 0x01ffffff # clears N, Z, and I
+set unfinx_mask, unfl_mask+inex2_mask+aunfl_mask+ainex_mask
+set unf2inx_mask, unfl_mask+inex2_mask+ainex_mask
+set ovfinx_mask, ovfl_mask+inex2_mask+aovfl_mask+ainex_mask
+set inx1a_mask, inex1_mask+ainex_mask
+set inx2a_mask, inex2_mask+ainex_mask
+set snaniop_mask, nan_mask+snan_mask+aiop_mask
+set snaniop2_mask, snan_mask+aiop_mask
+set naniop_mask, nan_mask+aiop_mask
+set neginf_mask, neg_mask+inf_mask
+set infaiop_mask, inf_mask+aiop_mask
+set negz_mask, neg_mask+z_mask
+set opaop_mask, operr_mask+aiop_mask
+set unfl_inx_mask, unfl_mask+aunfl_mask+ainex_mask
+set ovfl_inx_mask, ovfl_mask+aovfl_mask+ainex_mask
+
+#########
+# misc. #
+#########
+set rnd_stky_bit, 29 # stky bit pos in longword
+
+set sign_bit, 0x7 # sign bit
+set signan_bit, 0x6 # signalling nan bit
+
+set sgl_thresh, 0x3f81 # minimum sgl exponent
+set dbl_thresh, 0x3c01 # minimum dbl exponent
+
+set x_mode, 0x0 # extended precision
+set s_mode, 0x4 # single precision
+set d_mode, 0x8 # double precision
+
+set rn_mode, 0x0 # round-to-nearest
+set rz_mode, 0x1 # round-to-zero
+set rm_mode, 0x2 # round-to-minus-infinity
+set rp_mode, 0x3 # round-to-plus-infinity
+
+set mantissalen, 64 # length of mantissa in bits
+
+set BYTE, 1 # len(byte) == 1 byte
+set WORD, 2 # len(word) == 2 bytes
+set LONG, 4 # len(longword) == 4 bytes
+
+set BSUN_VEC, 0xc0 # bsun vector offset
+set INEX_VEC, 0xc4 # inexact vector offset
+set DZ_VEC, 0xc8 # dz vector offset
+set UNFL_VEC, 0xcc # unfl vector offset
+set OPERR_VEC, 0xd0 # operr vector offset
+set OVFL_VEC, 0xd4 # ovfl vector offset
+set SNAN_VEC, 0xd8 # snan vector offset
+
+###########################
+# SPecial CONDition FLaGs #
+###########################
+set ftrapcc_flg, 0x01 # flag bit: ftrapcc exception
+set fbsun_flg, 0x02 # flag bit: bsun exception
+set mia7_flg, 0x04 # flag bit: (a7)+ <ea>
+set mda7_flg, 0x08 # flag bit: -(a7) <ea>
+set fmovm_flg, 0x40 # flag bit: fmovm instruction
+set immed_flg, 0x80 # flag bit: &<data> <ea>
+
+set ftrapcc_bit, 0x0
+set fbsun_bit, 0x1
+set mia7_bit, 0x2
+set mda7_bit, 0x3
+set immed_bit, 0x7
+
+##################################
+# TRANSCENDENTAL "LAST-OP" FLAGS #
+##################################
+set FMUL_OP, 0x0 # fmul instr performed last
+set FDIV_OP, 0x1 # fdiv performed last
+set FADD_OP, 0x2 # fadd performed last
+set FMOV_OP, 0x3 # fmov performed last
+
+#############
+# CONSTANTS #
+#############
+T1: long 0x40C62D38,0xD3D64634 # 16381 LOG2 LEAD
+T2: long 0x3D6F90AE,0xB1E75CC7 # 16381 LOG2 TRAIL
+
+PI: long 0x40000000,0xC90FDAA2,0x2168C235,0x00000000
+PIBY2: long 0x3FFF0000,0xC90FDAA2,0x2168C235,0x00000000
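+# (PI and PIBY2 above are stored as extended-precision images: a
+# sign/exponent word -- 0x4000 and 0x3fff respectively -- a pad word,
+# the 64-bit mantissa 0xc90fdaa22168c235, and a trailing pad longword.)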
+
+TWOBYPI:
+ long 0x3FE45F30,0x6DC9C883
+
+#########################################################################
+# MONADIC TEMPLATE #
+#########################################################################
+ global _fsins_
+_fsins_:
+ link %a6,&-LOCAL_SIZE
+
+ movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
+ fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+ fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
+
+ fmov.l &0x0,%fpcr # zero FPCR
+
+#
+# copy, convert, and tag input argument
+#
+ fmov.s 0x8(%a6),%fp0 # load sgl input
+ fmov.x %fp0,FP_SRC(%a6)
+ lea FP_SRC(%a6),%a0
+ bsr.l tag # fetch operand type
+ mov.b %d0,STAG(%a6)
+ mov.b %d0,%d1
+
+ andi.l &0x00ff00ff,USER_FPSR(%a6)
+
+ clr.l %d0
+ mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
+
+ tst.b %d1
+ bne.b _L0_2s
+ bsr.l ssin # operand is a NORM
+ bra.b _L0_6s
+_L0_2s:
+ cmpi.b %d1,&ZERO # is operand a ZERO?
+ bne.b _L0_3s # no
+ bsr.l src_zero # yes
+ bra.b _L0_6s
+_L0_3s:
+ cmpi.b %d1,&INF # is operand an INF?
+ bne.b _L0_4s # no
+ bsr.l t_operr # yes
+ bra.b _L0_6s
+_L0_4s:
+ cmpi.b %d1,&QNAN # is operand a QNAN?
+ bne.b _L0_5s # no
+ bsr.l src_qnan # yes
+ bra.b _L0_6s
+_L0_5s:
+ bsr.l ssind # operand is a DENORM
+_L0_6s:
+
+#
+# Result is now in FP0
+#
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+ fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
+ unlk %a6
+ rts
+
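+# A minimal calling sketch for the single-precision entry above
+# (illustrative only; assumes the argument is already in %fp1): the
+# caller pushes the operand, branches in, and finds the result in fp0.
+#
+#	fmov.s	%fp1,-(%sp)	# push sgl argument
+#	bsr.l	_fsins_		# result returned in fp0
+#	addq.l	&0x4,%sp	# pop argument
+#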
+ global _fsind_
+_fsind_:
+ link %a6,&-LOCAL_SIZE
+
+ movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
+ fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+ fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
+
+ fmov.l &0x0,%fpcr # zero FPCR
+
+#
+# copy, convert, and tag input argument
+#
+ fmov.d 0x8(%a6),%fp0 # load dbl input
+ fmov.x %fp0,FP_SRC(%a6)
+ lea FP_SRC(%a6),%a0
+ bsr.l tag # fetch operand type
+ mov.b %d0,STAG(%a6)
+ mov.b %d0,%d1
+
+ andi.l &0x00ff00ff,USER_FPSR(%a6)
+
+ clr.l %d0
+ mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
+
+ mov.b %d1,STAG(%a6)
+ tst.b %d1
+ bne.b _L0_2d
+ bsr.l ssin # operand is a NORM
+ bra.b _L0_6d
+_L0_2d:
+ cmpi.b %d1,&ZERO # is operand a ZERO?
+ bne.b _L0_3d # no
+ bsr.l src_zero # yes
+ bra.b _L0_6d
+_L0_3d:
+ cmpi.b %d1,&INF # is operand an INF?
+ bne.b _L0_4d # no
+ bsr.l t_operr # yes
+ bra.b _L0_6d
+_L0_4d:
+ cmpi.b %d1,&QNAN # is operand a QNAN?
+ bne.b _L0_5d # no
+ bsr.l src_qnan # yes
+ bra.b _L0_6d
+_L0_5d:
+ bsr.l ssind # operand is a DENORM
+_L0_6d:
+
+#
+# Result is now in FP0
+#
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+ fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
+ unlk %a6
+ rts
+
+ global _fsinx_
+_fsinx_:
+ link %a6,&-LOCAL_SIZE
+
+ movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
+ fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+ fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
+
+ fmov.l &0x0,%fpcr # zero FPCR
+
+#
+# copy, convert, and tag input argument
+#
+ lea FP_SRC(%a6),%a0
+ mov.l 0x8+0x0(%a6),0x0(%a0) # load ext input
+ mov.l 0x8+0x4(%a6),0x4(%a0)
+ mov.l 0x8+0x8(%a6),0x8(%a0)
+ bsr.l tag # fetch operand type
+ mov.b %d0,STAG(%a6)
+ mov.b %d0,%d1
+
+ andi.l &0x00ff00ff,USER_FPSR(%a6)
+
+ clr.l %d0
+ mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
+
+ tst.b %d1
+ bne.b _L0_2x
+ bsr.l ssin # operand is a NORM
+ bra.b _L0_6x
+_L0_2x:
+ cmpi.b %d1,&ZERO # is operand a ZERO?
+ bne.b _L0_3x # no
+ bsr.l src_zero # yes
+ bra.b _L0_6x
+_L0_3x:
+ cmpi.b %d1,&INF # is operand an INF?
+ bne.b _L0_4x # no
+ bsr.l t_operr # yes
+ bra.b _L0_6x
+_L0_4x:
+ cmpi.b %d1,&QNAN # is operand a QNAN?
+ bne.b _L0_5x # no
+ bsr.l src_qnan # yes
+ bra.b _L0_6x
+_L0_5x:
+ bsr.l ssind # operand is a DENORM
+_L0_6x:
+
+#
+# Result is now in FP0
+#
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+ fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
+ unlk %a6
+ rts
+
+
+#########################################################################
+# MONADIC TEMPLATE #
+#########################################################################
+ global _fcoss_
+_fcoss_:
+ link %a6,&-LOCAL_SIZE
+
+ movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
+ fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+ fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
+
+ fmov.l &0x0,%fpcr # zero FPCR
+
+#
+# copy, convert, and tag input argument
+#
+ fmov.s 0x8(%a6),%fp0 # load sgl input
+ fmov.x %fp0,FP_SRC(%a6)
+ lea FP_SRC(%a6),%a0
+ bsr.l tag # fetch operand type
+ mov.b %d0,STAG(%a6)
+ mov.b %d0,%d1
+
+ andi.l &0x00ff00ff,USER_FPSR(%a6)
+
+ clr.l %d0
+ mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
+
+ tst.b %d1
+ bne.b _L1_2s
+ bsr.l scos # operand is a NORM
+ bra.b _L1_6s
+_L1_2s:
+ cmpi.b %d1,&ZERO # is operand a ZERO?
+ bne.b _L1_3s # no
+ bsr.l ld_pone # yes
+ bra.b _L1_6s
+_L1_3s:
+ cmpi.b %d1,&INF # is operand an INF?
+ bne.b _L1_4s # no
+ bsr.l t_operr # yes
+ bra.b _L1_6s
+_L1_4s:
+ cmpi.b %d1,&QNAN # is operand a QNAN?
+ bne.b _L1_5s # no
+ bsr.l src_qnan # yes
+ bra.b _L1_6s
+_L1_5s:
+ bsr.l scosd # operand is a DENORM
+_L1_6s:
+
+#
+# Result is now in FP0
+#
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+ fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
+ unlk %a6
+ rts
+
+ global _fcosd_
+_fcosd_:
+ link %a6,&-LOCAL_SIZE
+
+ movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
+ fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+ fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
+
+ fmov.l &0x0,%fpcr # zero FPCR
+
+#
+# copy, convert, and tag input argument
+#
+ fmov.d 0x8(%a6),%fp0 # load dbl input
+ fmov.x %fp0,FP_SRC(%a6)
+ lea FP_SRC(%a6),%a0
+ bsr.l tag # fetch operand type
+ mov.b %d0,STAG(%a6)
+ mov.b %d0,%d1
+
+ andi.l &0x00ff00ff,USER_FPSR(%a6)
+
+ clr.l %d0
+ mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
+
+ mov.b %d1,STAG(%a6)
+ tst.b %d1
+ bne.b _L1_2d
+ bsr.l scos # operand is a NORM
+ bra.b _L1_6d
+_L1_2d:
+ cmpi.b %d1,&ZERO # is operand a ZERO?
+ bne.b _L1_3d # no
+ bsr.l ld_pone # yes
+ bra.b _L1_6d
+_L1_3d:
+ cmpi.b %d1,&INF # is operand an INF?
+ bne.b _L1_4d # no
+ bsr.l t_operr # yes
+ bra.b _L1_6d
+_L1_4d:
+ cmpi.b %d1,&QNAN # is operand a QNAN?
+ bne.b _L1_5d # no
+ bsr.l src_qnan # yes
+ bra.b _L1_6d
+_L1_5d:
+ bsr.l scosd # operand is a DENORM
+_L1_6d:
+
+#
+# Result is now in FP0
+#
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+ fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
+ unlk %a6
+ rts
+
+ global _fcosx_
+_fcosx_:
+ link %a6,&-LOCAL_SIZE
+
+ movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
+ fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+ fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
+
+ fmov.l &0x0,%fpcr # zero FPCR
+
+#
+# copy, convert, and tag input argument
+#
+ lea FP_SRC(%a6),%a0
+ mov.l 0x8+0x0(%a6),0x0(%a0) # load ext input
+ mov.l 0x8+0x4(%a6),0x4(%a0)
+ mov.l 0x8+0x8(%a6),0x8(%a0)
+ bsr.l tag # fetch operand type
+ mov.b %d0,STAG(%a6)
+ mov.b %d0,%d1
+
+ andi.l &0x00ff00ff,USER_FPSR(%a6)
+
+ clr.l %d0
+ mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
+
+ tst.b %d1
+ bne.b _L1_2x
+ bsr.l scos # operand is a NORM
+ bra.b _L1_6x
+_L1_2x:
+ cmpi.b %d1,&ZERO # is operand a ZERO?
+ bne.b _L1_3x # no
+ bsr.l ld_pone # yes
+ bra.b _L1_6x
+_L1_3x:
+ cmpi.b %d1,&INF # is operand an INF?
+ bne.b _L1_4x # no
+ bsr.l t_operr # yes
+ bra.b _L1_6x
+_L1_4x:
+ cmpi.b %d1,&QNAN # is operand a QNAN?
+ bne.b _L1_5x # no
+ bsr.l src_qnan # yes
+ bra.b _L1_6x
+_L1_5x:
+ bsr.l scosd # operand is a DENORM
+_L1_6x:
+
+#
+# Result is now in FP0
+#
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+ fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
+ unlk %a6
+ rts
+
+
+#########################################################################
+# MONADIC TEMPLATE #
+#########################################################################
+ global _fsinhs_
+_fsinhs_:
+ link %a6,&-LOCAL_SIZE
+
+ movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
+ fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+ fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
+
+ fmov.l &0x0,%fpcr # zero FPCR
+
+#
+# copy, convert, and tag input argument
+#
+ fmov.s 0x8(%a6),%fp0 # load sgl input
+ fmov.x %fp0,FP_SRC(%a6)
+ lea FP_SRC(%a6),%a0
+ bsr.l tag # fetch operand type
+ mov.b %d0,STAG(%a6)
+ mov.b %d0,%d1
+
+ andi.l &0x00ff00ff,USER_FPSR(%a6)
+
+ clr.l %d0
+ mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
+
+ tst.b %d1
+ bne.b _L2_2s
+ bsr.l ssinh # operand is a NORM
+ bra.b _L2_6s
+_L2_2s:
+ cmpi.b %d1,&ZERO # is operand a ZERO?
+ bne.b _L2_3s # no
+ bsr.l src_zero # yes
+ bra.b _L2_6s
+_L2_3s:
+ cmpi.b %d1,&INF # is operand an INF?
+ bne.b _L2_4s # no
+ bsr.l src_inf # yes
+ bra.b _L2_6s
+_L2_4s:
+ cmpi.b %d1,&QNAN # is operand a QNAN?
+ bne.b _L2_5s # no
+ bsr.l src_qnan # yes
+ bra.b _L2_6s
+_L2_5s:
+ bsr.l ssinhd # operand is a DENORM
+_L2_6s:
+
+#
+# Result is now in FP0
+#
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+ fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
+ unlk %a6
+ rts
+
+ global _fsinhd_
+_fsinhd_:
+ link %a6,&-LOCAL_SIZE
+
+ movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
+ fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+ fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
+
+ fmov.l &0x0,%fpcr # zero FPCR
+
+#
+# copy, convert, and tag input argument
+#
+ fmov.d 0x8(%a6),%fp0 # load dbl input
+ fmov.x %fp0,FP_SRC(%a6)
+ lea FP_SRC(%a6),%a0
+ bsr.l tag # fetch operand type
+ mov.b %d0,STAG(%a6)
+ mov.b %d0,%d1
+
+ andi.l &0x00ff00ff,USER_FPSR(%a6)
+
+ clr.l %d0
+ mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
+
+ mov.b %d1,STAG(%a6)
+ tst.b %d1
+ bne.b _L2_2d
+ bsr.l ssinh # operand is a NORM
+ bra.b _L2_6d
+_L2_2d:
+ cmpi.b %d1,&ZERO # is operand a ZERO?
+ bne.b _L2_3d # no
+ bsr.l src_zero # yes
+ bra.b _L2_6d
+_L2_3d:
+ cmpi.b %d1,&INF # is operand an INF?
+ bne.b _L2_4d # no
+ bsr.l src_inf # yes
+ bra.b _L2_6d
+_L2_4d:
+ cmpi.b %d1,&QNAN # is operand a QNAN?
+ bne.b _L2_5d # no
+ bsr.l src_qnan # yes
+ bra.b _L2_6d
+_L2_5d:
+ bsr.l ssinhd # operand is a DENORM
+_L2_6d:
+
+#
+# Result is now in FP0
+#
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+ fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
+ unlk %a6
+ rts
+
+ global _fsinhx_
+_fsinhx_:
+ link %a6,&-LOCAL_SIZE
+
+ movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
+ fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+ fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
+
+ fmov.l &0x0,%fpcr # zero FPCR
+
+#
+# copy, convert, and tag input argument
+#
+ lea FP_SRC(%a6),%a0
+ mov.l 0x8+0x0(%a6),0x0(%a0) # load ext input
+ mov.l 0x8+0x4(%a6),0x4(%a0)
+ mov.l 0x8+0x8(%a6),0x8(%a0)
+ bsr.l tag # fetch operand type
+ mov.b %d0,STAG(%a6)
+ mov.b %d0,%d1
+
+ andi.l &0x00ff00ff,USER_FPSR(%a6)
+
+ clr.l %d0
+ mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
+
+ tst.b %d1
+ bne.b _L2_2x
+ bsr.l ssinh # operand is a NORM
+ bra.b _L2_6x
+_L2_2x:
+ cmpi.b %d1,&ZERO # is operand a ZERO?
+ bne.b _L2_3x # no
+ bsr.l src_zero # yes
+ bra.b _L2_6x
+_L2_3x:
+ cmpi.b %d1,&INF # is operand an INF?
+ bne.b _L2_4x # no
+ bsr.l src_inf # yes
+ bra.b _L2_6x
+_L2_4x:
+ cmpi.b %d1,&QNAN # is operand a QNAN?
+ bne.b _L2_5x # no
+ bsr.l src_qnan # yes
+ bra.b _L2_6x
+_L2_5x:
+ bsr.l ssinhd # operand is a DENORM
+_L2_6x:
+
+#
+# Result is now in FP0
+#
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+ fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
+ unlk %a6
+ rts
+
+
+#########################################################################
+# MONADIC TEMPLATE #
+#########################################################################
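+#
+# log(1+X) special cases:
+#	ZERO   -> src_zero	# log(1 +- 0) = +-0
+#	INF    -> sopr_inf	# log(1 + inf) = +inf; 1 + -inf is an operand error
+#	QNAN   -> src_qnan	# propagate the NaN operand
+#	DENORM -> slognp1d	# denormalized-operand path
+#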
+ global _flognp1s_
+_flognp1s_:
+ link %a6,&-LOCAL_SIZE
+
+ movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
+ fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+ fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
+
+ fmov.l &0x0,%fpcr # zero FPCR
+
+#
+# copy, convert, and tag input argument
+#
+ fmov.s 0x8(%a6),%fp0 # load sgl input
+ fmov.x %fp0,FP_SRC(%a6)
+ lea FP_SRC(%a6),%a0
+ bsr.l tag # fetch operand type
+ mov.b %d0,STAG(%a6)
+ mov.b %d0,%d1
+
+ andi.l &0x00ff00ff,USER_FPSR(%a6)
+
+ clr.l %d0
+ mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
+
+ tst.b %d1
+ bne.b _L3_2s
+ bsr.l slognp1 # operand is a NORM
+ bra.b _L3_6s
+_L3_2s:
+ cmpi.b %d1,&ZERO # is operand a ZERO?
+ bne.b _L3_3s # no
+ bsr.l src_zero # yes
+ bra.b _L3_6s
+_L3_3s:
+ cmpi.b %d1,&INF # is operand an INF?
+ bne.b _L3_4s # no
+ bsr.l sopr_inf # yes
+ bra.b _L3_6s
+_L3_4s:
+ cmpi.b %d1,&QNAN # is operand a QNAN?
+ bne.b _L3_5s # no
+ bsr.l src_qnan # yes
+ bra.b _L3_6s
+_L3_5s:
+ bsr.l slognp1d # operand is a DENORM
+_L3_6s:
+
+#
+# Result is now in FP0
+#
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+ fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
+ unlk %a6
+ rts
+
+ global _flognp1d_
+_flognp1d_:
+ link %a6,&-LOCAL_SIZE
+
+ movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
+ fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+ fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
+
+ fmov.l &0x0,%fpcr # zero FPCR
+
+#
+# copy, convert, and tag input argument
+#
+ fmov.d 0x8(%a6),%fp0 # load dbl input
+ fmov.x %fp0,FP_SRC(%a6)
+ lea FP_SRC(%a6),%a0
+ bsr.l tag # fetch operand type
+ mov.b %d0,STAG(%a6)
+ mov.b %d0,%d1
+
+ andi.l &0x00ff00ff,USER_FPSR(%a6)
+
+ clr.l %d0
+ mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
+
+ tst.b %d1
+ bne.b _L3_2d
+ bsr.l slognp1 # operand is a NORM
+ bra.b _L3_6d
+_L3_2d:
+ cmpi.b %d1,&ZERO # is operand a ZERO?
+ bne.b _L3_3d # no
+ bsr.l src_zero # yes
+ bra.b _L3_6d
+_L3_3d:
+ cmpi.b %d1,&INF # is operand an INF?
+ bne.b _L3_4d # no
+ bsr.l sopr_inf # yes
+ bra.b _L3_6d
+_L3_4d:
+ cmpi.b %d1,&QNAN # is operand a QNAN?
+ bne.b _L3_5d # no
+ bsr.l src_qnan # yes
+ bra.b _L3_6d
+_L3_5d:
+ bsr.l slognp1d # operand is a DENORM
+_L3_6d:
+
+#
+# Result is now in FP0
+#
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+ fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
+ unlk %a6
+ rts
+
+ global _flognp1x_
+_flognp1x_:
+ link %a6,&-LOCAL_SIZE
+
+ movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
+ fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+ fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
+
+ fmov.l &0x0,%fpcr # zero FPCR
+
+#
+# copy, convert, and tag input argument
+#
+ lea FP_SRC(%a6),%a0
+ mov.l 0x8+0x0(%a6),0x0(%a0) # load ext input
+ mov.l 0x8+0x4(%a6),0x4(%a0)
+ mov.l 0x8+0x8(%a6),0x8(%a0)
+ bsr.l tag # fetch operand type
+ mov.b %d0,STAG(%a6)
+ mov.b %d0,%d1
+
+ andi.l &0x00ff00ff,USER_FPSR(%a6)
+
+ clr.l %d0
+ mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
+
+ tst.b %d1
+ bne.b _L3_2x
+ bsr.l slognp1 # operand is a NORM
+ bra.b _L3_6x
+_L3_2x:
+ cmpi.b %d1,&ZERO # is operand a ZERO?
+ bne.b _L3_3x # no
+ bsr.l src_zero # yes
+ bra.b _L3_6x
+_L3_3x:
+ cmpi.b %d1,&INF # is operand an INF?
+ bne.b _L3_4x # no
+ bsr.l sopr_inf # yes
+ bra.b _L3_6x
+_L3_4x:
+ cmpi.b %d1,&QNAN # is operand a QNAN?
+ bne.b _L3_5x # no
+ bsr.l src_qnan # yes
+ bra.b _L3_6x
+_L3_5x:
+ bsr.l slognp1d # operand is a DENORM
+_L3_6x:
+
+#
+# Result is now in FP0
+#
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+ fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
+ unlk %a6
+ rts
+
+
+#########################################################################
+# MONADIC TEMPLATE #
+#########################################################################
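+#
+# (e^X)-1 special cases:
+#	ZERO   -> src_zero	# e^(+-0) - 1 = +-0
+#	INF    -> setoxm1i	# e^(+inf) - 1 = +inf; e^(-inf) - 1 = -1
+#	QNAN   -> src_qnan	# propagate the NaN operand
+#	DENORM -> setoxm1d	# denormalized-operand path
+#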
+ global _fetoxm1s_
+_fetoxm1s_:
+ link %a6,&-LOCAL_SIZE
+
+ movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
+ fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+ fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
+
+ fmov.l &0x0,%fpcr # zero FPCR
+
+#
+# copy, convert, and tag input argument
+#
+ fmov.s 0x8(%a6),%fp0 # load sgl input
+ fmov.x %fp0,FP_SRC(%a6)
+ lea FP_SRC(%a6),%a0
+ bsr.l tag # fetch operand type
+ mov.b %d0,STAG(%a6)
+ mov.b %d0,%d1
+
+ andi.l &0x00ff00ff,USER_FPSR(%a6)
+
+ clr.l %d0
+ mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
+
+ tst.b %d1
+ bne.b _L4_2s
+ bsr.l setoxm1 # operand is a NORM
+ bra.b _L4_6s
+_L4_2s:
+ cmpi.b %d1,&ZERO # is operand a ZERO?
+ bne.b _L4_3s # no
+ bsr.l src_zero # yes
+ bra.b _L4_6s
+_L4_3s:
+ cmpi.b %d1,&INF # is operand an INF?
+ bne.b _L4_4s # no
+ bsr.l setoxm1i # yes
+ bra.b _L4_6s
+_L4_4s:
+ cmpi.b %d1,&QNAN # is operand a QNAN?
+ bne.b _L4_5s # no
+ bsr.l src_qnan # yes
+ bra.b _L4_6s
+_L4_5s:
+ bsr.l setoxm1d # operand is a DENORM
+_L4_6s:
+
+#
+# Result is now in FP0
+#
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+ fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
+ unlk %a6
+ rts
+
+ global _fetoxm1d_
+_fetoxm1d_:
+ link %a6,&-LOCAL_SIZE
+
+ movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
+ fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+ fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
+
+ fmov.l &0x0,%fpcr # zero FPCR
+
+#
+# copy, convert, and tag input argument
+#
+ fmov.d 0x8(%a6),%fp0 # load dbl input
+ fmov.x %fp0,FP_SRC(%a6)
+ lea FP_SRC(%a6),%a0
+ bsr.l tag # fetch operand type
+ mov.b %d0,STAG(%a6)
+ mov.b %d0,%d1
+
+ andi.l &0x00ff00ff,USER_FPSR(%a6)
+
+ clr.l %d0
+ mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
+
+ tst.b %d1
+ bne.b _L4_2d
+ bsr.l setoxm1 # operand is a NORM
+ bra.b _L4_6d
+_L4_2d:
+ cmpi.b %d1,&ZERO # is operand a ZERO?
+ bne.b _L4_3d # no
+ bsr.l src_zero # yes
+ bra.b _L4_6d
+_L4_3d:
+ cmpi.b %d1,&INF # is operand an INF?
+ bne.b _L4_4d # no
+ bsr.l setoxm1i # yes
+ bra.b _L4_6d
+_L4_4d:
+ cmpi.b %d1,&QNAN # is operand a QNAN?
+ bne.b _L4_5d # no
+ bsr.l src_qnan # yes
+ bra.b _L4_6d
+_L4_5d:
+ bsr.l setoxm1d # operand is a DENORM
+_L4_6d:
+
+#
+# Result is now in FP0
+#
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+ fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
+ unlk %a6
+ rts
+
+ global _fetoxm1x_
+_fetoxm1x_:
+ link %a6,&-LOCAL_SIZE
+
+ movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
+ fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+ fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
+
+ fmov.l &0x0,%fpcr # zero FPCR
+
+#
+# copy, convert, and tag input argument
+#
+ lea FP_SRC(%a6),%a0
+ mov.l 0x8+0x0(%a6),0x0(%a0) # load ext input
+ mov.l 0x8+0x4(%a6),0x4(%a0)
+ mov.l 0x8+0x8(%a6),0x8(%a0)
+ bsr.l tag # fetch operand type
+ mov.b %d0,STAG(%a6)
+ mov.b %d0,%d1
+
+ andi.l &0x00ff00ff,USER_FPSR(%a6)
+
+ clr.l %d0
+ mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
+
+ tst.b %d1
+ bne.b _L4_2x
+ bsr.l setoxm1 # operand is a NORM
+ bra.b _L4_6x
+_L4_2x:
+ cmpi.b %d1,&ZERO # is operand a ZERO?
+ bne.b _L4_3x # no
+ bsr.l src_zero # yes
+ bra.b _L4_6x
+_L4_3x:
+ cmpi.b %d1,&INF # is operand an INF?
+ bne.b _L4_4x # no
+ bsr.l setoxm1i # yes
+ bra.b _L4_6x
+_L4_4x:
+ cmpi.b %d1,&QNAN # is operand a QNAN?
+ bne.b _L4_5x # no
+ bsr.l src_qnan # yes
+ bra.b _L4_6x
+_L4_5x:
+ bsr.l setoxm1d # operand is a DENORM
+_L4_6x:
+
+#
+# Result is now in FP0
+#
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+ fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
+ unlk %a6
+ rts
+
+
+#########################################################################
+# MONADIC TEMPLATE #
+#########################################################################
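+#
+# tanh(X) special cases:
+#	ZERO   -> src_zero	# tanh(+-0) = +-0
+#	INF    -> src_one	# tanh(+-inf) = +-1
+#	QNAN   -> src_qnan	# propagate the NaN operand
+#	DENORM -> stanhd	# denormalized-operand path
+#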
+ global _ftanhs_
+_ftanhs_:
+ link %a6,&-LOCAL_SIZE
+
+ movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
+ fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+ fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
+
+ fmov.l &0x0,%fpcr # zero FPCR
+
+#
+# copy, convert, and tag input argument
+#
+ fmov.s 0x8(%a6),%fp0 # load sgl input
+ fmov.x %fp0,FP_SRC(%a6)
+ lea FP_SRC(%a6),%a0
+ bsr.l tag # fetch operand type
+ mov.b %d0,STAG(%a6)
+ mov.b %d0,%d1
+
+ andi.l &0x00ff00ff,USER_FPSR(%a6)
+
+ clr.l %d0
+ mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
+
+ tst.b %d1
+ bne.b _L5_2s
+ bsr.l stanh # operand is a NORM
+ bra.b _L5_6s
+_L5_2s:
+ cmpi.b %d1,&ZERO # is operand a ZERO?
+ bne.b _L5_3s # no
+ bsr.l src_zero # yes
+ bra.b _L5_6s
+_L5_3s:
+ cmpi.b %d1,&INF # is operand an INF?
+ bne.b _L5_4s # no
+ bsr.l src_one # yes
+ bra.b _L5_6s
+_L5_4s:
+ cmpi.b %d1,&QNAN # is operand a QNAN?
+ bne.b _L5_5s # no
+ bsr.l src_qnan # yes
+ bra.b _L5_6s
+_L5_5s:
+ bsr.l stanhd # operand is a DENORM
+_L5_6s:
+
+#
+# Result is now in FP0
+#
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+ fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
+ unlk %a6
+ rts
+
+ global _ftanhd_
+_ftanhd_:
+ link %a6,&-LOCAL_SIZE
+
+ movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
+ fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+ fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
+
+ fmov.l &0x0,%fpcr # zero FPCR
+
+#
+# copy, convert, and tag input argument
+#
+ fmov.d 0x8(%a6),%fp0 # load dbl input
+ fmov.x %fp0,FP_SRC(%a6)
+ lea FP_SRC(%a6),%a0
+ bsr.l tag # fetch operand type
+ mov.b %d0,STAG(%a6)
+ mov.b %d0,%d1
+
+ andi.l &0x00ff00ff,USER_FPSR(%a6)
+
+ clr.l %d0
+ mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
+
+ tst.b %d1
+ bne.b _L5_2d
+ bsr.l stanh # operand is a NORM
+ bra.b _L5_6d
+_L5_2d:
+ cmpi.b %d1,&ZERO # is operand a ZERO?
+ bne.b _L5_3d # no
+ bsr.l src_zero # yes
+ bra.b _L5_6d
+_L5_3d:
+ cmpi.b %d1,&INF # is operand an INF?
+ bne.b _L5_4d # no
+ bsr.l src_one # yes
+ bra.b _L5_6d
+_L5_4d:
+ cmpi.b %d1,&QNAN # is operand a QNAN?
+ bne.b _L5_5d # no
+ bsr.l src_qnan # yes
+ bra.b _L5_6d
+_L5_5d:
+ bsr.l stanhd # operand is a DENORM
+_L5_6d:
+
+#
+# Result is now in FP0
+#
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+ fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
+ unlk %a6
+ rts
+
+ global _ftanhx_
+_ftanhx_:
+ link %a6,&-LOCAL_SIZE
+
+ movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
+ fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+ fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
+
+ fmov.l &0x0,%fpcr # zero FPCR
+
+#
+# copy, convert, and tag input argument
+#
+ lea FP_SRC(%a6),%a0
+ mov.l 0x8+0x0(%a6),0x0(%a0) # load ext input
+ mov.l 0x8+0x4(%a6),0x4(%a0)
+ mov.l 0x8+0x8(%a6),0x8(%a0)
+ bsr.l tag # fetch operand type
+ mov.b %d0,STAG(%a6)
+ mov.b %d0,%d1
+
+ andi.l &0x00ff00ff,USER_FPSR(%a6)
+
+ clr.l %d0
+ mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
+
+ tst.b %d1
+ bne.b _L5_2x
+ bsr.l stanh # operand is a NORM
+ bra.b _L5_6x
+_L5_2x:
+ cmpi.b %d1,&ZERO # is operand a ZERO?
+ bne.b _L5_3x # no
+ bsr.l src_zero # yes
+ bra.b _L5_6x
+_L5_3x:
+ cmpi.b %d1,&INF # is operand an INF?
+ bne.b _L5_4x # no
+ bsr.l src_one # yes
+ bra.b _L5_6x
+_L5_4x:
+ cmpi.b %d1,&QNAN # is operand a QNAN?
+ bne.b _L5_5x # no
+ bsr.l src_qnan # yes
+ bra.b _L5_6x
+_L5_5x:
+ bsr.l stanhd # operand is a DENORM
+_L5_6x:
+
+#
+# Result is now in FP0
+#
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+ fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
+ unlk %a6
+ rts
+
+
+#########################################################################
+# MONADIC TEMPLATE #
+#########################################################################
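+#
+# atan(X) special cases:
+#	ZERO   -> src_zero	# atan(+-0) = +-0
+#	INF    -> spi_2		# atan(+-inf) = +-pi/2
+#	QNAN   -> src_qnan	# propagate the NaN operand
+#	DENORM -> satand	# denormalized-operand path
+#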
+ global _fatans_
+_fatans_:
+ link %a6,&-LOCAL_SIZE
+
+ movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
+ fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+ fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
+
+ fmov.l &0x0,%fpcr # zero FPCR
+
+#
+# copy, convert, and tag input argument
+#
+ fmov.s 0x8(%a6),%fp0 # load sgl input
+ fmov.x %fp0,FP_SRC(%a6)
+ lea FP_SRC(%a6),%a0
+ bsr.l tag # fetch operand type
+ mov.b %d0,STAG(%a6)
+ mov.b %d0,%d1
+
+ andi.l &0x00ff00ff,USER_FPSR(%a6)
+
+ clr.l %d0
+ mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
+
+ tst.b %d1
+ bne.b _L6_2s
+ bsr.l satan # operand is a NORM
+ bra.b _L6_6s
+_L6_2s:
+ cmpi.b %d1,&ZERO # is operand a ZERO?
+ bne.b _L6_3s # no
+ bsr.l src_zero # yes
+ bra.b _L6_6s
+_L6_3s:
+ cmpi.b %d1,&INF # is operand an INF?
+ bne.b _L6_4s # no
+ bsr.l spi_2 # yes
+ bra.b _L6_6s
+_L6_4s:
+ cmpi.b %d1,&QNAN # is operand a QNAN?
+ bne.b _L6_5s # no
+ bsr.l src_qnan # yes
+ bra.b _L6_6s
+_L6_5s:
+ bsr.l satand # operand is a DENORM
+_L6_6s:
+
+#
+# Result is now in FP0
+#
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+ fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
+ unlk %a6
+ rts
+
+ global _fatand_
+_fatand_:
+ link %a6,&-LOCAL_SIZE
+
+ movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
+ fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+ fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
+
+ fmov.l &0x0,%fpcr # zero FPCR
+
+#
+# copy, convert, and tag input argument
+#
+ fmov.d 0x8(%a6),%fp0 # load dbl input
+ fmov.x %fp0,FP_SRC(%a6)
+ lea FP_SRC(%a6),%a0
+ bsr.l tag # fetch operand type
+ mov.b %d0,STAG(%a6)
+ mov.b %d0,%d1
+
+ andi.l &0x00ff00ff,USER_FPSR(%a6)
+
+ clr.l %d0
+ mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
+
+ tst.b %d1
+ bne.b _L6_2d
+ bsr.l satan # operand is a NORM
+ bra.b _L6_6d
+_L6_2d:
+ cmpi.b %d1,&ZERO # is operand a ZERO?
+ bne.b _L6_3d # no
+ bsr.l src_zero # yes
+ bra.b _L6_6d
+_L6_3d:
+ cmpi.b %d1,&INF # is operand an INF?
+ bne.b _L6_4d # no
+ bsr.l spi_2 # yes
+ bra.b _L6_6d
+_L6_4d:
+ cmpi.b %d1,&QNAN # is operand a QNAN?
+ bne.b _L6_5d # no
+ bsr.l src_qnan # yes
+ bra.b _L6_6d
+_L6_5d:
+ bsr.l satand # operand is a DENORM
+_L6_6d:
+
+#
+# Result is now in FP0
+#
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+ fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
+ unlk %a6
+ rts
+
+ global _fatanx_
+_fatanx_:
+ link %a6,&-LOCAL_SIZE
+
+ movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
+ fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+ fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
+
+ fmov.l &0x0,%fpcr # zero FPCR
+
+#
+# copy, convert, and tag input argument
+#
+ lea FP_SRC(%a6),%a0
+ mov.l 0x8+0x0(%a6),0x0(%a0) # load ext input
+ mov.l 0x8+0x4(%a6),0x4(%a0)
+ mov.l 0x8+0x8(%a6),0x8(%a0)
+ bsr.l tag # fetch operand type
+ mov.b %d0,STAG(%a6)
+ mov.b %d0,%d1
+
+ andi.l &0x00ff00ff,USER_FPSR(%a6)
+
+ clr.l %d0
+ mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
+
+ tst.b %d1
+ bne.b _L6_2x
+ bsr.l satan # operand is a NORM
+ bra.b _L6_6x
+_L6_2x:
+ cmpi.b %d1,&ZERO # is operand a ZERO?
+ bne.b _L6_3x # no
+ bsr.l src_zero # yes
+ bra.b _L6_6x
+_L6_3x:
+ cmpi.b %d1,&INF # is operand an INF?
+ bne.b _L6_4x # no
+ bsr.l spi_2 # yes
+ bra.b _L6_6x
+_L6_4x:
+ cmpi.b %d1,&QNAN # is operand a QNAN?
+ bne.b _L6_5x # no
+ bsr.l src_qnan # yes
+ bra.b _L6_6x
+_L6_5x:
+ bsr.l satand # operand is a DENORM
+_L6_6x:
+
+#
+# Result is now in FP0
+#
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+ fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
+ unlk %a6
+ rts
+
+
+#########################################################################
+# MONADIC TEMPLATE #
+#########################################################################
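+#
+# asin(X) special cases:
+#	ZERO   -> src_zero	# asin(+-0) = +-0
+#	INF    -> t_operr	# an infinite operand is outside the domain |X| <= 1
+#	QNAN   -> src_qnan	# propagate the NaN operand
+#	DENORM -> sasind	# denormalized-operand path
+#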
+ global _fasins_
+_fasins_:
+ link %a6,&-LOCAL_SIZE
+
+ movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
+ fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+ fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
+
+ fmov.l &0x0,%fpcr # zero FPCR
+
+#
+# copy, convert, and tag input argument
+#
+ fmov.s 0x8(%a6),%fp0 # load sgl input
+ fmov.x %fp0,FP_SRC(%a6)
+ lea FP_SRC(%a6),%a0
+ bsr.l tag # fetch operand type
+ mov.b %d0,STAG(%a6)
+ mov.b %d0,%d1
+
+ andi.l &0x00ff00ff,USER_FPSR(%a6)
+
+ clr.l %d0
+ mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
+
+ tst.b %d1
+ bne.b _L7_2s
+ bsr.l sasin # operand is a NORM
+ bra.b _L7_6s
+_L7_2s:
+ cmpi.b %d1,&ZERO # is operand a ZERO?
+ bne.b _L7_3s # no
+ bsr.l src_zero # yes
+ bra.b _L7_6s
+_L7_3s:
+ cmpi.b %d1,&INF # is operand an INF?
+ bne.b _L7_4s # no
+ bsr.l t_operr # yes
+ bra.b _L7_6s
+_L7_4s:
+ cmpi.b %d1,&QNAN # is operand a QNAN?
+ bne.b _L7_5s # no
+ bsr.l src_qnan # yes
+ bra.b _L7_6s
+_L7_5s:
+ bsr.l sasind # operand is a DENORM
+_L7_6s:
+
+#
+# Result is now in FP0
+#
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+ fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
+ unlk %a6
+ rts
+
+ global _fasind_
+_fasind_:
+ link %a6,&-LOCAL_SIZE
+
+ movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
+ fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+ fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
+
+ fmov.l &0x0,%fpcr # zero FPCR
+
+#
+# copy, convert, and tag input argument
+#
+ fmov.d 0x8(%a6),%fp0 # load dbl input
+ fmov.x %fp0,FP_SRC(%a6)
+ lea FP_SRC(%a6),%a0
+ bsr.l tag # fetch operand type
+ mov.b %d0,STAG(%a6)
+ mov.b %d0,%d1
+
+ andi.l &0x00ff00ff,USER_FPSR(%a6)
+
+ clr.l %d0
+ mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
+
+ tst.b %d1
+ bne.b _L7_2d
+ bsr.l sasin # operand is a NORM
+ bra.b _L7_6d
+_L7_2d:
+ cmpi.b %d1,&ZERO # is operand a ZERO?
+ bne.b _L7_3d # no
+ bsr.l src_zero # yes
+ bra.b _L7_6d
+_L7_3d:
+ cmpi.b %d1,&INF # is operand an INF?
+ bne.b _L7_4d # no
+ bsr.l t_operr # yes
+ bra.b _L7_6d
+_L7_4d:
+ cmpi.b %d1,&QNAN # is operand a QNAN?
+ bne.b _L7_5d # no
+ bsr.l src_qnan # yes
+ bra.b _L7_6d
+_L7_5d:
+ bsr.l sasind # operand is a DENORM
+_L7_6d:
+
+#
+# Result is now in FP0
+#
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+ fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
+ unlk %a6
+ rts
+
+ global _fasinx_
+_fasinx_:
+ link %a6,&-LOCAL_SIZE
+
+ movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
+ fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+ fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
+
+ fmov.l &0x0,%fpcr # zero FPCR
+
+#
+# copy, convert, and tag input argument
+#
+ lea FP_SRC(%a6),%a0
+ mov.l 0x8+0x0(%a6),0x0(%a0) # load ext input
+ mov.l 0x8+0x4(%a6),0x4(%a0)
+ mov.l 0x8+0x8(%a6),0x8(%a0)
+ bsr.l tag # fetch operand type
+ mov.b %d0,STAG(%a6)
+ mov.b %d0,%d1
+
+ andi.l &0x00ff00ff,USER_FPSR(%a6)
+
+ clr.l %d0
+ mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
+
+ tst.b %d1
+ bne.b _L7_2x
+ bsr.l sasin # operand is a NORM
+ bra.b _L7_6x
+_L7_2x:
+ cmpi.b %d1,&ZERO # is operand a ZERO?
+ bne.b _L7_3x # no
+ bsr.l src_zero # yes
+ bra.b _L7_6x
+_L7_3x:
+ cmpi.b %d1,&INF # is operand an INF?
+ bne.b _L7_4x # no
+ bsr.l t_operr # yes
+ bra.b _L7_6x
+_L7_4x:
+ cmpi.b %d1,&QNAN # is operand a QNAN?
+ bne.b _L7_5x # no
+ bsr.l src_qnan # yes
+ bra.b _L7_6x
+_L7_5x:
+ bsr.l sasind # operand is a DENORM
+_L7_6x:
+
+#
+# Result is now in FP0
+#
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+ fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
+ unlk %a6
+ rts
+
+
+#########################################################################
+# MONADIC TEMPLATE #
+#########################################################################
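+#
+# atanh(X) special cases:
+#	ZERO   -> src_zero	# atanh(+-0) = +-0
+#	INF    -> t_operr	# an infinite operand is outside the domain
+#	QNAN   -> src_qnan	# propagate the NaN operand
+#	DENORM -> satanhd	# denormalized-operand path
+#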
+ global _fatanhs_
+_fatanhs_:
+ link %a6,&-LOCAL_SIZE
+
+ movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
+ fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+ fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
+
+ fmov.l &0x0,%fpcr # zero FPCR
+
+#
+# copy, convert, and tag input argument
+#
+ fmov.s 0x8(%a6),%fp0 # load sgl input
+ fmov.x %fp0,FP_SRC(%a6)
+ lea FP_SRC(%a6),%a0
+ bsr.l tag # fetch operand type
+ mov.b %d0,STAG(%a6)
+ mov.b %d0,%d1
+
+ andi.l &0x00ff00ff,USER_FPSR(%a6)
+
+ clr.l %d0
+ mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
+
+ tst.b %d1
+ bne.b _L8_2s
+ bsr.l satanh # operand is a NORM
+ bra.b _L8_6s
+_L8_2s:
+ cmpi.b %d1,&ZERO # is operand a ZERO?
+ bne.b _L8_3s # no
+ bsr.l src_zero # yes
+ bra.b _L8_6s
+_L8_3s:
+ cmpi.b %d1,&INF # is operand an INF?
+ bne.b _L8_4s # no
+ bsr.l t_operr # yes
+ bra.b _L8_6s
+_L8_4s:
+ cmpi.b %d1,&QNAN # is operand a QNAN?
+ bne.b _L8_5s # no
+ bsr.l src_qnan # yes
+ bra.b _L8_6s
+_L8_5s:
+ bsr.l satanhd # operand is a DENORM
+_L8_6s:
+
+#
+# Result is now in FP0
+#
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+ fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
+ unlk %a6
+ rts
+
+ global _fatanhd_
+_fatanhd_:
+ link %a6,&-LOCAL_SIZE
+
+ movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
+ fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+ fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
+
+ fmov.l &0x0,%fpcr # zero FPCR
+
+#
+# copy, convert, and tag input argument
+#
+ fmov.d 0x8(%a6),%fp0 # load dbl input
+ fmov.x %fp0,FP_SRC(%a6)
+ lea FP_SRC(%a6),%a0
+ bsr.l tag # fetch operand type
+ mov.b %d0,STAG(%a6)
+ mov.b %d0,%d1
+
+ andi.l &0x00ff00ff,USER_FPSR(%a6)
+
+ clr.l %d0
+ mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
+
+ tst.b %d1
+ bne.b _L8_2d
+ bsr.l satanh # operand is a NORM
+ bra.b _L8_6d
+_L8_2d:
+ cmpi.b %d1,&ZERO # is operand a ZERO?
+ bne.b _L8_3d # no
+ bsr.l src_zero # yes
+ bra.b _L8_6d
+_L8_3d:
+ cmpi.b %d1,&INF # is operand an INF?
+ bne.b _L8_4d # no
+ bsr.l t_operr # yes
+ bra.b _L8_6d
+_L8_4d:
+ cmpi.b %d1,&QNAN # is operand a QNAN?
+ bne.b _L8_5d # no
+ bsr.l src_qnan # yes
+ bra.b _L8_6d
+_L8_5d:
+ bsr.l satanhd # operand is a DENORM
+_L8_6d:
+
+#
+# Result is now in FP0
+#
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+ fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
+ unlk %a6
+ rts
+
+ global _fatanhx_
+_fatanhx_:
+ link %a6,&-LOCAL_SIZE
+
+ movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
+ fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+ fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
+
+ fmov.l &0x0,%fpcr # zero FPCR
+
+#
+# copy, convert, and tag input argument
+#
+ lea FP_SRC(%a6),%a0
+ mov.l 0x8+0x0(%a6),0x0(%a0) # load ext input
+ mov.l 0x8+0x4(%a6),0x4(%a0)
+ mov.l 0x8+0x8(%a6),0x8(%a0)
+ bsr.l tag # fetch operand type
+ mov.b %d0,STAG(%a6)
+ mov.b %d0,%d1
+
+ andi.l &0x00ff00ff,USER_FPSR(%a6)
+
+ clr.l %d0
+ mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
+
+ tst.b %d1
+ bne.b _L8_2x
+ bsr.l satanh # operand is a NORM
+ bra.b _L8_6x
+_L8_2x:
+ cmpi.b %d1,&ZERO # is operand a ZERO?
+ bne.b _L8_3x # no
+ bsr.l src_zero # yes
+ bra.b _L8_6x
+_L8_3x:
+ cmpi.b %d1,&INF # is operand an INF?
+ bne.b _L8_4x # no
+ bsr.l t_operr # yes
+ bra.b _L8_6x
+_L8_4x:
+ cmpi.b %d1,&QNAN # is operand a QNAN?
+ bne.b _L8_5x # no
+ bsr.l src_qnan # yes
+ bra.b _L8_6x
+_L8_5x:
+ bsr.l satanhd # operand is a DENORM
+_L8_6x:
+
+#
+# Result is now in FP0
+#
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+ fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
+ unlk %a6
+ rts
+
+
+#########################################################################
+# MONADIC TEMPLATE #
+#########################################################################
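+#
+# tan(X) special cases:
+#	ZERO   -> src_zero	# tan(+-0) = +-0
+#	INF    -> t_operr	# tan(+-inf) is undefined (operand error)
+#	QNAN   -> src_qnan	# propagate the NaN operand
+#	DENORM -> stand		# denormalized-operand path
+#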
+ global _ftans_
+_ftans_:
+ link %a6,&-LOCAL_SIZE
+
+ movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
+ fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+ fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
+
+ fmov.l &0x0,%fpcr # zero FPCR
+
+#
+# copy, convert, and tag input argument
+#
+ fmov.s 0x8(%a6),%fp0 # load sgl input
+ fmov.x %fp0,FP_SRC(%a6)
+ lea FP_SRC(%a6),%a0
+ bsr.l tag # fetch operand type
+ mov.b %d0,STAG(%a6)
+ mov.b %d0,%d1
+
+ andi.l &0x00ff00ff,USER_FPSR(%a6)
+
+ clr.l %d0
+ mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
+
+ tst.b %d1
+ bne.b _L9_2s
+ bsr.l stan # operand is a NORM
+ bra.b _L9_6s
+_L9_2s:
+ cmpi.b %d1,&ZERO # is operand a ZERO?
+ bne.b _L9_3s # no
+ bsr.l src_zero # yes
+ bra.b _L9_6s
+_L9_3s:
+ cmpi.b %d1,&INF # is operand an INF?
+ bne.b _L9_4s # no
+ bsr.l t_operr # yes
+ bra.b _L9_6s
+_L9_4s:
+ cmpi.b %d1,&QNAN # is operand a QNAN?
+ bne.b _L9_5s # no
+ bsr.l src_qnan # yes
+ bra.b _L9_6s
+_L9_5s:
+ bsr.l stand # operand is a DENORM
+_L9_6s:
+
+#
+# Result is now in FP0
+#
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+ fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
+ unlk %a6
+ rts
+
+ global _ftand_
+_ftand_:
+ link %a6,&-LOCAL_SIZE
+
+ movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
+ fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+ fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
+
+ fmov.l &0x0,%fpcr # zero FPCR
+
+#
+# copy, convert, and tag input argument
+#
+ fmov.d 0x8(%a6),%fp0 # load dbl input
+ fmov.x %fp0,FP_SRC(%a6)
+ lea FP_SRC(%a6),%a0
+ bsr.l tag # fetch operand type
+ mov.b %d0,STAG(%a6)
+ mov.b %d0,%d1
+
+ andi.l &0x00ff00ff,USER_FPSR(%a6)
+
+ clr.l %d0
+ mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
+
+ tst.b %d1
+ bne.b _L9_2d
+ bsr.l stan # operand is a NORM
+ bra.b _L9_6d
+_L9_2d:
+ cmpi.b %d1,&ZERO # is operand a ZERO?
+ bne.b _L9_3d # no
+ bsr.l src_zero # yes
+ bra.b _L9_6d
+_L9_3d:
+ cmpi.b %d1,&INF # is operand an INF?
+ bne.b _L9_4d # no
+ bsr.l t_operr # yes
+ bra.b _L9_6d
+_L9_4d:
+ cmpi.b %d1,&QNAN # is operand a QNAN?
+ bne.b _L9_5d # no
+ bsr.l src_qnan # yes
+ bra.b _L9_6d
+_L9_5d:
+ bsr.l stand # operand is a DENORM
+_L9_6d:
+
+#
+# Result is now in FP0
+#
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+ fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
+ unlk %a6
+ rts
+
+ global _ftanx_
+_ftanx_:
+ link %a6,&-LOCAL_SIZE
+
+ movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
+ fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+ fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
+
+ fmov.l &0x0,%fpcr # zero FPCR
+
+#
+# copy, convert, and tag input argument
+#
+ lea FP_SRC(%a6),%a0
+ mov.l 0x8+0x0(%a6),0x0(%a0) # load ext input
+ mov.l 0x8+0x4(%a6),0x4(%a0)
+ mov.l 0x8+0x8(%a6),0x8(%a0)
+ bsr.l tag # fetch operand type
+ mov.b %d0,STAG(%a6)
+ mov.b %d0,%d1
+
+ andi.l &0x00ff00ff,USER_FPSR(%a6)
+
+ clr.l %d0
+ mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
+
+ tst.b %d1
+ bne.b _L9_2x
+ bsr.l stan # operand is a NORM
+ bra.b _L9_6x
+_L9_2x:
+ cmpi.b %d1,&ZERO # is operand a ZERO?
+ bne.b _L9_3x # no
+ bsr.l src_zero # yes
+ bra.b _L9_6x
+_L9_3x:
+ cmpi.b %d1,&INF # is operand an INF?
+ bne.b _L9_4x # no
+ bsr.l t_operr # yes
+ bra.b _L9_6x
+_L9_4x:
+ cmpi.b %d1,&QNAN # is operand a QNAN?
+ bne.b _L9_5x # no
+ bsr.l src_qnan # yes
+ bra.b _L9_6x
+_L9_5x:
+ bsr.l stand # operand is a DENORM
+_L9_6x:
+
+#
+# Result is now in FP0
+#
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+ fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
+ unlk %a6
+ rts
+
+
+#########################################################################
+# MONADIC TEMPLATE #
+#########################################################################
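+#
+# e^X special cases:
+#	ZERO   -> ld_pone	# e^(+-0) = +1
+#	INF    -> szr_inf	# e^(-inf) = +0; e^(+inf) = +inf
+#	QNAN   -> src_qnan	# propagate the NaN operand
+#	DENORM -> setoxd	# denormalized-operand path
+#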
+ global _fetoxs_
+_fetoxs_:
+ link %a6,&-LOCAL_SIZE
+
+ movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
+ fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+ fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
+
+ fmov.l &0x0,%fpcr # zero FPCR
+
+#
+# copy, convert, and tag input argument
+#
+ fmov.s 0x8(%a6),%fp0 # load sgl input
+ fmov.x %fp0,FP_SRC(%a6)
+ lea FP_SRC(%a6),%a0
+ bsr.l tag # fetch operand type
+ mov.b %d0,STAG(%a6)
+ mov.b %d0,%d1
+
+ andi.l &0x00ff00ff,USER_FPSR(%a6)
+
+ clr.l %d0
+ mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
+
+ tst.b %d1
+ bne.b _L10_2s
+ bsr.l setox # operand is a NORM
+ bra.b _L10_6s
+_L10_2s:
+ cmpi.b %d1,&ZERO # is operand a ZERO?
+ bne.b _L10_3s # no
+ bsr.l ld_pone # yes
+ bra.b _L10_6s
+_L10_3s:
+ cmpi.b %d1,&INF # is operand an INF?
+ bne.b _L10_4s # no
+ bsr.l szr_inf # yes
+ bra.b _L10_6s
+_L10_4s:
+ cmpi.b %d1,&QNAN # is operand a QNAN?
+ bne.b _L10_5s # no
+ bsr.l src_qnan # yes
+ bra.b _L10_6s
+_L10_5s:
+ bsr.l setoxd # operand is a DENORM
+_L10_6s:
+
+#
+# Result is now in FP0
+#
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+ fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
+ unlk %a6
+ rts
+
+ global _fetoxd_
+_fetoxd_:
+ link %a6,&-LOCAL_SIZE
+
+ movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
+ fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+ fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
+
+ fmov.l &0x0,%fpcr # zero FPCR
+
+#
+# copy, convert, and tag input argument
+#
+ fmov.d 0x8(%a6),%fp0 # load dbl input
+ fmov.x %fp0,FP_SRC(%a6)
+ lea FP_SRC(%a6),%a0
+ bsr.l tag # fetch operand type
+ mov.b %d0,STAG(%a6)
+ mov.b %d0,%d1
+
+ andi.l &0x00ff00ff,USER_FPSR(%a6)
+
+ clr.l %d0
+ mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
+
+ tst.b %d1
+ bne.b _L10_2d
+ bsr.l setox # operand is a NORM
+ bra.b _L10_6d
+_L10_2d:
+ cmpi.b %d1,&ZERO # is operand a ZERO?
+ bne.b _L10_3d # no
+ bsr.l ld_pone # yes
+ bra.b _L10_6d
+_L10_3d:
+ cmpi.b %d1,&INF # is operand an INF?
+ bne.b _L10_4d # no
+ bsr.l szr_inf # yes
+ bra.b _L10_6d
+_L10_4d:
+ cmpi.b %d1,&QNAN # is operand a QNAN?
+ bne.b _L10_5d # no
+ bsr.l src_qnan # yes
+ bra.b _L10_6d
+_L10_5d:
+ bsr.l setoxd # operand is a DENORM
+_L10_6d:
+
+#
+# Result is now in FP0
+#
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+ fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
+ unlk %a6
+ rts
+
+ global _fetoxx_
+_fetoxx_:
+ link %a6,&-LOCAL_SIZE
+
+ movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
+ fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+ fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
+
+ fmov.l &0x0,%fpcr # zero FPCR
+
+#
+# copy, convert, and tag input argument
+#
+ lea FP_SRC(%a6),%a0
+ mov.l 0x8+0x0(%a6),0x0(%a0) # load ext input
+ mov.l 0x8+0x4(%a6),0x4(%a0)
+ mov.l 0x8+0x8(%a6),0x8(%a0)
+ bsr.l tag # fetch operand type
+ mov.b %d0,STAG(%a6)
+ mov.b %d0,%d1
+
+ andi.l &0x00ff00ff,USER_FPSR(%a6)
+
+ clr.l %d0
+ mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
+
+ tst.b %d1
+ bne.b _L10_2x
+ bsr.l setox # operand is a NORM
+ bra.b _L10_6x
+_L10_2x:
+ cmpi.b %d1,&ZERO # is operand a ZERO?
+ bne.b _L10_3x # no
+ bsr.l ld_pone # yes
+ bra.b _L10_6x
+_L10_3x:
+ cmpi.b %d1,&INF # is operand an INF?
+ bne.b _L10_4x # no
+ bsr.l szr_inf # yes
+ bra.b _L10_6x
+_L10_4x:
+ cmpi.b %d1,&QNAN # is operand a QNAN?
+ bne.b _L10_5x # no
+ bsr.l src_qnan # yes
+ bra.b _L10_6x
+_L10_5x:
+ bsr.l setoxd # operand is a DENORM
+_L10_6x:
+
+#
+# Result is now in FP0
+#
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+ fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
+ unlk %a6
+ rts
+
+
+#########################################################################
+# MONADIC TEMPLATE #
+#########################################################################
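+#
+# 2^X special cases:
+#	ZERO   -> ld_pone	# 2^(+-0) = +1
+#	INF    -> szr_inf	# 2^(-inf) = +0; 2^(+inf) = +inf
+#	QNAN   -> src_qnan	# propagate the NaN operand
+#	DENORM -> stwotoxd	# denormalized-operand path
+#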
+ global _ftwotoxs_
+_ftwotoxs_:
+ link %a6,&-LOCAL_SIZE
+
+ movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
+ fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+ fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
+
+ fmov.l &0x0,%fpcr # zero FPCR
+
+#
+# copy, convert, and tag input argument
+#
+ fmov.s 0x8(%a6),%fp0 # load sgl input
+ fmov.x %fp0,FP_SRC(%a6)
+ lea FP_SRC(%a6),%a0
+ bsr.l tag # fetch operand type
+ mov.b %d0,STAG(%a6)
+ mov.b %d0,%d1
+
+ andi.l &0x00ff00ff,USER_FPSR(%a6)
+
+ clr.l %d0
+ mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
+
+ tst.b %d1
+ bne.b _L11_2s
+ bsr.l stwotox # operand is a NORM
+ bra.b _L11_6s
+_L11_2s:
+ cmpi.b %d1,&ZERO # is operand a ZERO?
+ bne.b _L11_3s # no
+ bsr.l ld_pone # yes
+ bra.b _L11_6s
+_L11_3s:
+ cmpi.b %d1,&INF # is operand an INF?
+ bne.b _L11_4s # no
+ bsr.l szr_inf # yes
+ bra.b _L11_6s
+_L11_4s:
+ cmpi.b %d1,&QNAN # is operand a QNAN?
+ bne.b _L11_5s # no
+ bsr.l src_qnan # yes
+ bra.b _L11_6s
+_L11_5s:
+ bsr.l stwotoxd # operand is a DENORM
+_L11_6s:
+
+#
+# Result is now in FP0
+#
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+ fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
+ unlk %a6
+ rts
+
+ global _ftwotoxd_
+_ftwotoxd_:
+ link %a6,&-LOCAL_SIZE
+
+ movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
+ fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+ fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
+
+ fmov.l &0x0,%fpcr # zero FPCR
+
+#
+# copy, convert, and tag input argument
+#
+ fmov.d 0x8(%a6),%fp0 # load dbl input
+ fmov.x %fp0,FP_SRC(%a6)
+ lea FP_SRC(%a6),%a0
+ bsr.l tag # fetch operand type
+ mov.b %d0,STAG(%a6)
+ mov.b %d0,%d1
+
+ andi.l &0x00ff00ff,USER_FPSR(%a6)
+
+ clr.l %d0
+ mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
+
+ tst.b %d1
+ bne.b _L11_2d
+ bsr.l stwotox # operand is a NORM
+ bra.b _L11_6d
+_L11_2d:
+ cmpi.b %d1,&ZERO # is operand a ZERO?
+ bne.b _L11_3d # no
+ bsr.l ld_pone # yes
+ bra.b _L11_6d
+_L11_3d:
+ cmpi.b %d1,&INF # is operand an INF?
+ bne.b _L11_4d # no
+ bsr.l szr_inf # yes
+ bra.b _L11_6d
+_L11_4d:
+ cmpi.b %d1,&QNAN # is operand a QNAN?
+ bne.b _L11_5d # no
+ bsr.l src_qnan # yes
+ bra.b _L11_6d
+_L11_5d:
+ bsr.l stwotoxd # operand is a DENORM
+_L11_6d:
+
+#
+# Result is now in FP0
+#
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+ fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
+ unlk %a6
+ rts
+
+ global _ftwotoxx_
+_ftwotoxx_:
+ link %a6,&-LOCAL_SIZE
+
+ movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
+ fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+ fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
+
+ fmov.l &0x0,%fpcr # zero FPCR
+
+#
+# copy, convert, and tag input argument
+#
+ lea FP_SRC(%a6),%a0
+ mov.l 0x8+0x0(%a6),0x0(%a0) # load ext input
+ mov.l 0x8+0x4(%a6),0x4(%a0)
+ mov.l 0x8+0x8(%a6),0x8(%a0)
+ bsr.l tag # fetch operand type
+ mov.b %d0,STAG(%a6)
+ mov.b %d0,%d1
+
+ andi.l &0x00ff00ff,USER_FPSR(%a6)
+
+ clr.l %d0
+ mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
+
+ tst.b %d1
+ bne.b _L11_2x
+ bsr.l stwotox # operand is a NORM
+ bra.b _L11_6x
+_L11_2x:
+ cmpi.b %d1,&ZERO # is operand a ZERO?
+ bne.b _L11_3x # no
+ bsr.l ld_pone # yes
+ bra.b _L11_6x
+_L11_3x:
+ cmpi.b %d1,&INF # is operand an INF?
+ bne.b _L11_4x # no
+ bsr.l szr_inf # yes
+ bra.b _L11_6x
+_L11_4x:
+ cmpi.b %d1,&QNAN # is operand a QNAN?
+ bne.b _L11_5x # no
+ bsr.l src_qnan # yes
+ bra.b _L11_6x
+_L11_5x:
+ bsr.l stwotoxd # operand is a DENORM
+_L11_6x:
+
+#
+# Result is now in FP0
+#
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+ fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
+ unlk %a6
+ rts
+
+
+#########################################################################
+# MONADIC TEMPLATE #
+#########################################################################
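+#
+# 10^X special cases:
+#	ZERO   -> ld_pone	# 10^(+-0) = +1
+#	INF    -> szr_inf	# 10^(-inf) = +0; 10^(+inf) = +inf
+#	QNAN   -> src_qnan	# propagate the NaN operand
+#	DENORM -> stentoxd	# denormalized-operand path
+#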
+ global _ftentoxs_
+_ftentoxs_:
+ link %a6,&-LOCAL_SIZE
+
+ movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
+ fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+ fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
+
+ fmov.l &0x0,%fpcr # zero FPCR
+
+#
+# copy, convert, and tag input argument
+#
+ fmov.s 0x8(%a6),%fp0 # load sgl input
+ fmov.x %fp0,FP_SRC(%a6)
+ lea FP_SRC(%a6),%a0
+ bsr.l tag # fetch operand type
+ mov.b %d0,STAG(%a6)
+ mov.b %d0,%d1
+
+ andi.l &0x00ff00ff,USER_FPSR(%a6)
+
+ clr.l %d0
+ mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
+
+ tst.b %d1
+ bne.b _L12_2s
+ bsr.l stentox # operand is a NORM
+ bra.b _L12_6s
+_L12_2s:
+ cmpi.b %d1,&ZERO # is operand a ZERO?
+ bne.b _L12_3s # no
+ bsr.l ld_pone # yes
+ bra.b _L12_6s
+_L12_3s:
+ cmpi.b %d1,&INF # is operand an INF?
+ bne.b _L12_4s # no
+ bsr.l szr_inf # yes
+ bra.b _L12_6s
+_L12_4s:
+ cmpi.b %d1,&QNAN # is operand a QNAN?
+ bne.b _L12_5s # no
+ bsr.l src_qnan # yes
+ bra.b _L12_6s
+_L12_5s:
+ bsr.l stentoxd # operand is a DENORM
+_L12_6s:
+
+#
+# Result is now in FP0
+#
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+ fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
+ unlk %a6
+ rts
+
+ global _ftentoxd_
+_ftentoxd_:
+ link %a6,&-LOCAL_SIZE
+
+ movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
+ fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+ fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
+
+ fmov.l &0x0,%fpcr # zero FPCR
+
+#
+# copy, convert, and tag input argument
+#
+ fmov.d 0x8(%a6),%fp0 # load dbl input
+ fmov.x %fp0,FP_SRC(%a6)
+ lea FP_SRC(%a6),%a0
+ bsr.l tag # fetch operand type
+ mov.b %d0,STAG(%a6)
+ mov.b %d0,%d1
+
+ andi.l &0x00ff00ff,USER_FPSR(%a6)
+
+ clr.l %d0
+ mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
+
+ tst.b %d1
+ bne.b _L12_2d
+ bsr.l stentox # operand is a NORM
+ bra.b _L12_6d
+_L12_2d:
+ cmpi.b %d1,&ZERO # is operand a ZERO?
+ bne.b _L12_3d # no
+ bsr.l ld_pone # yes
+ bra.b _L12_6d
+_L12_3d:
+ cmpi.b %d1,&INF # is operand an INF?
+ bne.b _L12_4d # no
+ bsr.l szr_inf # yes
+ bra.b _L12_6d
+_L12_4d:
+ cmpi.b %d1,&QNAN # is operand a QNAN?
+ bne.b _L12_5d # no
+ bsr.l src_qnan # yes
+ bra.b _L12_6d
+_L12_5d:
+ bsr.l stentoxd # operand is a DENORM
+_L12_6d:
+
+#
+# Result is now in FP0
+#
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+ fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
+ unlk %a6
+ rts
+
+ global _ftentoxx_
+_ftentoxx_:
+ link %a6,&-LOCAL_SIZE
+
+ movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
+ fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+ fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
+
+ fmov.l &0x0,%fpcr # zero FPCR
+
+#
+# copy, convert, and tag input argument
+#
+ lea FP_SRC(%a6),%a0
+ mov.l 0x8+0x0(%a6),0x0(%a0) # load ext input
+ mov.l 0x8+0x4(%a6),0x4(%a0)
+ mov.l 0x8+0x8(%a6),0x8(%a0)
+ bsr.l tag # fetch operand type
+ mov.b %d0,STAG(%a6)
+ mov.b %d0,%d1
+
+ andi.l &0x00ff00ff,USER_FPSR(%a6)
+
+ clr.l %d0
+ mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
+
+ tst.b %d1
+ bne.b _L12_2x
+ bsr.l stentox # operand is a NORM
+ bra.b _L12_6x
+_L12_2x:
+ cmpi.b %d1,&ZERO # is operand a ZERO?
+ bne.b _L12_3x # no
+ bsr.l ld_pone # yes
+ bra.b _L12_6x
+_L12_3x:
+ cmpi.b %d1,&INF # is operand an INF?
+ bne.b _L12_4x # no
+ bsr.l szr_inf # yes
+ bra.b _L12_6x
+_L12_4x:
+ cmpi.b %d1,&QNAN # is operand a QNAN?
+ bne.b _L12_5x # no
+ bsr.l src_qnan # yes
+ bra.b _L12_6x
+_L12_5x:
+ bsr.l stentoxd # operand is a DENORM
+_L12_6x:
+
+#
+# Result is now in FP0
+#
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+ fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
+ unlk %a6
+ rts
+
+
+#########################################################################
+# MONADIC TEMPLATE #
+#########################################################################
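+#
+# log(X) special cases:
+#	ZERO   -> t_dz2		# log(+-0) = -inf, with a divide-by-zero exception
+#	INF    -> sopr_inf	# log(+inf) = +inf; log(-inf) is an operand error
+#	QNAN   -> src_qnan	# propagate the NaN operand
+#	DENORM -> slognd	# denormalized-operand path
+#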
+ global _flogns_
+_flogns_:
+ link %a6,&-LOCAL_SIZE
+
+ movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
+ fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+ fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
+
+ fmov.l &0x0,%fpcr # zero FPCR
+
+#
+# copy, convert, and tag input argument
+#
+ fmov.s 0x8(%a6),%fp0 # load sgl input
+ fmov.x %fp0,FP_SRC(%a6)
+ lea FP_SRC(%a6),%a0
+ bsr.l tag # fetch operand type
+ mov.b %d0,STAG(%a6)
+ mov.b %d0,%d1
+
+ andi.l &0x00ff00ff,USER_FPSR(%a6)
+
+ clr.l %d0
+ mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
+
+ tst.b %d1
+ bne.b _L13_2s
+ bsr.l slogn # operand is a NORM
+ bra.b _L13_6s
+_L13_2s:
+ cmpi.b %d1,&ZERO # is operand a ZERO?
+ bne.b _L13_3s # no
+ bsr.l t_dz2 # yes
+ bra.b _L13_6s
+_L13_3s:
+ cmpi.b %d1,&INF # is operand an INF?
+ bne.b _L13_4s # no
+ bsr.l sopr_inf # yes
+ bra.b _L13_6s
+_L13_4s:
+ cmpi.b %d1,&QNAN # is operand a QNAN?
+ bne.b _L13_5s # no
+ bsr.l src_qnan # yes
+ bra.b _L13_6s
+_L13_5s:
+ bsr.l slognd # operand is a DENORM
+_L13_6s:
+
+#
+# Result is now in FP0
+#
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+ fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
+ unlk %a6
+ rts
+
+ global _flognd_
+_flognd_:
+ link %a6,&-LOCAL_SIZE
+
+ movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
+ fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+ fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
+
+ fmov.l &0x0,%fpcr # zero FPCR
+
+#
+# copy, convert, and tag input argument
+#
+ fmov.d 0x8(%a6),%fp0 # load dbl input
+ fmov.x %fp0,FP_SRC(%a6)
+ lea FP_SRC(%a6),%a0
+ bsr.l tag # fetch operand type
+ mov.b %d0,STAG(%a6)
+ mov.b %d0,%d1
+
+ andi.l &0x00ff00ff,USER_FPSR(%a6)
+
+ clr.l %d0
+ mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
+
+ tst.b %d1
+ bne.b _L13_2d
+ bsr.l slogn # operand is a NORM
+ bra.b _L13_6d
+_L13_2d:
+ cmpi.b %d1,&ZERO # is operand a ZERO?
+ bne.b _L13_3d # no
+ bsr.l t_dz2 # yes
+ bra.b _L13_6d
+_L13_3d:
+ cmpi.b %d1,&INF # is operand an INF?
+ bne.b _L13_4d # no
+ bsr.l sopr_inf # yes
+ bra.b _L13_6d
+_L13_4d:
+ cmpi.b %d1,&QNAN # is operand a QNAN?
+ bne.b _L13_5d # no
+ bsr.l src_qnan # yes
+ bra.b _L13_6d
+_L13_5d:
+ bsr.l slognd # operand is a DENORM
+_L13_6d:
+
+#
+# Result is now in FP0
+#
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+ fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
+ unlk %a6
+ rts
+
+ global _flognx_
+_flognx_:
+ link %a6,&-LOCAL_SIZE
+
+ movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
+ fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+ fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
+
+ fmov.l &0x0,%fpcr # zero FPCR
+
+#
+# copy, convert, and tag input argument
+#
+ lea FP_SRC(%a6),%a0
+ mov.l 0x8+0x0(%a6),0x0(%a0) # load ext input
+ mov.l 0x8+0x4(%a6),0x4(%a0)
+ mov.l 0x8+0x8(%a6),0x8(%a0)
+ bsr.l tag # fetch operand type
+ mov.b %d0,STAG(%a6)
+ mov.b %d0,%d1
+
+ andi.l &0x00ff00ff,USER_FPSR(%a6)
+
+ clr.l %d0
+ mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
+
+ tst.b %d1
+ bne.b _L13_2x
+ bsr.l slogn # operand is a NORM
+ bra.b _L13_6x
+_L13_2x:
+ cmpi.b %d1,&ZERO # is operand a ZERO?
+ bne.b _L13_3x # no
+ bsr.l t_dz2 # yes
+ bra.b _L13_6x
+_L13_3x:
+ cmpi.b %d1,&INF # is operand an INF?
+ bne.b _L13_4x # no
+ bsr.l sopr_inf # yes
+ bra.b _L13_6x
+_L13_4x:
+ cmpi.b %d1,&QNAN # is operand a QNAN?
+ bne.b _L13_5x # no
+ bsr.l src_qnan # yes
+ bra.b _L13_6x
+_L13_5x:
+ bsr.l slognd # operand is a DENORM
+_L13_6x:
+
+#
+# Result is now in FP0
+#
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+ fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
+ unlk %a6
+ rts
+
+
+#########################################################################
+# MONADIC TEMPLATE #
+#########################################################################
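+#
+# log10(X) special cases:
+#	ZERO   -> t_dz2		# log10(+-0) = -inf, with a divide-by-zero exception
+#	INF    -> sopr_inf	# log10(+inf) = +inf; log10(-inf) is an operand error
+#	QNAN   -> src_qnan	# propagate the NaN operand
+#	DENORM -> slog10d	# denormalized-operand path
+#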
+ global _flog10s_
+_flog10s_:
+ link %a6,&-LOCAL_SIZE
+
+ movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
+ fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+ fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
+
+ fmov.l &0x0,%fpcr # zero FPCR
+
+#
+# copy, convert, and tag input argument
+#
+ fmov.s 0x8(%a6),%fp0 # load sgl input
+ fmov.x %fp0,FP_SRC(%a6)
+ lea FP_SRC(%a6),%a0
+ bsr.l tag # fetch operand type
+ mov.b %d0,STAG(%a6)
+ mov.b %d0,%d1
+
+ andi.l &0x00ff00ff,USER_FPSR(%a6)
+
+ clr.l %d0
+ mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
+
+ tst.b %d1
+ bne.b _L14_2s
+ bsr.l slog10 # operand is a NORM
+ bra.b _L14_6s
+_L14_2s:
+ cmpi.b %d1,&ZERO # is operand a ZERO?
+ bne.b _L14_3s # no
+ bsr.l t_dz2 # yes
+ bra.b _L14_6s
+_L14_3s:
+ cmpi.b %d1,&INF # is operand an INF?
+ bne.b _L14_4s # no
+ bsr.l sopr_inf # yes
+ bra.b _L14_6s
+_L14_4s:
+ cmpi.b %d1,&QNAN # is operand a QNAN?
+ bne.b _L14_5s # no
+ bsr.l src_qnan # yes
+ bra.b _L14_6s
+_L14_5s:
+ bsr.l slog10d # operand is a DENORM
+_L14_6s:
+
+#
+# Result is now in FP0
+#
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+ fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
+ unlk %a6
+ rts
+
+ global _flog10d_
+_flog10d_:
+ link %a6,&-LOCAL_SIZE
+
+ movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
+ fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+ fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
+
+ fmov.l &0x0,%fpcr # zero FPCR
+
+#
+# copy, convert, and tag input argument
+#
+ fmov.d 0x8(%a6),%fp0 # load dbl input
+ fmov.x %fp0,FP_SRC(%a6)
+ lea FP_SRC(%a6),%a0
+ bsr.l tag # fetch operand type
+ mov.b %d0,STAG(%a6)
+ mov.b %d0,%d1
+
+ andi.l &0x00ff00ff,USER_FPSR(%a6)
+
+ clr.l %d0
+ mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
+
+ tst.b %d1
+ bne.b _L14_2d
+ bsr.l slog10 # operand is a NORM
+ bra.b _L14_6d
+_L14_2d:
+ cmpi.b %d1,&ZERO # is operand a ZERO?
+ bne.b _L14_3d # no
+ bsr.l t_dz2 # yes
+ bra.b _L14_6d
+_L14_3d:
+ cmpi.b %d1,&INF # is operand an INF?
+ bne.b _L14_4d # no
+ bsr.l sopr_inf # yes
+ bra.b _L14_6d
+_L14_4d:
+ cmpi.b %d1,&QNAN # is operand a QNAN?
+ bne.b _L14_5d # no
+ bsr.l src_qnan # yes
+ bra.b _L14_6d
+_L14_5d:
+ bsr.l slog10d # operand is a DENORM
+_L14_6d:
+
+#
+# Result is now in FP0
+#
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+ fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
+ unlk %a6
+ rts
+
+ global _flog10x_
+_flog10x_:
+ link %a6,&-LOCAL_SIZE
+
+ movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
+ fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+ fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
+
+ fmov.l &0x0,%fpcr # zero FPCR
+
+#
+# copy, convert, and tag input argument
+#
+ lea FP_SRC(%a6),%a0
+ mov.l 0x8+0x0(%a6),0x0(%a0) # load ext input
+ mov.l 0x8+0x4(%a6),0x4(%a0)
+ mov.l 0x8+0x8(%a6),0x8(%a0)
+ bsr.l tag # fetch operand type
+ mov.b %d0,STAG(%a6)
+ mov.b %d0,%d1
+
+ andi.l &0x00ff00ff,USER_FPSR(%a6)
+
+ clr.l %d0
+ mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
+
+ tst.b %d1
+ bne.b _L14_2x
+ bsr.l slog10 # operand is a NORM
+ bra.b _L14_6x
+_L14_2x:
+ cmpi.b %d1,&ZERO # is operand a ZERO?
+ bne.b _L14_3x # no
+ bsr.l t_dz2 # yes
+ bra.b _L14_6x
+_L14_3x:
+ cmpi.b %d1,&INF # is operand an INF?
+ bne.b _L14_4x # no
+ bsr.l sopr_inf # yes
+ bra.b _L14_6x
+_L14_4x:
+ cmpi.b %d1,&QNAN # is operand a QNAN?
+ bne.b _L14_5x # no
+ bsr.l src_qnan # yes
+ bra.b _L14_6x
+_L14_5x:
+ bsr.l slog10d # operand is a DENORM
+_L14_6x:
+
+#
+# Result is now in FP0
+#
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+ fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
+ unlk %a6
+ rts
+
+
+#########################################################################
+# MONADIC TEMPLATE #
+#########################################################################
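+#
+# log2(X) special cases:
+#	ZERO   -> t_dz2		# log2(+-0) = -inf, with a divide-by-zero exception
+#	INF    -> sopr_inf	# log2(+inf) = +inf; log2(-inf) is an operand error
+#	QNAN   -> src_qnan	# propagate the NaN operand
+#	DENORM -> slog2d	# denormalized-operand path
+#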
+ global _flog2s_
+_flog2s_:
+ link %a6,&-LOCAL_SIZE
+
+ movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
+ fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+ fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
+
+ fmov.l &0x0,%fpcr # zero FPCR
+
+#
+# copy, convert, and tag input argument
+#
+ fmov.s 0x8(%a6),%fp0 # load sgl input
+ fmov.x %fp0,FP_SRC(%a6)
+ lea FP_SRC(%a6),%a0
+ bsr.l tag # fetch operand type
+ mov.b %d0,STAG(%a6)
+ mov.b %d0,%d1
+
+ andi.l &0x00ff00ff,USER_FPSR(%a6)
+
+ clr.l %d0
+ mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
+
+ tst.b %d1
+ bne.b _L15_2s
+ bsr.l slog2 # operand is a NORM
+ bra.b _L15_6s
+_L15_2s:
+ cmpi.b %d1,&ZERO # is operand a ZERO?
+ bne.b _L15_3s # no
+ bsr.l t_dz2 # yes
+ bra.b _L15_6s
+_L15_3s:
+ cmpi.b %d1,&INF # is operand an INF?
+ bne.b _L15_4s # no
+ bsr.l sopr_inf # yes
+ bra.b _L15_6s
+_L15_4s:
+ cmpi.b %d1,&QNAN # is operand a QNAN?
+ bne.b _L15_5s # no
+ bsr.l src_qnan # yes
+ bra.b _L15_6s
+_L15_5s:
+ bsr.l slog2d # operand is a DENORM
+_L15_6s:
+
+#
+# Result is now in FP0
+#
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+ fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
+ unlk %a6
+ rts
+
+ global _flog2d_
+_flog2d_:
+ link %a6,&-LOCAL_SIZE
+
+ movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
+ fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+ fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
+
+ fmov.l &0x0,%fpcr # zero FPCR
+
+#
+# copy, convert, and tag input argument
+#
+ fmov.d 0x8(%a6),%fp0 # load dbl input
+ fmov.x %fp0,FP_SRC(%a6)
+ lea FP_SRC(%a6),%a0
+ bsr.l tag # fetch operand type
+ mov.b %d0,STAG(%a6)
+ mov.b %d0,%d1
+
+ andi.l &0x00ff00ff,USER_FPSR(%a6)
+
+ clr.l %d0
+ mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
+
+ tst.b %d1
+ bne.b _L15_2d
+ bsr.l slog2 # operand is a NORM
+ bra.b _L15_6d
+_L15_2d:
+ cmpi.b %d1,&ZERO # is operand a ZERO?
+ bne.b _L15_3d # no
+ bsr.l t_dz2 # yes
+ bra.b _L15_6d
+_L15_3d:
+ cmpi.b %d1,&INF # is operand an INF?
+ bne.b _L15_4d # no
+ bsr.l sopr_inf # yes
+ bra.b _L15_6d
+_L15_4d:
+ cmpi.b %d1,&QNAN # is operand a QNAN?
+ bne.b _L15_5d # no
+ bsr.l src_qnan # yes
+ bra.b _L15_6d
+_L15_5d:
+ bsr.l slog2d # operand is a DENORM
+_L15_6d:
+
+#
+# Result is now in FP0
+#
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+ fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
+ unlk %a6
+ rts
+
+ global _flog2x_
+_flog2x_:
+ link %a6,&-LOCAL_SIZE
+
+ movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
+ fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+ fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
+
+ fmov.l &0x0,%fpcr # zero FPCR
+
+#
+# copy, convert, and tag input argument
+#
+ lea FP_SRC(%a6),%a0
+ mov.l 0x8+0x0(%a6),0x0(%a0) # load ext input
+ mov.l 0x8+0x4(%a6),0x4(%a0)
+ mov.l 0x8+0x8(%a6),0x8(%a0)
+ bsr.l tag # fetch operand type
+ mov.b %d0,STAG(%a6)
+ mov.b %d0,%d1
+
+ andi.l &0x00ff00ff,USER_FPSR(%a6)
+
+ clr.l %d0
+ mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
+
+ tst.b %d1
+ bne.b _L15_2x
+ bsr.l slog2 # operand is a NORM
+ bra.b _L15_6x
+_L15_2x:
+ cmpi.b %d1,&ZERO # is operand a ZERO?
+ bne.b _L15_3x # no
+ bsr.l t_dz2 # yes
+ bra.b _L15_6x
+_L15_3x:
+ cmpi.b %d1,&INF # is operand an INF?
+ bne.b _L15_4x # no
+ bsr.l sopr_inf # yes
+ bra.b _L15_6x
+_L15_4x:
+ cmpi.b %d1,&QNAN # is operand a QNAN?
+ bne.b _L15_5x # no
+ bsr.l src_qnan # yes
+ bra.b _L15_6x
+_L15_5x:
+ bsr.l slog2d # operand is a DENORM
+_L15_6x:
+
+#
+# Result is now in FP0
+#
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+ fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
+ unlk %a6
+ rts
+
+
+#########################################################################
+# MONADIC TEMPLATE #
+#########################################################################
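+#
+# cosh(X) special cases:
+#	ZERO   -> ld_pone	# cosh(+-0) = +1
+#	INF    -> ld_pinf	# cosh(+-inf) = +inf
+#	QNAN   -> src_qnan	# propagate the NaN operand
+#	DENORM -> scoshd	# denormalized-operand path
+#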
+ global _fcoshs_
+_fcoshs_:
+ link %a6,&-LOCAL_SIZE
+
+ movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
+ fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+ fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
+
+ fmov.l &0x0,%fpcr # zero FPCR
+
+#
+# copy, convert, and tag input argument
+#
+ fmov.s 0x8(%a6),%fp0 # load sgl input
+ fmov.x %fp0,FP_SRC(%a6)
+ lea FP_SRC(%a6),%a0
+ bsr.l tag # fetch operand type
+ mov.b %d0,STAG(%a6)
+ mov.b %d0,%d1
+
+ andi.l &0x00ff00ff,USER_FPSR(%a6)
+
+ clr.l %d0
+ mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
+
+ tst.b %d1
+ bne.b _L16_2s
+ bsr.l scosh # operand is a NORM
+ bra.b _L16_6s
+_L16_2s:
+ cmpi.b %d1,&ZERO # is operand a ZERO?
+ bne.b _L16_3s # no
+ bsr.l ld_pone # yes
+ bra.b _L16_6s
+_L16_3s:
+ cmpi.b %d1,&INF # is operand an INF?
+ bne.b _L16_4s # no
+ bsr.l ld_pinf # yes
+ bra.b _L16_6s
+_L16_4s:
+ cmpi.b %d1,&QNAN # is operand a QNAN?
+ bne.b _L16_5s # no
+ bsr.l src_qnan # yes
+ bra.b _L16_6s
+_L16_5s:
+ bsr.l scoshd # operand is a DENORM
+_L16_6s:
+
+#
+# Result is now in FP0
+#
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+ fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
+ unlk %a6
+ rts
+
+ global _fcoshd_
+_fcoshd_:
+ link %a6,&-LOCAL_SIZE
+
+ movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
+ fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+ fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
+
+ fmov.l &0x0,%fpcr # zero FPCR
+
+#
+# copy, convert, and tag input argument
+#
+ fmov.d 0x8(%a6),%fp0 # load dbl input
+ fmov.x %fp0,FP_SRC(%a6)
+ lea FP_SRC(%a6),%a0
+ bsr.l tag # fetch operand type
+ mov.b %d0,STAG(%a6)
+ mov.b %d0,%d1
+
+ andi.l &0x00ff00ff,USER_FPSR(%a6)
+
+ clr.l %d0
+ mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
+
+ tst.b %d1
+ bne.b _L16_2d
+ bsr.l scosh # operand is a NORM
+ bra.b _L16_6d
+_L16_2d:
+ cmpi.b %d1,&ZERO # is operand a ZERO?
+ bne.b _L16_3d # no
+ bsr.l ld_pone # yes
+ bra.b _L16_6d
+_L16_3d:
+ cmpi.b %d1,&INF # is operand an INF?
+ bne.b _L16_4d # no
+ bsr.l ld_pinf # yes
+ bra.b _L16_6d
+_L16_4d:
+ cmpi.b %d1,&QNAN # is operand a QNAN?
+ bne.b _L16_5d # no
+ bsr.l src_qnan # yes
+ bra.b _L16_6d
+_L16_5d:
+ bsr.l scoshd # operand is a DENORM
+_L16_6d:
+
+#
+# Result is now in FP0
+#
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+ fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
+ unlk %a6
+ rts
+
+ global _fcoshx_
+_fcoshx_:
+ link %a6,&-LOCAL_SIZE
+
+ movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
+ fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+ fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
+
+ fmov.l &0x0,%fpcr # zero FPCR
+
+#
+# copy, convert, and tag input argument
+#
+ lea FP_SRC(%a6),%a0
+ mov.l 0x8+0x0(%a6),0x0(%a0) # load ext input
+ mov.l 0x8+0x4(%a6),0x4(%a0)
+ mov.l 0x8+0x8(%a6),0x8(%a0)
+ bsr.l tag # fetch operand type
+ mov.b %d0,STAG(%a6)
+ mov.b %d0,%d1
+
+ andi.l &0x00ff00ff,USER_FPSR(%a6)
+
+ clr.l %d0
+ mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
+
+ tst.b %d1
+ bne.b _L16_2x
+ bsr.l scosh # operand is a NORM
+ bra.b _L16_6x
+_L16_2x:
+ cmpi.b %d1,&ZERO # is operand a ZERO?
+ bne.b _L16_3x # no
+ bsr.l ld_pone # yes
+ bra.b _L16_6x
+_L16_3x:
+ cmpi.b %d1,&INF # is operand an INF?
+ bne.b _L16_4x # no
+ bsr.l ld_pinf # yes
+ bra.b _L16_6x
+_L16_4x:
+ cmpi.b %d1,&QNAN # is operand a QNAN?
+ bne.b _L16_5x # no
+ bsr.l src_qnan # yes
+ bra.b _L16_6x
+_L16_5x:
+ bsr.l scoshd # operand is a DENORM
+_L16_6x:
+
+#
+# Result is now in FP0
+#
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+ fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
+ unlk %a6
+ rts
+
+
+#########################################################################
+# MONADIC TEMPLATE #
+#########################################################################
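+#
+# acos(X) special cases:
+#	ZERO   -> ld_ppi2	# acos(+-0) = +pi/2
+#	INF    -> t_operr	# an infinite operand is outside the domain |X| <= 1
+#	QNAN   -> src_qnan	# propagate the NaN operand
+#	DENORM -> sacosd	# denormalized-operand path
+#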
+ global _facoss_
+_facoss_:
+ link %a6,&-LOCAL_SIZE
+
+ movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
+ fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+ fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
+
+ fmov.l &0x0,%fpcr # zero FPCR
+
+#
+# copy, convert, and tag input argument
+#
+ fmov.s 0x8(%a6),%fp0 # load sgl input
+ fmov.x %fp0,FP_SRC(%a6)
+ lea FP_SRC(%a6),%a0
+ bsr.l tag # fetch operand type
+ mov.b %d0,STAG(%a6)
+ mov.b %d0,%d1
+
+ andi.l &0x00ff00ff,USER_FPSR(%a6)
+
+ clr.l %d0
+ mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
+
+ tst.b %d1
+ bne.b _L17_2s
+ bsr.l sacos # operand is a NORM
+ bra.b _L17_6s
+_L17_2s:
+ cmpi.b %d1,&ZERO # is operand a ZERO?
+ bne.b _L17_3s # no
+ bsr.l ld_ppi2 # yes
+ bra.b _L17_6s
+_L17_3s:
+ cmpi.b %d1,&INF # is operand an INF?
+ bne.b _L17_4s # no
+ bsr.l t_operr # yes
+ bra.b _L17_6s
+_L17_4s:
+ cmpi.b %d1,&QNAN # is operand a QNAN?
+ bne.b _L17_5s # no
+ bsr.l src_qnan # yes
+ bra.b _L17_6s
+_L17_5s:
+ bsr.l sacosd # operand is a DENORM
+_L17_6s:
+
+#
+# Result is now in FP0
+#
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+ fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
+ unlk %a6
+ rts
+
+ global _facosd_
+_facosd_:
+ link %a6,&-LOCAL_SIZE
+
+ movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
+ fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+ fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
+
+ fmov.l &0x0,%fpcr # zero FPCR
+
+#
+# copy, convert, and tag input argument
+#
+ fmov.d 0x8(%a6),%fp0 # load dbl input
+ fmov.x %fp0,FP_SRC(%a6)
+ lea FP_SRC(%a6),%a0
+ bsr.l tag # fetch operand type
+ mov.b %d0,STAG(%a6)
+ mov.b %d0,%d1
+
+ andi.l &0x00ff00ff,USER_FPSR(%a6)
+
+ clr.l %d0
+ mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
+
+ tst.b %d1
+ bne.b _L17_2d
+ bsr.l sacos # operand is a NORM
+ bra.b _L17_6d
+_L17_2d:
+ cmpi.b %d1,&ZERO # is operand a ZERO?
+ bne.b _L17_3d # no
+ bsr.l ld_ppi2 # yes
+ bra.b _L17_6d
+_L17_3d:
+ cmpi.b %d1,&INF # is operand an INF?
+ bne.b _L17_4d # no
+ bsr.l t_operr # yes
+ bra.b _L17_6d
+_L17_4d:
+ cmpi.b %d1,&QNAN # is operand a QNAN?
+ bne.b _L17_5d # no
+ bsr.l src_qnan # yes
+ bra.b _L17_6d
+_L17_5d:
+ bsr.l sacosd # operand is a DENORM
+_L17_6d:
+
+#
+# Result is now in FP0
+#
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+ fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
+ unlk %a6
+ rts
+
+ global _facosx_
+_facosx_:
+ link %a6,&-LOCAL_SIZE
+
+ movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
+ fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+ fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
+
+ fmov.l &0x0,%fpcr # zero FPCR
+
+#
+# copy, convert, and tag input argument
+#
+ lea FP_SRC(%a6),%a0
+ mov.l 0x8+0x0(%a6),0x0(%a0) # load ext input
+ mov.l 0x8+0x4(%a6),0x4(%a0)
+ mov.l 0x8+0x8(%a6),0x8(%a0)
+ bsr.l tag # fetch operand type
+ mov.b %d0,STAG(%a6)
+ mov.b %d0,%d1
+
+ andi.l &0x00ff00ff,USER_FPSR(%a6)
+
+ clr.l %d0
+ mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
+
+ tst.b %d1
+ bne.b _L17_2x
+ bsr.l sacos # operand is a NORM
+ bra.b _L17_6x
+_L17_2x:
+ cmpi.b %d1,&ZERO # is operand a ZERO?
+ bne.b _L17_3x # no
+ bsr.l ld_ppi2 # yes
+ bra.b _L17_6x
+_L17_3x:
+ cmpi.b %d1,&INF # is operand an INF?
+ bne.b _L17_4x # no
+ bsr.l t_operr # yes
+ bra.b _L17_6x
+_L17_4x:
+ cmpi.b %d1,&QNAN # is operand a QNAN?
+ bne.b _L17_5x # no
+ bsr.l src_qnan # yes
+ bra.b _L17_6x
+_L17_5x:
+ bsr.l sacosd # operand is a DENORM
+_L17_6x:
+
+#
+# Result is now in FP0
+#
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+ fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
+ unlk %a6
+ rts
+
+
+#########################################################################
+# MONADIC TEMPLATE #
+#########################################################################
+ global _fgetexps_
+_fgetexps_:
+ link %a6,&-LOCAL_SIZE
+
+ movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
+ fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+ fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
+
+ fmov.l &0x0,%fpcr # zero FPCR
+
+#
+# copy, convert, and tag input argument
+#
+ fmov.s 0x8(%a6),%fp0 # load sgl input
+ fmov.x %fp0,FP_SRC(%a6)
+ lea FP_SRC(%a6),%a0
+ bsr.l tag # fetch operand type
+ mov.b %d0,STAG(%a6)
+ mov.b %d0,%d1
+
+ andi.l &0x00ff00ff,USER_FPSR(%a6)
+
+ clr.l %d0
+ mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
+
+ tst.b %d1
+ bne.b _L18_2s
+ bsr.l sgetexp # operand is a NORM
+ bra.b _L18_6s
+_L18_2s:
+ cmpi.b %d1,&ZERO # is operand a ZERO?
+ bne.b _L18_3s # no
+ bsr.l src_zero # yes
+ bra.b _L18_6s
+_L18_3s:
+ cmpi.b %d1,&INF # is operand an INF?
+ bne.b _L18_4s # no
+ bsr.l t_operr # yes
+ bra.b _L18_6s
+_L18_4s:
+ cmpi.b %d1,&QNAN # is operand a QNAN?
+ bne.b _L18_5s # no
+ bsr.l src_qnan # yes
+ bra.b _L18_6s
+_L18_5s:
+ bsr.l sgetexpd # operand is a DENORM
+_L18_6s:
+
+#
+# Result is now in FP0
+#
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+ fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
+ unlk %a6
+ rts
+
+ global _fgetexpd_
+_fgetexpd_:
+ link %a6,&-LOCAL_SIZE
+
+ movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
+ fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+ fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
+
+ fmov.l &0x0,%fpcr # zero FPCR
+
+#
+# copy, convert, and tag input argument
+#
+ fmov.d 0x8(%a6),%fp0 # load dbl input
+ fmov.x %fp0,FP_SRC(%a6)
+ lea FP_SRC(%a6),%a0
+ bsr.l tag # fetch operand type
+ mov.b %d0,STAG(%a6)
+ mov.b %d0,%d1
+
+ andi.l &0x00ff00ff,USER_FPSR(%a6)
+
+ clr.l %d0
+ mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
+
+ tst.b %d1
+ bne.b _L18_2d
+ bsr.l sgetexp # operand is a NORM
+ bra.b _L18_6d
+_L18_2d:
+ cmpi.b %d1,&ZERO # is operand a ZERO?
+ bne.b _L18_3d # no
+ bsr.l src_zero # yes
+ bra.b _L18_6d
+_L18_3d:
+ cmpi.b %d1,&INF # is operand an INF?
+ bne.b _L18_4d # no
+ bsr.l t_operr # yes
+ bra.b _L18_6d
+_L18_4d:
+ cmpi.b %d1,&QNAN # is operand a QNAN?
+ bne.b _L18_5d # no
+ bsr.l src_qnan # yes
+ bra.b _L18_6d
+_L18_5d:
+ bsr.l sgetexpd # operand is a DENORM
+_L18_6d:
+
+#
+# Result is now in FP0
+#
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+ fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
+ unlk %a6
+ rts
+
+ global _fgetexpx_
+_fgetexpx_:
+ link %a6,&-LOCAL_SIZE
+
+ movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
+ fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+ fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
+
+ fmov.l &0x0,%fpcr # zero FPCR
+
+#
+# copy, convert, and tag input argument
+#
+ lea FP_SRC(%a6),%a0
+ mov.l 0x8+0x0(%a6),0x0(%a0) # load ext input
+ mov.l 0x8+0x4(%a6),0x4(%a0)
+ mov.l 0x8+0x8(%a6),0x8(%a0)
+ bsr.l tag # fetch operand type
+ mov.b %d0,STAG(%a6)
+ mov.b %d0,%d1
+
+ andi.l &0x00ff00ff,USER_FPSR(%a6)
+
+ clr.l %d0
+ mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
+
+ tst.b %d1
+ bne.b _L18_2x
+ bsr.l sgetexp # operand is a NORM
+ bra.b _L18_6x
+_L18_2x:
+ cmpi.b %d1,&ZERO # is operand a ZERO?
+ bne.b _L18_3x # no
+ bsr.l src_zero # yes
+ bra.b _L18_6x
+_L18_3x:
+ cmpi.b %d1,&INF # is operand an INF?
+ bne.b _L18_4x # no
+ bsr.l t_operr # yes
+ bra.b _L18_6x
+_L18_4x:
+ cmpi.b %d1,&QNAN # is operand a QNAN?
+ bne.b _L18_5x # no
+ bsr.l src_qnan # yes
+ bra.b _L18_6x
+_L18_5x:
+ bsr.l sgetexpd # operand is a DENORM
+_L18_6x:
+
+#
+# Result is now in FP0
+#
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+ fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
+ unlk %a6
+ rts
+
+
+#########################################################################
+# MONADIC TEMPLATE #
+#########################################################################
+ global _fgetmans_
+_fgetmans_:
+ link %a6,&-LOCAL_SIZE
+
+ movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
+ fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+ fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
+
+ fmov.l &0x0,%fpcr # zero FPCR
+
+#
+# copy, convert, and tag input argument
+#
+ fmov.s 0x8(%a6),%fp0 # load sgl input
+ fmov.x %fp0,FP_SRC(%a6)
+ lea FP_SRC(%a6),%a0
+ bsr.l tag # fetch operand type
+ mov.b %d0,STAG(%a6)
+ mov.b %d0,%d1
+
+ andi.l &0x00ff00ff,USER_FPSR(%a6)
+
+ clr.l %d0
+ mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
+
+ tst.b %d1
+ bne.b _L19_2s
+ bsr.l sgetman # operand is a NORM
+ bra.b _L19_6s
+_L19_2s:
+ cmpi.b %d1,&ZERO # is operand a ZERO?
+ bne.b _L19_3s # no
+ bsr.l src_zero # yes
+ bra.b _L19_6s
+_L19_3s:
+ cmpi.b %d1,&INF # is operand an INF?
+ bne.b _L19_4s # no
+ bsr.l t_operr # yes
+ bra.b _L19_6s
+_L19_4s:
+ cmpi.b %d1,&QNAN # is operand a QNAN?
+ bne.b _L19_5s # no
+ bsr.l src_qnan # yes
+ bra.b _L19_6s
+_L19_5s:
+ bsr.l sgetmand # operand is a DENORM
+_L19_6s:
+
+#
+# Result is now in FP0
+#
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+ fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
+ unlk %a6
+ rts
+
+ global _fgetmand_
+_fgetmand_:
+ link %a6,&-LOCAL_SIZE
+
+ movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
+ fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+ fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
+
+ fmov.l &0x0,%fpcr # zero FPCR
+
+#
+# copy, convert, and tag input argument
+#
+ fmov.d 0x8(%a6),%fp0 # load dbl input
+ fmov.x %fp0,FP_SRC(%a6)
+ lea FP_SRC(%a6),%a0
+ bsr.l tag # fetch operand type
+ mov.b %d0,STAG(%a6)
+ mov.b %d0,%d1
+
+ andi.l &0x00ff00ff,USER_FPSR(%a6)
+
+ clr.l %d0
+ mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
+
+ tst.b %d1
+ bne.b _L19_2d
+ bsr.l sgetman # operand is a NORM
+ bra.b _L19_6d
+_L19_2d:
+ cmpi.b %d1,&ZERO # is operand a ZERO?
+ bne.b _L19_3d # no
+ bsr.l src_zero # yes
+ bra.b _L19_6d
+_L19_3d:
+ cmpi.b %d1,&INF # is operand an INF?
+ bne.b _L19_4d # no
+ bsr.l t_operr # yes
+ bra.b _L19_6d
+_L19_4d:
+ cmpi.b %d1,&QNAN # is operand a QNAN?
+ bne.b _L19_5d # no
+ bsr.l src_qnan # yes
+ bra.b _L19_6d
+_L19_5d:
+ bsr.l sgetmand # operand is a DENORM
+_L19_6d:
+
+#
+# Result is now in FP0
+#
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+ fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
+ unlk %a6
+ rts
+
+ global _fgetmanx_
+_fgetmanx_:
+ link %a6,&-LOCAL_SIZE
+
+ movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
+ fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+ fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
+
+ fmov.l &0x0,%fpcr # zero FPCR
+
+#
+# copy, convert, and tag input argument
+#
+ lea FP_SRC(%a6),%a0
+ mov.l 0x8+0x0(%a6),0x0(%a0) # load ext input
+ mov.l 0x8+0x4(%a6),0x4(%a0)
+ mov.l 0x8+0x8(%a6),0x8(%a0)
+ bsr.l tag # fetch operand type
+ mov.b %d0,STAG(%a6)
+ mov.b %d0,%d1
+
+ andi.l &0x00ff00ff,USER_FPSR(%a6)
+
+ clr.l %d0
+ mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
+
+ tst.b %d1
+ bne.b _L19_2x
+ bsr.l sgetman # operand is a NORM
+ bra.b _L19_6x
+_L19_2x:
+ cmpi.b %d1,&ZERO # is operand a ZERO?
+ bne.b _L19_3x # no
+ bsr.l src_zero # yes
+ bra.b _L19_6x
+_L19_3x:
+ cmpi.b %d1,&INF # is operand an INF?
+ bne.b _L19_4x # no
+ bsr.l t_operr # yes
+ bra.b _L19_6x
+_L19_4x:
+ cmpi.b %d1,&QNAN # is operand a QNAN?
+ bne.b _L19_5x # no
+ bsr.l src_qnan # yes
+ bra.b _L19_6x
+_L19_5x:
+ bsr.l sgetmand # operand is a DENORM
+_L19_6x:
+
+#
+# Result is now in FP0
+#
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+ fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
+ unlk %a6
+ rts
+
+
+#########################################################################
+# MONADIC TEMPLATE #
+#########################################################################
+ global _fsincoss_
+_fsincoss_:
+ link %a6,&-LOCAL_SIZE
+
+ movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
+ fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+ fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
+
+ fmov.l &0x0,%fpcr # zero FPCR
+
+#
+# copy, convert, and tag input argument
+#
+ fmov.s 0x8(%a6),%fp0 # load sgl input
+ fmov.x %fp0,FP_SRC(%a6)
+ lea FP_SRC(%a6),%a0
+ bsr.l tag # fetch operand type
+ mov.b %d0,STAG(%a6)
+ mov.b %d0,%d1
+
+ andi.l &0x00ff00ff,USER_FPSR(%a6)
+
+ clr.l %d0
+ mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
+
+ tst.b %d1
+ bne.b _L20_2s
+ bsr.l ssincos # operand is a NORM
+ bra.b _L20_6s
+_L20_2s:
+ cmpi.b %d1,&ZERO # is operand a ZERO?
+ bne.b _L20_3s # no
+ bsr.l ssincosz # yes
+ bra.b _L20_6s
+_L20_3s:
+ cmpi.b %d1,&INF # is operand an INF?
+ bne.b _L20_4s # no
+ bsr.l ssincosi # yes
+ bra.b _L20_6s
+_L20_4s:
+ cmpi.b %d1,&QNAN # is operand a QNAN?
+ bne.b _L20_5s # no
+ bsr.l ssincosqnan # yes
+ bra.b _L20_6s
+_L20_5s:
+ bsr.l ssincosd # operand is a DENORM
+_L20_6s:
+
+#
+# Result is now in FP0/FP1
+#
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+ fmovm.x &0x03,-(%sp) # store off fp0/fp1
+ fmovm.x (%sp)+,&0x40 # fp0 now in fp1
+ fmovm.x (%sp)+,&0x80 # fp1 now in fp0
+ unlk %a6
+ rts
+
+ global _fsincosd_
+_fsincosd_:
+ link %a6,&-LOCAL_SIZE
+
+ movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
+ fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+ fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
+
+ fmov.l &0x0,%fpcr # zero FPCR
+
+#
+# copy, convert, and tag input argument
+#
+ fmov.d 0x8(%a6),%fp0 # load dbl input
+ fmov.x %fp0,FP_SRC(%a6)
+ lea FP_SRC(%a6),%a0
+ bsr.l tag # fetch operand type
+ mov.b %d0,STAG(%a6)
+ mov.b %d0,%d1
+
+ andi.l &0x00ff00ff,USER_FPSR(%a6)
+
+ clr.l %d0
+ mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
+
+ tst.b %d1
+ bne.b _L20_2d
+ bsr.l ssincos # operand is a NORM
+ bra.b _L20_6d
+_L20_2d:
+ cmpi.b %d1,&ZERO # is operand a ZERO?
+ bne.b _L20_3d # no
+ bsr.l ssincosz # yes
+ bra.b _L20_6d
+_L20_3d:
+ cmpi.b %d1,&INF # is operand an INF?
+ bne.b _L20_4d # no
+ bsr.l ssincosi # yes
+ bra.b _L20_6d
+_L20_4d:
+ cmpi.b %d1,&QNAN # is operand a QNAN?
+ bne.b _L20_5d # no
+ bsr.l ssincosqnan # yes
+ bra.b _L20_6d
+_L20_5d:
+ bsr.l ssincosd # operand is a DENORM
+_L20_6d:
+
+#
+# Result is now in FP0/FP1
+#
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+ fmovm.x &0x03,-(%sp) # store off fp0/fp1
+ fmovm.x (%sp)+,&0x40 # fp0 now in fp1
+ fmovm.x (%sp)+,&0x80 # fp1 now in fp0
+ unlk %a6
+ rts
+
+ global _fsincosx_
+_fsincosx_:
+ link %a6,&-LOCAL_SIZE
+
+ movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
+ fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+ fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
+
+ fmov.l &0x0,%fpcr # zero FPCR
+
+#
+# copy, convert, and tag input argument
+#
+ lea FP_SRC(%a6),%a0
+ mov.l 0x8+0x0(%a6),0x0(%a0) # load ext input
+ mov.l 0x8+0x4(%a6),0x4(%a0)
+ mov.l 0x8+0x8(%a6),0x8(%a0)
+ bsr.l tag # fetch operand type
+ mov.b %d0,STAG(%a6)
+ mov.b %d0,%d1
+
+ andi.l &0x00ff00ff,USER_FPSR(%a6)
+
+ clr.l %d0
+ mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
+
+ tst.b %d1
+ bne.b _L20_2x
+ bsr.l ssincos # operand is a NORM
+ bra.b _L20_6x
+_L20_2x:
+ cmpi.b %d1,&ZERO # is operand a ZERO?
+ bne.b _L20_3x # no
+ bsr.l ssincosz # yes
+ bra.b _L20_6x
+_L20_3x:
+ cmpi.b %d1,&INF # is operand an INF?
+ bne.b _L20_4x # no
+ bsr.l ssincosi # yes
+ bra.b _L20_6x
+_L20_4x:
+ cmpi.b %d1,&QNAN # is operand a QNAN?
+ bne.b _L20_5x # no
+ bsr.l ssincosqnan # yes
+ bra.b _L20_6x
+_L20_5x:
+ bsr.l ssincosd # operand is a DENORM
+_L20_6x:
+
+#
+# Result is now in FP0/FP1
+#
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+ fmovm.x &0x03,-(%sp) # store off fp0/fp1
+ fmovm.x (%sp)+,&0x40 # fp0 now in fp1
+ fmovm.x (%sp)+,&0x80 # fp1 now in fp0
+ unlk %a6
+ rts
+
+
+#########################################################################
+# DYADIC TEMPLATE #
+#########################################################################
+ global _frems_
+_frems_:
+ link %a6,&-LOCAL_SIZE
+
+ movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
+ fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+ fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
+
+ fmov.l &0x0,%fpcr # zero FPCR
+
+#
+# copy, convert, and tag input arguments
+#
+ fmov.s 0x8(%a6),%fp0 # load sgl dst
+ fmov.x %fp0,FP_DST(%a6)
+ lea FP_DST(%a6),%a0
+ bsr.l tag # fetch operand type
+ mov.b %d0,DTAG(%a6)
+
+ fmov.s 0xc(%a6),%fp0 # load sgl src
+ fmov.x %fp0,FP_SRC(%a6)
+ lea FP_SRC(%a6),%a0
+ bsr.l tag # fetch operand type
+ mov.b %d0,STAG(%a6)
+ mov.l %d0,%d1
+
+ andi.l &0x00ff00ff,USER_FPSR(%a6)
+
+ clr.l %d0
+ mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
+
+ lea FP_SRC(%a6),%a0 # pass ptr to src
+ lea FP_DST(%a6),%a1 # pass ptr to dst
+
+ tst.b %d1
+ bne.b _L21_2s
+ bsr.l srem_snorm # operand is a NORM
+ bra.b _L21_6s
+_L21_2s:
+ cmpi.b %d1,&ZERO # is operand a ZERO?
+ bne.b _L21_3s # no
+ bsr.l srem_szero # yes
+ bra.b _L21_6s
+_L21_3s:
+ cmpi.b %d1,&INF # is operand an INF?
+ bne.b _L21_4s # no
+ bsr.l srem_sinf # yes
+ bra.b _L21_6s
+_L21_4s:
+ cmpi.b %d1,&QNAN # is operand a QNAN?
+ bne.b _L21_5s # no
+ bsr.l sop_sqnan # yes
+ bra.b _L21_6s
+_L21_5s:
+ bsr.l srem_sdnrm # operand is a DENORM
+_L21_6s:
+
+#
+# Result is now in FP0
+#
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+ fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
+ unlk %a6
+ rts
+
+ global _fremd_
+_fremd_:
+ link %a6,&-LOCAL_SIZE
+
+ movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
+ fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+ fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
+
+ fmov.l &0x0,%fpcr # zero FPCR
+
+#
+# copy, convert, and tag input arguments
+#
+ fmov.d 0x8(%a6),%fp0 # load dbl dst
+ fmov.x %fp0,FP_DST(%a6)
+ lea FP_DST(%a6),%a0
+ bsr.l tag # fetch operand type
+ mov.b %d0,DTAG(%a6)
+
+ fmov.d 0x10(%a6),%fp0 # load dbl src
+ fmov.x %fp0,FP_SRC(%a6)
+ lea FP_SRC(%a6),%a0
+ bsr.l tag # fetch operand type
+ mov.b %d0,STAG(%a6)
+ mov.l %d0,%d1
+
+ andi.l &0x00ff00ff,USER_FPSR(%a6)
+
+ clr.l %d0
+ mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
+
+ lea FP_SRC(%a6),%a0 # pass ptr to src
+ lea FP_DST(%a6),%a1 # pass ptr to dst
+
+ tst.b %d1
+ bne.b _L21_2d
+ bsr.l srem_snorm # operand is a NORM
+ bra.b _L21_6d
+_L21_2d:
+ cmpi.b %d1,&ZERO # is operand a ZERO?
+ bne.b _L21_3d # no
+ bsr.l srem_szero # yes
+ bra.b _L21_6d
+_L21_3d:
+ cmpi.b %d1,&INF # is operand an INF?
+ bne.b _L21_4d # no
+ bsr.l srem_sinf # yes
+ bra.b _L21_6d
+_L21_4d:
+ cmpi.b %d1,&QNAN # is operand a QNAN?
+ bne.b _L21_5d # no
+ bsr.l sop_sqnan # yes
+ bra.b _L21_6d
+_L21_5d:
+ bsr.l srem_sdnrm # operand is a DENORM
+_L21_6d:
+
+#
+# Result is now in FP0
+#
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+ fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
+ unlk %a6
+ rts
+
+ global _fremx_
+_fremx_:
+ link %a6,&-LOCAL_SIZE
+
+ movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
+ fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+ fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
+
+ fmov.l &0x0,%fpcr # zero FPCR
+
+#
+# copy, convert, and tag input arguments
+#
+ lea FP_DST(%a6),%a0
+ mov.l 0x8+0x0(%a6),0x0(%a0) # load ext dst
+ mov.l 0x8+0x4(%a6),0x4(%a0)
+ mov.l 0x8+0x8(%a6),0x8(%a0)
+ bsr.l tag # fetch operand type
+ mov.b %d0,DTAG(%a6)
+
+ lea FP_SRC(%a6),%a0
+ mov.l 0x14+0x0(%a6),0x0(%a0) # load ext src
+ mov.l 0x14+0x4(%a6),0x4(%a0)
+ mov.l 0x14+0x8(%a6),0x8(%a0)
+ bsr.l tag # fetch operand type
+ mov.b %d0,STAG(%a6)
+ mov.l %d0,%d1
+
+ andi.l &0x00ff00ff,USER_FPSR(%a6)
+
+ clr.l %d0
+ mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
+
+ lea FP_SRC(%a6),%a0 # pass ptr to src
+ lea FP_DST(%a6),%a1 # pass ptr to dst
+
+ tst.b %d1
+ bne.b _L21_2x
+ bsr.l srem_snorm # operand is a NORM
+ bra.b _L21_6x
+_L21_2x:
+ cmpi.b %d1,&ZERO # is operand a ZERO?
+ bne.b _L21_3x # no
+ bsr.l srem_szero # yes
+ bra.b _L21_6x
+_L21_3x:
+ cmpi.b %d1,&INF # is operand an INF?
+ bne.b _L21_4x # no
+ bsr.l srem_sinf # yes
+ bra.b _L21_6x
+_L21_4x:
+ cmpi.b %d1,&QNAN # is operand a QNAN?
+ bne.b _L21_5x # no
+ bsr.l sop_sqnan # yes
+ bra.b _L21_6x
+_L21_5x:
+ bsr.l srem_sdnrm # operand is a DENORM
+_L21_6x:
+
+#
+# Result is now in FP0
+#
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+ fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
+ unlk %a6
+ rts
+
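+# Note: the dyadic templates differ from the monadic ones only in that
+# both operands are converted and tagged (DTAG for the destination,
+# STAG for the source) and pointers to both are passed in a0/a1; the
+# top-level dispatch is still on the source tag, and the called routine
+# consults DTAG itself.  A rough C sketch -- illustrative names only:
+#
+#	long double frem_template(long double dst, long double src)
+#	{
+#		switch (tag(src)) {
+#		case NORM: return srem_snorm(&src, &dst);
+#		case ZERO: return srem_szero(&src, &dst);
+#		case INF:  return srem_sinf(&src, &dst);
+#		case QNAN: return sop_sqnan(&src, &dst);
+#		default:   return srem_sdnrm(&src, &dst);
+#		}
+#	}
+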
+
+#########################################################################
+# DYADIC TEMPLATE #
+#########################################################################
+ global _fmods_
+_fmods_:
+ link %a6,&-LOCAL_SIZE
+
+ movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
+ fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+ fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
+
+ fmov.l &0x0,%fpcr # zero FPCR
+
+#
+# copy, convert, and tag input arguments
+#
+ fmov.s 0x8(%a6),%fp0 # load sgl dst
+ fmov.x %fp0,FP_DST(%a6)
+ lea FP_DST(%a6),%a0
+ bsr.l tag # fetch operand type
+ mov.b %d0,DTAG(%a6)
+
+ fmov.s 0xc(%a6),%fp0 # load sgl src
+ fmov.x %fp0,FP_SRC(%a6)
+ lea FP_SRC(%a6),%a0
+ bsr.l tag # fetch operand type
+ mov.b %d0,STAG(%a6)
+ mov.l %d0,%d1
+
+ andi.l &0x00ff00ff,USER_FPSR(%a6)
+
+ clr.l %d0
+ mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
+
+ lea FP_SRC(%a6),%a0 # pass ptr to src
+ lea FP_DST(%a6),%a1 # pass ptr to dst
+
+ tst.b %d1
+ bne.b _L22_2s
+ bsr.l smod_snorm # operand is a NORM
+ bra.b _L22_6s
+_L22_2s:
+ cmpi.b %d1,&ZERO # is operand a ZERO?
+ bne.b _L22_3s # no
+ bsr.l smod_szero # yes
+ bra.b _L22_6s
+_L22_3s:
+ cmpi.b %d1,&INF # is operand an INF?
+ bne.b _L22_4s # no
+ bsr.l smod_sinf # yes
+ bra.b _L22_6s
+_L22_4s:
+ cmpi.b %d1,&QNAN # is operand a QNAN?
+ bne.b _L22_5s # no
+ bsr.l sop_sqnan # yes
+ bra.b _L22_6s
+_L22_5s:
+ bsr.l smod_sdnrm # operand is a DENORM
+_L22_6s:
+
+#
+# Result is now in FP0
+#
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+ fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
+ unlk %a6
+ rts
+
+ global _fmodd_
+_fmodd_:
+ link %a6,&-LOCAL_SIZE
+
+ movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
+ fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+ fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
+
+ fmov.l &0x0,%fpcr # zero FPCR
+
+#
+# copy, convert, and tag input arguments
+#
+ fmov.d 0x8(%a6),%fp0 # load dbl dst
+ fmov.x %fp0,FP_DST(%a6)
+ lea FP_DST(%a6),%a0
+ bsr.l tag # fetch operand type
+ mov.b %d0,DTAG(%a6)
+
+ fmov.d 0x10(%a6),%fp0 # load dbl src
+ fmov.x %fp0,FP_SRC(%a6)
+ lea FP_SRC(%a6),%a0
+ bsr.l tag # fetch operand type
+ mov.b %d0,STAG(%a6)
+ mov.l %d0,%d1
+
+ andi.l &0x00ff00ff,USER_FPSR(%a6)
+
+ clr.l %d0
+ mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
+
+ lea FP_SRC(%a6),%a0 # pass ptr to src
+ lea FP_DST(%a6),%a1 # pass ptr to dst
+
+ tst.b %d1
+ bne.b _L22_2d
+ bsr.l smod_snorm # operand is a NORM
+ bra.b _L22_6d
+_L22_2d:
+ cmpi.b %d1,&ZERO # is operand a ZERO?
+ bne.b _L22_3d # no
+ bsr.l smod_szero # yes
+ bra.b _L22_6d
+_L22_3d:
+ cmpi.b %d1,&INF # is operand an INF?
+ bne.b _L22_4d # no
+ bsr.l smod_sinf # yes
+ bra.b _L22_6d
+_L22_4d:
+ cmpi.b %d1,&QNAN # is operand a QNAN?
+ bne.b _L22_5d # no
+ bsr.l sop_sqnan # yes
+ bra.b _L22_6d
+_L22_5d:
+ bsr.l smod_sdnrm # operand is a DENORM
+_L22_6d:
+
+#
+# Result is now in FP0
+#
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+ fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
+ unlk %a6
+ rts
+
+ global _fmodx_
+_fmodx_:
+ link %a6,&-LOCAL_SIZE
+
+ movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
+ fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+ fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
+
+ fmov.l &0x0,%fpcr # zero FPCR
+
+#
+# copy, convert, and tag input arguments
+#
+ lea FP_DST(%a6),%a0
+ mov.l 0x8+0x0(%a6),0x0(%a0) # load ext dst
+ mov.l 0x8+0x4(%a6),0x4(%a0)
+ mov.l 0x8+0x8(%a6),0x8(%a0)
+ bsr.l tag # fetch operand type
+ mov.b %d0,DTAG(%a6)
+
+ lea FP_SRC(%a6),%a0
+ mov.l 0x14+0x0(%a6),0x0(%a0) # load ext src
+ mov.l 0x14+0x4(%a6),0x4(%a0)
+ mov.l 0x14+0x8(%a6),0x8(%a0)
+ bsr.l tag # fetch operand type
+ mov.b %d0,STAG(%a6)
+ mov.l %d0,%d1
+
+ andi.l &0x00ff00ff,USER_FPSR(%a6)
+
+ clr.l %d0
+ mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
+
+ lea FP_SRC(%a6),%a0 # pass ptr to src
+ lea FP_DST(%a6),%a1 # pass ptr to dst
+
+ tst.b %d1
+ bne.b _L22_2x
+ bsr.l smod_snorm # operand is a NORM
+ bra.b _L22_6x
+_L22_2x:
+ cmpi.b %d1,&ZERO # is operand a ZERO?
+ bne.b _L22_3x # no
+ bsr.l smod_szero # yes
+ bra.b _L22_6x
+_L22_3x:
+ cmpi.b %d1,&INF # is operand an INF?
+ bne.b _L22_4x # no
+ bsr.l smod_sinf # yes
+ bra.b _L22_6x
+_L22_4x:
+ cmpi.b %d1,&QNAN # is operand a QNAN?
+ bne.b _L22_5x # no
+ bsr.l sop_sqnan # yes
+ bra.b _L22_6x
+_L22_5x:
+ bsr.l smod_sdnrm # operand is a DENORM
+_L22_6x:
+
+#
+# Result is now in FP0
+#
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+ fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
+ unlk %a6
+ rts
+
+
+#########################################################################
+# DYADIC TEMPLATE #
+#########################################################################
+ global _fscales_
+_fscales_:
+ link %a6,&-LOCAL_SIZE
+
+ movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
+ fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+ fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
+
+ fmov.l &0x0,%fpcr # zero FPCR
+
+#
+# copy, convert, and tag input arguments
+#
+ fmov.s 0x8(%a6),%fp0 # load sgl dst
+ fmov.x %fp0,FP_DST(%a6)
+ lea FP_DST(%a6),%a0
+ bsr.l tag # fetch operand type
+ mov.b %d0,DTAG(%a6)
+
+ fmov.s 0xc(%a6),%fp0 # load sgl src
+ fmov.x %fp0,FP_SRC(%a6)
+ lea FP_SRC(%a6),%a0
+ bsr.l tag # fetch operand type
+ mov.b %d0,STAG(%a6)
+ mov.l %d0,%d1
+
+ andi.l &0x00ff00ff,USER_FPSR(%a6)
+
+ clr.l %d0
+ mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
+
+ lea FP_SRC(%a6),%a0 # pass ptr to src
+ lea FP_DST(%a6),%a1 # pass ptr to dst
+
+ tst.b %d1
+ bne.b _L23_2s
+ bsr.l sscale_snorm # operand is a NORM
+ bra.b _L23_6s
+_L23_2s:
+ cmpi.b %d1,&ZERO # is operand a ZERO?
+ bne.b _L23_3s # no
+ bsr.l sscale_szero # yes
+ bra.b _L23_6s
+_L23_3s:
+ cmpi.b %d1,&INF # is operand an INF?
+ bne.b _L23_4s # no
+ bsr.l sscale_sinf # yes
+ bra.b _L23_6s
+_L23_4s:
+ cmpi.b %d1,&QNAN # is operand a QNAN?
+ bne.b _L23_5s # no
+ bsr.l sop_sqnan # yes
+ bra.b _L23_6s
+_L23_5s:
+ bsr.l sscale_sdnrm # operand is a DENORM
+_L23_6s:
+
+#
+# Result is now in FP0
+#
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+ fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
+ unlk %a6
+ rts
+
+ global _fscaled_
+_fscaled_:
+ link %a6,&-LOCAL_SIZE
+
+ movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
+ fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+ fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
+
+ fmov.l &0x0,%fpcr # zero FPCR
+
+#
+# copy, convert, and tag input arguments
+#
+ fmov.d 0x8(%a6),%fp0 # load dbl dst
+ fmov.x %fp0,FP_DST(%a6)
+ lea FP_DST(%a6),%a0
+ bsr.l tag # fetch operand type
+ mov.b %d0,DTAG(%a6)
+
+ fmov.d 0x10(%a6),%fp0 # load dbl src
+ fmov.x %fp0,FP_SRC(%a6)
+ lea FP_SRC(%a6),%a0
+ bsr.l tag # fetch operand type
+ mov.b %d0,STAG(%a6)
+ mov.l %d0,%d1
+
+ andi.l &0x00ff00ff,USER_FPSR(%a6)
+
+ clr.l %d0
+ mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
+
+ lea FP_SRC(%a6),%a0 # pass ptr to src
+ lea FP_DST(%a6),%a1 # pass ptr to dst
+
+ tst.b %d1
+ bne.b _L23_2d
+ bsr.l sscale_snorm # operand is a NORM
+ bra.b _L23_6d
+_L23_2d:
+ cmpi.b %d1,&ZERO # is operand a ZERO?
+ bne.b _L23_3d # no
+ bsr.l sscale_szero # yes
+ bra.b _L23_6d
+_L23_3d:
+ cmpi.b %d1,&INF # is operand an INF?
+ bne.b _L23_4d # no
+ bsr.l sscale_sinf # yes
+ bra.b _L23_6d
+_L23_4d:
+ cmpi.b %d1,&QNAN # is operand a QNAN?
+ bne.b _L23_5d # no
+ bsr.l sop_sqnan # yes
+ bra.b _L23_6d
+_L23_5d:
+ bsr.l sscale_sdnrm # operand is a DENORM
+_L23_6d:
+
+#
+# Result is now in FP0
+#
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+ fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
+ unlk %a6
+ rts
+
+ global _fscalex_
+_fscalex_:
+ link %a6,&-LOCAL_SIZE
+
+ movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
+ fmovm.l %fpcr,%fpsr,USER_FPCR(%a6) # save ctrl regs
+ fmovm.x &0xc0,EXC_FP0(%a6) # save fp0/fp1
+
+ fmov.l &0x0,%fpcr # zero FPCR
+
+#
+# copy, convert, and tag input arguments
+#
+ lea FP_DST(%a6),%a0
+ mov.l 0x8+0x0(%a6),0x0(%a0) # load ext dst
+ mov.l 0x8+0x4(%a6),0x4(%a0)
+ mov.l 0x8+0x8(%a6),0x8(%a0)
+ bsr.l tag # fetch operand type
+ mov.b %d0,DTAG(%a6)
+
+ lea FP_SRC(%a6),%a0
+ mov.l 0x14+0x0(%a6),0x0(%a0) # load ext src
+ mov.l 0x14+0x4(%a6),0x4(%a0)
+ mov.l 0x14+0x8(%a6),0x8(%a0)
+ bsr.l tag # fetch operand type
+ mov.b %d0,STAG(%a6)
+ mov.l %d0,%d1
+
+ andi.l &0x00ff00ff,USER_FPSR(%a6)
+
+ clr.l %d0
+ mov.b FPCR_MODE(%a6),%d0 # pass rnd mode,prec
+
+ lea FP_SRC(%a6),%a0 # pass ptr to src
+ lea FP_DST(%a6),%a1 # pass ptr to dst
+
+ tst.b %d1
+ bne.b _L23_2x
+ bsr.l sscale_snorm # operand is a NORM
+ bra.b _L23_6x
+_L23_2x:
+ cmpi.b %d1,&ZERO # is operand a ZERO?
+ bne.b _L23_3x # no
+ bsr.l sscale_szero # yes
+ bra.b _L23_6x
+_L23_3x:
+ cmpi.b %d1,&INF # is operand an INF?
+ bne.b _L23_4x # no
+ bsr.l sscale_sinf # yes
+ bra.b _L23_6x
+_L23_4x:
+ cmpi.b %d1,&QNAN # is operand a QNAN?
+ bne.b _L23_5x # no
+ bsr.l sop_sqnan # yes
+ bra.b _L23_6x
+_L23_5x:
+ bsr.l sscale_sdnrm # operand is a DENORM
+_L23_6x:
+
+#
+# Result is now in FP0
+#
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr # restore ctrl regs
+ fmovm.x EXC_FP1(%a6),&0x40 # restore fp1
+ unlk %a6
+ rts
+
+
+#########################################################################
+# ssin(): computes the sine of a normalized input #
+# ssind(): computes the sine of a denormalized input #
+# scos(): computes the cosine of a normalized input #
+# scosd(): computes the cosine of a denormalized input #
+# ssincos(): computes the sine and cosine of a normalized input #
+# ssincosd(): computes the sine and cosine of a denormalized input #
+# #
+# INPUT *************************************************************** #
+# a0 = pointer to extended precision input #
+# d0 = round precision,mode #
+# #
+# OUTPUT ************************************************************** #
+# fp0 = sin(X) or cos(X) #
+# #
+# For ssincos(X): #
+# fp0 = sin(X) #
+# fp1 = cos(X) #
+# #
+# ACCURACY and MONOTONICITY ******************************************* #
+#	The returned result is within 1 ulp in 64 significant bits, i.e.	#
+# within 0.5001 ulp to 53 bits if the result is subsequently #
+# rounded to double precision. The result is provably monotonic #
+# in double precision. #
+# #
+# ALGORITHM *********************************************************** #
+# #
+# SIN and COS: #
+# 1. If SIN is invoked, set AdjN := 0; otherwise, set AdjN := 1. #
+# #
+# 2. If |X| >= 15Pi or |X| < 2**(-40), go to 7. #
+# #
+# 3. Decompose X as X = N(Pi/2) + r where |r| <= Pi/4. Let #
+# k = N mod 4, so in particular, k = 0,1,2,or 3. #
+# Overwrite k by k := k + AdjN. #
+# #
+# 4. If k is even, go to 6. #
+# #
+# 5. (k is odd) Set j := (k-1)/2, sgn := (-1)**j. #
+# Return sgn*cos(r) where cos(r) is approximated by an #
+# even polynomial in r, 1 + r*r*(B1+s*(B2+ ... + s*B8)), #
+# s = r*r. #
+# Exit. #
+# #
+# 6. (k is even) Set j := k/2, sgn := (-1)**j. Return sgn*sin(r) #
+# where sin(r) is approximated by an odd polynomial in r #
+# r + r*s*(A1+s*(A2+ ... + s*A7)), s = r*r. #
+# Exit. #
+# #
+# 7. If |X| > 1, go to 9. #
+# #
+# 8. (|X|<2**(-40)) If SIN is invoked, return X; #
+# otherwise return 1. #
+# #
+# 9. Overwrite X by X := X rem 2Pi. Now that |X| <= Pi, #
+# go back to 3. #
+# #
+# SINCOS: #
+# 1. If |X| >= 15Pi or |X| < 2**(-40), go to 6. #
+# #
+# 2. Decompose X as X = N(Pi/2) + r where |r| <= Pi/4. Let #
+# k = N mod 4, so in particular, k = 0,1,2,or 3. #
+# #
+# 3. If k is even, go to 5. #
+# #
+#	4. (k is odd) Set j1 := (k-1)/2, j2 := j1 (EOR) (k mod 2), i.e.	#
+# j1 exclusive or with the l.s.b. of k. #
+# sgn1 := (-1)**j1, sgn2 := (-1)**j2. #
+# SIN(X) = sgn1 * cos(r) and COS(X) = sgn2*sin(r) where #
+# sin(r) and cos(r) are computed as odd and even #
+# polynomials in r, respectively. Exit #
+# #
+# 5. (k is even) Set j1 := k/2, sgn1 := (-1)**j1. #
+# SIN(X) = sgn1 * sin(r) and COS(X) = sgn1*cos(r) where #
+# sin(r) and cos(r) are computed as odd and even #
+# polynomials in r, respectively. Exit #
+# #
+# 6. If |X| > 1, go to 8. #
+# #
+# 7. (|X|<2**(-40)) SIN(X) = X and COS(X) = 1. Exit. #
+# #
+# 8. Overwrite X by X := X rem 2Pi. Now that |X| <= Pi, #
+# go back to 2. #
+# #
+#########################################################################
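+
+# A rough C model of steps 1-6 above (illustrative only; the real code
+# uses the 66-bit N*PI/2 table and the split polynomials below, while
+# this sketch leans on the C library):
+#
+#	#include <math.h>
+#	double sin_poly(double r), cos_poly(double r);  /* approximants */
+#	double sin_or_cos(double x, int adjn)  /* adjn: 0 = SIN, 1 = COS */
+#	{
+#		int n = (int)nearbyint(x * 2.0 / M_PI); /* X = N(Pi/2)+r */
+#		double r = x - n * (M_PI / 2.0);        /* |r| <= Pi/4   */
+#		int k = ((n % 4) + 4 + adjn) % 4;
+#		int sgn = (k & 2) ? -1 : 1;   /* (-1)**j of steps 5 and 6 */
+#		return (k & 1) ? sgn * cos_poly(r) : sgn * sin_poly(r);
+#	}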
+
+SINA7: long 0xBD6AAA77,0xCCC994F5
+SINA6: long 0x3DE61209,0x7AAE8DA1
+SINA5: long 0xBE5AE645,0x2A118AE4
+SINA4: long 0x3EC71DE3,0xA5341531
+SINA3: long 0xBF2A01A0,0x1A018B59,0x00000000,0x00000000
+SINA2: long 0x3FF80000,0x88888888,0x888859AF,0x00000000
+SINA1: long 0xBFFC0000,0xAAAAAAAA,0xAAAAAA99,0x00000000
+
+COSB8: long 0x3D2AC4D0,0xD6011EE3
+COSB7: long 0xBDA9396F,0x9F45AC19
+COSB6: long 0x3E21EED9,0x0612C972
+COSB5: long 0xBE927E4F,0xB79D9FCF
+COSB4: long 0x3EFA01A0,0x1A01D423,0x00000000,0x00000000
+COSB3: long 0xBFF50000,0xB60B60B6,0x0B61D438,0x00000000
+COSB2: long 0x3FFA0000,0xAAAAAAAA,0xAAAAAB5E
+COSB1: long 0xBF000000
+
+ set INARG,FP_SCR0
+
+ set X,FP_SCR0
+# set XDCARE,X+2
+ set XFRAC,X+4
+
+ set RPRIME,FP_SCR0
+ set SPRIME,FP_SCR1
+
+ set POSNEG1,L_SCR1
+ set TWOTO63,L_SCR1
+
+ set ENDFLAG,L_SCR2
+ set INT,L_SCR2
+
+ set ADJN,L_SCR3
+
+############################################
+ global ssin
+ssin:
+	mov.l		&0,ADJN(%a6)		# SET ADJN TO 0
+ bra.b SINBGN
+
+############################################
+ global scos
+scos:
+	mov.l		&1,ADJN(%a6)		# SET ADJN TO 1
+
+############################################
+SINBGN:
+#--SAVE FPCR, FP1. CHECK IF |X| IS TOO SMALL OR LARGE
+
+ fmov.x (%a0),%fp0 # LOAD INPUT
+ fmov.x %fp0,X(%a6) # save input at X
+
+# "COMPACTIFY" X
+ mov.l (%a0),%d1 # put exp in hi word
+ mov.w 4(%a0),%d1 # fetch hi(man)
+ and.l &0x7FFFFFFF,%d1 # strip sign
+
+	cmpi.l		%d1,&0x3FD78000		# is |X| >= 2**(-40)?
+	bge.b		SOK1			# yes
+	bra.w		SINSM			# no; input is very small
+
+SOK1:
+	cmp.l		%d1,&0x4004BC7E		# is |X| < 15 PI?
+	blt.b		SINMAIN			# yes
+	bra.w		SREDUCEX		# no; input is very large
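+
+# Note on the "compactified" compares above: %d1 now holds the sign-
+# stripped 15-bit exponent in its upper word and the top 16 mantissa
+# bits in its lower word, so a magnitude test on the extended operand
+# becomes one integer compare.  Roughly, in C (illustrative only):
+#
+#	unsigned int compact = ((exp & 0x7fff) << 16) | mant_hi16;
+#	/* |X| >= 2**(-40)  <=>  compact >= 0x3FD78000 */
+#	/* |X| <  15*PI     <=>  compact <  0x4004BC7E */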
+
+#--THIS IS THE USUAL CASE, |X| <= 15 PI.
+#--THE ARGUMENT REDUCTION IS DONE BY TABLE LOOK UP.
+SINMAIN:
+ fmov.x %fp0,%fp1
+ fmul.d TWOBYPI(%pc),%fp1 # X*2/PI
+
+ lea PITBL+0x200(%pc),%a1 # TABLE OF N*PI/2, N = -32,...,32
+
+ fmov.l %fp1,INT(%a6) # CONVERT TO INTEGER
+
+ mov.l INT(%a6),%d1 # make a copy of N
+ asl.l &4,%d1 # N *= 16
+ add.l %d1,%a1 # tbl_addr = a1 + (N*16)
+
+# A1 IS THE ADDRESS OF N*PIBY2
+# ...WHICH IS IN TWO PIECES Y1 & Y2
+ fsub.x (%a1)+,%fp0 # X-Y1
+ fsub.s (%a1),%fp0 # fp0 = R = (X-Y1)-Y2
+
+SINCONT:
+#--continuation from REDUCEX
+
+#--GET N+ADJN AND SEE IF SIN(R) OR COS(R) IS NEEDED
+ mov.l INT(%a6),%d1
+	add.l		ADJN(%a6),%d1		# SEE IF D1 IS ODD OR EVEN
+	ror.l		&1,%d1			# D1 WAS ODD IFF D1 IS NEGATIVE
+ cmp.l %d1,&0
+ blt.w COSPOLY
+
+#--LET J BE THE LEAST SIG. BIT OF D0, LET SGN := (-1)**J.
+#--THEN WE RETURN SGN*SIN(R). SGN*SIN(R) IS COMPUTED BY
+#--R' + R'*S*(A1 + S(A2 + S(A3 + S(A4 + ... + SA7)))), WHERE
+#--R' = SGN*R, S=R*R. THIS CAN BE REWRITTEN AS
+#--R' + R'*S*( [A1+T(A3+T(A5+TA7))] + [S(A2+T(A4+TA6))])
+#--WHERE T=S*S.
+#--NOTE THAT A3 THROUGH A7 ARE STORED IN DOUBLE PRECISION
+#--WHILE A1 AND A2 ARE IN DOUBLE-EXTENDED FORMAT.
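+#
+# In C terms, the split evaluation described above is roughly
+# (illustrative only):
+#
+#	s = r*r;  t = s*s;  rp = sgn*r;             /* R' = SGN*R   */
+#	odd  = A1 + t*(A3 + t*(A5 + t*A7));
+#	even = s*(A2 + t*(A4 + t*A6));
+#	result = rp + rp*s*(odd + even);            /* = SGN*SIN(R) */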
+SINPOLY:
+ fmovm.x &0x0c,-(%sp) # save fp2/fp3
+
+ fmov.x %fp0,X(%a6) # X IS R
+ fmul.x %fp0,%fp0 # FP0 IS S
+
+ fmov.d SINA7(%pc),%fp3
+ fmov.d SINA6(%pc),%fp2
+
+ fmov.x %fp0,%fp1
+ fmul.x %fp1,%fp1 # FP1 IS T
+
+ ror.l &1,%d1
+ and.l &0x80000000,%d1
+# ...LEAST SIG. BIT OF D1 IN SIGN POSITION
+ eor.l %d1,X(%a6) # X IS NOW R'= SGN*R
+
+ fmul.x %fp1,%fp3 # TA7
+ fmul.x %fp1,%fp2 # TA6
+
+ fadd.d SINA5(%pc),%fp3 # A5+TA7
+ fadd.d SINA4(%pc),%fp2 # A4+TA6
+
+ fmul.x %fp1,%fp3 # T(A5+TA7)
+ fmul.x %fp1,%fp2 # T(A4+TA6)
+
+ fadd.d SINA3(%pc),%fp3 # A3+T(A5+TA7)
+ fadd.x SINA2(%pc),%fp2 # A2+T(A4+TA6)
+
+ fmul.x %fp3,%fp1 # T(A3+T(A5+TA7))
+
+ fmul.x %fp0,%fp2 # S(A2+T(A4+TA6))
+ fadd.x SINA1(%pc),%fp1 # A1+T(A3+T(A5+TA7))
+ fmul.x X(%a6),%fp0 # R'*S
+
+ fadd.x %fp2,%fp1 # [A1+T(A3+T(A5+TA7))]+[S(A2+T(A4+TA6))]
+
+ fmul.x %fp1,%fp0 # SIN(R')-R'
+
+ fmovm.x (%sp)+,&0x30 # restore fp2/fp3
+
+	fmov.l		%d0,%fpcr		# restore user's round mode,prec
+ fadd.x X(%a6),%fp0 # last inst - possible exception set
+ bra t_inx2
+
+#--LET J BE THE LEAST SIG. BIT OF D0, LET SGN := (-1)**J.
+#--THEN WE RETURN SGN*COS(R). SGN*COS(R) IS COMPUTED BY
+#--SGN + S'*(B1 + S(B2 + S(B3 + S(B4 + ... + SB8)))), WHERE
+#--S=R*R AND S'=SGN*S. THIS CAN BE REWRITTEN AS
+#--SGN + S'*([B1+T(B3+T(B5+TB7))] + [S(B2+T(B4+T(B6+TB8)))])
+#--WHERE T=S*S.
+#--NOTE THAT B4 THROUGH B8 ARE STORED IN DOUBLE PRECISION
+#--WHILE B2 AND B3 ARE IN DOUBLE-EXTENDED FORMAT, B1 IS -1/2
+#--AND IS THEREFORE STORED AS SINGLE PRECISION.
+COSPOLY:
+ fmovm.x &0x0c,-(%sp) # save fp2/fp3
+
+ fmul.x %fp0,%fp0 # FP0 IS S
+
+ fmov.d COSB8(%pc),%fp2
+ fmov.d COSB7(%pc),%fp3
+
+ fmov.x %fp0,%fp1
+ fmul.x %fp1,%fp1 # FP1 IS T
+
+ fmov.x %fp0,X(%a6) # X IS S
+ ror.l &1,%d1
+ and.l &0x80000000,%d1
+# ...LEAST SIG. BIT OF D1 IN SIGN POSITION
+
+ fmul.x %fp1,%fp2 # TB8
+
+ eor.l %d1,X(%a6) # X IS NOW S'= SGN*S
+ and.l &0x80000000,%d1
+
+ fmul.x %fp1,%fp3 # TB7
+
+	or.l		&0x3F800000,%d1		# D1 IS SGN IN SINGLE
+ mov.l %d1,POSNEG1(%a6)
+
+ fadd.d COSB6(%pc),%fp2 # B6+TB8
+ fadd.d COSB5(%pc),%fp3 # B5+TB7
+
+ fmul.x %fp1,%fp2 # T(B6+TB8)
+ fmul.x %fp1,%fp3 # T(B5+TB7)
+
+ fadd.d COSB4(%pc),%fp2 # B4+T(B6+TB8)
+ fadd.x COSB3(%pc),%fp3 # B3+T(B5+TB7)
+
+ fmul.x %fp1,%fp2 # T(B4+T(B6+TB8))
+ fmul.x %fp3,%fp1 # T(B3+T(B5+TB7))
+
+ fadd.x COSB2(%pc),%fp2 # B2+T(B4+T(B6+TB8))
+ fadd.s COSB1(%pc),%fp1 # B1+T(B3+T(B5+TB7))
+
+ fmul.x %fp2,%fp0 # S(B2+T(B4+T(B6+TB8)))
+
+ fadd.x %fp1,%fp0
+
+ fmul.x X(%a6),%fp0
+
+ fmovm.x (%sp)+,&0x30 # restore fp2/fp3
+
+	fmov.l		%d0,%fpcr		# restore user's round mode,prec
+ fadd.s POSNEG1(%a6),%fp0 # last inst - possible exception set
+ bra t_inx2
+
+##############################################
+
+# SINe: Big OR Small?
+#--IF |X| > 15PI, WE USE THE GENERAL ARGUMENT REDUCTION.
+#--IF |X| < 2**(-40), RETURN X OR 1.
+SINBORS:
+ cmp.l %d1,&0x3FFF8000
+ bgt.l SREDUCEX
+
+SINSM:
+ mov.l ADJN(%a6),%d1
+ cmp.l %d1,&0
+ bgt.b COSTINY
+
+# here, the operation may underflow iff the precision is sgl or dbl.
+# extended denorms are handled through another entry point.
+SINTINY:
+# mov.w &0x0000,XDCARE(%a6) # JUST IN CASE
+
+	fmov.l		%d0,%fpcr		# restore user's round mode,prec
+ mov.b &FMOV_OP,%d1 # last inst is MOVE
+ fmov.x X(%a6),%fp0 # last inst - possible exception set
+ bra t_catch
+
+COSTINY:
+ fmov.s &0x3F800000,%fp0 # fp0 = 1.0
+	fmov.l		%d0,%fpcr		# restore user's round mode,prec
+ fadd.s &0x80800000,%fp0 # last inst - possible exception set
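+# (0x80800000 is -(2**-126), the smallest single-precision normal
+# negated; the add cannot be exact, so it raises inexact and lets the
+# user's rounding mode pick between 1.0 and the value just below it.)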
+ bra t_pinx2
+
+################################################
+ global ssind
+#--SIN(X) = X FOR DENORMALIZED X
+ssind:
+ bra t_extdnrm
+
+############################################
+ global scosd
+#--COS(X) = 1 FOR DENORMALIZED X
+scosd:
+ fmov.s &0x3F800000,%fp0 # fp0 = 1.0
+ bra t_pinx2
+
+##################################################
+
+ global ssincos
+ssincos:
+#--SET ADJN TO 4
+ mov.l &4,ADJN(%a6)
+
+ fmov.x (%a0),%fp0 # LOAD INPUT
+ fmov.x %fp0,X(%a6)
+
+ mov.l (%a0),%d1
+ mov.w 4(%a0),%d1
+ and.l &0x7FFFFFFF,%d1 # COMPACTIFY X
+
+ cmp.l %d1,&0x3FD78000 # |X| >= 2**(-40)?
+ bge.b SCOK1
+ bra.w SCSM
+
+SCOK1:
+ cmp.l %d1,&0x4004BC7E # |X| < 15 PI?
+ blt.b SCMAIN
+ bra.w SREDUCEX
+
+
+#--THIS IS THE USUAL CASE, |X| <= 15 PI.
+#--THE ARGUMENT REDUCTION IS DONE BY TABLE LOOK UP.
+SCMAIN:
+ fmov.x %fp0,%fp1
+
+ fmul.d TWOBYPI(%pc),%fp1 # X*2/PI
+
+ lea PITBL+0x200(%pc),%a1 # TABLE OF N*PI/2, N = -32,...,32
+
+ fmov.l %fp1,INT(%a6) # CONVERT TO INTEGER
+
+ mov.l INT(%a6),%d1
+ asl.l &4,%d1
+ add.l %d1,%a1 # ADDRESS OF N*PIBY2, IN Y1, Y2
+
+ fsub.x (%a1)+,%fp0 # X-Y1
+ fsub.s (%a1),%fp0 # FP0 IS R = (X-Y1)-Y2
+
+SCCONT:
+#--continuation point from REDUCEX
+
+ mov.l INT(%a6),%d1
+ ror.l &1,%d1
+	cmp.l		%d1,&0			# D1 < 0 IFF N IS ODD
+ bge.w NEVEN
+
+SNODD:
+#--REGISTERS SAVED SO FAR: D0, A0, FP2.
+ fmovm.x &0x04,-(%sp) # save fp2
+
+ fmov.x %fp0,RPRIME(%a6)
+ fmul.x %fp0,%fp0 # FP0 IS S = R*R
+ fmov.d SINA7(%pc),%fp1 # A7
+ fmov.d COSB8(%pc),%fp2 # B8
+ fmul.x %fp0,%fp1 # SA7
+ fmul.x %fp0,%fp2 # SB8
+
+ mov.l %d2,-(%sp)
+ mov.l %d1,%d2
+ ror.l &1,%d2
+ and.l &0x80000000,%d2
+ eor.l %d1,%d2
+ and.l &0x80000000,%d2
+
+ fadd.d SINA6(%pc),%fp1 # A6+SA7
+ fadd.d COSB7(%pc),%fp2 # B7+SB8
+
+ fmul.x %fp0,%fp1 # S(A6+SA7)
+ eor.l %d2,RPRIME(%a6)
+ mov.l (%sp)+,%d2
+ fmul.x %fp0,%fp2 # S(B7+SB8)
+ ror.l &1,%d1
+ and.l &0x80000000,%d1
+ mov.l &0x3F800000,POSNEG1(%a6)
+ eor.l %d1,POSNEG1(%a6)
+
+ fadd.d SINA5(%pc),%fp1 # A5+S(A6+SA7)
+ fadd.d COSB6(%pc),%fp2 # B6+S(B7+SB8)
+
+ fmul.x %fp0,%fp1 # S(A5+S(A6+SA7))
+ fmul.x %fp0,%fp2 # S(B6+S(B7+SB8))
+ fmov.x %fp0,SPRIME(%a6)
+
+ fadd.d SINA4(%pc),%fp1 # A4+S(A5+S(A6+SA7))
+ eor.l %d1,SPRIME(%a6)
+ fadd.d COSB5(%pc),%fp2 # B5+S(B6+S(B7+SB8))
+
+ fmul.x %fp0,%fp1 # S(A4+...)
+ fmul.x %fp0,%fp2 # S(B5+...)
+
+ fadd.d SINA3(%pc),%fp1 # A3+S(A4+...)
+ fadd.d COSB4(%pc),%fp2 # B4+S(B5+...)
+
+ fmul.x %fp0,%fp1 # S(A3+...)
+ fmul.x %fp0,%fp2 # S(B4+...)
+
+ fadd.x SINA2(%pc),%fp1 # A2+S(A3+...)
+ fadd.x COSB3(%pc),%fp2 # B3+S(B4+...)
+
+ fmul.x %fp0,%fp1 # S(A2+...)
+ fmul.x %fp0,%fp2 # S(B3+...)
+
+ fadd.x SINA1(%pc),%fp1 # A1+S(A2+...)
+ fadd.x COSB2(%pc),%fp2 # B2+S(B3+...)
+
+ fmul.x %fp0,%fp1 # S(A1+...)
+ fmul.x %fp2,%fp0 # S(B2+...)
+
+ fmul.x RPRIME(%a6),%fp1 # R'S(A1+...)
+ fadd.s COSB1(%pc),%fp0 # B1+S(B2...)
+ fmul.x SPRIME(%a6),%fp0 # S'(B1+S(B2+...))
+
+ fmovm.x (%sp)+,&0x20 # restore fp2
+
+ fmov.l %d0,%fpcr
+ fadd.x RPRIME(%a6),%fp1 # COS(X)
+ bsr sto_cos # store cosine result
+ fadd.s POSNEG1(%a6),%fp0 # SIN(X)
+ bra t_inx2
+
+NEVEN:
+#--REGISTERS SAVED SO FAR: FP2.
+ fmovm.x &0x04,-(%sp) # save fp2
+
+ fmov.x %fp0,RPRIME(%a6)
+ fmul.x %fp0,%fp0 # FP0 IS S = R*R
+
+ fmov.d COSB8(%pc),%fp1 # B8
+ fmov.d SINA7(%pc),%fp2 # A7
+
+ fmul.x %fp0,%fp1 # SB8
+ fmov.x %fp0,SPRIME(%a6)
+ fmul.x %fp0,%fp2 # SA7
+
+ ror.l &1,%d1
+ and.l &0x80000000,%d1
+
+ fadd.d COSB7(%pc),%fp1 # B7+SB8
+ fadd.d SINA6(%pc),%fp2 # A6+SA7
+
+ eor.l %d1,RPRIME(%a6)
+ eor.l %d1,SPRIME(%a6)
+
+ fmul.x %fp0,%fp1 # S(B7+SB8)
+
+ or.l &0x3F800000,%d1
+ mov.l %d1,POSNEG1(%a6)
+
+ fmul.x %fp0,%fp2 # S(A6+SA7)
+
+ fadd.d COSB6(%pc),%fp1 # B6+S(B7+SB8)
+ fadd.d SINA5(%pc),%fp2 # A5+S(A6+SA7)
+
+ fmul.x %fp0,%fp1 # S(B6+S(B7+SB8))
+ fmul.x %fp0,%fp2 # S(A5+S(A6+SA7))
+
+ fadd.d COSB5(%pc),%fp1 # B5+S(B6+S(B7+SB8))
+ fadd.d SINA4(%pc),%fp2 # A4+S(A5+S(A6+SA7))
+
+ fmul.x %fp0,%fp1 # S(B5+...)
+ fmul.x %fp0,%fp2 # S(A4+...)
+
+ fadd.d COSB4(%pc),%fp1 # B4+S(B5+...)
+ fadd.d SINA3(%pc),%fp2 # A3+S(A4+...)
+
+ fmul.x %fp0,%fp1 # S(B4+...)
+ fmul.x %fp0,%fp2 # S(A3+...)
+
+ fadd.x COSB3(%pc),%fp1 # B3+S(B4+...)
+ fadd.x SINA2(%pc),%fp2 # A2+S(A3+...)
+
+ fmul.x %fp0,%fp1 # S(B3+...)
+ fmul.x %fp0,%fp2 # S(A2+...)
+
+ fadd.x COSB2(%pc),%fp1 # B2+S(B3+...)
+ fadd.x SINA1(%pc),%fp2 # A1+S(A2+...)
+
+ fmul.x %fp0,%fp1 # S(B2+...)
+	fmul.x		%fp2,%fp0		# S(A1+...)
+
+ fadd.s COSB1(%pc),%fp1 # B1+S(B2...)
+ fmul.x RPRIME(%a6),%fp0 # R'S(A1+...)
+ fmul.x SPRIME(%a6),%fp1 # S'(B1+S(B2+...))
+
+ fmovm.x (%sp)+,&0x20 # restore fp2
+
+ fmov.l %d0,%fpcr
+ fadd.s POSNEG1(%a6),%fp1 # COS(X)
+ bsr sto_cos # store cosine result
+ fadd.x RPRIME(%a6),%fp0 # SIN(X)
+ bra t_inx2
+
+################################################
+
+SCBORS:
+ cmp.l %d1,&0x3FFF8000
+ bgt.w SREDUCEX
+
+################################################
+
+SCSM:
+# mov.w &0x0000,XDCARE(%a6)
+ fmov.s &0x3F800000,%fp1
+
+ fmov.l %d0,%fpcr
+ fsub.s &0x00800000,%fp1
+ bsr sto_cos # store cosine result
+	fmov.l		%fpcr,%d0		# d0 must have fpcr, too
+ mov.b &FMOV_OP,%d1 # last inst is MOVE
+ fmov.x X(%a6),%fp0
+ bra t_catch
+
+##############################################
+
+ global ssincosd
+#--SIN AND COS OF X FOR DENORMALIZED X
+ssincosd:
+ mov.l %d0,-(%sp) # save d0
+ fmov.s &0x3F800000,%fp1
+ bsr sto_cos # store cosine result
+ mov.l (%sp)+,%d0 # restore d0
+ bra t_extdnrm
+
+############################################
+
+#--WHEN REDUCEX IS USED, THE CODE WILL INEVITABLY BE SLOW.
+#--THIS REDUCTION METHOD, HOWEVER, IS MUCH FASTER THAN USING
+#--THE REMAINDER INSTRUCTION WHICH IS NOW IN SOFTWARE.
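+#
+# One step of the reduction below, sketched in C (illustrative only;
+# the code keeps the remainder as a 66-bit (R,r) pair in fp0/fp1 and
+# scales by 2**L so that the quotient N fits in 29 bits):
+#
+#	n  = nearbyintl(x * TWOBYPI * exp2l(-L)); /* N < 2**29        */
+#	w  = n * piby2_hi * exp2l(L);   /* top 32 bits of PI/2, exact */
+#	wl = n * piby2_lo * exp2l(L);   /* next bits of PI/2          */
+#	x  = (x - w) - wl;              /* new, smaller remainder     */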
+SREDUCEX:
+ fmovm.x &0x3c,-(%sp) # save {fp2-fp5}
+ mov.l %d2,-(%sp) # save d2
+ fmov.s &0x00000000,%fp1 # fp1 = 0
+
+#--If compact form of abs(arg) in d1=$7ffeffff, argument is so large that
+#--there is a danger of unwanted overflow in first LOOP iteration. In this
+#--case, reduce argument by one remainder step to make subsequent reduction
+#--safe.
+ cmp.l %d1,&0x7ffeffff # is arg dangerously large?
+ bne.b SLOOP # no
+
+# yes; create 2**16383*PI/2
+ mov.w &0x7ffe,FP_SCR0_EX(%a6)
+ mov.l &0xc90fdaa2,FP_SCR0_HI(%a6)
+ clr.l FP_SCR0_LO(%a6)
+
+# create low half of 2**16383*PI/2 at FP_SCR1
+ mov.w &0x7fdc,FP_SCR1_EX(%a6)
+ mov.l &0x85a308d3,FP_SCR1_HI(%a6)
+ clr.l FP_SCR1_LO(%a6)
+
+ ftest.x %fp0 # test sign of argument
+ fblt.w sred_neg
+
+ or.b &0x80,FP_SCR0_EX(%a6) # positive arg
+ or.b &0x80,FP_SCR1_EX(%a6)
+sred_neg:
+ fadd.x FP_SCR0(%a6),%fp0 # high part of reduction is exact
+ fmov.x %fp0,%fp1 # save high result in fp1
+ fadd.x FP_SCR1(%a6),%fp0 # low part of reduction
+ fsub.x %fp0,%fp1 # determine low component of result
+ fadd.x FP_SCR1(%a6),%fp1 # fp0/fp1 are reduced argument.
+
+#--ON ENTRY, FP0 IS X, ON RETURN, FP0 IS X REM PI/2, |X| <= PI/4.
+#--integer quotient will be stored in N
+#--Intermediate remainder is 66 bits long; (R,r) in (FP0,FP1)
+SLOOP:
+ fmov.x %fp0,INARG(%a6) # +-2**K * F, 1 <= F < 2
+ mov.w INARG(%a6),%d1
+	mov.l		%d1,%a1			# save a copy of d1
+	and.l		&0x00007FFF,%d1
+	sub.l		&0x00003FFF,%d1		# d1 = K
+ cmp.l %d1,&28
+ ble.b SLASTLOOP
+SCONTLOOP:
+	sub.l		&27,%d1			# d1 = L := K-27
+ mov.b &0,ENDFLAG(%a6)
+ bra.b SWORK
+SLASTLOOP:
+	clr.l		%d1			# d1 = L := 0
+ mov.b &1,ENDFLAG(%a6)
+
+SWORK:
+#--FIND THE REMAINDER OF (R,r) W.R.T. 2**L * (PI/2). L IS SO CHOSEN
+#--THAT INT( X * (2/PI) / 2**(L) ) < 2**29.
+
+#--CREATE 2**(-L) * (2/PI), SIGN(INARG)*2**(63),
+#--2**L * (PIby2_1), 2**L * (PIby2_2)
+
+ mov.l &0x00003FFE,%d2 # BIASED EXP OF 2/PI
+ sub.l %d1,%d2 # BIASED EXP OF 2**(-L)*(2/PI)
+
+ mov.l &0xA2F9836E,FP_SCR0_HI(%a6)
+ mov.l &0x4E44152A,FP_SCR0_LO(%a6)
+ mov.w %d2,FP_SCR0_EX(%a6) # FP_SCR0 = 2**(-L)*(2/PI)
+
+ fmov.x %fp0,%fp2
+ fmul.x FP_SCR0(%a6),%fp2 # fp2 = X * 2**(-L)*(2/PI)
+
+#--WE MUST NOW FIND INT(FP2). SINCE WE NEED THIS VALUE IN
+#--FLOATING POINT FORMAT, THE TWO FMOVE.L'S (FP <--> INT)
+#--WILL BE TOO INEFFICIENT. THE WAY AROUND IT IS THAT
+#--(SIGN(INARG)*2**63 + FP2) - SIGN(INARG)*2**63 WILL GIVE
+#--US THE DESIRED VALUE IN FLOATING POINT.
+ mov.l %a1,%d2
+ swap %d2
+ and.l &0x80000000,%d2
+ or.l &0x5F000000,%d2 # d2 = SIGN(INARG)*2**63 IN SGL
+ mov.l %d2,TWOTO63(%a6)
+	fadd.s		TWOTO63(%a6),%fp2	# THE FRACTIONAL PART OF FP2 IS ROUNDED
+ fsub.s TWOTO63(%a6),%fp2 # fp2 = N
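+# (Illustrative C for the trick above: with a 64-bit mantissa, adding
+# and then subtracting a like-signed 2**63 rounds away all fractional
+# bits, because |fp2| < 2**29 here:
+#	long double n = (x + sign * 0x1p63L) - sign * 0x1p63L;
+# which yields rint(x) without the fmove.l round trip.)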
+# fint.x %fp2
+
+#--CREATING 2**(L)*Piby2_1 and 2**(L)*Piby2_2
+ mov.l %d1,%d2 # d2 = L
+
+ add.l &0x00003FFF,%d2 # BIASED EXP OF 2**L * (PI/2)
+ mov.w %d2,FP_SCR0_EX(%a6)
+ mov.l &0xC90FDAA2,FP_SCR0_HI(%a6)
+ clr.l FP_SCR0_LO(%a6) # FP_SCR0 = 2**(L) * Piby2_1
+
+ add.l &0x00003FDD,%d1
+ mov.w %d1,FP_SCR1_EX(%a6)
+ mov.l &0x85A308D3,FP_SCR1_HI(%a6)
+ clr.l FP_SCR1_LO(%a6) # FP_SCR1 = 2**(L) * Piby2_2
+
+ mov.b ENDFLAG(%a6),%d1
+
+#--We are now ready to perform (R+r) - N*P1 - N*P2, P1 = 2**(L) * Piby2_1 and
+#--P2 = 2**(L) * Piby2_2
+ fmov.x %fp2,%fp4 # fp4 = N
+ fmul.x FP_SCR0(%a6),%fp4 # fp4 = W = N*P1
+ fmov.x %fp2,%fp5 # fp5 = N
+ fmul.x FP_SCR1(%a6),%fp5 # fp5 = w = N*P2
+ fmov.x %fp4,%fp3 # fp3 = W = N*P1
+
+#--we want P+p = W+w but |p| <= half ulp of P
+#--Then, we need to compute A := R-P and a := r-p
+ fadd.x %fp5,%fp3 # fp3 = P
+ fsub.x %fp3,%fp4 # fp4 = W-P
+
+ fsub.x %fp3,%fp0 # fp0 = A := R - P
+ fadd.x %fp5,%fp4 # fp4 = p = (W-P)+w
+
+ fmov.x %fp0,%fp3 # fp3 = A
+ fsub.x %fp4,%fp1 # fp1 = a := r - p
+
+#--Now we need to normalize (A,a) to "new (R,r)" where R+r = A+a but
+#--|r| <= half ulp of R.
+ fadd.x %fp1,%fp0 # fp0 = R := A+a
+#--No need to calculate r if this is the last loop
+ cmp.b %d1,&0
+ bgt.w SRESTORE
+
+#--Need to calculate r
+ fsub.x %fp0,%fp3 # fp3 = A-R
+ fadd.x %fp3,%fp1 # fp1 = r := (A-R)+a
+ bra.w SLOOP
+
+SRESTORE:
+ fmov.l %fp2,INT(%a6)
+ mov.l (%sp)+,%d2 # restore d2
+ fmovm.x (%sp)+,&0x3c # restore {fp2-fp5}
+
+ mov.l ADJN(%a6),%d1
+ cmp.l %d1,&4
+
+ blt.w SINCONT
+ bra.w SCCONT
+
+#########################################################################
+# stan(): computes the tangent of a normalized input #
+# stand(): computes the tangent of a denormalized input #
+# #
+# INPUT *************************************************************** #
+# a0 = pointer to extended precision input #
+# d0 = round precision,mode #
+# #
+# OUTPUT ************************************************************** #
+# fp0 = tan(X) #
+# #
+# ACCURACY and MONOTONICITY ******************************************* #
+#	The returned result is within 3 ulps in 64 significant bits, i.e.	#
+# within 0.5001 ulp to 53 bits if the result is subsequently #
+# rounded to double precision. The result is provably monotonic #
+# in double precision. #
+# #
+# ALGORITHM *********************************************************** #
+# #
+# 1. If |X| >= 15Pi or |X| < 2**(-40), go to 6. #
+# #
+# 2. Decompose X as X = N(Pi/2) + r where |r| <= Pi/4. Let #
+# k = N mod 2, so in particular, k = 0 or 1. #
+# #
+# 3. If k is odd, go to 5. #
+# #
+# 4. (k is even) Tan(X) = tan(r) and tan(r) is approximated by a #
+# rational function U/V where #
+# U = r + r*s*(P1 + s*(P2 + s*P3)), and #
+# V = 1 + s*(Q1 + s*(Q2 + s*(Q3 + s*Q4))), s = r*r. #
+# Exit. #
+# #
+#	5. (k is odd) Tan(X) = -cot(r). Since tan(r) is approximated by	#
+# a rational function U/V where #
+# U = r + r*s*(P1 + s*(P2 + s*P3)), and #
+# V = 1 + s*(Q1 + s*(Q2 + s*(Q3 + s*Q4))), s = r*r, #
+# -Cot(r) = -V/U. Exit. #
+# #
+# 6. If |X| > 1, go to 8. #
+# #
+# 7. (|X|<2**(-40)) Tan(X) = X. Exit. #
+# #
+# 8. Overwrite X by X := X rem 2Pi. Now that |X| <= Pi, go back #
+# to 2. #
+# #
+#########################################################################
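+
+# A rough C model of the algorithm above (illustrative only; P1-P3 and
+# Q1-Q4 stand for the extended-precision coefficients tabulated below):
+#
+#	#include <math.h>
+#	extern const double P1, P2, P3, Q1, Q2, Q3, Q4;
+#	double tan_sketch(double x)
+#	{
+#		int n = (int)nearbyint(x * 2.0 / M_PI);
+#		double r = x - n * (M_PI / 2.0);         /* |r| <= Pi/4 */
+#		double s = r * r;
+#		double u = r + r*s*(P1 + s*(P2 + s*P3));
+#		double v = 1.0 + s*(Q1 + s*(Q2 + s*(Q3 + s*Q4)));
+#		return (n & 1) ? -v / u : u / v; /* odd N: tan = -cot(r) */
+#	}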
+
+TANQ4:
+ long 0x3EA0B759,0xF50F8688
+TANP3:
+ long 0xBEF2BAA5,0xA8924F04
+
+TANQ3:
+ long 0xBF346F59,0xB39BA65F,0x00000000,0x00000000
+
+TANP2:
+ long 0x3FF60000,0xE073D3FC,0x199C4A00,0x00000000
+
+TANQ2:
+ long 0x3FF90000,0xD23CD684,0x15D95FA1,0x00000000
+
+TANP1:
+ long 0xBFFC0000,0x8895A6C5,0xFB423BCA,0x00000000
+
+TANQ1:
+ long 0xBFFD0000,0xEEF57E0D,0xA84BC8CE,0x00000000
+
+INVTWOPI:
+ long 0x3FFC0000,0xA2F9836E,0x4E44152A,0x00000000
+
+TWOPI1:
+ long 0x40010000,0xC90FDAA2,0x00000000,0x00000000
+TWOPI2:
+ long 0x3FDF0000,0x85A308D4,0x00000000,0x00000000
+
+#--N*PI/2, -32 <= N <= 32, IN A LEADING TERM IN EXT. AND TRAILING
+#--TERM IN SGL. NOTE THAT PI IS 64 BITS LONG, THUS N*PI/2 IS AT
+#--MOST 69 BITS LONG.
+# global PITBL
+PITBL:
+ long 0xC0040000,0xC90FDAA2,0x2168C235,0x21800000
+ long 0xC0040000,0xC2C75BCD,0x105D7C23,0xA0D00000
+ long 0xC0040000,0xBC7EDCF7,0xFF523611,0xA1E80000
+ long 0xC0040000,0xB6365E22,0xEE46F000,0x21480000
+ long 0xC0040000,0xAFEDDF4D,0xDD3BA9EE,0xA1200000
+ long 0xC0040000,0xA9A56078,0xCC3063DD,0x21FC0000
+ long 0xC0040000,0xA35CE1A3,0xBB251DCB,0x21100000
+ long 0xC0040000,0x9D1462CE,0xAA19D7B9,0xA1580000
+ long 0xC0040000,0x96CBE3F9,0x990E91A8,0x21E00000
+ long 0xC0040000,0x90836524,0x88034B96,0x20B00000
+ long 0xC0040000,0x8A3AE64F,0x76F80584,0xA1880000
+ long 0xC0040000,0x83F2677A,0x65ECBF73,0x21C40000
+ long 0xC0030000,0xFB53D14A,0xA9C2F2C2,0x20000000
+ long 0xC0030000,0xEEC2D3A0,0x87AC669F,0x21380000
+ long 0xC0030000,0xE231D5F6,0x6595DA7B,0xA1300000
+ long 0xC0030000,0xD5A0D84C,0x437F4E58,0x9FC00000
+ long 0xC0030000,0xC90FDAA2,0x2168C235,0x21000000
+ long 0xC0030000,0xBC7EDCF7,0xFF523611,0xA1680000
+ long 0xC0030000,0xAFEDDF4D,0xDD3BA9EE,0xA0A00000
+ long 0xC0030000,0xA35CE1A3,0xBB251DCB,0x20900000
+ long 0xC0030000,0x96CBE3F9,0x990E91A8,0x21600000
+ long 0xC0030000,0x8A3AE64F,0x76F80584,0xA1080000
+ long 0xC0020000,0xFB53D14A,0xA9C2F2C2,0x1F800000
+ long 0xC0020000,0xE231D5F6,0x6595DA7B,0xA0B00000
+ long 0xC0020000,0xC90FDAA2,0x2168C235,0x20800000
+ long 0xC0020000,0xAFEDDF4D,0xDD3BA9EE,0xA0200000
+ long 0xC0020000,0x96CBE3F9,0x990E91A8,0x20E00000
+ long 0xC0010000,0xFB53D14A,0xA9C2F2C2,0x1F000000
+ long 0xC0010000,0xC90FDAA2,0x2168C235,0x20000000
+ long 0xC0010000,0x96CBE3F9,0x990E91A8,0x20600000
+ long 0xC0000000,0xC90FDAA2,0x2168C235,0x1F800000
+ long 0xBFFF0000,0xC90FDAA2,0x2168C235,0x1F000000
+ long 0x00000000,0x00000000,0x00000000,0x00000000
+ long 0x3FFF0000,0xC90FDAA2,0x2168C235,0x9F000000
+ long 0x40000000,0xC90FDAA2,0x2168C235,0x9F800000
+ long 0x40010000,0x96CBE3F9,0x990E91A8,0xA0600000
+ long 0x40010000,0xC90FDAA2,0x2168C235,0xA0000000
+ long 0x40010000,0xFB53D14A,0xA9C2F2C2,0x9F000000
+ long 0x40020000,0x96CBE3F9,0x990E91A8,0xA0E00000
+ long 0x40020000,0xAFEDDF4D,0xDD3BA9EE,0x20200000
+ long 0x40020000,0xC90FDAA2,0x2168C235,0xA0800000
+ long 0x40020000,0xE231D5F6,0x6595DA7B,0x20B00000
+ long 0x40020000,0xFB53D14A,0xA9C2F2C2,0x9F800000
+ long 0x40030000,0x8A3AE64F,0x76F80584,0x21080000
+ long 0x40030000,0x96CBE3F9,0x990E91A8,0xA1600000
+ long 0x40030000,0xA35CE1A3,0xBB251DCB,0xA0900000
+ long 0x40030000,0xAFEDDF4D,0xDD3BA9EE,0x20A00000
+ long 0x40030000,0xBC7EDCF7,0xFF523611,0x21680000
+ long 0x40030000,0xC90FDAA2,0x2168C235,0xA1000000
+ long 0x40030000,0xD5A0D84C,0x437F4E58,0x1FC00000
+ long 0x40030000,0xE231D5F6,0x6595DA7B,0x21300000
+ long 0x40030000,0xEEC2D3A0,0x87AC669F,0xA1380000
+ long 0x40030000,0xFB53D14A,0xA9C2F2C2,0xA0000000
+ long 0x40040000,0x83F2677A,0x65ECBF73,0xA1C40000
+ long 0x40040000,0x8A3AE64F,0x76F80584,0x21880000
+ long 0x40040000,0x90836524,0x88034B96,0xA0B00000
+ long 0x40040000,0x96CBE3F9,0x990E91A8,0xA1E00000
+ long 0x40040000,0x9D1462CE,0xAA19D7B9,0x21580000
+ long 0x40040000,0xA35CE1A3,0xBB251DCB,0xA1100000
+ long 0x40040000,0xA9A56078,0xCC3063DD,0xA1FC0000
+ long 0x40040000,0xAFEDDF4D,0xDD3BA9EE,0x21200000
+ long 0x40040000,0xB6365E22,0xEE46F000,0xA1480000
+ long 0x40040000,0xBC7EDCF7,0xFF523611,0x21E80000
+ long 0x40040000,0xC2C75BCD,0x105D7C23,0x20D00000
+ long 0x40040000,0xC90FDAA2,0x2168C235,0xA1800000
+
+ set INARG,FP_SCR0
+
+ set TWOTO63,L_SCR1
+ set INT,L_SCR1
+ set ENDFLAG,L_SCR2
+
+ global stan
+stan:
+ fmov.x (%a0),%fp0 # LOAD INPUT
+
+ mov.l (%a0),%d1
+ mov.w 4(%a0),%d1
+ and.l &0x7FFFFFFF,%d1
+
+ cmp.l %d1,&0x3FD78000 # |X| >= 2**(-40)?
+ bge.b TANOK1
+ bra.w TANSM
+TANOK1:
+ cmp.l %d1,&0x4004BC7E # |X| < 15 PI?
+ blt.b TANMAIN
+ bra.w REDUCEX
+
+TANMAIN:
+#--THIS IS THE USUAL CASE, |X| <= 15 PI.
+#--THE ARGUMENT REDUCTION IS DONE BY TABLE LOOK UP.
+ fmov.x %fp0,%fp1
+ fmul.d TWOBYPI(%pc),%fp1 # X*2/PI
+
+ lea.l PITBL+0x200(%pc),%a1 # TABLE OF N*PI/2, N = -32,...,32
+
+ fmov.l %fp1,%d1 # CONVERT TO INTEGER
+
+ asl.l &4,%d1
+ add.l %d1,%a1 # ADDRESS N*PIBY2 IN Y1, Y2
+
+ fsub.x (%a1)+,%fp0 # X-Y1
+
+ fsub.s (%a1),%fp0 # FP0 IS R = (X-Y1)-Y2
+
+ ror.l &5,%d1
+	and.l		&0x80000000,%d1		# D1 WAS ODD IFF D1 < 0
+
+TANCONT:
+ fmovm.x &0x0c,-(%sp) # save fp2,fp3
+
+ cmp.l %d1,&0
+ blt.w NODD
+
+ fmov.x %fp0,%fp1
+ fmul.x %fp1,%fp1 # S = R*R
+
+ fmov.d TANQ4(%pc),%fp3
+ fmov.d TANP3(%pc),%fp2
+
+ fmul.x %fp1,%fp3 # SQ4
+ fmul.x %fp1,%fp2 # SP3
+
+ fadd.d TANQ3(%pc),%fp3 # Q3+SQ4
+ fadd.x TANP2(%pc),%fp2 # P2+SP3
+
+ fmul.x %fp1,%fp3 # S(Q3+SQ4)
+ fmul.x %fp1,%fp2 # S(P2+SP3)
+
+ fadd.x TANQ2(%pc),%fp3 # Q2+S(Q3+SQ4)
+ fadd.x TANP1(%pc),%fp2 # P1+S(P2+SP3)
+
+ fmul.x %fp1,%fp3 # S(Q2+S(Q3+SQ4))
+ fmul.x %fp1,%fp2 # S(P1+S(P2+SP3))
+
+ fadd.x TANQ1(%pc),%fp3 # Q1+S(Q2+S(Q3+SQ4))
+ fmul.x %fp0,%fp2 # RS(P1+S(P2+SP3))
+
+ fmul.x %fp3,%fp1 # S(Q1+S(Q2+S(Q3+SQ4)))
+
+ fadd.x %fp2,%fp0 # R+RS(P1+S(P2+SP3))
+
+ fadd.s &0x3F800000,%fp1 # 1+S(Q1+...)
+
+ fmovm.x (%sp)+,&0x30 # restore fp2,fp3
+
+	fmov.l		%d0,%fpcr		# restore user's round mode,prec
+ fdiv.x %fp1,%fp0 # last inst - possible exception set
+ bra t_inx2
+
+NODD:
+ fmov.x %fp0,%fp1
+ fmul.x %fp0,%fp0 # S = R*R
+
+ fmov.d TANQ4(%pc),%fp3
+ fmov.d TANP3(%pc),%fp2
+
+ fmul.x %fp0,%fp3 # SQ4
+ fmul.x %fp0,%fp2 # SP3
+
+ fadd.d TANQ3(%pc),%fp3 # Q3+SQ4
+ fadd.x TANP2(%pc),%fp2 # P2+SP3
+
+ fmul.x %fp0,%fp3 # S(Q3+SQ4)
+ fmul.x %fp0,%fp2 # S(P2+SP3)
+
+ fadd.x TANQ2(%pc),%fp3 # Q2+S(Q3+SQ4)
+ fadd.x TANP1(%pc),%fp2 # P1+S(P2+SP3)
+
+ fmul.x %fp0,%fp3 # S(Q2+S(Q3+SQ4))
+ fmul.x %fp0,%fp2 # S(P1+S(P2+SP3))
+
+ fadd.x TANQ1(%pc),%fp3 # Q1+S(Q2+S(Q3+SQ4))
+ fmul.x %fp1,%fp2 # RS(P1+S(P2+SP3))
+
+ fmul.x %fp3,%fp0 # S(Q1+S(Q2+S(Q3+SQ4)))
+
+ fadd.x %fp2,%fp1 # R+RS(P1+S(P2+SP3))
+ fadd.s &0x3F800000,%fp0 # 1+S(Q1+...)
+
+ fmovm.x (%sp)+,&0x30 # restore fp2,fp3
+
+ fmov.x %fp1,-(%sp)
+ eor.l &0x80000000,(%sp)
+
+	fmov.l		%d0,%fpcr		# restore user's round mode,prec
+ fdiv.x (%sp)+,%fp0 # last inst - possible exception set
+ bra t_inx2
+
+TANBORS:
+#--IF |X| > 15PI, WE USE THE GENERAL ARGUMENT REDUCTION.
+#--IF |X| < 2**(-40), RETURN X.
+ cmp.l %d1,&0x3FFF8000
+ bgt.b REDUCEX
+
+TANSM:
+ fmov.x %fp0,-(%sp)
+	fmov.l		%d0,%fpcr		# restore user's round mode,prec
+	mov.b		&FMOV_OP,%d1		# last inst is MOVE
+	fmov.x		(%sp)+,%fp0		# last inst - possible exception set
+ bra t_catch
+
+ global stand
+#--TAN(X) = X FOR DENORMALIZED X
+stand:
+ bra t_extdnrm
+
+#--WHEN REDUCEX IS USED, THE CODE WILL INEVITABLY BE SLOW.
+#--THIS REDUCTION METHOD, HOWEVER, IS MUCH FASTER THAN USING
+#--THE REMAINDER INSTRUCTION WHICH IS NOW IN SOFTWARE.
+REDUCEX:
+ fmovm.x &0x3c,-(%sp) # save {fp2-fp5}
+ mov.l %d2,-(%sp) # save d2
+ fmov.s &0x00000000,%fp1 # fp1 = 0
+
+#--If compact form of abs(arg) in d1=$7ffeffff, argument is so large that
+#--there is a danger of unwanted overflow in first LOOP iteration. In this
+#--case, reduce argument by one remainder step to make subsequent reduction
+#--safe.
+ cmp.l %d1,&0x7ffeffff # is arg dangerously large?
+ bne.b LOOP # no
+
+# yes; create 2**16383*PI/2
+ mov.w &0x7ffe,FP_SCR0_EX(%a6)
+ mov.l &0xc90fdaa2,FP_SCR0_HI(%a6)
+ clr.l FP_SCR0_LO(%a6)
+
+# create low half of 2**16383*PI/2 at FP_SCR1
+ mov.w &0x7fdc,FP_SCR1_EX(%a6)
+ mov.l &0x85a308d3,FP_SCR1_HI(%a6)
+ clr.l FP_SCR1_LO(%a6)
+
+ ftest.x %fp0 # test sign of argument
+ fblt.w red_neg
+
+ or.b &0x80,FP_SCR0_EX(%a6) # positive arg
+ or.b &0x80,FP_SCR1_EX(%a6)
+red_neg:
+ fadd.x FP_SCR0(%a6),%fp0 # high part of reduction is exact
+ fmov.x %fp0,%fp1 # save high result in fp1
+ fadd.x FP_SCR1(%a6),%fp0 # low part of reduction
+ fsub.x %fp0,%fp1 # determine low component of result
+ fadd.x FP_SCR1(%a6),%fp1 # fp0/fp1 are reduced argument.
+
+#--ON ENTRY, FP0 IS X, ON RETURN, FP0 IS X REM PI/2, |X| <= PI/4.
+#--integer quotient will be stored in N
+#--Intermediate remainder is 66 bits long; (R,r) in (FP0,FP1)
+LOOP:
+ fmov.x %fp0,INARG(%a6) # +-2**K * F, 1 <= F < 2
+ mov.w INARG(%a6),%d1
+	mov.l		%d1,%a1			# save a copy of d1
+	and.l		&0x00007FFF,%d1
+	sub.l		&0x00003FFF,%d1		# d1 = K
+ cmp.l %d1,&28
+ ble.b LASTLOOP
+CONTLOOP:
+	sub.l		&27,%d1			# d1 = L := K-27
+ mov.b &0,ENDFLAG(%a6)
+ bra.b WORK
+LASTLOOP:
+ clr.l %d1 # d0 = L := 0
+ mov.b &1,ENDFLAG(%a6)
+
+WORK:
+#--FIND THE REMAINDER OF (R,r) W.R.T. 2**L * (PI/2). L IS SO CHOSEN
+#--THAT INT( X * (2/PI) / 2**(L) ) < 2**29.
+
+#--CREATE 2**(-L) * (2/PI), SIGN(INARG)*2**(63),
+#--2**L * (PIby2_1), 2**L * (PIby2_2)
+
+ mov.l &0x00003FFE,%d2 # BIASED EXP OF 2/PI
+ sub.l %d1,%d2 # BIASED EXP OF 2**(-L)*(2/PI)
+
+ mov.l &0xA2F9836E,FP_SCR0_HI(%a6)
+ mov.l &0x4E44152A,FP_SCR0_LO(%a6)
+ mov.w %d2,FP_SCR0_EX(%a6) # FP_SCR0 = 2**(-L)*(2/PI)
+
+ fmov.x %fp0,%fp2
+ fmul.x FP_SCR0(%a6),%fp2 # fp2 = X * 2**(-L)*(2/PI)
+
+#--WE MUST NOW FIND INT(FP2). SINCE WE NEED THIS VALUE IN
+#--FLOATING POINT FORMAT, THE TWO FMOVE'S FMOVE.L FP <--> N
+#--WILL BE TOO INEFFICIENT. THE WAY AROUND IT IS THAT
+#--(SIGN(INARG)*2**63 + FP2) - SIGN(INARG)*2**63 WILL GIVE
+#--US THE DESIRED VALUE IN FLOATING POINT.
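+#--The same trick in C, for illustration (double precision, so 2^52
+#--plays the role that 2^63 plays here for the 64-bit extended
+#--mantissa; assumes round-to-nearest mode):
+#--
+#--	/* round to nearest integer without an int<->float round trip */
+#--	double fast_rint(double x)
+#--	{
+#--		const double two52 = 4503599627370496.0;	/* 2^52 */
+#--		return x >= 0.0 ? (x + two52) - two52
+#--		                : (x - two52) + two52;
+#--	}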
+ mov.l %a1,%d2
+ swap %d2
+ and.l &0x80000000,%d2
+ or.l &0x5F000000,%d2 # d2 = SIGN(INARG)*2**63 IN SGL
+ mov.l %d2,TWOTO63(%a6)
+ fadd.s TWOTO63(%a6),%fp2 # THE FRACTIONAL PART OF FP2 IS ROUNDED
+ fsub.s TWOTO63(%a6),%fp2 # fp2 = N
+# fintrz.x %fp2,%fp2
+
+#--CREATING 2**(L)*Piby2_1 and 2**(L)*Piby2_2
+ mov.l %d1,%d2 # d2 = L
+
+ add.l &0x00003FFF,%d2 # BIASED EXP OF 2**L * (PI/2)
+ mov.w %d2,FP_SCR0_EX(%a6)
+ mov.l &0xC90FDAA2,FP_SCR0_HI(%a6)
+ clr.l FP_SCR0_LO(%a6) # FP_SCR0 = 2**(L) * Piby2_1
+
+ add.l &0x00003FDD,%d1
+ mov.w %d1,FP_SCR1_EX(%a6)
+ mov.l &0x85A308D3,FP_SCR1_HI(%a6)
+ clr.l FP_SCR1_LO(%a6) # FP_SCR1 = 2**(L) * Piby2_2
+
+ mov.b ENDFLAG(%a6),%d1
+
+#--We are now ready to perform (R+r) - N*P1 - N*P2, P1 = 2**(L) * Piby2_1 and
+#--P2 = 2**(L) * Piby2_2
+ fmov.x %fp2,%fp4 # fp4 = N
+ fmul.x FP_SCR0(%a6),%fp4 # fp4 = W = N*P1
+ fmov.x %fp2,%fp5 # fp5 = N
+ fmul.x FP_SCR1(%a6),%fp5 # fp5 = w = N*P2
+ fmov.x %fp4,%fp3 # fp3 = W = N*P1
+
+#--we want P+p = W+w but |p| <= half ulp of P
+#--Then, we need to compute A := R-P and a := r-p
+ fadd.x %fp5,%fp3 # fp3 = P
+ fsub.x %fp3,%fp4 # fp4 = W-P
+
+ fsub.x %fp3,%fp0 # fp0 = A := R - P
+ fadd.x %fp5,%fp4 # fp4 = p = (W-P)+w
+
+ fmov.x %fp0,%fp3 # fp3 = A
+ fsub.x %fp4,%fp1 # fp1 = a := r - p
+
+#--Now we need to normalize (A,a) to "new (R,r)" where R+r = A+a but
+#--|r| <= half ulp of R.
+ fadd.x %fp1,%fp0 # fp0 = R := A+a
+#--No need to calculate r if this is the last loop
+ cmp.b %d1,&0
+ bgt.w RESTORE
+
+#--Need to calculate r
+ fsub.x %fp0,%fp3 # fp3 = A-R
+ fadd.x %fp3,%fp1 # fp1 = r := (A-R)+a
+ bra.w LOOP
+
+RESTORE:
+ fmov.l %fp2,INT(%a6)
+ mov.l (%sp)+,%d2 # restore d2
+ fmovm.x (%sp)+,&0x3c # restore {fp2-fp5}
+
+ mov.l INT(%a6),%d1
+ ror.l &1,%d1
+
+ bra.w TANCONT
+
+#########################################################################
+# satan(): computes the arctangent of a normalized number #
+# satand(): computes the arctangent of a denormalized number #
+# #
+# INPUT *************************************************************** #
+# a0 = pointer to extended precision input #
+# d0 = round precision,mode #
+# #
+# OUTPUT ************************************************************** #
+# fp0 = arctan(X) #
+# #
+# ACCURACY and MONOTONICITY ******************************************* #
+# The returned result is within 2 ulps in 64 significant bits, #
+# i.e. within 0.5001 ulp to 53 bits if the result is subsequently #
+# rounded to double precision. The result is provably monotonic #
+# in double precision. #
+# #
+# ALGORITHM *********************************************************** #
+# Step 1. If |X| >= 16 or |X| < 1/16, go to Step 5. #
+# #
+# Step 2. Let X = sgn * 2**k * 1.xxxxxxxx...x. #
+# Note that k = -4, -3,..., or 3. #
+# Define F = sgn * 2**k * 1.xxxx1, i.e. the first 5 #
+# significant bits of X with a bit-1 attached at the 6-th #
+# bit position. Define u to be u = (X-F) / (1 + X*F). #
+# #
+# Step 3. Approximate arctan(u) by a polynomial poly. #
+# #
+# Step 4. Return arctan(F) + poly, arctan(F) is fetched from a #
+# table of values calculated beforehand. Exit. #
+# #
+# Step 5. If |X| >= 16, go to Step 7. #
+# #
+# Step 6. Approximate arctan(X) by an odd polynomial in X. Exit. #
+# #
+# Step 7. Define X' = -1/X. Approximate arctan(X') by an odd #
+# polynomial in X'. #
+# Arctan(X) = sign(X)*Pi/2 + arctan(X'). Exit. #
+# #
+#########################################################################
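+# What follows is a rough C model of Steps 1-4 above, for illustration
+# only.  pick_f() is a hypothetical helper working on doubles (52-bit
+# fraction, so the masks differ from the extended-precision masks used
+# in the code below), and libm's atan() stands in for the 128-entry
+# ATANTBL lookup:
+#
+#	#include <math.h>
+#	#include <stdint.h>
+#	#include <string.h>
+#
+#	/* F = X truncated to 5 fraction bits, with a 6th bit of 1 */
+#	static double pick_f(double x)
+#	{
+#		uint64_t b;
+#		memcpy(&b, &x, sizeof b);
+#		b &= 0xFFFF800000000000ULL;	/* sign, exp, 5 bits */
+#		b |= 0x0000400000000000ULL;	/* set the 6th bit   */
+#		memcpy(&x, &b, sizeof x);
+#		return x;
+#	}
+#
+#	double atan_by_table(double x)	/* assumes 1/16 <= |x| < 16 */
+#	{
+#		double f = pick_f(x);
+#		double u = (x - f) / (1.0 + x * f);	/* |u| is small */
+#		double v = u * u;
+#		double poly = u + u*v*(-1.0/3 + v*(1.0/5 - v/7));
+#		return atan(f) + poly;	/* atan(f): table in the FPSP */
+#	}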
+
+ATANA3: long 0xBFF6687E,0x314987D8
+ATANA2: long 0x4002AC69,0x34A26DB3
+ATANA1: long 0xBFC2476F,0x4E1DA28E
+
+ATANB6: long 0x3FB34444,0x7F876989
+ATANB5: long 0xBFB744EE,0x7FAF45DB
+ATANB4: long 0x3FBC71C6,0x46940220
+ATANB3: long 0xBFC24924,0x921872F9
+ATANB2: long 0x3FC99999,0x99998FA9
+ATANB1: long 0xBFD55555,0x55555555
+
+ATANC5: long 0xBFB70BF3,0x98539E6A
+ATANC4: long 0x3FBC7187,0x962D1D7D
+ATANC3: long 0xBFC24924,0x827107B8
+ATANC2: long 0x3FC99999,0x9996263E
+ATANC1: long 0xBFD55555,0x55555536
+
+PPIBY2: long 0x3FFF0000,0xC90FDAA2,0x2168C235,0x00000000
+NPIBY2: long 0xBFFF0000,0xC90FDAA2,0x2168C235,0x00000000
+
+PTINY: long 0x00010000,0x80000000,0x00000000,0x00000000
+NTINY: long 0x80010000,0x80000000,0x00000000,0x00000000
+
+ATANTBL:
+ long 0x3FFB0000,0x83D152C5,0x060B7A51,0x00000000
+ long 0x3FFB0000,0x8BC85445,0x65498B8B,0x00000000
+ long 0x3FFB0000,0x93BE4060,0x17626B0D,0x00000000
+ long 0x3FFB0000,0x9BB3078D,0x35AEC202,0x00000000
+ long 0x3FFB0000,0xA3A69A52,0x5DDCE7DE,0x00000000
+ long 0x3FFB0000,0xAB98E943,0x62765619,0x00000000
+ long 0x3FFB0000,0xB389E502,0xF9C59862,0x00000000
+ long 0x3FFB0000,0xBB797E43,0x6B09E6FB,0x00000000
+ long 0x3FFB0000,0xC367A5C7,0x39E5F446,0x00000000
+ long 0x3FFB0000,0xCB544C61,0xCFF7D5C6,0x00000000
+ long 0x3FFB0000,0xD33F62F8,0x2488533E,0x00000000
+ long 0x3FFB0000,0xDB28DA81,0x62404C77,0x00000000
+ long 0x3FFB0000,0xE310A407,0x8AD34F18,0x00000000
+ long 0x3FFB0000,0xEAF6B0A8,0x188EE1EB,0x00000000
+ long 0x3FFB0000,0xF2DAF194,0x9DBE79D5,0x00000000
+ long 0x3FFB0000,0xFABD5813,0x61D47E3E,0x00000000
+ long 0x3FFC0000,0x8346AC21,0x0959ECC4,0x00000000
+ long 0x3FFC0000,0x8B232A08,0x304282D8,0x00000000
+ long 0x3FFC0000,0x92FB70B8,0xD29AE2F9,0x00000000
+ long 0x3FFC0000,0x9ACF476F,0x5CCD1CB4,0x00000000
+ long 0x3FFC0000,0xA29E7630,0x4954F23F,0x00000000
+ long 0x3FFC0000,0xAA68C5D0,0x8AB85230,0x00000000
+ long 0x3FFC0000,0xB22DFFFD,0x9D539F83,0x00000000
+ long 0x3FFC0000,0xB9EDEF45,0x3E900EA5,0x00000000
+ long 0x3FFC0000,0xC1A85F1C,0xC75E3EA5,0x00000000
+ long 0x3FFC0000,0xC95D1BE8,0x28138DE6,0x00000000
+ long 0x3FFC0000,0xD10BF300,0x840D2DE4,0x00000000
+ long 0x3FFC0000,0xD8B4B2BA,0x6BC05E7A,0x00000000
+ long 0x3FFC0000,0xE0572A6B,0xB42335F6,0x00000000
+ long 0x3FFC0000,0xE7F32A70,0xEA9CAA8F,0x00000000
+ long 0x3FFC0000,0xEF888432,0x64ECEFAA,0x00000000
+ long 0x3FFC0000,0xF7170A28,0xECC06666,0x00000000
+ long 0x3FFD0000,0x812FD288,0x332DAD32,0x00000000
+ long 0x3FFD0000,0x88A8D1B1,0x218E4D64,0x00000000
+ long 0x3FFD0000,0x9012AB3F,0x23E4AEE8,0x00000000
+ long 0x3FFD0000,0x976CC3D4,0x11E7F1B9,0x00000000
+ long 0x3FFD0000,0x9EB68949,0x3889A227,0x00000000
+ long 0x3FFD0000,0xA5EF72C3,0x4487361B,0x00000000
+ long 0x3FFD0000,0xAD1700BA,0xF07A7227,0x00000000
+ long 0x3FFD0000,0xB42CBCFA,0xFD37EFB7,0x00000000
+ long 0x3FFD0000,0xBB303A94,0x0BA80F89,0x00000000
+ long 0x3FFD0000,0xC22115C6,0xFCAEBBAF,0x00000000
+ long 0x3FFD0000,0xC8FEF3E6,0x86331221,0x00000000
+ long 0x3FFD0000,0xCFC98330,0xB4000C70,0x00000000
+ long 0x3FFD0000,0xD6807AA1,0x102C5BF9,0x00000000
+ long 0x3FFD0000,0xDD2399BC,0x31252AA3,0x00000000
+ long 0x3FFD0000,0xE3B2A855,0x6B8FC517,0x00000000
+ long 0x3FFD0000,0xEA2D764F,0x64315989,0x00000000
+ long 0x3FFD0000,0xF3BF5BF8,0xBAD1A21D,0x00000000
+ long 0x3FFE0000,0x801CE39E,0x0D205C9A,0x00000000
+ long 0x3FFE0000,0x8630A2DA,0xDA1ED066,0x00000000
+ long 0x3FFE0000,0x8C1AD445,0xF3E09B8C,0x00000000
+ long 0x3FFE0000,0x91DB8F16,0x64F350E2,0x00000000
+ long 0x3FFE0000,0x97731420,0x365E538C,0x00000000
+ long 0x3FFE0000,0x9CE1C8E6,0xA0B8CDBA,0x00000000
+ long 0x3FFE0000,0xA22832DB,0xCADAAE09,0x00000000
+ long 0x3FFE0000,0xA746F2DD,0xB7602294,0x00000000
+ long 0x3FFE0000,0xAC3EC0FB,0x997DD6A2,0x00000000
+ long 0x3FFE0000,0xB110688A,0xEBDC6F6A,0x00000000
+ long 0x3FFE0000,0xB5BCC490,0x59ECC4B0,0x00000000
+ long 0x3FFE0000,0xBA44BC7D,0xD470782F,0x00000000
+ long 0x3FFE0000,0xBEA94144,0xFD049AAC,0x00000000
+ long 0x3FFE0000,0xC2EB4ABB,0x661628B6,0x00000000
+ long 0x3FFE0000,0xC70BD54C,0xE602EE14,0x00000000
+ long 0x3FFE0000,0xCD000549,0xADEC7159,0x00000000
+ long 0x3FFE0000,0xD48457D2,0xD8EA4EA3,0x00000000
+ long 0x3FFE0000,0xDB948DA7,0x12DECE3B,0x00000000
+ long 0x3FFE0000,0xE23855F9,0x69E8096A,0x00000000
+ long 0x3FFE0000,0xE8771129,0xC4353259,0x00000000
+ long 0x3FFE0000,0xEE57C16E,0x0D379C0D,0x00000000
+ long 0x3FFE0000,0xF3E10211,0xA87C3779,0x00000000
+ long 0x3FFE0000,0xF919039D,0x758B8D41,0x00000000
+ long 0x3FFE0000,0xFE058B8F,0x64935FB3,0x00000000
+ long 0x3FFF0000,0x8155FB49,0x7B685D04,0x00000000
+ long 0x3FFF0000,0x83889E35,0x49D108E1,0x00000000
+ long 0x3FFF0000,0x859CFA76,0x511D724B,0x00000000
+ long 0x3FFF0000,0x87952ECF,0xFF8131E7,0x00000000
+ long 0x3FFF0000,0x89732FD1,0x9557641B,0x00000000
+ long 0x3FFF0000,0x8B38CAD1,0x01932A35,0x00000000
+ long 0x3FFF0000,0x8CE7A8D8,0x301EE6B5,0x00000000
+ long 0x3FFF0000,0x8F46A39E,0x2EAE5281,0x00000000
+ long 0x3FFF0000,0x922DA7D7,0x91888487,0x00000000
+ long 0x3FFF0000,0x94D19FCB,0xDEDF5241,0x00000000
+ long 0x3FFF0000,0x973AB944,0x19D2A08B,0x00000000
+ long 0x3FFF0000,0x996FF00E,0x08E10B96,0x00000000
+ long 0x3FFF0000,0x9B773F95,0x12321DA7,0x00000000
+ long 0x3FFF0000,0x9D55CC32,0x0F935624,0x00000000
+ long 0x3FFF0000,0x9F100575,0x006CC571,0x00000000
+ long 0x3FFF0000,0xA0A9C290,0xD97CC06C,0x00000000
+ long 0x3FFF0000,0xA22659EB,0xEBC0630A,0x00000000
+ long 0x3FFF0000,0xA388B4AF,0xF6EF0EC9,0x00000000
+ long 0x3FFF0000,0xA4D35F10,0x61D292C4,0x00000000
+ long 0x3FFF0000,0xA60895DC,0xFBE3187E,0x00000000
+ long 0x3FFF0000,0xA72A51DC,0x7367BEAC,0x00000000
+ long 0x3FFF0000,0xA83A5153,0x0956168F,0x00000000
+ long 0x3FFF0000,0xA93A2007,0x7539546E,0x00000000
+ long 0x3FFF0000,0xAA9E7245,0x023B2605,0x00000000
+ long 0x3FFF0000,0xAC4C84BA,0x6FE4D58F,0x00000000
+ long 0x3FFF0000,0xADCE4A4A,0x606B9712,0x00000000
+ long 0x3FFF0000,0xAF2A2DCD,0x8D263C9C,0x00000000
+ long 0x3FFF0000,0xB0656F81,0xF22265C7,0x00000000
+ long 0x3FFF0000,0xB1846515,0x0F71496A,0x00000000
+ long 0x3FFF0000,0xB28AAA15,0x6F9ADA35,0x00000000
+ long 0x3FFF0000,0xB37B44FF,0x3766B895,0x00000000
+ long 0x3FFF0000,0xB458C3DC,0xE9630433,0x00000000
+ long 0x3FFF0000,0xB525529D,0x562246BD,0x00000000
+ long 0x3FFF0000,0xB5E2CCA9,0x5F9D88CC,0x00000000
+ long 0x3FFF0000,0xB692CADA,0x7ACA1ADA,0x00000000
+ long 0x3FFF0000,0xB736AEA7,0xA6925838,0x00000000
+ long 0x3FFF0000,0xB7CFAB28,0x7E9F7B36,0x00000000
+ long 0x3FFF0000,0xB85ECC66,0xCB219835,0x00000000
+ long 0x3FFF0000,0xB8E4FD5A,0x20A593DA,0x00000000
+ long 0x3FFF0000,0xB99F41F6,0x4AFF9BB5,0x00000000
+ long 0x3FFF0000,0xBA7F1E17,0x842BBE7B,0x00000000
+ long 0x3FFF0000,0xBB471285,0x7637E17D,0x00000000
+ long 0x3FFF0000,0xBBFABE8A,0x4788DF6F,0x00000000
+ long 0x3FFF0000,0xBC9D0FAD,0x2B689D79,0x00000000
+ long 0x3FFF0000,0xBD306A39,0x471ECD86,0x00000000
+ long 0x3FFF0000,0xBDB6C731,0x856AF18A,0x00000000
+ long 0x3FFF0000,0xBE31CAC5,0x02E80D70,0x00000000
+ long 0x3FFF0000,0xBEA2D55C,0xE33194E2,0x00000000
+ long 0x3FFF0000,0xBF0B10B7,0xC03128F0,0x00000000
+ long 0x3FFF0000,0xBF6B7A18,0xDACB778D,0x00000000
+ long 0x3FFF0000,0xBFC4EA46,0x63FA18F6,0x00000000
+ long 0x3FFF0000,0xC0181BDE,0x8B89A454,0x00000000
+ long 0x3FFF0000,0xC065B066,0xCFBF6439,0x00000000
+ long 0x3FFF0000,0xC0AE345F,0x56340AE6,0x00000000
+ long 0x3FFF0000,0xC0F22291,0x9CB9E6A7,0x00000000
+
+ set X,FP_SCR0
+ set XDCARE,X+2
+ set XFRAC,X+4
+ set XFRACLO,X+8
+
+ set ATANF,FP_SCR1
+ set ATANFHI,ATANF+4
+ set ATANFLO,ATANF+8
+
+ global satan
+#--ENTRY POINT FOR ATAN(X), HERE X IS FINITE, NON-ZERO, AND NOT A NAN
+satan:
+ fmov.x (%a0),%fp0 # LOAD INPUT
+
+ mov.l (%a0),%d1
+ mov.w 4(%a0),%d1
+ fmov.x %fp0,X(%a6)
+ and.l &0x7FFFFFFF,%d1
+
+ cmp.l %d1,&0x3FFB8000 # |X| >= 1/16?
+ bge.b ATANOK1
+ bra.w ATANSM
+
+ATANOK1:
+ cmp.l %d1,&0x4002FFFF # |X| < 16 ?
+ ble.b ATANMAIN
+ bra.w ATANBIG
+
+#--THE MOST LIKELY CASE, |X| IN [1/16, 16). WE USE TABLE TECHNIQUE
+#--THE IDEA IS ATAN(X) = ATAN(F) + ATAN( [X-F] / [1+XF] ).
+#--SO IF F IS CHOSEN TO BE CLOSE TO X AND ATAN(F) IS STORED IN
+#--A TABLE, ALL WE NEED IS TO APPROXIMATE ATAN(U) WHERE
+#--U = (X-F)/(1+XF) IS SMALL (REMEMBER F IS CLOSE TO X). IT IS
+#--TRUE THAT A DIVIDE IS NOW NEEDED, BUT THE APPROXIMATION FOR
+#--ATAN(U) IS A VERY SHORT POLYNOMIAL AND THE INDEXING TO
+#--FETCH F AND SAVING OF REGISTERS CAN ALL BE HIDDEN UNDER THE
+#--DIVIDE. IN THE END THIS METHOD IS MUCH FASTER THAN A TRADITIONAL
+#--ONE. NOTE ALSO THAT THE TRADITIONAL SCHEME THAT APPROXIMATES
+#--ATAN(X) DIRECTLY WILL NEED TO USE A RATIONAL APPROXIMATION
+#--(DIVISION NEEDED) ANYWAY BECAUSE A POLYNOMIAL APPROXIMATION
+#--WILL INVOLVE A VERY LONG POLYNOMIAL.
+
+#--NOW WE SEE X AS +-2^K * 1.BBBBBBB....B <- 1. + 63 BITS
+#--WE CHOSE F TO BE +-2^K * 1.BBBB1
+#--THAT IS IT MATCHES THE EXPONENT AND FIRST 5 BITS OF X, THE
+#--SIXTH BIT IS SET TO 1. SINCE K = -4, -3, ..., 3, THERE
+#--ARE ONLY 8 TIMES 16 = 2^7 = 128 |F|'S. SINCE ATAN(-|F|) IS
+#-- -ATAN(|F|), WE NEED TO STORE ONLY ATAN(|F|).
+
+ATANMAIN:
+
+ and.l &0xF8000000,XFRAC(%a6) # FIRST 5 BITS
+ or.l &0x04000000,XFRAC(%a6) # SET 6-TH BIT TO 1
+ mov.l &0x00000000,XFRACLO(%a6) # LOCATION OF X IS NOW F
+
+ fmov.x %fp0,%fp1 # FP1 IS X
+ fmul.x X(%a6),%fp1 # FP1 IS X*F, NOTE THAT X*F > 0
+ fsub.x X(%a6),%fp0 # FP0 IS X-F
+ fadd.s &0x3F800000,%fp1 # FP1 IS 1 + X*F
+ fdiv.x %fp1,%fp0 # FP0 IS U = (X-F)/(1+X*F)
+
+#--WHILE THE DIVISION IS TAKING ITS TIME, WE FETCH ATAN(|F|)
+#--CREATE ATAN(F) AND STORE IT IN ATANF, AND
+#--SAVE REGISTERS FP2.
+
+ mov.l %d2,-(%sp) # SAVE d2 TEMPORARILY
+ mov.l %d1,%d2 # THE EXP AND 16 BITS OF X
+ and.l &0x00007800,%d1 # 4 VARYING BITS OF F'S FRACTION
+ and.l &0x7FFF0000,%d2 # EXPONENT OF F
+ sub.l &0x3FFB0000,%d2 # K+4
+ asr.l &1,%d2
+ add.l %d2,%d1 # THE 7 BITS IDENTIFYING F
+ asr.l &7,%d1 # INDEX INTO TBL OF ATAN(|F|)
+ lea ATANTBL(%pc),%a1
+ add.l %d1,%a1 # ADDRESS OF ATAN(|F|)
+ mov.l (%a1)+,ATANF(%a6)
+ mov.l (%a1)+,ATANFHI(%a6)
+ mov.l (%a1)+,ATANFLO(%a6) # ATANF IS NOW ATAN(|F|)
+ mov.l X(%a6),%d1 # LOAD SIGN AND EXPO. AGAIN
+ and.l &0x80000000,%d1 # SIGN(F)
+ or.l %d1,ATANF(%a6) # ATANF IS NOW SIGN(F)*ATAN(|F|)
+ mov.l (%sp)+,%d2 # RESTORE d2
+
+#--THAT'S ALL I HAVE TO DO FOR NOW,
+#--BUT ALAS, THE DIVIDE IS STILL CRANKING!
+
+#--U IN FP0, WE ARE NOW READY TO COMPUTE ATAN(U) AS
+#--U + A1*U*V*(A2 + V*(A3 + V)), V = U*U
+#--THE POLYNOMIAL MAY LOOK STRANGE, BUT IS NEVERTHELESS CORRECT.
+#--THE NATURAL FORM IS U + U*V*(A1 + V*(A2 + V*A3))
+#--WHAT WE HAVE HERE IS MERELY A1 = A3, A2 = A1/A3, A3 = A2/A3.
+#--THE REASON FOR THIS REARRANGEMENT IS TO MAKE THE INDEPENDENT
+#--PARTS A1*U*V AND (A2 + ... STUFF) MORE LOAD-BALANCED
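+#--FOR INSTANCE, STARTING FROM THE TAYLOR COEFFICIENTS a1 = -1/3,
+#--a2 = 1/5, a3 = -1/7, THE STORED CONSTANTS ARE A1 = a3 ~ -1/7,
+#--A2 = a1/a3 ~ 7/3, A3 = a2/a3 ~ -7/5, WHICH IS (UP TO MINIMAX
+#--ADJUSTMENT) WHAT ATANA1, ATANA2, ATANA3 ABOVE HOLD.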
+
+ fmovm.x &0x04,-(%sp) # save fp2
+
+ fmov.x %fp0,%fp1
+ fmul.x %fp1,%fp1
+ fmov.d ATANA3(%pc),%fp2
+ fadd.x %fp1,%fp2 # A3+V
+ fmul.x %fp1,%fp2 # V*(A3+V)
+ fmul.x %fp0,%fp1 # U*V
+ fadd.d ATANA2(%pc),%fp2 # A2+V*(A3+V)
+ fmul.d ATANA1(%pc),%fp1 # A1*U*V
+ fmul.x %fp2,%fp1 # A1*U*V*(A2+V*(A3+V))
+ fadd.x %fp1,%fp0 # ATAN(U), FP1 RELEASED
+
+ fmovm.x (%sp)+,&0x20 # restore fp2
+
+ fmov.l %d0,%fpcr # restore users rnd mode,prec
+ fadd.x ATANF(%a6),%fp0 # ATAN(X)
+ bra t_inx2
+
+ATANBORS:
+#--|X| IS IN d0 IN COMPACT FORM. FP1, d0 SAVED.
+#--FP0 IS X AND |X| <= 1/16 OR |X| >= 16.
+ cmp.l %d1,&0x3FFF8000
+ bgt.w ATANBIG # I.E. |X| >= 16
+
+ATANSM:
+#--|X| <= 1/16
+#--IF |X| < 2^(-40), RETURN X AS ANSWER. OTHERWISE, APPROXIMATE
+#--ATAN(X) BY X + X*Y*(B1+Y*(B2+Y*(B3+Y*(B4+Y*(B5+Y*B6)))))
+#--WHICH IS X + X*Y*( [B1+Z*(B3+Z*B5)] + [Y*(B2+Z*(B4+Z*B6))] )
+#--WHERE Y = X*X, AND Z = Y*Y.
+
+ cmp.l %d1,&0x3FD78000
+ blt.w ATANTINY
+
+#--COMPUTE POLYNOMIAL
+ fmovm.x &0x0c,-(%sp) # save fp2/fp3
+
+ fmul.x %fp0,%fp0 # FP0 IS Y = X*X
+
+ fmov.x %fp0,%fp1
+ fmul.x %fp1,%fp1 # FP1 IS Z = Y*Y
+
+ fmov.d ATANB6(%pc),%fp2
+ fmov.d ATANB5(%pc),%fp3
+
+ fmul.x %fp1,%fp2 # Z*B6
+ fmul.x %fp1,%fp3 # Z*B5
+
+ fadd.d ATANB4(%pc),%fp2 # B4+Z*B6
+ fadd.d ATANB3(%pc),%fp3 # B3+Z*B5
+
+ fmul.x %fp1,%fp2 # Z*(B4+Z*B6)
+ fmul.x %fp3,%fp1 # Z*(B3+Z*B5)
+
+ fadd.d ATANB2(%pc),%fp2 # B2+Z*(B4+Z*B6)
+ fadd.d ATANB1(%pc),%fp1 # B1+Z*(B3+Z*B5)
+
+ fmul.x %fp0,%fp2 # Y*(B2+Z*(B4+Z*B6))
+ fmul.x X(%a6),%fp0 # X*Y
+
+ fadd.x %fp2,%fp1 # [B1+Z*(B3+Z*B5)]+[Y*(B2+Z*(B4+Z*B6))]
+
+ fmul.x %fp1,%fp0 # X*Y*([B1+Z*(B3+Z*B5)]+[Y*(B2+Z*(B4+Z*B6))])
+
+ fmovm.x (%sp)+,&0x30 # restore fp2/fp3
+
+ fmov.l %d0,%fpcr # restore users rnd mode,prec
+ fadd.x X(%a6),%fp0
+ bra t_inx2
+
+ATANTINY:
+#--|X| < 2^(-40), ATAN(X) = X
+
+ fmov.l %d0,%fpcr # restore users rnd mode,prec
+ mov.b &FMOV_OP,%d1 # last inst is MOVE
+ fmov.x X(%a6),%fp0 # last inst - possible exception set
+
+ bra t_catch
+
+ATANBIG:
+#--IF |X| > 2^(100), RETURN SIGN(X)*(PI/2 - TINY). OTHERWISE,
+#--RETURN SIGN(X)*PI/2 + ATAN(-1/X).
+ cmp.l %d1,&0x40638000
+ bgt.w ATANHUGE
+
+#--APPROXIMATE ATAN(-1/X) BY
+#--X'+X'*Y*(C1+Y*(C2+Y*(C3+Y*(C4+Y*C5)))), X' = -1/X, Y = X'*X'
+#--THIS CAN BE RE-WRITTEN AS
+#--X'+X'*Y*( [C1+Z*(C3+Z*C5)] + [Y*(C2+Z*C4)] ), Z = Y*Y.
+
+ fmovm.x &0x0c,-(%sp) # save fp2/fp3
+
+ fmov.s &0xBF800000,%fp1 # LOAD -1
+ fdiv.x %fp0,%fp1 # FP1 IS -1/X
+
+#--DIVIDE IS STILL CRANKING
+
+ fmov.x %fp1,%fp0 # FP0 IS X'
+ fmul.x %fp0,%fp0 # FP0 IS Y = X'*X'
+ fmov.x %fp1,X(%a6) # X IS REALLY X'
+
+ fmov.x %fp0,%fp1
+ fmul.x %fp1,%fp1 # FP1 IS Z = Y*Y
+
+ fmov.d ATANC5(%pc),%fp3
+ fmov.d ATANC4(%pc),%fp2
+
+ fmul.x %fp1,%fp3 # Z*C5
+ fmul.x %fp1,%fp2 # Z*C4
+
+ fadd.d ATANC3(%pc),%fp3 # C3+Z*C5
+ fadd.d ATANC2(%pc),%fp2 # C2+Z*C4
+
+ fmul.x %fp3,%fp1 # Z*(C3+Z*C5), FP3 RELEASED
+ fmul.x %fp0,%fp2 # Y*(C2+Z*C4)
+
+ fadd.d ATANC1(%pc),%fp1 # C1+Z*(C3+Z*C5)
+ fmul.x X(%a6),%fp0 # X'*Y
+
+ fadd.x %fp2,%fp1 # [Y*(C2+Z*C4)]+[C1+Z*(C3+Z*C5)]
+
+ fmul.x %fp1,%fp0 # X'*Y*([C1+Z*(C3+Z*C5)]
+# ... +[Y*(C2+Z*C4)])
+ fadd.x X(%a6),%fp0
+
+ fmovm.x (%sp)+,&0x30 # restore fp2/fp3
+
+ fmov.l %d0,%fpcr # restore users rnd mode,prec
+ tst.b (%a0)
+ bpl.b pos_big
+
+neg_big:
+ fadd.x NPIBY2(%pc),%fp0
+ bra t_minx2
+
+pos_big:
+ fadd.x PPIBY2(%pc),%fp0
+ bra t_pinx2
+
+ATANHUGE:
+#--RETURN SIGN(X)*(PIBY2 - TINY) = SIGN(X)*PIBY2 - SIGN(X)*TINY
+ tst.b (%a0)
+ bpl.b pos_huge
+
+neg_huge:
+ fmov.x NPIBY2(%pc),%fp0
+ fmov.l %d0,%fpcr
+ fadd.x PTINY(%pc),%fp0
+ bra t_minx2
+
+pos_huge:
+ fmov.x PPIBY2(%pc),%fp0
+ fmov.l %d0,%fpcr
+ fadd.x NTINY(%pc),%fp0
+ bra t_pinx2
+
+ global satand
+#--ENTRY POINT FOR ATAN(X) FOR DENORMALIZED ARGUMENT
+satand:
+ bra t_extdnrm
+
+#########################################################################
+# sasin(): computes the inverse sine of a normalized input #
+# sasind(): computes the inverse sine of a denormalized input #
+# #
+# INPUT *************************************************************** #
+# a0 = pointer to extended precision input #
+# d0 = round precision,mode #
+# #
+# OUTPUT ************************************************************** #
+# fp0 = arcsin(X) #
+# #
+# ACCURACY and MONOTONICITY ******************************************* #
+# The returned result is within 3 ulps in 64 significant bits, #
+# i.e. within 0.5001 ulp to 53 bits if the result is subsequently #
+# rounded to double precision. The result is provably monotonic #
+# in double precision. #
+# #
+# ALGORITHM *********************************************************** #
+# #
+# ASIN #
+# 1. If |X| >= 1, go to 3. #
+# #
+# 2. (|X| < 1) Calculate asin(X) by #
+# z := sqrt( [1-X][1+X] ) #
+# asin(X) = atan( X / z ). #
+# Exit. #
+# #
+# 3. If |X| > 1, go to 5. #
+# #
+# 4. (|X| = 1) sgn := sign(X), return asin(X) := sgn * Pi/2. Exit.#
+# #
+# 5. (|X| > 1) Generate an invalid operation by 0 * infinity. #
+# Exit. #
+# #
+#########################################################################
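+# A minimal C rendition of step 2 above (a sketch; the FPSP feeds the
+# quotient to its own satan() rather than to libm):
+#
+#	#include <math.h>
+#	double asin_via_atan(double x)	/* assumes |x| < 1 */
+#	{
+#		/* (1-x)*(1+x) loses less accuracy than 1-x*x
+#		   when |x| is close to 1 */
+#		double z = sqrt((1.0 - x) * (1.0 + x));
+#		return atan(x / z);
+#	}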
+
+ global sasin
+sasin:
+ fmov.x (%a0),%fp0 # LOAD INPUT
+
+ mov.l (%a0),%d1
+ mov.w 4(%a0),%d1
+ and.l &0x7FFFFFFF,%d1
+ cmp.l %d1,&0x3FFF8000
+ bge.b ASINBIG
+
+# This catch is added here for the '060 QSP. Originally, the call to
+# satan() would handle this case by causing the exception which would
+# not be caught until gen_except(). Now, with the exceptions being
+# detected inside of satan(), the exception would have been handled there
+# instead of inside sasin() as expected.
+ cmp.l %d1,&0x3FD78000
+ blt.w ASINTINY
+
+#--THIS IS THE USUAL CASE, |X| < 1
+#--ASIN(X) = ATAN( X / SQRT( (1-X)(1+X) ) )
+
+ASINMAIN:
+ fmov.s &0x3F800000,%fp1
+ fsub.x %fp0,%fp1 # 1-X
+ fmovm.x &0x4,-(%sp) # {fp2}
+ fmov.s &0x3F800000,%fp2
+ fadd.x %fp0,%fp2 # 1+X
+ fmul.x %fp2,%fp1 # (1+X)(1-X)
+ fmovm.x (%sp)+,&0x20 # {fp2}
+ fsqrt.x %fp1 # SQRT([1-X][1+X])
+ fdiv.x %fp1,%fp0 # X/SQRT([1-X][1+X])
+ fmovm.x &0x01,-(%sp) # save X/SQRT(...)
+ lea (%sp),%a0 # pass ptr to X/SQRT(...)
+ bsr satan
+ add.l &0xc,%sp # clear X/SQRT(...) from stack
+ bra t_inx2
+
+ASINBIG:
+ fabs.x %fp0 # |X|
+ fcmp.s %fp0,&0x3F800000
+ fbgt t_operr # cause an operr exception
+
+#--|X| = 1, ASIN(X) = +- PI/2.
+ASINONE:
+ fmov.x PIBY2(%pc),%fp0
+ mov.l (%a0),%d1
+ and.l &0x80000000,%d1 # SIGN BIT OF X
+ or.l &0x3F800000,%d1 # +-1 IN SGL FORMAT
+ mov.l %d1,-(%sp) # push SIGN(X) IN SGL-FMT
+ fmov.l %d0,%fpcr
+ fmul.s (%sp)+,%fp0
+ bra t_inx2
+
+#--|X| < 2^(-40), ASIN(X) = X
+ASINTINY:
+ fmov.l %d0,%fpcr # restore users rnd mode,prec
+ mov.b &FMOV_OP,%d1 # last inst is MOVE
+ fmov.x (%a0),%fp0 # last inst - possible exception
+ bra t_catch
+
+ global sasind
+#--ASIN(X) = X FOR DENORMALIZED X
+sasind:
+ bra t_extdnrm
+
+#########################################################################
+# sacos(): computes the inverse cosine of a normalized input #
+# sacosd(): computes the inverse cosine of a denormalized input #
+# #
+# INPUT *************************************************************** #
+# a0 = pointer to extended precision input #
+# d0 = round precision,mode #
+# #
+# OUTPUT ************************************************************** #
+# fp0 = arccos(X) #
+# #
+# ACCURACY and MONOTONICITY ******************************************* #
+# The returned result is within 3 ulps in 64 significant bits, #
+# i.e. within 0.5001 ulp to 53 bits if the result is subsequently #
+# rounded to double precision. The result is provably monotonic #
+# in double precision. #
+# #
+# ALGORITHM *********************************************************** #
+# #
+# ACOS #
+# 1. If |X| >= 1, go to 3. #
+# #
+# 2. (|X| < 1) Calculate acos(X) by #
+# z := (1-X) / (1+X) #
+# acos(X) = 2 * atan( sqrt(z) ). #
+# Exit. #
+# #
+# 3. If |X| > 1, go to 5. #
+# #
+# 4. (|X| = 1) If X > 0, return 0. Otherwise, return Pi. Exit. #
+# #
+# 5. (|X| > 1) Generate an invalid operation by 0 * infinity. #
+# Exit. #
+# #
+#########################################################################
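+# The analogous C sketch of step 2 above:
+#
+#	#include <math.h>
+#	double acos_via_atan(double x)	/* assumes |x| < 1 */
+#	{
+#		double z = (1.0 - x) / (1.0 + x);	/* z >= 0 */
+#		return 2.0 * atan(sqrt(z));
+#	}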
+
+ global sacos
+sacos:
+ fmov.x (%a0),%fp0 # LOAD INPUT
+
+ mov.l (%a0),%d1 # pack exp w/ upper 16 fraction
+ mov.w 4(%a0),%d1
+ and.l &0x7FFFFFFF,%d1
+ cmp.l %d1,&0x3FFF8000
+ bge.b ACOSBIG
+
+#--THIS IS THE USUAL CASE, |X| < 1
+#--ACOS(X) = 2 * ATAN( SQRT( (1-X)/(1+X) ) )
+
+ACOSMAIN:
+ fmov.s &0x3F800000,%fp1
+ fadd.x %fp0,%fp1 # 1+X
+ fneg.x %fp0 # -X
+ fadd.s &0x3F800000,%fp0 # 1-X
+ fdiv.x %fp1,%fp0 # (1-X)/(1+X)
+ fsqrt.x %fp0 # SQRT((1-X)/(1+X))
+ mov.l %d0,-(%sp) # save original users fpcr
+ clr.l %d0
+ fmovm.x &0x01,-(%sp) # save SQRT(...) to stack
+ lea (%sp),%a0 # pass ptr to sqrt
+ bsr satan # ATAN(SQRT([1-X]/[1+X]))
+ add.l &0xc,%sp # clear SQRT(...) from stack
+
+ fmov.l (%sp)+,%fpcr # restore users round prec,mode
+ fadd.x %fp0,%fp0 # 2 * ATAN( STUFF )
+ bra t_pinx2
+
+ACOSBIG:
+ fabs.x %fp0
+ fcmp.s %fp0,&0x3F800000
+ fbgt t_operr # cause an operr exception
+
+#--|X| = 1, ACOS(X) = 0 OR PI
+ tst.b (%a0) # is X positive or negative?
+ bpl.b ACOSP1
+
+#--X = -1
+#Returns PI and inexact exception
+ACOSM1:
+ fmov.x PI(%pc),%fp0 # load PI
+ fmov.l %d0,%fpcr # load round mode,prec
+ fadd.s &0x00800000,%fp0 # add a small value
+ bra t_pinx2
+
+ACOSP1:
+ bra ld_pzero # answer is positive zero
+
+ global sacosd
+#--ACOS(X) = PI/2 FOR DENORMALIZED X
+sacosd:
+ fmov.l %d0,%fpcr # load user's rnd mode/prec
+ fmov.x PIBY2(%pc),%fp0
+ bra t_pinx2
+
+#########################################################################
+# setox(): computes the exponential for a normalized input #
+# setoxd(): computes the exponential for a denormalized input #
+# setoxm1(): computes the exponential minus 1 for a normalized input #
+# setoxm1d(): computes the exponential minus 1 for a denormalized input #
+# #
+# INPUT *************************************************************** #
+# a0 = pointer to extended precision input #
+# d0 = round precision,mode #
+# #
+# OUTPUT ************************************************************** #
+# fp0 = exp(X) or exp(X)-1 #
+# #
+# ACCURACY and MONOTONICITY ******************************************* #
+# The returned result is within 0.85 ulps in 64 significant bits, #
+# i.e. within 0.5001 ulp to 53 bits if the result is subsequently #
+# rounded to double precision. The result is provably monotonic #
+# in double precision. #
+# #
+# ALGORITHM and IMPLEMENTATION **************************************** #
+# #
+# setoxd #
+# ------ #
+# Step 1. Set ans := 1.0 #
+# #
+# Step 2. Return ans := ans + sign(X)*2^(-126). Exit. #
+# Notes: This will always generate one exception -- inexact. #
+# #
+# #
+# setox #
+# ----- #
+# #
+# Step 1. Filter out extreme cases of input argument. #
+# 1.1 If |X| >= 2^(-65), go to Step 1.3. #
+# 1.2 Go to Step 7. #
+# 1.3 If |X| < 16380 log(2), go to Step 2. #
+# 1.4 Go to Step 8. #
+# Notes: The usual case should take the branches 1.1 -> 1.3 -> 2.#
+# To avoid the use of floating-point comparisons, a #
+# compact representation of |X| is used. This format is a #
+# 32-bit integer, the upper (more significant) 16 bits #
+# are the sign and biased exponent field of |X|; the #
+# lower 16 bits are the 16 most significant bits of #
+# the fraction of |X| (explicit bit included). Consequently, #
+# the comparisons in Steps 1.1 and 1.3 can be performed #
+# by integer comparison. Note also that the constant #
+# 16380 log(2) used in Step 1.3 is also in the compact #
+# form. Thus taking the branch to Step 2 guarantees #
+# |X| < 16380 log(2). There is no harm in having a small #
+# number of cases where |X| is less than, but close to, #
+# 16380 log(2) and the branch to Step 8 is taken. #
+# #
+# Step 2. Calculate N = round-to-nearest-int( X * 64/log2 ). #
+# 2.1 Set AdjFlag := 0 (indicates the branch 1.3 -> 2 #
+# was taken) #
+# 2.2 N := round-to-nearest-integer( X * 64/log2 ). #
+# 2.3 Calculate J = N mod 64; so J = 0,1,2,..., #
+# or 63. #
+# 2.4 Calculate M = (N - J)/64; so N = 64M + J. #
+# 2.5 Calculate the address of the stored value of #
+# 2^(J/64). #
+# 2.6 Create the value Scale = 2^M. #
+# Notes: The calculation in 2.2 is really performed by #
+# Z := X * constant #
+# N := round-to-nearest-integer(Z) #
+# where #
+# constant := single-precision( 64/log 2 ). #
+# #
+# Using a single-precision constant avoids memory #
+# access. Another effect of using a single-precision #
+# "constant" is that the calculated value Z is #
+# #
+# Z = X*(64/log2)*(1+eps), |eps| <= 2^(-24). #
+# #
+# This error has to be considered later in Steps 3 and 4. #
+# #
+# Step 3. Calculate X - N*log2/64. #
+# 3.1 R := X + N*L1, #
+# where L1 := single-precision(-log2/64). #
+# 3.2 R := R + N*L2, #
+# L2 := extended-precision(-log2/64 - L1).#
+# Notes: a) The way L1 and L2 are chosen ensures L1+L2 #
+# approximate the value -log2/64 to 88 bits of accuracy. #
+# b) N*L1 is exact because N is no longer than 22 bits #
+# and L1 is no longer than 24 bits. #
+# c) The calculation X+N*L1 is also exact due to #
+# cancellation. Thus, R is practically X+N(L1+L2) to full #
+# 64 bits. #
+# d) It is important to estimate how large can |R| be #
+# after Step 3.2. #
+# #
+# N = rnd-to-int( X*(64/log2)*(1+eps) ), |eps| <= 2^(-24) #
+# X*(64/log2)*(1+eps) = N + f, |f| <= 0.5 #
+# X*64/log2 - N = f - eps*X*64/log2 #
+# X - N*log2/64 = f*log2/64 - eps*X #
+# #
+# #
+# Now |X| <= 16446 log2, thus #
+# #
+# |X - N*log2/64| <= (0.5 + 16446/2^(18))*log2/64 #
+# <= 0.57 log2/64. #
+# This bound will be used in Step 4. #
+# #
+# Step 4. Approximate exp(R)-1 by a polynomial #
+# p = R + R*R*(A1 + R*(A2 + R*(A3 + R*(A4 + R*A5)))) #
+# Notes: a) In order to reduce memory access, the coefficients #
+# are made as "short" as possible: A1 (which is 1/2), A4 #
+# and A5 are single precision; A2 and A3 are double #
+# precision. #
+# b) Even with the restrictions above, #
+# |p - (exp(R)-1)| < 2^(-68.8) for all |R| <= 0.0062. #
+# Note that 0.0062 is slightly bigger than 0.57 log2/64. #
+# c) To fully utilize the pipeline, p is separated into #
+# two independent pieces of roughly equal complexities #
+# p = [ R + R*S*(A2 + S*A4) ] + #
+# [ S*(A1 + S*(A3 + S*A5)) ] #
+# where S = R*R. #
+# #
+# Step 5. Compute 2^(J/64)*exp(R) = 2^(J/64)*(1+p) by #
+# ans := T + ( T*p + t) #
+# where T and t are the stored values for 2^(J/64). #
+# Notes: 2^(J/64) is stored as T and t where T+t approximates #
+# 2^(J/64) to roughly 85 bits; T is in extended precision #
+# and t is in single precision. Note also that T is #
+# rounded to 62 bits so that the last two bits of T are #
+# zero. The reason for such a special form is that T-1, #
+# T-2, and T-8 will all be exact --- a property that will #
+# give much more accurate computation of the function #
+# EXPM1. #
+# #
+# Step 6. Reconstruction of exp(X) #
+# exp(X) = 2^M * 2^(J/64) * exp(R). #
+# 6.1 If AdjFlag = 0, go to 6.3 #
+# 6.2 ans := ans * AdjScale #
+# 6.3 Restore the user FPCR #
+# 6.4 Return ans := ans * Scale. Exit. #
+# Notes: If AdjFlag = 0, we have X = Mlog2 + Jlog2/64 + R, #
+# |M| <= 16380, and Scale = 2^M. Moreover, exp(X) will #
+# neither overflow nor underflow. If AdjFlag = 1, that #
+# means that #
+# X = (M1+M)log2 + Jlog2/64 + R, |M1+M| >= 16380. #
+# Hence, exp(X) may overflow or underflow or neither. #
+# When that is the case, AdjScale = 2^(M1) where M1 is #
+# approximately M. Thus 6.2 will never cause #
+# over/underflow. Possible exception in 6.4 is overflow #
+# or underflow. The inexact exception is not generated in #
+# 6.4. Although one can argue that the inexact flag #
+# should always be raised, simulating that exception #
+# costs more than the flag is worth in practical use. #
+# #
+# Step 7. Return 1 + X. #
+# 7.1 ans := X #
+# 7.2 Restore user FPCR. #
+# 7.3 Return ans := 1 + ans. Exit #
+# Notes: For non-zero X, the inexact exception will always be #
+# raised by 7.3. That is the only exception raised by 7.3.#
+# Note also that we use the FMOVEM instruction to move X #
+# in Step 7.1 to avoid unnecessary trapping. (Although #
+# the FMOVEM may not seem relevant since X is normalized, #
+# the precaution will be useful in the library version of #
+# this code where the separate entry for denormalized #
+# inputs will be done away with.) #
+# #
+# Step 8. Handle exp(X) where |X| >= 16380log2. #
+# 8.1 If |X| > 16480 log2, go to Step 9. #
+# (mimic 2.2 - 2.6) #
+# 8.2 N := round-to-integer( X * 64/log2 ) #
+# 8.3 Calculate J = N mod 64, J = 0,1,...,63 #
+# 8.4 K := (N-J)/64, M1 := truncate(K/2), M = K-M1, #
+# AdjFlag := 1. #
+# 8.5 Calculate the address of the stored value #
+# 2^(J/64). #
+# 8.6 Create the values Scale = 2^M, AdjScale = 2^M1. #
+# 8.7 Go to Step 3. #
+# Notes: Refer to notes for 2.2 - 2.6. #
+# #
+# Step 9. Handle exp(X), |X| > 16480 log2. #
+# 9.1 If X < 0, go to 9.3 #
+# 9.2 ans := Huge, go to 9.4 #
+# 9.3 ans := Tiny. #
+# 9.4 Restore user FPCR. #
+# 9.5 Return ans := ans * ans. Exit. #
+# Notes: Exp(X) will surely overflow or underflow, depending on #
+# X's sign. "Huge" and "Tiny" are respectively large/tiny #
+# extended-precision numbers whose square over/underflow #
+# with an inexact result. Thus, 9.5 always raises the #
+# inexact together with either overflow or underflow. #
+# #
+# setoxm1d #
+# -------- #
+# #
+# Step 1. Set ans := 0 #
+# #
+# Step 2. Return ans := X + ans. Exit. #
+# Notes: This will return X with the appropriate rounding #
+# precision prescribed by the user FPCR. #
+# #
+# setoxm1 #
+# ------- #
+# #
+# Step 1. Check |X| #
+# 1.1 If |X| >= 1/4, go to Step 1.3. #
+# 1.2 Go to Step 7. #
+# 1.3 If |X| < 70 log(2), go to Step 2. #
+# 1.4 Go to Step 10. #
+# Notes: The usual case should take the branches 1.1 -> 1.3 -> 2.#
+# However, it is conceivable |X| can be small very often #
+# because EXPM1 is intended to evaluate exp(X)-1 #
+# accurately when |X| is small. For further details on #
+# the comparisons, see the notes on Step 1 of setox. #
+# #
+# Step 2. Calculate N = round-to-nearest-int( X * 64/log2 ). #
+# 2.1 N := round-to-nearest-integer( X * 64/log2 ). #
+# 2.2 Calculate J = N mod 64; so J = 0,1,2,..., #
+# or 63. #
+# 2.3 Calculate M = (N - J)/64; so N = 64M + J. #
+# 2.4 Calculate the address of the stored value of #
+# 2^(J/64). #
+# 2.5 Create the values Sc = 2^M and #
+# OnebySc := -2^(-M). #
+# Notes: See the notes on Step 2 of setox. #
+# #
+# Step 3. Calculate X - N*log2/64. #
+# 3.1 R := X + N*L1, #
+# where L1 := single-precision(-log2/64). #
+# 3.2 R := R + N*L2, #
+# L2 := extended-precision(-log2/64 - L1).#
+# Notes: Applying the analysis of Step 3 of setox in this case #
+# shows that |R| <= 0.0055 (note that |X| <= 70 log2 in #
+# this case). #
+# #
+# Step 4. Approximate exp(R)-1 by a polynomial #
+# p = R+R*R*(A1+R*(A2+R*(A3+R*(A4+R*(A5+R*A6))))) #
+# Notes: a) In order to reduce memory access, the coefficients #
+# are made as "short" as possible: A1 (which is 1/2), A5 #
+# and A6 are single precision; A2, A3 and A4 are double #
+# precision. #
+# b) Even with the restriction above, #
+# |p - (exp(R)-1)| < |R| * 2^(-72.7) #
+# for all |R| <= 0.0055. #
+# c) To fully utilize the pipeline, p is separated into #
+# two independent pieces of roughly equal complexity #
+# p = [ R*S*(A2 + S*(A4 + S*A6)) ] + #
+# [ R + S*(A1 + S*(A3 + S*A5)) ] #
+# where S = R*R. #
+# #
+# Step 5. Compute 2^(J/64)*p by #
+# p := T*p #
+# where T and t are the stored values for 2^(J/64). #
+# Notes: 2^(J/64) is stored as T and t where T+t approximates #
+# 2^(J/64) to roughly 85 bits; T is in extended precision #
+# and t is in single precision. Note also that T is #
+# rounded to 62 bits so that the last two bits of T are #
+# zero. The reason for such a special form is that T-1, #
+# T-2, and T-8 will all be exact --- a property that will #
+# be exploited in Step 6 below. The total relative error #
+# in p is no bigger than 2^(-67.7) compared to the final #
+# result. #
+# #
+# Step 6. Reconstruction of exp(X)-1 #
+# exp(X)-1 = 2^M * ( 2^(J/64) + p - 2^(-M) ). #
+# 6.1 If M <= 63, go to Step 6.3. #
+# 6.2 ans := T + (p + (t + OnebySc)). Go to 6.6 #
+# 6.3 If M >= -3, go to 6.5. #
+# 6.4 ans := (T + (p + t)) + OnebySc. Go to 6.6 #
+# 6.5 ans := (T + OnebySc) + (p + t). #
+# 6.6 Restore user FPCR. #
+# 6.7 Return ans := Sc * ans. Exit. #
+# Notes: The various arrangements of the expressions give #
+# accurate evaluations. #
+# #
+# Step 7. exp(X)-1 for |X| < 1/4. #
+# 7.1 If |X| >= 2^(-65), go to Step 9. #
+# 7.2 Go to Step 8. #
+# #
+# Step 8. Calculate exp(X)-1, |X| < 2^(-65). #
+# 8.1 If |X| < 2^(-16312), go to 8.3 #
+# 8.2 Restore FPCR; return ans := X - 2^(-16382). #
+# Exit. #
+# 8.3 X := X * 2^(140). #
+# 8.4 Restore FPCR; ans := ans - 2^(-16382). #
+# Return ans := ans*2^(-140). Exit #
+# Notes: The idea is to return "X - tiny" under the user #
+# precision and rounding modes. To avoid unnecessary #
+# inefficiency, we stay away from denormalized numbers #
+# as best we can. For |X| >= 2^(-16312), the #
+# straightforward 8.2 generates the inexact exception as #
+# the case warrants. #
+# #
+# Step 9. Calculate exp(X)-1, |X| < 1/4, by a polynomial #
+# p = X + X*X*(B1 + X*(B2 + ... + X*B12)) #
+# Notes: a) In order to reduce memory access, the coefficients #
+# are made as "short" as possible: B1 (which is 1/2), B9 #
+# to B12 are single precision; B3 to B8 are double #
+# precision; and B2 is double extended. #
+# b) Even with the restriction above, #
+# |p - (exp(X)-1)| < |X| 2^(-70.6) #
+# for all |X| <= 0.251. #
+# Note that 0.251 is slightly bigger than 1/4. #
+# c) To fully preserve accuracy, the polynomial is #
+# computed as #
+# X + ( S*B1 + Q ) where S = X*X and #
+# Q = X*S*(B2 + X*(B3 + ... + X*B12)) #
+# d) To fully utilize the pipeline, Q is separated into #
+# two independent pieces of roughly equal complexity #
+# Q = [ X*S*(B2 + S*(B4 + ... + S*B12)) ] + #
+# [ S*S*(B3 + S*(B5 + ... + S*B11)) ] #
+# #
+# Step 10. Calculate exp(X)-1 for |X| >= 70 log 2. #
+# 10.1 If X >= 70log2 , exp(X) - 1 = exp(X) for all #
+# practical purposes. Therefore, go to Step 1 of setox. #
+# 10.2 If X <= -70log2, exp(X) - 1 = -1 for all practical #
+# purposes. #
+# ans := -1 #
+# Restore user FPCR #
+# Return ans := ans + 2^(-126). Exit. #
+# Notes: 10.2 will always create an inexact and return -1 + tiny #
+# in the user rounding precision and mode. #
+# #
+#########################################################################
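+# A compact C model of Steps 2-6 of setox, for illustration only.  It
+# keeps a single piece of -log2/64 and a short exp(R)-1 polynomial, so
+# it is far less accurate than the split-constant, table-plus-
+# correction scheme implemented below:
+#
+#	#include <math.h>
+#	double exp_by_table(double x)
+#	{
+#		double n = nearbyint(x * (64.0 / M_LN2));	/* 2.2 */
+#		int    ni = (int)n;
+#		int    j  = ni & 63;				/* 2.3 */
+#		int    m  = (ni - j) / 64;			/* 2.4 */
+#		double r  = x - n * (M_LN2 / 64.0);		/* 3   */
+#		double p  = r + r*r*(0.5 + r/6.0);		/* 4   */
+#		/* exp(x) = 2^m * 2^(j/64) * (1 + p) */		/* 5-6 */
+#		return ldexp(exp2(j / 64.0) * (1.0 + p), m);
+#	}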
+
+L2: long 0x3FDC0000,0x82E30865,0x4361C4C6,0x00000000
+
+EEXPA3: long 0x3FA55555,0x55554CC1
+EEXPA2: long 0x3FC55555,0x55554A54
+
+EM1A4: long 0x3F811111,0x11174385
+EM1A3: long 0x3FA55555,0x55554F5A
+
+EM1A2: long 0x3FC55555,0x55555555,0x00000000,0x00000000
+
+EM1B8: long 0x3EC71DE3,0xA5774682
+EM1B7: long 0x3EFA01A0,0x19D7CB68
+
+EM1B6: long 0x3F2A01A0,0x1A019DF3
+EM1B5: long 0x3F56C16C,0x16C170E2
+
+EM1B4: long 0x3F811111,0x11111111
+EM1B3: long 0x3FA55555,0x55555555
+
+EM1B2: long 0x3FFC0000,0xAAAAAAAA,0xAAAAAAAB
+ long 0x00000000
+
+TWO140: long 0x48B00000,0x00000000
+TWON140:
+ long 0x37300000,0x00000000
+
+EEXPTBL:
+ long 0x3FFF0000,0x80000000,0x00000000,0x00000000
+ long 0x3FFF0000,0x8164D1F3,0xBC030774,0x9F841A9B
+ long 0x3FFF0000,0x82CD8698,0xAC2BA1D8,0x9FC1D5B9
+ long 0x3FFF0000,0x843A28C3,0xACDE4048,0xA0728369
+ long 0x3FFF0000,0x85AAC367,0xCC487B14,0x1FC5C95C
+ long 0x3FFF0000,0x871F6196,0x9E8D1010,0x1EE85C9F
+ long 0x3FFF0000,0x88980E80,0x92DA8528,0x9FA20729
+ long 0x3FFF0000,0x8A14D575,0x496EFD9C,0xA07BF9AF
+ long 0x3FFF0000,0x8B95C1E3,0xEA8BD6E8,0xA0020DCF
+ long 0x3FFF0000,0x8D1ADF5B,0x7E5BA9E4,0x205A63DA
+ long 0x3FFF0000,0x8EA4398B,0x45CD53C0,0x1EB70051
+ long 0x3FFF0000,0x9031DC43,0x1466B1DC,0x1F6EB029
+ long 0x3FFF0000,0x91C3D373,0xAB11C338,0xA0781494
+ long 0x3FFF0000,0x935A2B2F,0x13E6E92C,0x9EB319B0
+ long 0x3FFF0000,0x94F4EFA8,0xFEF70960,0x2017457D
+ long 0x3FFF0000,0x96942D37,0x20185A00,0x1F11D537
+ long 0x3FFF0000,0x9837F051,0x8DB8A970,0x9FB952DD
+ long 0x3FFF0000,0x99E04593,0x20B7FA64,0x1FE43087
+ long 0x3FFF0000,0x9B8D39B9,0xD54E5538,0x1FA2A818
+ long 0x3FFF0000,0x9D3ED9A7,0x2CFFB750,0x1FDE494D
+ long 0x3FFF0000,0x9EF53260,0x91A111AC,0x20504890
+ long 0x3FFF0000,0xA0B0510F,0xB9714FC4,0xA073691C
+ long 0x3FFF0000,0xA2704303,0x0C496818,0x1F9B7A05
+ long 0x3FFF0000,0xA43515AE,0x09E680A0,0xA0797126
+ long 0x3FFF0000,0xA5FED6A9,0xB15138EC,0xA071A140
+ long 0x3FFF0000,0xA7CD93B4,0xE9653568,0x204F62DA
+ long 0x3FFF0000,0xA9A15AB4,0xEA7C0EF8,0x1F283C4A
+ long 0x3FFF0000,0xAB7A39B5,0xA93ED338,0x9F9A7FDC
+ long 0x3FFF0000,0xAD583EEA,0x42A14AC8,0xA05B3FAC
+ long 0x3FFF0000,0xAF3B78AD,0x690A4374,0x1FDF2610
+ long 0x3FFF0000,0xB123F581,0xD2AC2590,0x9F705F90
+ long 0x3FFF0000,0xB311C412,0xA9112488,0x201F678A
+ long 0x3FFF0000,0xB504F333,0xF9DE6484,0x1F32FB13
+ long 0x3FFF0000,0xB6FD91E3,0x28D17790,0x20038B30
+ long 0x3FFF0000,0xB8FBAF47,0x62FB9EE8,0x200DC3CC
+ long 0x3FFF0000,0xBAFF5AB2,0x133E45FC,0x9F8B2AE6
+ long 0x3FFF0000,0xBD08A39F,0x580C36C0,0xA02BBF70
+ long 0x3FFF0000,0xBF1799B6,0x7A731084,0xA00BF518
+ long 0x3FFF0000,0xC12C4CCA,0x66709458,0xA041DD41
+ long 0x3FFF0000,0xC346CCDA,0x24976408,0x9FDF137B
+ long 0x3FFF0000,0xC5672A11,0x5506DADC,0x201F1568
+ long 0x3FFF0000,0xC78D74C8,0xABB9B15C,0x1FC13A2E
+ long 0x3FFF0000,0xC9B9BD86,0x6E2F27A4,0xA03F8F03
+ long 0x3FFF0000,0xCBEC14FE,0xF2727C5C,0x1FF4907D
+ long 0x3FFF0000,0xCE248C15,0x1F8480E4,0x9E6E53E4
+ long 0x3FFF0000,0xD06333DA,0xEF2B2594,0x1FD6D45C
+ long 0x3FFF0000,0xD2A81D91,0xF12AE45C,0xA076EDB9
+ long 0x3FFF0000,0xD4F35AAB,0xCFEDFA20,0x9FA6DE21
+ long 0x3FFF0000,0xD744FCCA,0xD69D6AF4,0x1EE69A2F
+ long 0x3FFF0000,0xD99D15C2,0x78AFD7B4,0x207F439F
+ long 0x3FFF0000,0xDBFBB797,0xDAF23754,0x201EC207
+ long 0x3FFF0000,0xDE60F482,0x5E0E9124,0x9E8BE175
+ long 0x3FFF0000,0xE0CCDEEC,0x2A94E110,0x20032C4B
+ long 0x3FFF0000,0xE33F8972,0xBE8A5A50,0x2004DFF5
+ long 0x3FFF0000,0xE5B906E7,0x7C8348A8,0x1E72F47A
+ long 0x3FFF0000,0xE8396A50,0x3C4BDC68,0x1F722F22
+ long 0x3FFF0000,0xEAC0C6E7,0xDD243930,0xA017E945
+ long 0x3FFF0000,0xED4F301E,0xD9942B84,0x1F401A5B
+ long 0x3FFF0000,0xEFE4B99B,0xDCDAF5CC,0x9FB9A9E3
+ long 0x3FFF0000,0xF281773C,0x59FFB138,0x20744C05
+ long 0x3FFF0000,0xF5257D15,0x2486CC2C,0x1F773A19
+ long 0x3FFF0000,0xF7D0DF73,0x0AD13BB8,0x1FFE90D5
+ long 0x3FFF0000,0xFA83B2DB,0x722A033C,0xA041ED22
+ long 0x3FFF0000,0xFD3E0C0C,0xF486C174,0x1F853F3A
+
+ set ADJFLAG,L_SCR2
+ set SCALE,FP_SCR0
+ set ADJSCALE,FP_SCR1
+ set SC,FP_SCR0
+ set ONEBYSC,FP_SCR1
+
+ global setox
+setox:
+#--entry point for EXP(X), here X is finite, non-zero, and not a NaN
+
+#--Step 1.
+ mov.l (%a0),%d1 # load part of input X
+ and.l &0x7FFF0000,%d1 # biased expo. of X
+ cmp.l %d1,&0x3FBE0000 # 2^(-65)
+ bge.b EXPC1 # normal case
+ bra EXPSM
+
+EXPC1:
+#--The case |X| >= 2^(-65)
+ mov.w 4(%a0),%d1 # expo. and partial sig. of |X|
+ cmp.l %d1,&0x400CB167 # 16380 log2 trunc. 16 bits
+ blt.b EXPMAIN # normal case
+ bra EEXPBIG
+
+EXPMAIN:
+#--Step 2.
+#--This is the normal branch: 2^(-65) <= |X| < 16380 log2.
+ fmov.x (%a0),%fp0 # load input from (a0)
+
+ fmov.x %fp0,%fp1
+ fmul.s &0x42B8AA3B,%fp0 # 64/log2 * X
+ fmovm.x &0xc,-(%sp) # save fp2 {%fp2/%fp3}
+ mov.l &0,ADJFLAG(%a6)
+ fmov.l %fp0,%d1 # N = int( X * 64/log2 )
+ lea EEXPTBL(%pc),%a1
+ fmov.l %d1,%fp0 # convert to floating-format
+
+ mov.l %d1,L_SCR1(%a6) # save N temporarily
+ and.l &0x3F,%d1 # D0 is J = N mod 64
+ lsl.l &4,%d1
+ add.l %d1,%a1 # address of 2^(J/64)
+ mov.l L_SCR1(%a6),%d1
+ asr.l &6,%d1 # D0 is M
+ add.w &0x3FFF,%d1 # biased expo. of 2^(M)
+ mov.w L2(%pc),L_SCR1(%a6) # prefetch L2, no need in CB
+
+EXPCONT1:
+#--Step 3.
+#--fp1,fp2 saved on the stack. fp0 is N, fp1 is X,
+#--a0 points to 2^(J/64), D0 is biased expo. of 2^(M)
+ fmov.x %fp0,%fp2
+ fmul.s &0xBC317218,%fp0 # N * L1, L1 = lead(-log2/64)
+ fmul.x L2(%pc),%fp2 # N * L2, L1+L2 = -log2/64
+ fadd.x %fp1,%fp0 # X + N*L1
+ fadd.x %fp2,%fp0 # fp0 is R, reduced arg.
+
+#--Step 4.
+#--WE NOW COMPUTE EXP(R)-1 BY A POLYNOMIAL
+#-- R + R*R*(A1 + R*(A2 + R*(A3 + R*(A4 + R*A5))))
+#--TO FULLY UTILIZE THE PIPELINE, WE COMPUTE S = R*R
+#--[R+R*S*(A2+S*A4)] + [S*(A1+S*(A3+S*A5))]
+
+ fmov.x %fp0,%fp1
+ fmul.x %fp1,%fp1 # fp1 IS S = R*R
+
+ fmov.s &0x3AB60B70,%fp2 # fp2 IS A5
+
+ fmul.x %fp1,%fp2 # fp2 IS S*A5
+ fmov.x %fp1,%fp3
+ fmul.s &0x3C088895,%fp3 # fp3 IS S*A4
+
+ fadd.d EEXPA3(%pc),%fp2 # fp2 IS A3+S*A5
+ fadd.d EEXPA2(%pc),%fp3 # fp3 IS A2+S*A4
+
+ fmul.x %fp1,%fp2 # fp2 IS S*(A3+S*A5)
+ mov.w %d1,SCALE(%a6) # SCALE is 2^(M) in extended
+ mov.l &0x80000000,SCALE+4(%a6)
+ clr.l SCALE+8(%a6)
+
+ fmul.x %fp1,%fp3 # fp3 IS S*(A2+S*A4)
+
+ fadd.s &0x3F000000,%fp2 # fp2 IS A1+S*(A3+S*A5)
+ fmul.x %fp0,%fp3 # fp3 IS R*S*(A2+S*A4)
+
+ fmul.x %fp1,%fp2 # fp2 IS S*(A1+S*(A3+S*A5))
+ fadd.x %fp3,%fp0 # fp0 IS R+R*S*(A2+S*A4),
+
+ fmov.x (%a1)+,%fp1 # fp1 is lead. pt. of 2^(J/64)
+ fadd.x %fp2,%fp0 # fp0 is EXP(R) - 1
+
+#--Step 5
+#--final reconstruction process
+#--EXP(X) = 2^M * ( 2^(J/64) + 2^(J/64)*(EXP(R)-1) )
+
+ fmul.x %fp1,%fp0 # 2^(J/64)*(Exp(R)-1)
+ fmovm.x (%sp)+,&0x30 # fp2 restored {%fp2/%fp3}
+ fadd.s (%a1),%fp0 # accurate 2^(J/64)
+
+ fadd.x %fp1,%fp0 # 2^(J/64) + 2^(J/64)*...
+ mov.l ADJFLAG(%a6),%d1
+
+#--Step 6
+ tst.l %d1
+ beq.b NORMAL
+ADJUST:
+ fmul.x ADJSCALE(%a6),%fp0
+NORMAL:
+ fmov.l %d0,%fpcr # restore user FPCR
+ mov.b &FMUL_OP,%d1 # last inst is MUL
+ fmul.x SCALE(%a6),%fp0 # multiply 2^(M)
+ bra t_catch
+
+EXPSM:
+#--Step 7
+ fmovm.x (%a0),&0x80 # load X
+ fmov.l %d0,%fpcr
+ fadd.s &0x3F800000,%fp0 # 1+X in user mode
+ bra t_pinx2
+
+EEXPBIG:
+#--Step 8
+ cmp.l %d1,&0x400CB27C # 16480 log2
+ bgt.b EXP2BIG
+#--Steps 8.2 -- 8.6
+ fmov.x (%a0),%fp0 # load input from (a0)
+
+ fmov.x %fp0,%fp1
+ fmul.s &0x42B8AA3B,%fp0 # 64/log2 * X
+ fmovm.x &0xc,-(%sp) # save fp2 {%fp2/%fp3}
+ mov.l &1,ADJFLAG(%a6)
+ fmov.l %fp0,%d1 # N = int( X * 64/log2 )
+ lea EEXPTBL(%pc),%a1
+ fmov.l %d1,%fp0 # convert to floating-format
+ mov.l %d1,L_SCR1(%a6) # save N temporarily
+ and.l &0x3F,%d1 # D0 is J = N mod 64
+ lsl.l &4,%d1
+ add.l %d1,%a1 # address of 2^(J/64)
+ mov.l L_SCR1(%a6),%d1
+ asr.l &6,%d1 # D0 is K
+ mov.l %d1,L_SCR1(%a6) # save K temporarily
+ asr.l &1,%d1 # D0 is M1
+ sub.l %d1,L_SCR1(%a6) # a1 is M
+ add.w &0x3FFF,%d1 # biased expo. of 2^(M1)
+ mov.w %d1,ADJSCALE(%a6) # ADJSCALE := 2^(M1)
+ mov.l &0x80000000,ADJSCALE+4(%a6)
+ clr.l ADJSCALE+8(%a6)
+ mov.l L_SCR1(%a6),%d1 # D0 is M
+ add.w &0x3FFF,%d1 # biased expo. of 2^(M)
+ bra.w EXPCONT1 # go back to Step 3
+
+EXP2BIG:
+#--Step 9
+ tst.b (%a0) # is X positive or negative?
+ bmi t_unfl2
+ bra t_ovfl2
+
+ global setoxd
+setoxd:
+#--entry point for EXP(X), X is denormalized
+ mov.l (%a0),-(%sp)
+ andi.l &0x80000000,(%sp)
+ ori.l &0x00800000,(%sp) # sign(X)*2^(-126)
+
+ fmov.s &0x3F800000,%fp0
+
+ fmov.l %d0,%fpcr
+ fadd.s (%sp)+,%fp0
+ bra t_pinx2
+
+ global setoxm1
+setoxm1:
+#--entry point for EXPM1(X), here X is finite, non-zero, non-NaN
+
+#--Step 1.
+#--Step 1.1
+ mov.l (%a0),%d1 # load part of input X
+ and.l &0x7FFF0000,%d1 # biased expo. of X
+ cmp.l %d1,&0x3FFD0000 # 1/4
+ bge.b EM1CON1 # |X| >= 1/4
+ bra EM1SM
+
+EM1CON1:
+#--Step 1.3
+#--The case |X| >= 1/4
+ mov.w 4(%a0),%d1 # expo. and partial sig. of |X|
+ cmp.l %d1,&0x4004C215 # 70log2 rounded up to 16 bits
+ ble.b EM1MAIN # 1/4 <= |X| <= 70log2
+ bra EM1BIG
+
+EM1MAIN:
+#--Step 2.
+#--This is the case: 1/4 <= |X| <= 70 log2.
+ fmov.x (%a0),%fp0 # load input from (a0)
+
+ fmov.x %fp0,%fp1
+ fmul.s &0x42B8AA3B,%fp0 # 64/log2 * X
+ fmovm.x &0xc,-(%sp) # save fp2 {%fp2/%fp3}
+ fmov.l %fp0,%d1 # N = int( X * 64/log2 )
+ lea EEXPTBL(%pc),%a1
+ fmov.l %d1,%fp0 # convert to floating-format
+
+ mov.l %d1,L_SCR1(%a6) # save N temporarily
+ and.l &0x3F,%d1 # D0 is J = N mod 64
+ lsl.l &4,%d1
+ add.l %d1,%a1 # address of 2^(J/64)
+ mov.l L_SCR1(%a6),%d1
+ asr.l &6,%d1 # D0 is M
+ mov.l %d1,L_SCR1(%a6) # save a copy of M
+
+#--Step 3.
+#--fp1,fp2 saved on the stack. fp0 is N, fp1 is X,
+#--a0 points to 2^(J/64), D0 and a1 both contain M
+ fmov.x %fp0,%fp2
+ fmul.s &0xBC317218,%fp0 # N * L1, L1 = lead(-log2/64)
+ fmul.x L2(%pc),%fp2 # N * L2, L1+L2 = -log2/64
+ fadd.x %fp1,%fp0 # X + N*L1
+ fadd.x %fp2,%fp0 # fp0 is R, reduced arg.
+ add.w &0x3FFF,%d1 # D0 is biased expo. of 2^M
+
+#--Step 4.
+#--WE NOW COMPUTE EXP(R)-1 BY A POLYNOMIAL
+#-- R + R*R*(A1 + R*(A2 + R*(A3 + R*(A4 + R*(A5 + R*A6)))))
+#--TO FULLY UTILIZE THE PIPELINE, WE COMPUTE S = R*R
+#--[R*S*(A2+S*(A4+S*A6))] + [R+S*(A1+S*(A3+S*A5))]
+
+ fmov.x %fp0,%fp1
+ fmul.x %fp1,%fp1 # fp1 IS S = R*R
+
+ fmov.s &0x3950097B,%fp2 # fp2 IS a6
+
+ fmul.x %fp1,%fp2 # fp2 IS S*A6
+ fmov.x %fp1,%fp3
+ fmul.s &0x3AB60B6A,%fp3 # fp3 IS S*A5
+
+ fadd.d EM1A4(%pc),%fp2 # fp2 IS A4+S*A6
+ fadd.d EM1A3(%pc),%fp3 # fp3 IS A3+S*A5
+ mov.w %d1,SC(%a6) # SC is 2^(M) in extended
+ mov.l &0x80000000,SC+4(%a6)
+ clr.l SC+8(%a6)
+
+ fmul.x %fp1,%fp2 # fp2 IS S*(A4+S*A6)
+ mov.l L_SCR1(%a6),%d1 # D0 is M
+ neg.w %d1 # D0 is -M
+ fmul.x %fp1,%fp3 # fp3 IS S*(A3+S*A5)
+ add.w &0x3FFF,%d1 # biased expo. of 2^(-M)
+ fadd.d EM1A2(%pc),%fp2 # fp2 IS A2+S*(A4+S*A6)
+ fadd.s &0x3F000000,%fp3 # fp3 IS A1+S*(A3+S*A5)
+
+ fmul.x %fp1,%fp2 # fp2 IS S*(A2+S*(A4+S*A6))
+ or.w &0x8000,%d1 # signed/expo. of -2^(-M)
+ mov.w %d1,ONEBYSC(%a6) # OnebySc is -2^(-M)
+ mov.l &0x80000000,ONEBYSC+4(%a6)
+ clr.l ONEBYSC+8(%a6)
+ fmul.x %fp3,%fp1 # fp1 IS S*(A1+S*(A3+S*A5))
+
+ fmul.x %fp0,%fp2 # fp2 IS R*S*(A2+S*(A4+S*A6))
+ fadd.x %fp1,%fp0 # fp0 IS R+S*(A1+S*(A3+S*A5))
+
+ fadd.x %fp2,%fp0 # fp0 IS EXP(R)-1
+
+ fmovm.x (%sp)+,&0x30 # fp2 restored {%fp2/%fp3}
+
+#--Step 5
+#--Compute 2^(J/64)*p
+
+ fmul.x (%a1),%fp0 # 2^(J/64)*(Exp(R)-1)
+
+#--Step 6
+#--Step 6.1
+ mov.l L_SCR1(%a6),%d1 # retrieve M
+ cmp.l %d1,&63
+ ble.b MLE63
+#--Step 6.2 M >= 64
+ fmov.s 12(%a1),%fp1 # fp1 is t
+ fadd.x ONEBYSC(%a6),%fp1 # fp1 is t+OnebySc
+ fadd.x %fp1,%fp0 # p+(t+OnebySc), fp1 released
+ fadd.x (%a1),%fp0 # T+(p+(t+OnebySc))
+ bra EM1SCALE
+MLE63:
+#--Step 6.3 M <= 63
+ cmp.l %d1,&-3
+ bge.b MGEN3
+MLTN3:
+#--Step 6.4 M <= -4
+ fadd.s 12(%a1),%fp0 # p+t
+ fadd.x (%a1),%fp0 # T+(p+t)
+ fadd.x ONEBYSC(%a6),%fp0 # OnebySc + (T+(p+t))
+ bra EM1SCALE
+MGEN3:
+#--Step 6.5 -3 <= M <= 63
+ fmov.x (%a1)+,%fp1 # fp1 is T
+ fadd.s (%a1),%fp0 # fp0 is p+t
+ fadd.x ONEBYSC(%a6),%fp1 # fp1 is T+OnebySc
+ fadd.x %fp1,%fp0 # (T+OnebySc)+(p+t)
+
+EM1SCALE:
+#--Step 6.6
+ fmov.l %d0,%fpcr
+ fmul.x SC(%a6),%fp0
+ bra t_inx2
+
+EM1SM:
+#--Step 7 |X| < 1/4.
+ cmp.l %d1,&0x3FBE0000 # 2^(-65)
+ bge.b EM1POLY
+
+EM1TINY:
+#--Step 8 |X| < 2^(-65)
+ cmp.l %d1,&0x00330000 # 2^(-16312)
+ blt.b EM12TINY
+#--Step 8.2
+ mov.l &0x80010000,SC(%a6) # SC is -2^(-16382)
+ mov.l &0x80000000,SC+4(%a6)
+ clr.l SC+8(%a6)
+ fmov.x (%a0),%fp0
+ fmov.l %d0,%fpcr
+ mov.b &FADD_OP,%d1 # last inst is ADD
+ fadd.x SC(%a6),%fp0
+ bra t_catch
+
+EM12TINY:
+#--Step 8.3
+ fmov.x (%a0),%fp0
+ fmul.d TWO140(%pc),%fp0
+ mov.l &0x80010000,SC(%a6)
+ mov.l &0x80000000,SC+4(%a6)
+ clr.l SC+8(%a6)
+ fadd.x SC(%a6),%fp0
+ fmov.l %d0,%fpcr
+ mov.b &FMUL_OP,%d1 # last inst is MUL
+ fmul.d TWON140(%pc),%fp0
+ bra t_catch
+
+EM1POLY:
+#--Step 9 exp(X)-1 by a simple polynomial
+ fmov.x (%a0),%fp0 # fp0 is X
+ fmul.x %fp0,%fp0 # fp0 is S := X*X
+ fmovm.x &0xc,-(%sp) # save fp2 {%fp2/%fp3}
+ fmov.s &0x2F30CAA8,%fp1 # fp1 is B12
+ fmul.x %fp0,%fp1 # fp1 is S*B12
+ fmov.s &0x310F8290,%fp2 # fp2 is B11
+ fadd.s &0x32D73220,%fp1 # fp1 is B10+S*B12
+
+ fmul.x %fp0,%fp2 # fp2 is S*B11
+ fmul.x %fp0,%fp1 # fp1 is S*(B10 + ...
+
+ fadd.s &0x3493F281,%fp2 # fp2 is B9+S*...
+ fadd.d EM1B8(%pc),%fp1 # fp1 is B8+S*...
+
+ fmul.x %fp0,%fp2 # fp2 is S*(B9+...
+ fmul.x %fp0,%fp1 # fp1 is S*(B8+...
+
+ fadd.d EM1B7(%pc),%fp2 # fp2 is B7+S*...
+ fadd.d EM1B6(%pc),%fp1 # fp1 is B6+S*...
+
+ fmul.x %fp0,%fp2 # fp2 is S*(B7+...
+ fmul.x %fp0,%fp1 # fp1 is S*(B6+...
+
+ fadd.d EM1B5(%pc),%fp2 # fp2 is B5+S*...
+ fadd.d EM1B4(%pc),%fp1 # fp1 is B4+S*...
+
+ fmul.x %fp0,%fp2 # fp2 is S*(B5+...
+ fmul.x %fp0,%fp1 # fp1 is S*(B4+...
+
+ fadd.d EM1B3(%pc),%fp2 # fp2 is B3+S*...
+ fadd.x EM1B2(%pc),%fp1 # fp1 is B2+S*...
+
+ fmul.x %fp0,%fp2 # fp2 is S*(B3+...
+ fmul.x %fp0,%fp1 # fp1 is S*(B2+...
+
+ fmul.x %fp0,%fp2 # fp2 is S*S*(B3+...)
+ fmul.x (%a0),%fp1 # fp1 is X*S*(B2...
+
+ fmul.s &0x3F000000,%fp0 # fp0 is S*B1
+ fadd.x %fp2,%fp1 # fp1 is Q
+
+ fmovm.x (%sp)+,&0x30 # fp2 restored {%fp2/%fp3}
+
+ fadd.x %fp1,%fp0 # fp0 is S*B1+Q
+
+ fmov.l %d0,%fpcr
+ fadd.x (%a0),%fp0
+ bra t_inx2
+
+EM1BIG:
+#--Step 10 |X| > 70 log2
+ mov.l (%a0),%d1
+ cmp.l %d1,&0
+ bgt.w EXPC1
+#--Step 10.2
+ fmov.s &0xBF800000,%fp0 # fp0 is -1
+ fmov.l %d0,%fpcr
+ fadd.s &0x00800000,%fp0 # -1 + 2^(-126)
+ bra t_minx2
+
+ global setoxm1d
+setoxm1d:
+#--entry point for EXPM1(X), here X is denormalized
+#--Step 0.
+ bra t_extdnrm
+
+#########################################################################
+# sgetexp(): returns the exponent portion of the input argument. #
+# The exponent bias is removed and the exponent value is #
+# returned as an extended precision number in fp0. #
+# sgetexpd(): handles denormalized numbers. #
+# #
+# sgetman(): extracts the mantissa of the input argument. The #
+# mantissa is converted to an extended precision number w/ #
+# an exponent of $3fff and is returned in fp0. The range of #
+# the result is [1.0 - 2.0). #
+# sgetmand(): handles denormalized numbers. #
+# #
+# INPUT *************************************************************** #
+# a0 = pointer to extended precision input #
+# #
+# OUTPUT ************************************************************** #
+# fp0 = exponent(X) or mantissa(X) #
+# #
+#########################################################################
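+# The same extractions in C on a double, for illustration (11-bit
+# exponent with bias 1023 instead of the 15-bit/16383 extended format;
+# normalized, finite inputs assumed):
+#
+#	#include <stdint.h>
+#	#include <string.h>
+#	double getexp(double x)
+#	{
+#		uint64_t b;
+#		memcpy(&b, &x, sizeof b);	/* strip sign, unbias */
+#		return (double)((int)((b >> 52) & 0x7FF) - 1023);
+#	}
+#	double getman(double x)		/* result in +-[1.0, 2.0) */
+#	{
+#		uint64_t b;
+#		memcpy(&b, &x, sizeof b);
+#		b = (b & 0x800FFFFFFFFFFFFFULL)	/* keep sign+fraction */
+#		  | 0x3FF0000000000000ULL;	/* force exponent 0   */
+#		memcpy(&x, &b, sizeof x);
+#		return x;
+#	}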
+
+ global sgetexp
+sgetexp:
+ mov.w SRC_EX(%a0),%d0 # get the exponent
+ bclr &0xf,%d0 # clear the sign bit
+ subi.w &0x3fff,%d0 # subtract off the bias
+ fmov.w %d0,%fp0 # return exp in fp0
+ blt.b sgetexpn # it's negative
+ rts
+
+sgetexpn:
+ mov.b &neg_bmask,FPSR_CC(%a6) # set 'N' ccode bit
+ rts
+
+ global sgetexpd
+sgetexpd:
+ bsr.l norm # normalize
+ neg.w %d0 # new exp = -(shft amt)
+ subi.w &0x3fff,%d0 # subtract off the bias
+ fmov.w %d0,%fp0 # return exp in fp0
+ mov.b &neg_bmask,FPSR_CC(%a6) # set 'N' ccode bit
+ rts
+
+ global sgetman
+sgetman:
+ mov.w SRC_EX(%a0),%d0 # get the exp
+ ori.w &0x7fff,%d0 # clear old exp
+ bclr &0xe,%d0 # make it the new exp +-3fff
+
+# here, we build the result in a tmp location so as not to disturb the input
+ mov.l SRC_HI(%a0),FP_SCR0_HI(%a6) # copy to tmp loc
+ mov.l SRC_LO(%a0),FP_SCR0_LO(%a6) # copy to tmp loc
+ mov.w %d0,FP_SCR0_EX(%a6) # insert new exponent
+ fmov.x FP_SCR0(%a6),%fp0 # put new value back in fp0
+ bmi.b sgetmann # it's negative
+ rts
+
+sgetmann:
+ mov.b &neg_bmask,FPSR_CC(%a6) # set 'N' ccode bit
+ rts
+
+#
+# For denormalized numbers, shift the mantissa until the j-bit = 1,
+# then load the exponent with +/- $3fff.
+#
+ global sgetmand
+sgetmand:
+ bsr.l norm # normalize exponent
+ bra.b sgetman
+
+#########################################################################
+# scosh(): computes the hyperbolic cosine of a normalized input #
+# scoshd(): computes the hyperbolic cosine of a denormalized input #
+# #
+# INPUT *************************************************************** #
+# a0 = pointer to extended precision input #
+# d0 = round precision,mode #
+# #
+# OUTPUT ************************************************************** #
+# fp0 = cosh(X) #
+# #
+# ACCURACY and MONOTONICITY ******************************************* #
+# The returned result is within 3 ulps in 64 significant bits, #
+# i.e. within 0.5001 ulp to 53 bits if the result is subsequently #
+# rounded to double precision. The result is provably monotonic #
+# in double precision. #
+# #
+# ALGORITHM *********************************************************** #
+# #
+# COSH #
+# 1. If |X| > 16380 log2, go to 3. #
+# #
+# 2. (|X| <= 16380 log2) Cosh(X) is obtained by the formulae #
+# y = |X|, z = exp(Y), and #
+# cosh(X) = (1/2)*( z + 1/z ). #
+# Exit. #
+# #
+# 3. (|X| > 16380 log2). If |X| > 16480 log2, go to 5. #
+# #
+# 4. (16380 log2 < |X| <= 16480 log2) #
+# cosh(X) = exp(|X|)/2. #
+# However, invoking exp(|X|) may cause premature #
+# overflow. Thus, we calculate cosh(X) as follows: #
+# Y := |X| #
+# Fact := 2**(16380) #
+# Y' := Y - 16381 log2 #
+# cosh(X) := Fact * exp(Y'). #
+# Exit. #
+# #
+# 5. (|X| > 16480 log2) cosh(X) must overflow. Return #
+# Huge*Huge to generate overflow and a (positive) #
+# infinity. Huge is the largest finite number #
+# in extended format. Exit. #
+# #
+#########################################################################
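+# Step 2 above, rendered in C (a sketch; the guards of steps 3-5
+# against premature overflow of exp(|X|) are omitted):
+#
+#	#include <math.h>
+#	double cosh_via_exp(double x)	/* assumes |x| <= 16380 log2 */
+#	{
+#		double z = exp(fabs(x));
+#		return 0.5 * (z + 1.0 / z);
+#	}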
+
+TWO16380:
+ long 0x7FFB0000,0x80000000,0x00000000,0x00000000
+
+ global scosh
+scosh:
+ fmov.x (%a0),%fp0 # LOAD INPUT
+
+ mov.l (%a0),%d1
+ mov.w 4(%a0),%d1
+ and.l &0x7FFFFFFF,%d1
+ cmp.l %d1,&0x400CB167
+ bgt.b COSHBIG
+
+#--THIS IS THE USUAL CASE, |X| < 16380 LOG2
+#--COSH(X) = (1/2) * ( EXP(X) + 1/EXP(X) )
+
+ fabs.x %fp0 # |X|
+
+ mov.l %d0,-(%sp)
+ clr.l %d0
+ fmovm.x &0x01,-(%sp) # save |X| to stack
+ lea (%sp),%a0 # pass ptr to |X|
+ bsr setox # FP0 IS EXP(|X|)
+ add.l &0xc,%sp # erase |X| from stack
+ fmul.s &0x3F000000,%fp0 # (1/2)EXP(|X|)
+ mov.l (%sp)+,%d0
+
+ fmov.s &0x3E800000,%fp1 # (1/4)
+ fdiv.x %fp0,%fp1 # 1/(2 EXP(|X|))
+
+ fmov.l %d0,%fpcr
+ mov.b &FADD_OP,%d1 # last inst is ADD
+ fadd.x %fp1,%fp0
+ bra t_catch
+
+COSHBIG:
+ cmp.l %d1,&0x400CB2B3
+ bgt.b COSHHUGE
+
+ fabs.x %fp0
+ fsub.d T1(%pc),%fp0 # (|X|-16381LOG2_LEAD)
+ fsub.d T2(%pc),%fp0 # |X| - 16381 LOG2, ACCURATE
+
+ mov.l %d0,-(%sp)
+ clr.l %d0
+ fmovm.x &0x01,-(%sp) # save fp0 to stack
+ lea (%sp),%a0 # pass ptr to fp0
+ bsr setox
+ add.l &0xc,%sp # clear fp0 from stack
+ mov.l (%sp)+,%d0
+
+ fmov.l %d0,%fpcr
+ mov.b &FMUL_OP,%d1 # last inst is MUL
+ fmul.x TWO16380(%pc),%fp0
+ bra t_catch
+
+COSHHUGE:
+ bra t_ovfl2
+
+ global scoshd
+#--COSH(X) = 1 FOR DENORMALIZED X
+scoshd:
+ fmov.s &0x3F800000,%fp0
+
+ fmov.l %d0,%fpcr
+ fadd.s &0x00800000,%fp0
+ bra t_pinx2
+
+#########################################################################
+# ssinh(): computes the hyperbolic sine of a normalized input #
+# ssinhd(): computes the hyperbolic sine of a denormalized input #
+# #
+# INPUT *************************************************************** #
+# a0 = pointer to extended precision input #
+# d0 = round precision,mode #
+# #
+# OUTPUT ************************************************************** #
+# fp0 = sinh(X) #
+# #
+# ACCURACY and MONOTONICITY ******************************************* #
+#	The returned result is within 3 ulps in 64 significant bits,	#
+# i.e. within 0.5001 ulp to 53 bits if the result is subsequently #
+# rounded to double precision. The result is provably monotonic #
+# in double precision. #
+# #
+# ALGORITHM *********************************************************** #
+# #
+# SINH #
+# 1. If |X| > 16380 log2, go to 3. #
+# #
+# 2. (|X| <= 16380 log2) Sinh(X) is obtained by the formula #
+# y = |X|, sgn = sign(X), and z = expm1(Y), #
+# sinh(X) = sgn*(1/2)*( z + z/(1+z) ). #
+# Exit. #
+# #
+# 3. If |X| > 16480 log2, go to 5. #
+# #
+# 4. (16380 log2 < |X| <= 16480 log2) #
+# sinh(X) = sign(X) * exp(|X|)/2. #
+# However, invoking exp(|X|) may cause premature overflow. #
+# Thus, we calculate sinh(X) as follows: #
+# Y := |X| #
+# sgn := sign(X) #
+# sgnFact := sgn * 2**(16380) #
+# Y' := Y - 16381 log2 #
+# sinh(X) := sgnFact * exp(Y'). #
+# Exit. #
+# #
+# 5. (|X| > 16480 log2) sinh(X) must overflow. Return #
+# sign(X)*Huge*Huge to generate overflow and an infinity with #
+# the appropriate sign. Huge is the largest finite number in #
+# extended format. Exit. #
+# #
+#########################################################################
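+#
+# The same flow in rough C (illustrative only; expm1l() stands in for
+# setoxm1() and ldexpl() for the sgnFact scaling):
+#
+#	#include <float.h>
+#	#include <math.h>
+#	long double sinh_sketch(long double x)
+#	{
+#	    long double y = fabsl(x), sgn = (x < 0.0L) ? -1.0L : 1.0L;
+#	    if (y <= 16380.0L * M_LN2) {		/* usual case */
+#	        long double z = expm1l(y);
+#	        return sgn * 0.5L * (z + z / (1.0L + z));
+#	    }
+#	    if (y <= 16480.0L * M_LN2)			/* avoid early overflow */
+#	        return sgn * ldexpl(expl(y - 16381.0L * M_LN2), 16380);
+#	    return sgn * LDBL_MAX * LDBL_MAX;		/* force overflow */
+#	}
+#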
+
+ global ssinh
+ssinh:
+ fmov.x (%a0),%fp0 # LOAD INPUT
+
+ mov.l (%a0),%d1
+ mov.w 4(%a0),%d1
+ mov.l %d1,%a1 # save (compacted) operand
+ and.l &0x7FFFFFFF,%d1
+ cmp.l %d1,&0x400CB167
+ bgt.b SINHBIG
+
+#--THIS IS THE USUAL CASE, |X| < 16380 LOG2
+#--Y = |X|, Z = EXPM1(Y), SINH(X) = SIGN(X)*(1/2)*( Z + Z/(1+Z) )
+
+ fabs.x %fp0 # Y = |X|
+
+ movm.l &0x8040,-(%sp) # {a1/d0}
+ fmovm.x &0x01,-(%sp) # save Y on stack
+ lea (%sp),%a0 # pass ptr to Y
+ clr.l %d0
+ bsr setoxm1 # FP0 IS Z = EXPM1(Y)
+ add.l &0xc,%sp # clear Y from stack
+ fmov.l &0,%fpcr
+ movm.l (%sp)+,&0x0201 # {a1/d0}
+
+ fmov.x %fp0,%fp1
+ fadd.s &0x3F800000,%fp1 # 1+Z
+ fmov.x %fp0,-(%sp)
+ fdiv.x %fp1,%fp0 # Z/(1+Z)
+ mov.l %a1,%d1
+ and.l &0x80000000,%d1
+ or.l &0x3F000000,%d1
+ fadd.x (%sp)+,%fp0
+ mov.l %d1,-(%sp)
+
+ fmov.l %d0,%fpcr
+ mov.b &FMUL_OP,%d1 # last inst is MUL
+ fmul.s (%sp)+,%fp0 # last fp inst - possible exceptions set
+ bra t_catch
+
+SINHBIG:
+ cmp.l %d1,&0x400CB2B3
+ bgt t_ovfl
+ fabs.x %fp0
+ fsub.d T1(%pc),%fp0 # (|X|-16381LOG2_LEAD)
+ mov.l &0,-(%sp)
+ mov.l &0x80000000,-(%sp)
+ mov.l %a1,%d1
+ and.l &0x80000000,%d1
+ or.l &0x7FFB0000,%d1
+ mov.l %d1,-(%sp) # EXTENDED FMT
+ fsub.d T2(%pc),%fp0 # |X| - 16381 LOG2, ACCURATE
+
+ mov.l %d0,-(%sp)
+ clr.l %d0
+ fmovm.x &0x01,-(%sp) # save fp0 on stack
+ lea (%sp),%a0 # pass ptr to fp0
+ bsr setox
+ add.l &0xc,%sp # clear fp0 from stack
+
+ mov.l (%sp)+,%d0
+ fmov.l %d0,%fpcr
+ mov.b &FMUL_OP,%d1 # last inst is MUL
+ fmul.x (%sp)+,%fp0 # possible exception
+ bra t_catch
+
+ global ssinhd
+#--SINH(X) = X FOR DENORMALIZED X
+ssinhd:
+ bra t_extdnrm
+
+#########################################################################
+# stanh(): computes the hyperbolic tangent of a normalized input #
+# stanhd(): computes the hyperbolic tangent of a denormalized input #
+# #
+# INPUT *************************************************************** #
+# a0 = pointer to extended precision input #
+# d0 = round precision,mode #
+# #
+# OUTPUT ************************************************************** #
+# fp0 = tanh(X) #
+# #
+# ACCURACY and MONOTONICITY ******************************************* #
+#	The returned result is within 3 ulps in 64 significant bits,	#
+# i.e. within 0.5001 ulp to 53 bits if the result is subsequently #
+# rounded to double precision. The result is provably monotonic #
+# in double precision. #
+# #
+# ALGORITHM *********************************************************** #
+# #
+# TANH #
+# 1. If |X| >= (5/2) log2 or |X| <= 2**(-40), go to 3. #
+# #
+# 2. (2**(-40) < |X| < (5/2) log2) Calculate tanh(X) by #
+# sgn := sign(X), y := 2|X|, z := expm1(Y), and #
+# tanh(X) = sgn*( z/(2+z) ). #
+# Exit. #
+# #
+# 3. (|X| <= 2**(-40) or |X| >= (5/2) log2). If |X| < 1, #
+# go to 7. #
+# #
+# 4. (|X| >= (5/2) log2) If |X| >= 50 log2, go to 6. #
+# #
+# 5. ((5/2) log2 <= |X| < 50 log2) Calculate tanh(X) by #
+# sgn := sign(X), y := 2|X|, z := exp(Y), #
+# tanh(X) = sgn - [ sgn*2/(1+z) ]. #
+# Exit. #
+# #
+# 6. (|X| >= 50 log2) Tanh(X) = +-1 (round to nearest). Thus, we #
+# calculate Tanh(X) by #
+# sgn := sign(X), Tiny := 2**(-126), #
+# tanh(X) := sgn - sgn*Tiny. #
+# Exit. #
+# #
+# 7. (|X| < 2**(-40)). Tanh(X) = X. Exit. #
+# #
+#########################################################################
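+#
+# Rough C equivalent of the case split above (illustrative only;
+# expm1l()/expl() stand in for setoxm1()/setox()):
+#
+#	#include <math.h>
+#	long double tanh_sketch(long double x)
+#	{
+#	    long double y = fabsl(x), sgn = (x < 0.0L) ? -1.0L : 1.0L;
+#	    if (y < 0x1p-40L)				/* Step 7: tanh(X) = X */
+#	        return x;
+#	    if (y < 2.5L * M_LN2) {			/* Step 2: usual case */
+#	        long double z = expm1l(2.0L * y);
+#	        return sgn * (z / (2.0L + z));
+#	    }
+#	    if (y < 50.0L * M_LN2)			/* Step 5 */
+#	        return sgn - sgn * 2.0L / (expl(2.0L * y) + 1.0L);
+#	    return sgn - sgn * 0x1p-126L;		/* Step 6: +-1, inexact */
+#	}
+#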
+
+ set X,FP_SCR0
+ set XFRAC,X+4
+
+ set SGN,L_SCR3
+
+ set V,FP_SCR0
+
+ global stanh
+stanh:
+ fmov.x (%a0),%fp0 # LOAD INPUT
+
+ fmov.x %fp0,X(%a6)
+ mov.l (%a0),%d1
+ mov.w 4(%a0),%d1
+ mov.l %d1,X(%a6)
+ and.l &0x7FFFFFFF,%d1
+ cmp.l %d1, &0x3fd78000 # is |X| < 2^(-40)?
+ blt.w TANHBORS # yes
+ cmp.l %d1, &0x3fffddce # is |X| > (5/2)LOG2?
+ bgt.w TANHBORS # yes
+
+#--THIS IS THE USUAL CASE
+#--Y = 2|X|, Z = EXPM1(Y), TANH(X) = SIGN(X) * Z / (Z+2).
+
+ mov.l X(%a6),%d1
+ mov.l %d1,SGN(%a6)
+ and.l &0x7FFF0000,%d1
+ add.l &0x00010000,%d1 # EXPONENT OF 2|X|
+ mov.l %d1,X(%a6)
+ and.l &0x80000000,SGN(%a6)
+ fmov.x X(%a6),%fp0 # FP0 IS Y = 2|X|
+
+ mov.l %d0,-(%sp)
+ clr.l %d0
+ fmovm.x &0x1,-(%sp) # save Y on stack
+ lea (%sp),%a0 # pass ptr to Y
+ bsr setoxm1 # FP0 IS Z = EXPM1(Y)
+ add.l &0xc,%sp # clear Y from stack
+ mov.l (%sp)+,%d0
+
+ fmov.x %fp0,%fp1
+ fadd.s &0x40000000,%fp1 # Z+2
+ mov.l SGN(%a6),%d1
+ fmov.x %fp1,V(%a6)
+ eor.l %d1,V(%a6)
+
+	fmov.l		%d0,%fpcr	# restore user's round prec,mode
+ fdiv.x V(%a6),%fp0
+ bra t_inx2
+
+TANHBORS:
+ cmp.l %d1,&0x3FFF8000
+ blt.w TANHSM
+
+ cmp.l %d1,&0x40048AA1
+ bgt.w TANHHUGE
+
+#-- (5/2) LOG2 < |X| < 50 LOG2,
+#--TANH(X) = 1 - (2/[EXP(2X)+1]). LET Y = 2|X|, SGN = SIGN(X),
+#--TANH(X) = SGN - SGN*2/[EXP(Y)+1].
+
+ mov.l X(%a6),%d1
+ mov.l %d1,SGN(%a6)
+ and.l &0x7FFF0000,%d1
+ add.l &0x00010000,%d1 # EXPO OF 2|X|
+ mov.l %d1,X(%a6) # Y = 2|X|
+ and.l &0x80000000,SGN(%a6)
+ mov.l SGN(%a6),%d1
+ fmov.x X(%a6),%fp0 # Y = 2|X|
+
+ mov.l %d0,-(%sp)
+ clr.l %d0
+ fmovm.x &0x01,-(%sp) # save Y on stack
+ lea (%sp),%a0 # pass ptr to Y
+ bsr setox # FP0 IS EXP(Y)
+ add.l &0xc,%sp # clear Y from stack
+ mov.l (%sp)+,%d0
+ mov.l SGN(%a6),%d1
+ fadd.s &0x3F800000,%fp0 # EXP(Y)+1
+
+ eor.l &0xC0000000,%d1 # -SIGN(X)*2
+ fmov.s %d1,%fp1 # -SIGN(X)*2 IN SGL FMT
+ fdiv.x %fp0,%fp1 # -SIGN(X)2 / [EXP(Y)+1 ]
+
+ mov.l SGN(%a6),%d1
+ or.l &0x3F800000,%d1 # SGN
+ fmov.s %d1,%fp0 # SGN IN SGL FMT
+
+	fmov.l		%d0,%fpcr	# restore user's round prec,mode
+ mov.b &FADD_OP,%d1 # last inst is ADD
+ fadd.x %fp1,%fp0
+ bra t_inx2
+
+TANHSM:
+	fmov.l		%d0,%fpcr	# restore user's round prec,mode
+ mov.b &FMOV_OP,%d1 # last inst is MOVE
+ fmov.x X(%a6),%fp0 # last inst - possible exception set
+ bra t_catch
+
+#--RETURN SGN(X) - SGN(X)*EPS
+TANHHUGE:
+ mov.l X(%a6),%d1
+ and.l &0x80000000,%d1
+ or.l &0x3F800000,%d1
+ fmov.s %d1,%fp0
+ and.l &0x80000000,%d1
+ eor.l &0x80800000,%d1 # -SIGN(X)*EPS
+
+	fmov.l		%d0,%fpcr	# restore user's round prec,mode
+ fadd.s %d1,%fp0
+ bra t_inx2
+
+ global stanhd
+#--TANH(X) = X FOR DENORMALIZED X
+stanhd:
+ bra t_extdnrm
+
+#########################################################################
+# slogn(): computes the natural logarithm of a normalized input #
+# slognd(): computes the natural logarithm of a denormalized input #
+# slognp1(): computes the log(1+X) of a normalized input #
+# slognp1d(): computes the log(1+X) of a denormalized input #
+# #
+# INPUT *************************************************************** #
+# a0 = pointer to extended precision input #
+# d0 = round precision,mode #
+# #
+# OUTPUT ************************************************************** #
+# fp0 = log(X) or log(1+X) #
+# #
+# ACCURACY and MONOTONICITY ******************************************* #
+#	The returned result is within 2 ulps in 64 significant bits,	#
+# i.e. within 0.5001 ulp to 53 bits if the result is subsequently #
+# rounded to double precision. The result is provably monotonic #
+# in double precision. #
+# #
+# ALGORITHM *********************************************************** #
+# LOGN: #
+# Step 1. If |X-1| < 1/16, approximate log(X) by an odd #
+# polynomial in u, where u = 2(X-1)/(X+1). Otherwise, #
+# move on to Step 2. #
+# #
+# Step 2. X = 2**k * Y where 1 <= Y < 2. Define F to be the first #
+# seven significant bits of Y plus 2**(-7), i.e. #
+# F = 1.xxxxxx1 in base 2 where the six "x" match those #
+# of Y. Note that |Y-F| <= 2**(-7). #
+# #
+# Step 3. Define u = (Y-F)/F. Approximate log(1+u) by a #
+# polynomial in u, log(1+u) = poly. #
+# #
+# Step 4. Reconstruct #
+# log(X) = log( 2**k * Y ) = k*log(2) + log(F) + log(1+u) #
+# by k*log(2) + (log(F) + poly). The values of log(F) are #
+# calculated beforehand and stored in the program. #
+# #
+# lognp1: #
+# Step 1: If |X| < 1/16, approximate log(1+X) by an odd #
+# polynomial in u where u = 2X/(2+X). Otherwise, move on #
+# to Step 2. #
+# #
+# Step 2: Let 1+X = 2**k * Y, where 1 <= Y < 2. Define F as done #
+# in Step 2 of the algorithm for LOGN and compute #
+# log(1+X) as k*log(2) + log(F) + poly where poly #
+# approximates log(1+u), u = (Y-F)/F. #
+# #
+# Implementation Notes: #
+# Note 1. There are 64 different possible values for F, thus 64 #
+# log(F)'s need to be tabulated. Moreover, the values of #
+# 1/F are also tabulated so that the division in (Y-F)/F #
+# can be performed by a multiplication. #
+# #
+#	Note 2. In Step 2 of lognp1, in order to preserve accuracy,	#
+# the value Y-F has to be calculated carefully when #
+# 1/2 <= X < 3/2. #
+# #
+# Note 3. To fully exploit the pipeline, polynomials are usually #
+# separated into two parts evaluated independently before #
+# being added up. #
+# #
+#########################################################################
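+#
+# The table-driven scheme in rough C (illustrative only; logl() and
+# log1pl() stand in for the tabulated log(F) and the A1..A6 polynomial,
+# the tabulated 1/F multiply is written as a division, and the
+# |X-1| < 1/16 special path is omitted):
+#
+#	#include <math.h>
+#	long double logn_sketch(long double x)	/* x > 0, normalized */
+#	{
+#	    int k;
+#	    long double y = 2.0L * frexpl(x, &k);	/* x = 2^(k-1)*y, y in [1,2) */
+#	    long double f = (floorl(64.0L * y) + 0.5L) / 64.0L; /* F = 1.xxxxxx1 */
+#	    long double u = (y - f) / f;		/* |Y-F| <= 2^(-7) */
+#	    return (k - 1) * M_LN2 + logl(f) + log1pl(u);
+#	}
+#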
+LOGOF2:
+ long 0x3FFE0000,0xB17217F7,0xD1CF79AC,0x00000000
+
+one:
+ long 0x3F800000
+zero:
+ long 0x00000000
+infty:
+ long 0x7F800000
+negone:
+ long 0xBF800000
+
+LOGA6:
+ long 0x3FC2499A,0xB5E4040B
+LOGA5:
+ long 0xBFC555B5,0x848CB7DB
+
+LOGA4:
+ long 0x3FC99999,0x987D8730
+LOGA3:
+ long 0xBFCFFFFF,0xFF6F7E97
+
+LOGA2:
+ long 0x3FD55555,0x555555A4
+LOGA1:
+ long 0xBFE00000,0x00000008
+
+LOGB5:
+ long 0x3F175496,0xADD7DAD6
+LOGB4:
+ long 0x3F3C71C2,0xFE80C7E0
+
+LOGB3:
+ long 0x3F624924,0x928BCCFF
+LOGB2:
+ long 0x3F899999,0x999995EC
+
+LOGB1:
+ long 0x3FB55555,0x55555555
+TWO:
+ long 0x40000000,0x00000000
+
+LTHOLD:
+ long 0x3f990000,0x80000000,0x00000000,0x00000000
+
+LOGTBL:
+ long 0x3FFE0000,0xFE03F80F,0xE03F80FE,0x00000000
+ long 0x3FF70000,0xFF015358,0x833C47E2,0x00000000
+ long 0x3FFE0000,0xFA232CF2,0x52138AC0,0x00000000
+ long 0x3FF90000,0xBDC8D83E,0xAD88D549,0x00000000
+ long 0x3FFE0000,0xF6603D98,0x0F6603DA,0x00000000
+ long 0x3FFA0000,0x9CF43DCF,0xF5EAFD48,0x00000000
+ long 0x3FFE0000,0xF2B9D648,0x0F2B9D65,0x00000000
+ long 0x3FFA0000,0xDA16EB88,0xCB8DF614,0x00000000
+ long 0x3FFE0000,0xEF2EB71F,0xC4345238,0x00000000
+ long 0x3FFB0000,0x8B29B775,0x1BD70743,0x00000000
+ long 0x3FFE0000,0xEBBDB2A5,0xC1619C8C,0x00000000
+ long 0x3FFB0000,0xA8D839F8,0x30C1FB49,0x00000000
+ long 0x3FFE0000,0xE865AC7B,0x7603A197,0x00000000
+ long 0x3FFB0000,0xC61A2EB1,0x8CD907AD,0x00000000
+ long 0x3FFE0000,0xE525982A,0xF70C880E,0x00000000
+ long 0x3FFB0000,0xE2F2A47A,0xDE3A18AF,0x00000000
+ long 0x3FFE0000,0xE1FC780E,0x1FC780E2,0x00000000
+ long 0x3FFB0000,0xFF64898E,0xDF55D551,0x00000000
+ long 0x3FFE0000,0xDEE95C4C,0xA037BA57,0x00000000
+ long 0x3FFC0000,0x8DB956A9,0x7B3D0148,0x00000000
+ long 0x3FFE0000,0xDBEB61EE,0xD19C5958,0x00000000
+ long 0x3FFC0000,0x9B8FE100,0xF47BA1DE,0x00000000
+ long 0x3FFE0000,0xD901B203,0x6406C80E,0x00000000
+ long 0x3FFC0000,0xA9372F1D,0x0DA1BD17,0x00000000
+ long 0x3FFE0000,0xD62B80D6,0x2B80D62C,0x00000000
+ long 0x3FFC0000,0xB6B07F38,0xCE90E46B,0x00000000
+ long 0x3FFE0000,0xD3680D36,0x80D3680D,0x00000000
+ long 0x3FFC0000,0xC3FD0329,0x06488481,0x00000000
+ long 0x3FFE0000,0xD0B69FCB,0xD2580D0B,0x00000000
+ long 0x3FFC0000,0xD11DE0FF,0x15AB18CA,0x00000000
+ long 0x3FFE0000,0xCE168A77,0x25080CE1,0x00000000
+ long 0x3FFC0000,0xDE1433A1,0x6C66B150,0x00000000
+ long 0x3FFE0000,0xCB8727C0,0x65C393E0,0x00000000
+ long 0x3FFC0000,0xEAE10B5A,0x7DDC8ADD,0x00000000
+ long 0x3FFE0000,0xC907DA4E,0x871146AD,0x00000000
+ long 0x3FFC0000,0xF7856E5E,0xE2C9B291,0x00000000
+ long 0x3FFE0000,0xC6980C69,0x80C6980C,0x00000000
+ long 0x3FFD0000,0x82012CA5,0xA68206D7,0x00000000
+ long 0x3FFE0000,0xC4372F85,0x5D824CA6,0x00000000
+ long 0x3FFD0000,0x882C5FCD,0x7256A8C5,0x00000000
+ long 0x3FFE0000,0xC1E4BBD5,0x95F6E947,0x00000000
+ long 0x3FFD0000,0x8E44C60B,0x4CCFD7DE,0x00000000
+ long 0x3FFE0000,0xBFA02FE8,0x0BFA02FF,0x00000000
+ long 0x3FFD0000,0x944AD09E,0xF4351AF6,0x00000000
+ long 0x3FFE0000,0xBD691047,0x07661AA3,0x00000000
+ long 0x3FFD0000,0x9A3EECD4,0xC3EAA6B2,0x00000000
+ long 0x3FFE0000,0xBB3EE721,0xA54D880C,0x00000000
+ long 0x3FFD0000,0xA0218434,0x353F1DE8,0x00000000
+ long 0x3FFE0000,0xB92143FA,0x36F5E02E,0x00000000
+ long 0x3FFD0000,0xA5F2FCAB,0xBBC506DA,0x00000000
+ long 0x3FFE0000,0xB70FBB5A,0x19BE3659,0x00000000
+ long 0x3FFD0000,0xABB3B8BA,0x2AD362A5,0x00000000
+ long 0x3FFE0000,0xB509E68A,0x9B94821F,0x00000000
+ long 0x3FFD0000,0xB1641795,0xCE3CA97B,0x00000000
+ long 0x3FFE0000,0xB30F6352,0x8917C80B,0x00000000
+ long 0x3FFD0000,0xB7047551,0x5D0F1C61,0x00000000
+ long 0x3FFE0000,0xB11FD3B8,0x0B11FD3C,0x00000000
+ long 0x3FFD0000,0xBC952AFE,0xEA3D13E1,0x00000000
+ long 0x3FFE0000,0xAF3ADDC6,0x80AF3ADE,0x00000000
+ long 0x3FFD0000,0xC2168ED0,0xF458BA4A,0x00000000
+ long 0x3FFE0000,0xAD602B58,0x0AD602B6,0x00000000
+ long 0x3FFD0000,0xC788F439,0xB3163BF1,0x00000000
+ long 0x3FFE0000,0xAB8F69E2,0x8359CD11,0x00000000
+ long 0x3FFD0000,0xCCECAC08,0xBF04565D,0x00000000
+ long 0x3FFE0000,0xA9C84A47,0xA07F5638,0x00000000
+ long 0x3FFD0000,0xD2420487,0x2DD85160,0x00000000
+ long 0x3FFE0000,0xA80A80A8,0x0A80A80B,0x00000000
+ long 0x3FFD0000,0xD7894992,0x3BC3588A,0x00000000
+ long 0x3FFE0000,0xA655C439,0x2D7B73A8,0x00000000
+ long 0x3FFD0000,0xDCC2C4B4,0x9887DACC,0x00000000
+ long 0x3FFE0000,0xA4A9CF1D,0x96833751,0x00000000
+ long 0x3FFD0000,0xE1EEBD3E,0x6D6A6B9E,0x00000000
+ long 0x3FFE0000,0xA3065E3F,0xAE7CD0E0,0x00000000
+ long 0x3FFD0000,0xE70D785C,0x2F9F5BDC,0x00000000
+ long 0x3FFE0000,0xA16B312E,0xA8FC377D,0x00000000
+ long 0x3FFD0000,0xEC1F392C,0x5179F283,0x00000000
+ long 0x3FFE0000,0x9FD809FD,0x809FD80A,0x00000000
+ long 0x3FFD0000,0xF12440D3,0xE36130E6,0x00000000
+ long 0x3FFE0000,0x9E4CAD23,0xDD5F3A20,0x00000000
+ long 0x3FFD0000,0xF61CCE92,0x346600BB,0x00000000
+ long 0x3FFE0000,0x9CC8E160,0xC3FB19B9,0x00000000
+ long 0x3FFD0000,0xFB091FD3,0x8145630A,0x00000000
+ long 0x3FFE0000,0x9B4C6F9E,0xF03A3CAA,0x00000000
+ long 0x3FFD0000,0xFFE97042,0xBFA4C2AD,0x00000000
+ long 0x3FFE0000,0x99D722DA,0xBDE58F06,0x00000000
+ long 0x3FFE0000,0x825EFCED,0x49369330,0x00000000
+ long 0x3FFE0000,0x9868C809,0x868C8098,0x00000000
+ long 0x3FFE0000,0x84C37A7A,0xB9A905C9,0x00000000
+ long 0x3FFE0000,0x97012E02,0x5C04B809,0x00000000
+ long 0x3FFE0000,0x87224C2E,0x8E645FB7,0x00000000
+ long 0x3FFE0000,0x95A02568,0x095A0257,0x00000000
+ long 0x3FFE0000,0x897B8CAC,0x9F7DE298,0x00000000
+ long 0x3FFE0000,0x94458094,0x45809446,0x00000000
+ long 0x3FFE0000,0x8BCF55DE,0xC4CD05FE,0x00000000
+ long 0x3FFE0000,0x92F11384,0x0497889C,0x00000000
+ long 0x3FFE0000,0x8E1DC0FB,0x89E125E5,0x00000000
+ long 0x3FFE0000,0x91A2B3C4,0xD5E6F809,0x00000000
+ long 0x3FFE0000,0x9066E68C,0x955B6C9B,0x00000000
+ long 0x3FFE0000,0x905A3863,0x3E06C43B,0x00000000
+ long 0x3FFE0000,0x92AADE74,0xC7BE59E0,0x00000000
+ long 0x3FFE0000,0x8F1779D9,0xFDC3A219,0x00000000
+ long 0x3FFE0000,0x94E9BFF6,0x15845643,0x00000000
+ long 0x3FFE0000,0x8DDA5202,0x37694809,0x00000000
+ long 0x3FFE0000,0x9723A1B7,0x20134203,0x00000000
+ long 0x3FFE0000,0x8CA29C04,0x6514E023,0x00000000
+ long 0x3FFE0000,0x995899C8,0x90EB8990,0x00000000
+ long 0x3FFE0000,0x8B70344A,0x139BC75A,0x00000000
+ long 0x3FFE0000,0x9B88BDAA,0x3A3DAE2F,0x00000000
+ long 0x3FFE0000,0x8A42F870,0x5669DB46,0x00000000
+ long 0x3FFE0000,0x9DB4224F,0xFFE1157C,0x00000000
+ long 0x3FFE0000,0x891AC73A,0xE9819B50,0x00000000
+ long 0x3FFE0000,0x9FDADC26,0x8B7A12DA,0x00000000
+ long 0x3FFE0000,0x87F78087,0xF78087F8,0x00000000
+ long 0x3FFE0000,0xA1FCFF17,0xCE733BD4,0x00000000
+ long 0x3FFE0000,0x86D90544,0x7A34ACC6,0x00000000
+ long 0x3FFE0000,0xA41A9E8F,0x5446FB9F,0x00000000
+ long 0x3FFE0000,0x85BF3761,0x2CEE3C9B,0x00000000
+ long 0x3FFE0000,0xA633CD7E,0x6771CD8B,0x00000000
+ long 0x3FFE0000,0x84A9F9C8,0x084A9F9D,0x00000000
+ long 0x3FFE0000,0xA8489E60,0x0B435A5E,0x00000000
+ long 0x3FFE0000,0x83993052,0x3FBE3368,0x00000000
+ long 0x3FFE0000,0xAA59233C,0xCCA4BD49,0x00000000
+ long 0x3FFE0000,0x828CBFBE,0xB9A020A3,0x00000000
+ long 0x3FFE0000,0xAC656DAE,0x6BCC4985,0x00000000
+ long 0x3FFE0000,0x81848DA8,0xFAF0D277,0x00000000
+ long 0x3FFE0000,0xAE6D8EE3,0x60BB2468,0x00000000
+ long 0x3FFE0000,0x80808080,0x80808081,0x00000000
+ long 0x3FFE0000,0xB07197A2,0x3C46C654,0x00000000
+
+ set ADJK,L_SCR1
+
+ set X,FP_SCR0
+ set XDCARE,X+2
+ set XFRAC,X+4
+
+ set F,FP_SCR1
+ set FFRAC,F+4
+
+ set KLOG2,FP_SCR0
+
+ set SAVEU,FP_SCR0
+
+ global slogn
+#--ENTRY POINT FOR LOG(X) FOR X FINITE, NON-ZERO, NOT NAN'S
+slogn:
+ fmov.x (%a0),%fp0 # LOAD INPUT
+ mov.l &0x00000000,ADJK(%a6)
+
+LOGBGN:
+#--FPCR SAVED AND CLEARED, INPUT IS 2^(ADJK)*FP0, FP0 CONTAINS
+#--A FINITE, NON-ZERO, NORMALIZED NUMBER.
+
+ mov.l (%a0),%d1
+ mov.w 4(%a0),%d1
+
+ mov.l (%a0),X(%a6)
+ mov.l 4(%a0),X+4(%a6)
+ mov.l 8(%a0),X+8(%a6)
+
+ cmp.l %d1,&0 # CHECK IF X IS NEGATIVE
+ blt.w LOGNEG # LOG OF NEGATIVE ARGUMENT IS INVALID
+# X IS POSITIVE, CHECK IF X IS NEAR 1
+ cmp.l %d1,&0x3ffef07d # IS X < 15/16?
+ blt.b LOGMAIN # YES
+ cmp.l %d1,&0x3fff8841 # IS X > 17/16?
+ ble.w LOGNEAR1 # NO
+
+LOGMAIN:
+#--THIS SHOULD BE THE USUAL CASE, X NOT VERY CLOSE TO 1
+
+#--X = 2^(K) * Y, 1 <= Y < 2. THUS, Y = 1.XXXXXXXX....XX IN BINARY.
+#--WE DEFINE F = 1.XXXXXX1, I.E. FIRST 7 BITS OF Y AND ATTACH A 1.
+#--THE IDEA IS THAT LOG(X) = K*LOG2 + LOG(Y)
+#-- = K*LOG2 + LOG(F) + LOG(1 + (Y-F)/F).
+#--NOTE THAT U = (Y-F)/F IS VERY SMALL AND THUS APPROXIMATING
+#--LOG(1+U) CAN BE VERY EFFICIENT.
+#--ALSO NOTE THAT THE VALUE 1/F IS STORED IN A TABLE SO THAT NO
+#--DIVISION IS NEEDED TO CALCULATE (Y-F)/F.
+
+#--GET K, Y, F, AND ADDRESS OF 1/F.
+ asr.l &8,%d1
+ asr.l &8,%d1 # SHIFTED 16 BITS, BIASED EXPO. OF X
+ sub.l &0x3FFF,%d1 # THIS IS K
+ add.l ADJK(%a6),%d1 # ADJUST K, ORIGINAL INPUT MAY BE DENORM.
+ lea LOGTBL(%pc),%a0 # BASE ADDRESS OF 1/F AND LOG(F)
+ fmov.l %d1,%fp1 # CONVERT K TO FLOATING-POINT FORMAT
+
+#--WHILE THE CONVERSION IS GOING ON, WE GET F AND ADDRESS OF 1/F
+ mov.l &0x3FFF0000,X(%a6) # X IS NOW Y, I.E. 2^(-K)*X
+ mov.l XFRAC(%a6),FFRAC(%a6)
+ and.l &0xFE000000,FFRAC(%a6) # FIRST 7 BITS OF Y
+ or.l &0x01000000,FFRAC(%a6) # GET F: ATTACH A 1 AT THE EIGHTH BIT
+ mov.l FFRAC(%a6),%d1 # READY TO GET ADDRESS OF 1/F
+ and.l &0x7E000000,%d1
+ asr.l &8,%d1
+ asr.l &8,%d1
+	asr.l		&4,%d1		# SHIFTED 20, D1 IS THE DISPLACEMENT
+ add.l %d1,%a0 # A0 IS THE ADDRESS FOR 1/F
+
+ fmov.x X(%a6),%fp0
+ mov.l &0x3fff0000,F(%a6)
+ clr.l F+8(%a6)
+ fsub.x F(%a6),%fp0 # Y-F
+ fmovm.x &0xc,-(%sp) # SAVE FP2-3 WHILE FP0 IS NOT READY
+#--SUMMARY: FP0 IS Y-F, A0 IS ADDRESS OF 1/F, FP1 IS K
+#--REGISTERS SAVED: FPCR, FP1, FP2
+
+LP1CONT1:
+#--A RE-ENTRY POINT FOR LOGNP1
+ fmul.x (%a0),%fp0 # FP0 IS U = (Y-F)/F
+ fmul.x LOGOF2(%pc),%fp1 # GET K*LOG2 WHILE FP0 IS NOT READY
+ fmov.x %fp0,%fp2
+ fmul.x %fp2,%fp2 # FP2 IS V=U*U
+	fmov.x		%fp1,KLOG2(%a6)	# PUT K*LOG2 IN MEMORY, FREE FP1
+
+#--LOG(1+U) IS APPROXIMATED BY
+#--U + V*(A1+U*(A2+U*(A3+U*(A4+U*(A5+U*A6))))) WHICH IS
+#--[U + V*(A1+V*(A3+V*A5))] + [U*V*(A2+V*(A4+V*A6))]
+
+ fmov.x %fp2,%fp3
+ fmov.x %fp2,%fp1
+
+ fmul.d LOGA6(%pc),%fp1 # V*A6
+ fmul.d LOGA5(%pc),%fp2 # V*A5
+
+ fadd.d LOGA4(%pc),%fp1 # A4+V*A6
+ fadd.d LOGA3(%pc),%fp2 # A3+V*A5
+
+ fmul.x %fp3,%fp1 # V*(A4+V*A6)
+ fmul.x %fp3,%fp2 # V*(A3+V*A5)
+
+ fadd.d LOGA2(%pc),%fp1 # A2+V*(A4+V*A6)
+ fadd.d LOGA1(%pc),%fp2 # A1+V*(A3+V*A5)
+
+ fmul.x %fp3,%fp1 # V*(A2+V*(A4+V*A6))
+ add.l &16,%a0 # ADDRESS OF LOG(F)
+ fmul.x %fp3,%fp2 # V*(A1+V*(A3+V*A5))
+
+ fmul.x %fp0,%fp1 # U*V*(A2+V*(A4+V*A6))
+ fadd.x %fp2,%fp0 # U+V*(A1+V*(A3+V*A5))
+
+ fadd.x (%a0),%fp1 # LOG(F)+U*V*(A2+V*(A4+V*A6))
+ fmovm.x (%sp)+,&0x30 # RESTORE FP2-3
+ fadd.x %fp1,%fp0 # FP0 IS LOG(F) + LOG(1+U)
+
+ fmov.l %d0,%fpcr
+ fadd.x KLOG2(%a6),%fp0 # FINAL ADD
+ bra t_inx2
+
+
+LOGNEAR1:
+
+# if the input is exactly equal to one, then exit through ld_pzero.
+# if these 2 lines weren't here, the correct answer would be returned
+# but the INEX2 bit would be set.
+ fcmp.b %fp0,&0x1 # is it equal to one?
+ fbeq.l ld_pzero # yes
+
+#--REGISTERS SAVED: FPCR, FP1. FP0 CONTAINS THE INPUT.
+ fmov.x %fp0,%fp1
+ fsub.s one(%pc),%fp1 # FP1 IS X-1
+ fadd.s one(%pc),%fp0 # FP0 IS X+1
+ fadd.x %fp1,%fp1 # FP1 IS 2(X-1)
+#--LOG(X) = LOG(1+U/2)-LOG(1-U/2) WHICH IS AN ODD POLYNOMIAL
+#--IN U, U = 2(X-1)/(X+1) = FP1/FP0
+
+LP1CONT2:
+#--THIS IS A RE-ENTRY POINT FOR LOGNP1
+ fdiv.x %fp0,%fp1 # FP1 IS U
+ fmovm.x &0xc,-(%sp) # SAVE FP2-3
+#--REGISTERS SAVED ARE NOW FPCR,FP1,FP2,FP3
+#--LET V=U*U, W=V*V, CALCULATE
+#--U + U*V*(B1 + V*(B2 + V*(B3 + V*(B4 + V*B5)))) BY
+#--U + U*V*( [B1 + W*(B3 + W*B5)] + [V*(B2 + W*B4)] )
+ fmov.x %fp1,%fp0
+ fmul.x %fp0,%fp0 # FP0 IS V
+ fmov.x %fp1,SAVEU(%a6) # STORE U IN MEMORY, FREE FP1
+ fmov.x %fp0,%fp1
+ fmul.x %fp1,%fp1 # FP1 IS W
+
+ fmov.d LOGB5(%pc),%fp3
+ fmov.d LOGB4(%pc),%fp2
+
+ fmul.x %fp1,%fp3 # W*B5
+ fmul.x %fp1,%fp2 # W*B4
+
+ fadd.d LOGB3(%pc),%fp3 # B3+W*B5
+ fadd.d LOGB2(%pc),%fp2 # B2+W*B4
+
+ fmul.x %fp3,%fp1 # W*(B3+W*B5), FP3 RELEASED
+
+ fmul.x %fp0,%fp2 # V*(B2+W*B4)
+
+ fadd.d LOGB1(%pc),%fp1 # B1+W*(B3+W*B5)
+ fmul.x SAVEU(%a6),%fp0 # FP0 IS U*V
+
+ fadd.x %fp2,%fp1 # B1+W*(B3+W*B5) + V*(B2+W*B4), FP2 RELEASED
+ fmovm.x (%sp)+,&0x30 # FP2-3 RESTORED
+
+ fmul.x %fp1,%fp0 # U*V*( [B1+W*(B3+W*B5)] + [V*(B2+W*B4)] )
+
+ fmov.l %d0,%fpcr
+ fadd.x SAVEU(%a6),%fp0
+ bra t_inx2
+
+#--REGISTERS SAVED FPCR. LOG(-VE) IS INVALID
+LOGNEG:
+ bra t_operr
+
+ global slognd
+slognd:
+#--ENTRY POINT FOR LOG(X) FOR DENORMALIZED INPUT
+
+ mov.l &-100,ADJK(%a6) # INPUT = 2^(ADJK) * FP0
+
+#----normalize the input value by left shifting k bits (k to be determined
+#----below), adjusting exponent and storing -k to ADJK
+#----the value TWOTO100 is no longer needed.
+#----Note that this code assumes the denormalized input is NON-ZERO.
+
+ movm.l &0x3f00,-(%sp) # save some registers {d2-d7}
+ mov.l (%a0),%d3 # D3 is exponent of smallest norm. #
+ mov.l 4(%a0),%d4
+ mov.l 8(%a0),%d5 # (D4,D5) is (Hi_X,Lo_X)
+ clr.l %d2 # D2 used for holding K
+
+ tst.l %d4
+ bne.b Hi_not0
+
+Hi_0:
+ mov.l %d5,%d4
+ clr.l %d5
+ mov.l &32,%d2
+ clr.l %d6
+ bfffo %d4{&0:&32},%d6
+ lsl.l %d6,%d4
+ add.l %d6,%d2 # (D3,D4,D5) is normalized
+
+ mov.l %d3,X(%a6)
+ mov.l %d4,XFRAC(%a6)
+ mov.l %d5,XFRAC+4(%a6)
+ neg.l %d2
+ mov.l %d2,ADJK(%a6)
+ fmov.x X(%a6),%fp0
+ movm.l (%sp)+,&0xfc # restore registers {d2-d7}
+ lea X(%a6),%a0
+ bra.w LOGBGN # begin regular log(X)
+
+Hi_not0:
+ clr.l %d6
+ bfffo %d4{&0:&32},%d6 # find first 1
+ mov.l %d6,%d2 # get k
+ lsl.l %d6,%d4
+ mov.l %d5,%d7 # a copy of D5
+ lsl.l %d6,%d5
+ neg.l %d6
+ add.l &32,%d6
+ lsr.l %d6,%d7
+ or.l %d7,%d4 # (D3,D4,D5) normalized
+
+ mov.l %d3,X(%a6)
+ mov.l %d4,XFRAC(%a6)
+ mov.l %d5,XFRAC+4(%a6)
+ neg.l %d2
+ mov.l %d2,ADJK(%a6)
+ fmov.x X(%a6),%fp0
+ movm.l (%sp)+,&0xfc # restore registers {d2-d7}
+ lea X(%a6),%a0
+ bra.w LOGBGN # begin regular log(X)
+
+ global slognp1
+#--ENTRY POINT FOR LOG(1+X) FOR X FINITE, NON-ZERO, NOT NAN'S
+slognp1:
+ fmov.x (%a0),%fp0 # LOAD INPUT
+ fabs.x %fp0 # test magnitude
+ fcmp.x %fp0,LTHOLD(%pc) # compare with min threshold
+ fbgt.w LP1REAL # if greater, continue
+ fmov.l %d0,%fpcr
+ mov.b &FMOV_OP,%d1 # last inst is MOVE
+ fmov.x (%a0),%fp0 # return signed argument
+ bra t_catch
+
+LP1REAL:
+ fmov.x (%a0),%fp0 # LOAD INPUT
+ mov.l &0x00000000,ADJK(%a6)
+ fmov.x %fp0,%fp1 # FP1 IS INPUT Z
+ fadd.s one(%pc),%fp0 # X := ROUND(1+Z)
+ fmov.x %fp0,X(%a6)
+ mov.w XFRAC(%a6),XDCARE(%a6)
+ mov.l X(%a6),%d1
+ cmp.l %d1,&0
+ ble.w LP1NEG0 # LOG OF ZERO OR -VE
+	cmp.l		%d1,&0x3ffe8000	# IS X IN BOUNDS [1/2,3/2]?
+ blt.w LOGMAIN
+ cmp.l %d1,&0x3fffc000
+ bgt.w LOGMAIN
+#--IF 1+Z > 3/2 OR 1+Z < 1/2, THEN X, THE ROUNDED VALUE OF 1+Z,
+#--CONTAINS AT LEAST 63 BITS OF INFORMATION OF Z. IN THAT CASE,
+#--SIMPLY INVOKE LOG(X) FOR LOG(1+Z).
+
+LP1NEAR1:
+#--NEXT SEE IF EXP(-1/16) < X < EXP(1/16)
+ cmp.l %d1,&0x3ffef07d
+ blt.w LP1CARE
+ cmp.l %d1,&0x3fff8841
+ bgt.w LP1CARE
+
+LP1ONE16:
+#--EXP(-1/16) < X < EXP(1/16). LOG(1+Z) = LOG(1+U/2) - LOG(1-U/2)
+#--WHERE U = 2Z/(2+Z) = 2Z/(1+X).
+ fadd.x %fp1,%fp1 # FP1 IS 2Z
+ fadd.s one(%pc),%fp0 # FP0 IS 1+X
+#--U = FP1/FP0
+ bra.w LP1CONT2
+
+LP1CARE:
+#--HERE WE USE THE USUAL TABLE DRIVEN APPROACH. CARE HAS TO BE
+#--TAKEN BECAUSE 1+Z CAN HAVE 67 BITS OF INFORMATION AND WE MUST
+#--PRESERVE ALL THE INFORMATION. BECAUSE 1+Z IS IN [1/2,3/2],
+#--THERE ARE ONLY TWO CASES.
+#--CASE 1: 1+Z < 1, THEN K = -1 AND Y-F = (2-F) + 2Z
+#--CASE 2: 1+Z > 1, THEN K = 0 AND Y-F = (1-F) + Z
+#--ON RETURNING TO LP1CONT1, WE MUST HAVE K IN FP1, ADDRESS OF
+#--(1/F) IN A0, Y-F IN FP0, AND FP2 SAVED.
+
+ mov.l XFRAC(%a6),FFRAC(%a6)
+ and.l &0xFE000000,FFRAC(%a6)
+ or.l &0x01000000,FFRAC(%a6) # F OBTAINED
+ cmp.l %d1,&0x3FFF8000 # SEE IF 1+Z > 1
+ bge.b KISZERO
+
+KISNEG1:
+ fmov.s TWO(%pc),%fp0
+ mov.l &0x3fff0000,F(%a6)
+ clr.l F+8(%a6)
+ fsub.x F(%a6),%fp0 # 2-F
+ mov.l FFRAC(%a6),%d1
+ and.l &0x7E000000,%d1
+ asr.l &8,%d1
+ asr.l &8,%d1
+	asr.l		&4,%d1		# D1 CONTAINS DISPLACEMENT FOR 1/F
+ fadd.x %fp1,%fp1 # GET 2Z
+ fmovm.x &0xc,-(%sp) # SAVE FP2 {%fp2/%fp3}
+ fadd.x %fp1,%fp0 # FP0 IS Y-F = (2-F)+2Z
+ lea LOGTBL(%pc),%a0 # A0 IS ADDRESS OF 1/F
+ add.l %d1,%a0
+ fmov.s negone(%pc),%fp1 # FP1 IS K = -1
+ bra.w LP1CONT1
+
+KISZERO:
+ fmov.s one(%pc),%fp0
+ mov.l &0x3fff0000,F(%a6)
+ clr.l F+8(%a6)
+ fsub.x F(%a6),%fp0 # 1-F
+ mov.l FFRAC(%a6),%d1
+ and.l &0x7E000000,%d1
+ asr.l &8,%d1
+ asr.l &8,%d1
+ asr.l &4,%d1
+ fadd.x %fp1,%fp0 # FP0 IS Y-F
+ fmovm.x &0xc,-(%sp) # FP2 SAVED {%fp2/%fp3}
+ lea LOGTBL(%pc),%a0
+ add.l %d1,%a0 # A0 IS ADDRESS OF 1/F
+ fmov.s zero(%pc),%fp1 # FP1 IS K = 0
+ bra.w LP1CONT1
+
+LP1NEG0:
+#--FPCR SAVED. D1 IS X IN COMPACT FORM.
+ cmp.l %d1,&0
+ blt.b LP1NEG
+LP1ZERO:
+ fmov.s negone(%pc),%fp0
+
+ fmov.l %d0,%fpcr
+ bra t_dz
+
+LP1NEG:
+ fmov.s zero(%pc),%fp0
+
+ fmov.l %d0,%fpcr
+ bra t_operr
+
+ global slognp1d
+#--ENTRY POINT FOR LOG(1+Z) FOR DENORMALIZED INPUT
+# Simply return the denorm
+slognp1d:
+ bra t_extdnrm
+
+#########################################################################
+# satanh(): computes the inverse hyperbolic tangent of a norm input #
+# satanhd(): computes the inverse hyperbolic tangent of a denorm input #
+# #
+# INPUT *************************************************************** #
+# a0 = pointer to extended precision input #
+# d0 = round precision,mode #
+# #
+# OUTPUT ************************************************************** #
+# fp0 = arctanh(X) #
+# #
+# ACCURACY and MONOTONICITY ******************************************* #
+#	The returned result is within 3 ulps in 64 significant bits,	#
+# i.e. within 0.5001 ulp to 53 bits if the result is subsequently #
+# rounded to double precision. The result is provably monotonic #
+# in double precision. #
+# #
+# ALGORITHM *********************************************************** #
+# #
+# ATANH #
+# 1. If |X| >= 1, go to 3. #
+# #
+# 2. (|X| < 1) Calculate atanh(X) by #
+# sgn := sign(X) #
+# y := |X| #
+# z := 2y/(1-y) #
+# atanh(X) := sgn * (1/2) * logp1(z) #
+# Exit. #
+# #
+# 3. If |X| > 1, go to 5. #
+# #
+# 4. (|X| = 1) Generate infinity with an appropriate sign and #
+# divide-by-zero by #
+# sgn := sign(X) #
+#		atanh(X) := sgn / (+0).					#
+# Exit. #
+# #
+# 5. (|X| > 1) Generate an invalid operation by 0 * infinity. #
+# Exit. #
+# #
+#########################################################################
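+#
+# Rough C equivalent (illustrative only; log1pl() stands in for the
+# call to slognp1, with the sign and the 1/2 folded into one factor
+# exactly like the SIGN(X)*HALF constant used below):
+#
+#	#include <math.h>
+#	long double atanh_sketch(long double x)
+#	{
+#	    long double y = fabsl(x);
+#	    long double sgnhalf = (x < 0.0L) ? -0.5L : 0.5L;
+#	    if (y < 1.0L)				/* usual case */
+#	        return sgnhalf * log1pl(2.0L * y / (1.0L - y));
+#	    if (y == 1.0L)				/* sgn/(+0): dz, +-inf */
+#	        return copysignl(HUGE_VALL, x);
+#	    return 0.0L * HUGE_VALL;			/* 0*inf: operr, NaN */
+#	}
+#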
+
+ global satanh
+satanh:
+ mov.l (%a0),%d1
+ mov.w 4(%a0),%d1
+ and.l &0x7FFFFFFF,%d1
+ cmp.l %d1,&0x3FFF8000
+ bge.b ATANHBIG
+
+#--THIS IS THE USUAL CASE, |X| < 1
+#--Y = |X|, Z = 2Y/(1-Y), ATANH(X) = SIGN(X) * (1/2) * LOG1P(Z).
+
+ fabs.x (%a0),%fp0 # Y = |X|
+ fmov.x %fp0,%fp1
+ fneg.x %fp1 # -Y
+ fadd.x %fp0,%fp0 # 2Y
+ fadd.s &0x3F800000,%fp1 # 1-Y
+ fdiv.x %fp1,%fp0 # 2Y/(1-Y)
+ mov.l (%a0),%d1
+ and.l &0x80000000,%d1
+ or.l &0x3F000000,%d1 # SIGN(X)*HALF
+ mov.l %d1,-(%sp)
+
+ mov.l %d0,-(%sp) # save rnd prec,mode
+ clr.l %d0 # pass ext prec,RN
+ fmovm.x &0x01,-(%sp) # save Z on stack
+ lea (%sp),%a0 # pass ptr to Z
+ bsr slognp1 # LOG1P(Z)
+ add.l &0xc,%sp # clear Z from stack
+
+ mov.l (%sp)+,%d0 # fetch old prec,mode
+ fmov.l %d0,%fpcr # load it
+ mov.b &FMUL_OP,%d1 # last inst is MUL
+ fmul.s (%sp)+,%fp0
+ bra t_catch
+
+ATANHBIG:
+ fabs.x (%a0),%fp0 # |X|
+ fcmp.s %fp0,&0x3F800000
+ fbgt t_operr
+ bra t_dz
+
+ global satanhd
+#--ATANH(X) = X FOR DENORMALIZED X
+satanhd:
+ bra t_extdnrm
+
+#########################################################################
+# slog10(): computes the base-10 logarithm of a normalized input #
+# slog10d(): computes the base-10 logarithm of a denormalized input #
+# slog2(): computes the base-2 logarithm of a normalized input #
+# slog2d(): computes the base-2 logarithm of a denormalized input #
+# #
+# INPUT *************************************************************** #
+# a0 = pointer to extended precision input #
+# d0 = round precision,mode #
+# #
+# OUTPUT ************************************************************** #
+# fp0 = log_10(X) or log_2(X) #
+# #
+# ACCURACY and MONOTONICITY ******************************************* #
+#	The returned result is within 1.7 ulps in 64 significant bits,	#
+# i.e. within 0.5003 ulp to 53 bits if the result is subsequently #
+# rounded to double precision. The result is provably monotonic #
+# in double precision. #
+# #
+# ALGORITHM *********************************************************** #
+# #
+# slog10d: #
+# #
+# Step 0. If X < 0, create a NaN and raise the invalid operation #
+#	flag. Otherwise, save FPCR in D1; set FPCR to default.	#
+# Notes: Default means round-to-nearest mode, no floating-point #
+# traps, and precision control = double extended. #
+# #
+# Step 1. Call slognd to obtain Y = log(X), the natural log of X. #
+# Notes: Even if X is denormalized, log(X) is always normalized. #
+# #
+# Step 2. Compute log_10(X) = log(X) * (1/log(10)). #
+# 2.1 Restore the user FPCR #
+# 2.2 Return ans := Y * INV_L10. #
+# #
+# slog10: #
+# #
+# Step 0. If X < 0, create a NaN and raise the invalid operation #
+#	flag. Otherwise, save FPCR in D1; set FPCR to default.	#
+# Notes: Default means round-to-nearest mode, no floating-point #
+# traps, and precision control = double extended. #
+# #
+#	Step 1. Call slogn to obtain Y = log(X), the natural log of X.	#
+# #
+# Step 2. Compute log_10(X) = log(X) * (1/log(10)). #
+# 2.1 Restore the user FPCR #
+# 2.2 Return ans := Y * INV_L10. #
+# #
+#	slog2d:								#
+# #
+# Step 0. If X < 0, create a NaN and raise the invalid operation #
+#	flag. Otherwise, save FPCR in D1; set FPCR to default.	#
+# Notes: Default means round-to-nearest mode, no floating-point #
+# traps, and precision control = double extended. #
+# #
+# Step 1. Call slognd to obtain Y = log(X), the natural log of X. #
+# Notes: Even if X is denormalized, log(X) is always normalized. #
+# #
+#	Step 2. Compute log_2(X) = log(X) * (1/log(2)).			#
+# 2.1 Restore the user FPCR #
+# 2.2 Return ans := Y * INV_L2. #
+# #
+#	slog2:								#
+# #
+# Step 0. If X < 0, create a NaN and raise the invalid operation #
+#	flag. Otherwise, save FPCR in D1; set FPCR to default.	#
+# Notes: Default means round-to-nearest mode, no floating-point #
+# traps, and precision control = double extended. #
+# #
+# Step 1. If X is not an integer power of two, i.e., X != 2^k, #
+# go to Step 3. #
+# #
+# Step 2. Return k. #
+# 2.1 Get integer k, X = 2^k. #
+# 2.2 Restore the user FPCR. #
+# 2.3 Return ans := convert-to-double-extended(k). #
+# #
+#	Step 3. Call slogn to obtain Y = log(X), the natural log of X.	#
+# #
+# Step 4. Compute log_2(X) = log(X) * (1/log(2)). #
+# 4.1 Restore the user FPCR #
+# 4.2 Return ans := Y * INV_L2. #
+# #
+#########################################################################
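+#
+# Rough C equivalent (illustrative only; logl() stands in for slogn,
+# and the decimal constants are INV_L2 and INV_L10):
+#
+#	#include <math.h>
+#	long double log2_sketch(long double x)	/* x > 0 */
+#	{
+#	    int k;
+#	    if (frexpl(x, &k) == 0.5L)		/* X = 2^(k-1): exact case */
+#	        return (long double)(k - 1);	/* integer result, no INEX2 */
+#	    return logl(x) * 1.4426950408889634074L;	/* Y * INV_L2 */
+#	}
+#
+#	long double log10_sketch(long double x)	/* x > 0 */
+#	{
+#	    return logl(x) * 0.4342944819032518277L;	/* Y * INV_L10 */
+#	}
+#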
+
+INV_L10:
+ long 0x3FFD0000,0xDE5BD8A9,0x37287195,0x00000000
+
+INV_L2:
+ long 0x3FFF0000,0xB8AA3B29,0x5C17F0BC,0x00000000
+
+ global slog10
+#--entry point for Log10(X), X is normalized
+slog10:
+ fmov.b &0x1,%fp0
+ fcmp.x %fp0,(%a0) # if operand == 1,
+ fbeq.l ld_pzero # return an EXACT zero
+
+ mov.l (%a0),%d1
+ blt.w invalid
+ mov.l %d0,-(%sp)
+ clr.l %d0
+ bsr slogn # log(X), X normal.
+ fmov.l (%sp)+,%fpcr
+ fmul.x INV_L10(%pc),%fp0
+ bra t_inx2
+
+ global slog10d
+#--entry point for Log10(X), X is denormalized
+slog10d:
+ mov.l (%a0),%d1
+ blt.w invalid
+ mov.l %d0,-(%sp)
+ clr.l %d0
+ bsr slognd # log(X), X denorm.
+ fmov.l (%sp)+,%fpcr
+ fmul.x INV_L10(%pc),%fp0
+ bra t_minx2
+
+ global slog2
+#--entry point for Log2(X), X is normalized
+slog2:
+ mov.l (%a0),%d1
+ blt.w invalid
+
+ mov.l 8(%a0),%d1
+ bne.b continue # X is not 2^k
+
+ mov.l 4(%a0),%d1
+ and.l &0x7FFFFFFF,%d1
+ bne.b continue
+
+#--X = 2^k.
+ mov.w (%a0),%d1
+ and.l &0x00007FFF,%d1
+ sub.l &0x3FFF,%d1
+ beq.l ld_pzero
+ fmov.l %d0,%fpcr
+ fmov.l %d1,%fp0
+ bra t_inx2
+
+continue:
+ mov.l %d0,-(%sp)
+ clr.l %d0
+ bsr slogn # log(X), X normal.
+ fmov.l (%sp)+,%fpcr
+ fmul.x INV_L2(%pc),%fp0
+ bra t_inx2
+
+invalid:
+ bra t_operr
+
+ global slog2d
+#--entry point for Log2(X), X is denormalized
+slog2d:
+ mov.l (%a0),%d1
+ blt.w invalid
+ mov.l %d0,-(%sp)
+ clr.l %d0
+ bsr slognd # log(X), X denorm.
+ fmov.l (%sp)+,%fpcr
+ fmul.x INV_L2(%pc),%fp0
+ bra t_minx2
+
+#########################################################################
+# stwotox(): computes 2**X for a normalized input #
+# stwotoxd(): computes 2**X for a denormalized input #
+# stentox(): computes 10**X for a normalized input #
+# stentoxd(): computes 10**X for a denormalized input #
+# #
+# INPUT *************************************************************** #
+# a0 = pointer to extended precision input #
+# d0 = round precision,mode #
+# #
+# OUTPUT ************************************************************** #
+# fp0 = 2**X or 10**X #
+# #
+# ACCURACY and MONOTONICITY ******************************************* #
+#	The returned result is within 2 ulps in 64 significant bits,	#
+# i.e. within 0.5001 ulp to 53 bits if the result is subsequently #
+# rounded to double precision. The result is provably monotonic #
+# in double precision. #
+# #
+# ALGORITHM *********************************************************** #
+# #
+# twotox #
+# 1. If |X| > 16480, go to ExpBig. #
+# #
+# 2. If |X| < 2**(-70), go to ExpSm. #
+# #
+# 3. Decompose X as X = N/64 + r where |r| <= 1/128. Furthermore #
+# decompose N as #
+# N = 64(M + M') + j, j = 0,1,2,...,63. #
+# #
+# 4. Overwrite r := r * log2. Then #
+# 2**X = 2**(M') * 2**(M) * 2**(j/64) * exp(r). #
+# Go to expr to compute that expression. #
+# #
+# tentox #
+# 1. If |X| > 16480*log_10(2) (base 10 log of 2), go to ExpBig. #
+# #
+# 2. If |X| < 2**(-70), go to ExpSm. #
+# #
+# 3. Set y := X*log_2(10)*64 (base 2 log of 10). Set #
+# N := round-to-int(y). Decompose N as #
+# N = 64(M + M') + j, j = 0,1,2,...,63. #
+# #
+# 4. Define r as #
+# r := ((X - N*L1)-N*L2) * L10 #
+# where L1, L2 are the leading and trailing parts of #
+# log_10(2)/64 and L10 is the natural log of 10. Then #
+# 10**X = 2**(M') * 2**(M) * 2**(j/64) * exp(r). #
+# Go to expr to compute that expression. #
+# #
+# expr #
+# 1. Fetch 2**(j/64) from table as Fact1 and Fact2. #
+# #
+# 2. Overwrite Fact1 and Fact2 by #
+# Fact1 := 2**(M) * Fact1 #
+# Fact2 := 2**(M) * Fact2 #
+# Thus Fact1 + Fact2 = 2**(M) * 2**(j/64). #
+# #
+# 3. Calculate P where 1 + P approximates exp(r): #
+# P = r + r*r*(A1+r*(A2+...+r*A5)). #
+# #
+# 4. Let AdjFact := 2**(M'). Return #
+# AdjFact * ( Fact1 + ((Fact1*P) + Fact2) ). #
+# Exit. #
+# #
+# ExpBig #
+# 1. Generate overflow by Huge * Huge if X > 0; otherwise, #
+# generate underflow by Tiny * Tiny. #
+# #
+# ExpSm #
+# 1. Return 1 + X. #
+# #
+#########################################################################
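+#
+# Rough C model of twotox (illustrative only; exp2l() stands in for the
+# 2^(j/64) table, expm1l() for the A1..A5 polynomial P, and the M/M'
+# split collapses into a single ldexpl()):
+#
+#	#include <math.h>
+#	long double twotox_sketch(long double x)	/* range checks omitted */
+#	{
+#	    long n = lroundl(64.0L * x);		/* N = round-to-int(64 X) */
+#	    long j = n & 63, m = (n - j) / 64;		/* N = 64M + j */
+#	    long double r = (x - n / 64.0L) * M_LN2;	/* |r| <= log2/128 */
+#	    long double fact = ldexpl(exp2l(j / 64.0L), m);
+#	    return fact + fact * expm1l(r);		/* 2^M 2^(j/64) exp(r) */
+#	}
+#
+# tentox follows the same path after y := X*64*log2(10), with r formed
+# from the two-piece L1/L2 constants and then scaled by log(10).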
+
+L2TEN64:
+ long 0x406A934F,0x0979A371 # 64LOG10/LOG2
+L10TWO1:
+ long 0x3F734413,0x509F8000 # LOG2/64LOG10
+
+L10TWO2:
+ long 0xBFCD0000,0xC0219DC1,0xDA994FD2,0x00000000
+
+LOG10: long 0x40000000,0x935D8DDD,0xAAA8AC17,0x00000000
+
+LOG2: long 0x3FFE0000,0xB17217F7,0xD1CF79AC,0x00000000
+
+EXPA5: long 0x3F56C16D,0x6F7BD0B2
+EXPA4: long 0x3F811112,0x302C712C
+EXPA3: long 0x3FA55555,0x55554CC1
+EXPA2: long 0x3FC55555,0x55554A54
+EXPA1: long 0x3FE00000,0x00000000,0x00000000,0x00000000
+
+TEXPTBL:
+ long 0x3FFF0000,0x80000000,0x00000000,0x3F738000
+ long 0x3FFF0000,0x8164D1F3,0xBC030773,0x3FBEF7CA
+ long 0x3FFF0000,0x82CD8698,0xAC2BA1D7,0x3FBDF8A9
+ long 0x3FFF0000,0x843A28C3,0xACDE4046,0x3FBCD7C9
+ long 0x3FFF0000,0x85AAC367,0xCC487B15,0xBFBDE8DA
+ long 0x3FFF0000,0x871F6196,0x9E8D1010,0x3FBDE85C
+ long 0x3FFF0000,0x88980E80,0x92DA8527,0x3FBEBBF1
+ long 0x3FFF0000,0x8A14D575,0x496EFD9A,0x3FBB80CA
+ long 0x3FFF0000,0x8B95C1E3,0xEA8BD6E7,0xBFBA8373
+ long 0x3FFF0000,0x8D1ADF5B,0x7E5BA9E6,0xBFBE9670
+ long 0x3FFF0000,0x8EA4398B,0x45CD53C0,0x3FBDB700
+ long 0x3FFF0000,0x9031DC43,0x1466B1DC,0x3FBEEEB0
+ long 0x3FFF0000,0x91C3D373,0xAB11C336,0x3FBBFD6D
+ long 0x3FFF0000,0x935A2B2F,0x13E6E92C,0xBFBDB319
+ long 0x3FFF0000,0x94F4EFA8,0xFEF70961,0x3FBDBA2B
+ long 0x3FFF0000,0x96942D37,0x20185A00,0x3FBE91D5
+ long 0x3FFF0000,0x9837F051,0x8DB8A96F,0x3FBE8D5A
+ long 0x3FFF0000,0x99E04593,0x20B7FA65,0xBFBCDE7B
+ long 0x3FFF0000,0x9B8D39B9,0xD54E5539,0xBFBEBAAF
+ long 0x3FFF0000,0x9D3ED9A7,0x2CFFB751,0xBFBD86DA
+ long 0x3FFF0000,0x9EF53260,0x91A111AE,0xBFBEBEDD
+ long 0x3FFF0000,0xA0B0510F,0xB9714FC2,0x3FBCC96E
+ long 0x3FFF0000,0xA2704303,0x0C496819,0xBFBEC90B
+ long 0x3FFF0000,0xA43515AE,0x09E6809E,0x3FBBD1DB
+ long 0x3FFF0000,0xA5FED6A9,0xB15138EA,0x3FBCE5EB
+ long 0x3FFF0000,0xA7CD93B4,0xE965356A,0xBFBEC274
+ long 0x3FFF0000,0xA9A15AB4,0xEA7C0EF8,0x3FBEA83C
+ long 0x3FFF0000,0xAB7A39B5,0xA93ED337,0x3FBECB00
+ long 0x3FFF0000,0xAD583EEA,0x42A14AC6,0x3FBE9301
+ long 0x3FFF0000,0xAF3B78AD,0x690A4375,0xBFBD8367
+ long 0x3FFF0000,0xB123F581,0xD2AC2590,0xBFBEF05F
+ long 0x3FFF0000,0xB311C412,0xA9112489,0x3FBDFB3C
+ long 0x3FFF0000,0xB504F333,0xF9DE6484,0x3FBEB2FB
+ long 0x3FFF0000,0xB6FD91E3,0x28D17791,0x3FBAE2CB
+ long 0x3FFF0000,0xB8FBAF47,0x62FB9EE9,0x3FBCDC3C
+ long 0x3FFF0000,0xBAFF5AB2,0x133E45FB,0x3FBEE9AA
+ long 0x3FFF0000,0xBD08A39F,0x580C36BF,0xBFBEAEFD
+ long 0x3FFF0000,0xBF1799B6,0x7A731083,0xBFBCBF51
+ long 0x3FFF0000,0xC12C4CCA,0x66709456,0x3FBEF88A
+ long 0x3FFF0000,0xC346CCDA,0x24976407,0x3FBD83B2
+ long 0x3FFF0000,0xC5672A11,0x5506DADD,0x3FBDF8AB
+ long 0x3FFF0000,0xC78D74C8,0xABB9B15D,0xBFBDFB17
+ long 0x3FFF0000,0xC9B9BD86,0x6E2F27A3,0xBFBEFE3C
+ long 0x3FFF0000,0xCBEC14FE,0xF2727C5D,0xBFBBB6F8
+ long 0x3FFF0000,0xCE248C15,0x1F8480E4,0xBFBCEE53
+ long 0x3FFF0000,0xD06333DA,0xEF2B2595,0xBFBDA4AE
+ long 0x3FFF0000,0xD2A81D91,0xF12AE45A,0x3FBC9124
+ long 0x3FFF0000,0xD4F35AAB,0xCFEDFA1F,0x3FBEB243
+ long 0x3FFF0000,0xD744FCCA,0xD69D6AF4,0x3FBDE69A
+ long 0x3FFF0000,0xD99D15C2,0x78AFD7B6,0xBFB8BC61
+ long 0x3FFF0000,0xDBFBB797,0xDAF23755,0x3FBDF610
+ long 0x3FFF0000,0xDE60F482,0x5E0E9124,0xBFBD8BE1
+ long 0x3FFF0000,0xE0CCDEEC,0x2A94E111,0x3FBACB12
+ long 0x3FFF0000,0xE33F8972,0xBE8A5A51,0x3FBB9BFE
+ long 0x3FFF0000,0xE5B906E7,0x7C8348A8,0x3FBCF2F4
+ long 0x3FFF0000,0xE8396A50,0x3C4BDC68,0x3FBEF22F
+ long 0x3FFF0000,0xEAC0C6E7,0xDD24392F,0xBFBDBF4A
+ long 0x3FFF0000,0xED4F301E,0xD9942B84,0x3FBEC01A
+ long 0x3FFF0000,0xEFE4B99B,0xDCDAF5CB,0x3FBE8CAC
+ long 0x3FFF0000,0xF281773C,0x59FFB13A,0xBFBCBB3F
+ long 0x3FFF0000,0xF5257D15,0x2486CC2C,0x3FBEF73A
+ long 0x3FFF0000,0xF7D0DF73,0x0AD13BB9,0xBFB8B795
+ long 0x3FFF0000,0xFA83B2DB,0x722A033A,0x3FBEF84B
+ long 0x3FFF0000,0xFD3E0C0C,0xF486C175,0xBFBEF581
+
+ set INT,L_SCR1
+
+ set X,FP_SCR0
+ set XDCARE,X+2
+ set XFRAC,X+4
+
+ set ADJFACT,FP_SCR0
+
+ set FACT1,FP_SCR0
+ set FACT1HI,FACT1+4
+ set FACT1LOW,FACT1+8
+
+ set FACT2,FP_SCR1
+ set FACT2HI,FACT2+4
+ set FACT2LOW,FACT2+8
+
+ global stwotox
+#--ENTRY POINT FOR 2**(X), HERE X IS FINITE, NON-ZERO, AND NOT NAN'S
+stwotox:
+ fmovm.x (%a0),&0x80 # LOAD INPUT
+
+ mov.l (%a0),%d1
+ mov.w 4(%a0),%d1
+ fmov.x %fp0,X(%a6)
+ and.l &0x7FFFFFFF,%d1
+
+ cmp.l %d1,&0x3FB98000 # |X| >= 2**(-70)?
+ bge.b TWOOK1
+ bra.w EXPBORS
+
+TWOOK1:
+ cmp.l %d1,&0x400D80C0 # |X| > 16480?
+ ble.b TWOMAIN
+ bra.w EXPBORS
+
+TWOMAIN:
+#--USUAL CASE, 2^(-70) <= |X| <= 16480
+
+ fmov.x %fp0,%fp1
+ fmul.s &0x42800000,%fp1 # 64 * X
+ fmov.l %fp1,INT(%a6) # N = ROUND-TO-INT(64 X)
+ mov.l %d2,-(%sp)
+ lea TEXPTBL(%pc),%a1 # LOAD ADDRESS OF TABLE OF 2^(J/64)
+ fmov.l INT(%a6),%fp1 # N --> FLOATING FMT
+ mov.l INT(%a6),%d1
+ mov.l %d1,%d2
+	and.l		&0x3F,%d1	# D1 IS J
+ asl.l &4,%d1 # DISPLACEMENT FOR 2^(J/64)
+ add.l %d1,%a1 # ADDRESS FOR 2^(J/64)
+ asr.l &6,%d2 # d2 IS L, N = 64L + J
+ mov.l %d2,%d1
+	asr.l		&1,%d1		# D1 IS M
+ sub.l %d1,%d2 # d2 IS M', N = 64(M+M') + J
+ add.l &0x3FFF,%d2
+
+#--SUMMARY: a1 IS ADDRESS FOR THE LEADING PORTION OF 2^(J/64),
+#--D1 IS M WHERE N = 64(M+M') + J. NOTE THAT |M| <= 16140 BY DESIGN.
+#--ADJFACT = 2^(M').
+#--REGISTERS SAVED SO FAR: D2; FP2/FP3 ARE SAVED JUST BELOW.
+
+ fmovm.x &0x0c,-(%sp) # save fp2/fp3
+
+ fmul.s &0x3C800000,%fp1 # (1/64)*N
+ mov.l (%a1)+,FACT1(%a6)
+ mov.l (%a1)+,FACT1HI(%a6)
+ mov.l (%a1)+,FACT1LOW(%a6)
+ mov.w (%a1)+,FACT2(%a6)
+
+ fsub.x %fp1,%fp0 # X - (1/64)*INT(64 X)
+
+ mov.w (%a1)+,FACT2HI(%a6)
+ clr.w FACT2HI+2(%a6)
+ clr.l FACT2LOW(%a6)
+ add.w %d1,FACT1(%a6)
+ fmul.x LOG2(%pc),%fp0 # FP0 IS R
+ add.w %d1,FACT2(%a6)
+
+ bra.w expr
+
+EXPBORS:
+#--FPCR, D0 SAVED
+ cmp.l %d1,&0x3FFF8000
+ bgt.b TEXPBIG
+
+#--|X| IS SMALL, RETURN 1 + X
+
+	fmov.l		%d0,%fpcr	# restore user's round prec,mode
+ fadd.s &0x3F800000,%fp0 # RETURN 1 + X
+ bra t_pinx2
+
+TEXPBIG:
+#--|X| IS LARGE, GENERATE OVERFLOW IF X > 0; ELSE GENERATE UNDERFLOW
+#--REGISTERS SAVED SO FAR ARE FPCR AND D0
+ mov.l X(%a6),%d1
+ cmp.l %d1,&0
+ blt.b EXPNEG
+
+ bra t_ovfl2 # t_ovfl expects positive value
+
+EXPNEG:
+ bra t_unfl2 # t_unfl expects positive value
+
+ global stwotoxd
+stwotoxd:
+#--ENTRY POINT FOR 2**(X) FOR DENORMALIZED ARGUMENT
+
+ fmov.l %d0,%fpcr # set user's rounding mode/precision
+ fmov.s &0x3F800000,%fp0 # RETURN 1 + X
+ mov.l (%a0),%d1
+ or.l &0x00800001,%d1
+ fadd.s %d1,%fp0
+ bra t_pinx2
+
+ global stentox
+#--ENTRY POINT FOR 10**(X), HERE X IS FINITE, NON-ZERO, AND NOT NAN'S
+stentox:
+ fmovm.x (%a0),&0x80 # LOAD INPUT
+
+ mov.l (%a0),%d1
+ mov.w 4(%a0),%d1
+ fmov.x %fp0,X(%a6)
+ and.l &0x7FFFFFFF,%d1
+
+ cmp.l %d1,&0x3FB98000 # |X| >= 2**(-70)?
+ bge.b TENOK1
+ bra.w EXPBORS
+
+TENOK1:
+ cmp.l %d1,&0x400B9B07 # |X| <= 16480*log2/log10 ?
+ ble.b TENMAIN
+ bra.w EXPBORS
+
+TENMAIN:
+#--USUAL CASE, 2^(-70) <= |X| <= 16480 LOG 2 / LOG 10
+
+ fmov.x %fp0,%fp1
+ fmul.d L2TEN64(%pc),%fp1 # X*64*LOG10/LOG2
+ fmov.l %fp1,INT(%a6) # N=INT(X*64*LOG10/LOG2)
+ mov.l %d2,-(%sp)
+ lea TEXPTBL(%pc),%a1 # LOAD ADDRESS OF TABLE OF 2^(J/64)
+ fmov.l INT(%a6),%fp1 # N --> FLOATING FMT
+ mov.l INT(%a6),%d1
+ mov.l %d1,%d2
+	and.l		&0x3F,%d1	# D1 IS J
+ asl.l &4,%d1 # DISPLACEMENT FOR 2^(J/64)
+ add.l %d1,%a1 # ADDRESS FOR 2^(J/64)
+ asr.l &6,%d2 # d2 IS L, N = 64L + J
+ mov.l %d2,%d1
+	asr.l		&1,%d1		# D1 IS M
+ sub.l %d1,%d2 # d2 IS M', N = 64(M+M') + J
+ add.l &0x3FFF,%d2
+
+#--SUMMARY: a1 IS ADDRESS FOR THE LEADING PORTION OF 2^(J/64),
+#--D1 IS M WHERE N = 64(M+M') + J. NOTE THAT |M| <= 16140 BY DESIGN.
+#--ADJFACT = 2^(M').
+#--REGISTERS SAVED SO FAR: D2; FP2/FP3 ARE SAVED JUST BELOW.
+ fmovm.x &0x0c,-(%sp) # save fp2/fp3
+
+ fmov.x %fp1,%fp2
+
+ fmul.d L10TWO1(%pc),%fp1 # N*(LOG2/64LOG10)_LEAD
+ mov.l (%a1)+,FACT1(%a6)
+
+ fmul.x L10TWO2(%pc),%fp2 # N*(LOG2/64LOG10)_TRAIL
+
+ mov.l (%a1)+,FACT1HI(%a6)
+ mov.l (%a1)+,FACT1LOW(%a6)
+ fsub.x %fp1,%fp0 # X - N L_LEAD
+ mov.w (%a1)+,FACT2(%a6)
+
+ fsub.x %fp2,%fp0 # X - N L_TRAIL
+
+ mov.w (%a1)+,FACT2HI(%a6)
+ clr.w FACT2HI+2(%a6)
+ clr.l FACT2LOW(%a6)
+
+ fmul.x LOG10(%pc),%fp0 # FP0 IS R
+ add.w %d1,FACT1(%a6)
+ add.w %d1,FACT2(%a6)
+
+expr:
+#--FPCR, FP2, FP3 ARE SAVED IN ORDER AS SHOWN.
+#--ADJFACT CONTAINS 2**(M'), FACT1 + FACT2 = 2**(M) * 2**(J/64).
+#--FP0 IS R. THE FOLLOWING CODE COMPUTES
+#-- 2**(M'+M) * 2**(J/64) * EXP(R)
+
+ fmov.x %fp0,%fp1
+ fmul.x %fp1,%fp1 # FP1 IS S = R*R
+
+ fmov.d EXPA5(%pc),%fp2 # FP2 IS A5
+ fmov.d EXPA4(%pc),%fp3 # FP3 IS A4
+
+ fmul.x %fp1,%fp2 # FP2 IS S*A5
+ fmul.x %fp1,%fp3 # FP3 IS S*A4
+
+ fadd.d EXPA3(%pc),%fp2 # FP2 IS A3+S*A5
+ fadd.d EXPA2(%pc),%fp3 # FP3 IS A2+S*A4
+
+ fmul.x %fp1,%fp2 # FP2 IS S*(A3+S*A5)
+ fmul.x %fp1,%fp3 # FP3 IS S*(A2+S*A4)
+
+ fadd.d EXPA1(%pc),%fp2 # FP2 IS A1+S*(A3+S*A5)
+ fmul.x %fp0,%fp3 # FP3 IS R*S*(A2+S*A4)
+
+ fmul.x %fp1,%fp2 # FP2 IS S*(A1+S*(A3+S*A5))
+ fadd.x %fp3,%fp0 # FP0 IS R+R*S*(A2+S*A4)
+ fadd.x %fp2,%fp0 # FP0 IS EXP(R) - 1
+
+ fmovm.x (%sp)+,&0x30 # restore fp2/fp3
+
+#--FINAL RECONSTRUCTION PROCESS
+#--EXP(X) = 2^M*2^(J/64) + 2^M*2^(J/64)*(EXP(R)-1) - (1 OR 0)
+
+ fmul.x FACT1(%a6),%fp0
+ fadd.x FACT2(%a6),%fp0
+ fadd.x FACT1(%a6),%fp0
+
+	fmov.l		%d0,%fpcr	# restore user's round prec,mode
+ mov.w %d2,ADJFACT(%a6) # INSERT EXPONENT
+ mov.l (%sp)+,%d2
+ mov.l &0x80000000,ADJFACT+4(%a6)
+ clr.l ADJFACT+8(%a6)
+ mov.b &FMUL_OP,%d1 # last inst is MUL
+ fmul.x ADJFACT(%a6),%fp0 # FINAL ADJUSTMENT
+ bra t_catch
+
+ global stentoxd
+stentoxd:
+#--ENTRY POINT FOR 10**(X) FOR DENORMALIZED ARGUMENT
+
+ fmov.l %d0,%fpcr # set user's rounding mode/precision
+ fmov.s &0x3F800000,%fp0 # RETURN 1 + X
+ mov.l (%a0),%d1
+ or.l &0x00800001,%d1
+ fadd.s %d1,%fp0
+ bra t_pinx2
+
+#########################################################################
+# sscale(): computes the destination operand scaled by the source #
+#	operand. If the absolute value of the source operand is	#
+# >= 2^14, an overflow or underflow is returned. #
+# #
+# INPUT *************************************************************** #
+# a0 = pointer to double-extended source operand X #
+# a1 = pointer to double-extended destination operand Y #
+# #
+# OUTPUT ************************************************************** #
+# fp0 = scale(X,Y) #
+# #
+#########################################################################
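+#
+# Rough C equivalent (illustrative only; truncl() plays the part of the
+# fintrz, and the out-of-range cases force the same overflow/underflow
+# that t_ovfl_sc/t_unfl deliver):
+#
+#	#include <float.h>
+#	#include <math.h>
+#	long double scale_sketch(long double x, long double y)
+#	{
+#	    long double n = truncl(x);		/* integer part of src */
+#	    if (n <= -16384.0L)			/* src <= -2^14: underflow */
+#	        return y * LDBL_MIN * LDBL_MIN;
+#	    if (n >= 16384.0L)			/* src >= 2^14: overflow */
+#	        return y * LDBL_MAX * LDBL_MAX;
+#	    return ldexpl(y, (int)n);		/* y * 2^trunc(x) */
+#	}
+#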
+
+set SIGN, L_SCR1
+
+ global sscale
+sscale:
+ mov.l %d0,-(%sp) # store off ctrl bits for now
+
+ mov.w DST_EX(%a1),%d1 # get dst exponent
+ smi.b SIGN(%a6) # use SIGN to hold dst sign
+ andi.l &0x00007fff,%d1 # strip sign from dst exp
+
+ mov.w SRC_EX(%a0),%d0 # check src bounds
+ andi.w &0x7fff,%d0 # clr src sign bit
+	cmpi.w		%d0,&0x3fff	# is |src| < 1?
+ blt.w src_small # yes
+ cmpi.w %d0,&0x400c # no; is src too big?
+ bgt.w src_out # yes
+
+#
+# Source is within 2^14 range.
+#
+src_ok:
+ fintrz.x SRC(%a0),%fp0 # calc int of src
+ fmov.l %fp0,%d0 # int src to d0
+# don't want any accrued bits from the fintrz showing up later since
+# we may need to read the fpsr for the last fp op in t_catch2().
+ fmov.l &0x0,%fpsr
+
+ tst.b DST_HI(%a1) # is dst denormalized?
+	bmi.b		sok_norm	# no; dst is normalized
+
+# the dst is a DENORM. normalize the DENORM and add the adjustment to
+# the src value. then, jump to the norm part of the routine.
+sok_dnrm:
+ mov.l %d0,-(%sp) # save src for now
+
+ mov.w DST_EX(%a1),FP_SCR0_EX(%a6) # make a copy
+ mov.l DST_HI(%a1),FP_SCR0_HI(%a6)
+ mov.l DST_LO(%a1),FP_SCR0_LO(%a6)
+
+ lea FP_SCR0(%a6),%a0 # pass ptr to DENORM
+ bsr.l norm # normalize the DENORM
+ neg.l %d0
+ add.l (%sp)+,%d0 # add adjustment to src
+
+ fmovm.x FP_SCR0(%a6),&0x80 # load normalized DENORM
+
+	cmpi.w		%d0,&-0x3fff	# is the shift amt really low?
+ bge.b sok_norm2 # thank goodness no
+
+# the multiply factor that we're trying to create should be a denorm
+# for the multiply to work. therefore, we're going to actually do a
+# multiply with a denorm which will cause an unimplemented data type
+# exception to be put into the machine which will be caught and corrected
+# later. we don't do this with the DENORMs above because this method
+# is slower. but, don't fret, I don't see it being used much either.
+ fmov.l (%sp)+,%fpcr # restore user fpcr
+ mov.l &0x80000000,%d1 # load normalized mantissa
+ subi.l &-0x3fff,%d0 # how many should we shift?
+ neg.l %d0 # make it positive
+ cmpi.b %d0,&0x20 # is it > 32?
+ bge.b sok_dnrm_32 # yes
+ lsr.l %d0,%d1 # no; bit stays in upper lw
+ clr.l -(%sp) # insert zero low mantissa
+ mov.l %d1,-(%sp) # insert new high mantissa
+ clr.l -(%sp) # make zero exponent
+ bra.b sok_norm_cont
+sok_dnrm_32:
+ subi.b &0x20,%d0 # get shift count
+ lsr.l %d0,%d1 # make low mantissa longword
+ mov.l %d1,-(%sp) # insert new low mantissa
+ clr.l -(%sp) # insert zero high mantissa
+ clr.l -(%sp) # make zero exponent
+ bra.b sok_norm_cont
+
+# the src will force the dst to a DENORM value or worse. so, let's
+# create an fp multiply that will create the result.
+sok_norm:
+ fmovm.x DST(%a1),&0x80 # load fp0 with normalized src
+sok_norm2:
+ fmov.l (%sp)+,%fpcr # restore user fpcr
+
+ addi.w &0x3fff,%d0 # turn src amt into exp value
+ swap %d0 # put exponent in high word
+	clr.l		-(%sp)		# insert zero lo mantissa
+	mov.l		&0x80000000,-(%sp) # insert new high mantissa
+	mov.l		%d0,-(%sp)	# insert new exponent
+
+sok_norm_cont:
+ fmov.l %fpcr,%d0 # d0 needs fpcr for t_catch2
+ mov.b &FMUL_OP,%d1 # last inst is MUL
+ fmul.x (%sp)+,%fp0 # do the multiply
+ bra t_catch2 # catch any exceptions
+
+#
+# Source is outside of 2^14 range. Test the sign and branch
+# to the appropriate exception handler.
+#
+src_out:
+ mov.l (%sp)+,%d0 # restore ctrl bits
+ exg %a0,%a1 # swap src,dst ptrs
+ tst.b SRC_EX(%a1) # is src negative?
+ bmi t_unfl # yes; underflow
+ bra t_ovfl_sc # no; overflow
+
+#
+# The source input is below 1, so we check for denormalized numbers
+# and set unfl.
+#
+src_small:
+ tst.b DST_HI(%a1) # is dst denormalized?
+ bpl.b ssmall_done # yes
+
+ mov.l (%sp)+,%d0
+ fmov.l %d0,%fpcr # no; load control bits
+ mov.b &FMOV_OP,%d1 # last inst is MOVE
+ fmov.x DST(%a1),%fp0 # simply return dest
+ bra t_catch2
+ssmall_done:
+	mov.l		(%sp)+,%d0	# load control bits into d0
+ mov.l %a1,%a0 # pass ptr to dst
+ bra t_resdnrm
+
+#########################################################################
+# smod(): computes the fp MOD of the input values X,Y. #
+# srem(): computes the fp (IEEE) REM of the input values X,Y. #
+# #
+# INPUT *************************************************************** #
+# a0 = pointer to extended precision input X #
+# a1 = pointer to extended precision input Y #
+# d0 = round precision,mode #
+# #
+# The input operands X and Y can be either normalized or #
+# denormalized. #
+# #
+# OUTPUT ************************************************************** #
+# fp0 = FREM(X,Y) or FMOD(X,Y) #
+# #
+# ALGORITHM *********************************************************** #
+# #
+# Step 1. Save and strip signs of X and Y: signX := sign(X), #
+# signY := sign(Y), X := |X|, Y := |Y|, #
+# signQ := signX EOR signY. Record whether MOD or REM #
+# is requested. #
+# #
+# Step 2. Set L := expo(X)-expo(Y), k := 0, Q := 0. #
+# If (L < 0) then #
+# R := X, go to Step 4. #
+# else #
+# R := 2^(-L)X, j := L. #
+# endif #
+# #
+# Step 3. Perform MOD(X,Y) #
+# 3.1 If R = Y, go to Step 9. #
+# 3.2 If R > Y, then { R := R - Y, Q := Q + 1} #
+# 3.3 If j = 0, go to Step 4. #
+# 3.4 k := k + 1, j := j - 1, Q := 2Q, R := 2R. Go to #
+# Step 3.1. #
+# #
+# Step 4. At this point, R = X - QY = MOD(X,Y). Set #
+# Last_Subtract := false (used in Step 7 below). If #
+# MOD is requested, go to Step 6. #
+# #
+# Step 5. R = MOD(X,Y), but REM(X,Y) is requested. #
+# 5.1 If R < Y/2, then R = MOD(X,Y) = REM(X,Y). Go to #
+# Step 6. #
+# 5.2 If R > Y/2, then { set Last_Subtract := true, #
+# Q := Q + 1, Y := signY*Y }. Go to Step 6. #
+# 5.3 This is the tricky case of R = Y/2. If Q is odd, #
+# then { Q := Q + 1, signX := -signX }. #
+# #
+# Step 6. R := signX*R. #
+# #
+# Step 7. If Last_Subtract = true, R := R - Y. #
+# #
+# Step 8. Return signQ, last 7 bits of Q, and R as required. #
+# #
+# Step 9. At this point, R = 2^(-j)*X - Q Y = Y. Thus, #
+#		X = 2^(j)*(Q+1)Y. Set Q := 2^(j)*(Q+1),			#
+# R := 0. Return signQ, last 7 bits of Q, and R. #
+# #
+#########################################################################
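+#
+# The MOD loop in rough C (illustrative only; x and y finite, y != 0;
+# one quotient bit per pass as in Steps 3.1-3.4, but aligning Y under R
+# with ldexpl() instead of the carry-tracked (D1,D2) shifts; Step 5's
+# REM adjustment and the exact R = Y shortcut of Step 9 are omitted):
+#
+#	#include <math.h>
+#	long double mod_sketch(long double x, long double y, unsigned *q7)
+#	{
+#	    long double r = fabsl(x), ay = fabsl(y);
+#	    unsigned long q = 0;			/* Q := 0 */
+#	    int l = ilogbl(r) - ilogbl(ay);		/* L = expo(X)-expo(Y) */
+#	    for (int j = l; j >= 0; j--) {		/* if L < 0: R = X */
+#	        long double t = ldexpl(ay, j);		/* Y scaled up to R */
+#	        q <<= 1;				/* Q := 2Q */
+#	        if (r >= t) {				/* Step 3.2 */
+#	            r -= t;				/* exact: t <= r < 2t */
+#	            q |= 1;				/* Q := Q + 1 */
+#	        }
+#	    }
+#	    *q7 = (unsigned)(q & 0x7f);			/* last 7 bits for fpsr */
+#	    return copysignl(r, x);			/* Step 6: R := signX*R */
+#	}
+#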
+
+ set Mod_Flag,L_SCR3
+ set Sc_Flag,L_SCR3+1
+
+ set SignY,L_SCR2
+ set SignX,L_SCR2+2
+ set SignQ,L_SCR3+2
+
+ set Y,FP_SCR0
+ set Y_Hi,Y+4
+ set Y_Lo,Y+8
+
+ set R,FP_SCR1
+ set R_Hi,R+4
+ set R_Lo,R+8
+
+Scale:
+ long 0x00010000,0x80000000,0x00000000,0x00000000
+
+ global smod
+smod:
+ clr.b FPSR_QBYTE(%a6)
+ mov.l %d0,-(%sp) # save ctrl bits
+ clr.b Mod_Flag(%a6)
+ bra.b Mod_Rem
+
+ global srem
+srem:
+ clr.b FPSR_QBYTE(%a6)
+ mov.l %d0,-(%sp) # save ctrl bits
+ mov.b &0x1,Mod_Flag(%a6)
+
+Mod_Rem:
+#..Save sign of X and Y
+ movm.l &0x3f00,-(%sp) # save data registers
+ mov.w SRC_EX(%a0),%d3
+ mov.w %d3,SignY(%a6)
+ and.l &0x00007FFF,%d3 # Y := |Y|
+
+#
+ mov.l SRC_HI(%a0),%d4
+ mov.l SRC_LO(%a0),%d5 # (D3,D4,D5) is |Y|
+
+ tst.l %d3
+ bne.b Y_Normal
+
+ mov.l &0x00003FFE,%d3 # $3FFD + 1
+ tst.l %d4
+ bne.b HiY_not0
+
+HiY_0:
+ mov.l %d5,%d4
+ clr.l %d5
+ sub.l &32,%d3
+ clr.l %d6
+ bfffo %d4{&0:&32},%d6
+ lsl.l %d6,%d4
+ sub.l %d6,%d3 # (D3,D4,D5) is normalized
+# ...with bias $7FFD
+ bra.b Chk_X
+
+HiY_not0:
+ clr.l %d6
+ bfffo %d4{&0:&32},%d6
+ sub.l %d6,%d3
+ lsl.l %d6,%d4
+ mov.l %d5,%d7 # a copy of D5
+ lsl.l %d6,%d5
+ neg.l %d6
+ add.l &32,%d6
+ lsr.l %d6,%d7
+ or.l %d7,%d4 # (D3,D4,D5) normalized
+# ...with bias $7FFD
+ bra.b Chk_X
+
+Y_Normal:
+ add.l &0x00003FFE,%d3 # (D3,D4,D5) normalized
+# ...with bias $7FFD
+
+Chk_X:
+ mov.w DST_EX(%a1),%d0
+ mov.w %d0,SignX(%a6)
+ mov.w SignY(%a6),%d1
+ eor.l %d0,%d1
+ and.l &0x00008000,%d1
+ mov.w %d1,SignQ(%a6) # sign(Q) obtained
+ and.l &0x00007FFF,%d0
+ mov.l DST_HI(%a1),%d1
+ mov.l DST_LO(%a1),%d2 # (D0,D1,D2) is |X|
+ tst.l %d0
+ bne.b X_Normal
+ mov.l &0x00003FFE,%d0
+ tst.l %d1
+ bne.b HiX_not0
+
+HiX_0:
+ mov.l %d2,%d1
+ clr.l %d2
+ sub.l &32,%d0
+ clr.l %d6
+ bfffo %d1{&0:&32},%d6
+ lsl.l %d6,%d1
+ sub.l %d6,%d0 # (D0,D1,D2) is normalized
+# ...with bias $7FFD
+ bra.b Init
+
+HiX_not0:
+ clr.l %d6
+ bfffo %d1{&0:&32},%d6
+ sub.l %d6,%d0
+ lsl.l %d6,%d1
+ mov.l %d2,%d7 # a copy of D2
+ lsl.l %d6,%d2
+ neg.l %d6
+ add.l &32,%d6
+ lsr.l %d6,%d7
+ or.l %d7,%d1 # (D0,D1,D2) normalized
+# ...with bias $7FFD
+ bra.b Init
+
+X_Normal:
+ add.l &0x00003FFE,%d0 # (D0,D1,D2) normalized
+# ...with bias $7FFD
+
+Init:
+#
+ mov.l %d3,L_SCR1(%a6) # save biased exp(Y)
+ mov.l %d0,-(%sp) # save biased exp(X)
+ sub.l %d3,%d0 # L := expo(X)-expo(Y)
+
+ clr.l %d6 # D6 := carry <- 0
+ clr.l %d3 # D3 is Q
+ mov.l &0,%a1 # A1 is k; j+k=L, Q=0
+
+#..(Carry,D1,D2) is R
+ tst.l %d0
+ bge.b Mod_Loop_pre
+
+#..expo(X) < expo(Y). Thus X = mod(X,Y)
+#
+ mov.l (%sp)+,%d0 # restore d0
+ bra.w Get_Mod
+
+Mod_Loop_pre:
+ addq.l &0x4,%sp # erase exp(X)
+#..At this point R = 2^(-L)X; Q = 0; k = 0; and k+j = L
+Mod_Loop:
+ tst.l %d6 # test carry bit
+ bgt.b R_GT_Y
+
+#..At this point carry = 0, R = (D1,D2), Y = (D4,D5)
+ cmp.l %d1,%d4 # compare hi(R) and hi(Y)
+ bne.b R_NE_Y
+ cmp.l %d2,%d5 # compare lo(R) and lo(Y)
+ bne.b R_NE_Y
+
+#..At this point, R = Y
+ bra.w Rem_is_0
+
+R_NE_Y:
+#..use the borrow of the previous compare
+ bcs.b R_LT_Y # borrow is set iff R < Y
+
+R_GT_Y:
+#..If Carry is set, then Y < (Carry,D1,D2) < 2Y. Otherwise, Carry = 0
+#..and Y < (D1,D2) < 2Y. Either way, perform R - Y
+ sub.l %d5,%d2 # lo(R) - lo(Y)
+ subx.l %d4,%d1 # hi(R) - hi(Y)
+ clr.l %d6 # clear carry
+ addq.l &1,%d3 # Q := Q + 1
+
+R_LT_Y:
+#..At this point, Carry=0, R < Y. R = 2^(k-L)X - QY; k+j = L; j >= 0.
+ tst.l %d0 # see if j = 0.
+ beq.b PostLoop
+
+ add.l %d3,%d3 # Q := 2Q
+ add.l %d2,%d2 # lo(R) = 2lo(R)
+ roxl.l &1,%d1 # hi(R) = 2hi(R) + carry
+ scs %d6 # set Carry if 2(R) overflows
+ addq.l &1,%a1 # k := k+1
+ subq.l &1,%d0 # j := j - 1
+#..At this point, R=(Carry,D1,D2) = 2^(k-L)X - QY, j+k=L, j >= 0, R < 2Y.
+
+ bra.b Mod_Loop
+
+PostLoop:
+#..k = L, j = 0, Carry = 0, R = (D1,D2) = X - QY, R < Y.
+
+#..normalize R.
+ mov.l L_SCR1(%a6),%d0 # new biased expo of R
+ tst.l %d1
+ bne.b HiR_not0
+
+HiR_0:
+ mov.l %d2,%d1
+ clr.l %d2
+ sub.l &32,%d0
+ clr.l %d6
+ bfffo %d1{&0:&32},%d6
+ lsl.l %d6,%d1
+ sub.l %d6,%d0 # (D0,D1,D2) is normalized
+# ...with bias $7FFD
+ bra.b Get_Mod
+
+HiR_not0:
+ clr.l %d6
+ bfffo %d1{&0:&32},%d6
+ bmi.b Get_Mod # already normalized
+ sub.l %d6,%d0
+ lsl.l %d6,%d1
+ mov.l %d2,%d7 # a copy of D2
+ lsl.l %d6,%d2
+ neg.l %d6
+ add.l &32,%d6
+ lsr.l %d6,%d7
+ or.l %d7,%d1 # (D0,D1,D2) normalized
+
+#
+Get_Mod:
+ cmp.l %d0,&0x000041FE
+ bge.b No_Scale
+Do_Scale:
+ mov.w %d0,R(%a6)
+ mov.l %d1,R_Hi(%a6)
+ mov.l %d2,R_Lo(%a6)
+ mov.l L_SCR1(%a6),%d6
+ mov.w %d6,Y(%a6)
+ mov.l %d4,Y_Hi(%a6)
+ mov.l %d5,Y_Lo(%a6)
+ fmov.x R(%a6),%fp0 # no exception
+ mov.b &1,Sc_Flag(%a6)
+ bra.b ModOrRem
+No_Scale:
+ mov.l %d1,R_Hi(%a6)
+ mov.l %d2,R_Lo(%a6)
+ sub.l &0x3FFE,%d0
+ mov.w %d0,R(%a6)
+ mov.l L_SCR1(%a6),%d6
+ sub.l &0x3FFE,%d6
+ mov.l %d6,L_SCR1(%a6)
+ fmov.x R(%a6),%fp0
+ mov.w %d6,Y(%a6)
+ mov.l %d4,Y_Hi(%a6)
+ mov.l %d5,Y_Lo(%a6)
+ clr.b Sc_Flag(%a6)
+
+#
+ModOrRem:
+ tst.b Mod_Flag(%a6)
+ beq.b Fix_Sign
+
+ mov.l L_SCR1(%a6),%d6 # new biased expo(Y)
+ subq.l &1,%d6 # biased expo(Y/2)
+ cmp.l %d0,%d6
+ blt.b Fix_Sign
+ bgt.b Last_Sub
+
+ cmp.l %d1,%d4
+ bne.b Not_EQ
+ cmp.l %d2,%d5
+ bne.b Not_EQ
+ bra.w Tie_Case
+
+Not_EQ:
+ bcs.b Fix_Sign
+
+Last_Sub:
+#
+ fsub.x Y(%a6),%fp0 # no exceptions
+ addq.l &1,%d3 # Q := Q + 1
+
+#
+Fix_Sign:
+#..Get sign of X
+ mov.w SignX(%a6),%d6
+ bge.b Get_Q
+ fneg.x %fp0
+
+#..Get Q
+#
+Get_Q:
+ clr.l %d6
+ mov.w SignQ(%a6),%d6 # D6 is sign(Q)
+ mov.l &8,%d7
+ lsr.l %d7,%d6
+ and.l &0x0000007F,%d3 # 7 bits of Q
+ or.l %d6,%d3 # sign and bits of Q
+# swap %d3
+# fmov.l %fpsr,%d6
+# and.l &0xFF00FFFF,%d6
+# or.l %d3,%d6
+# fmov.l %d6,%fpsr # put Q in fpsr
+ mov.b %d3,FPSR_QBYTE(%a6) # put Q in fpsr
+
+#
+Restore:
+ movm.l (%sp)+,&0xfc # {%d2-%d7}
+ mov.l (%sp)+,%d0
+ fmov.l %d0,%fpcr
+ tst.b Sc_Flag(%a6)
+ beq.b Finish
+ mov.b &FMUL_OP,%d1 # last inst is MUL
+ fmul.x Scale(%pc),%fp0 # may cause underflow
+ bra t_catch2
+# the '040 package did this apparently to see if the dst operand for the
+# preceding fmul was a denorm. but, it better not have been since the
+# algorithm just got done playing with fp0 and expected no exceptions
+# as a result. trust me...
+# bra t_avoid_unsupp # check for denorm as a
+# ;result of the scaling
+
+Finish:
+ mov.b &FMOV_OP,%d1 # last inst is MOVE
+ fmov.x %fp0,%fp0 # capture exceptions & round
+ bra t_catch2
+
+Rem_is_0:
+#..R = 2^(-j)X - Q Y = Y, thus R = 0 and quotient = 2^j (Q+1)
+ addq.l &1,%d3
+ cmp.l %d0,&8 # D0 is j
+ bge.b Q_Big
+
+ lsl.l %d0,%d3
+ bra.b Set_R_0
+
+Q_Big:
+ clr.l %d3
+
+Set_R_0:
+ fmov.s &0x00000000,%fp0
+ clr.b Sc_Flag(%a6)
+ bra.w Fix_Sign
+
+Tie_Case:
+#..Check parity of Q
+ mov.l %d3,%d6
+ and.l &0x00000001,%d6
+ tst.l %d6
+ beq.w Fix_Sign # Q is even
+
+#..Q is odd, Q := Q + 1, signX := -signX
+ addq.l &1,%d3
+ mov.w SignX(%a6),%d6
+ eor.l &0x00008000,%d6
+ mov.w %d6,SignX(%a6)
+ bra.w Fix_Sign
+
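+#..The loop above, restated as a C sketch (illustrative only: the real
+#..code keeps the 65-bit R in (Carry,D1,D2) and (D3,D4,D5), and the
+#..GCC-style 128-bit integer here merely stands in for that carry bit):
+#
+#	/* x,y = 64-bit normalized mantissas, L = expo(X) - expo(Y) >= 0 */
+#	unsigned __int128 r = x;	/* R may need 65 bits before a subtract */
+#	uint64_t q = 0;
+#	for (int j = L; ; j--) {
+#		if (r >= y) { r -= y; q++; }	/* R := R - Y; Q := Q + 1 */
+#		if (j == 0) break;		/* k + j == L throughout */
+#		q <<= 1;			/* Q := 2Q */
+#		r <<= 1;			/* R := 2R */
+#	}
+#	/* now R = X - QY and R < Y.  srem additionally rounds to nearest: */
+#	/* if R > Y/2, or R == Y/2 with Q odd, then R := R - Y, Q := Q + 1 */
+#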
+#########################################################################
+# XDEF **************************************************************** #
+# tag(): return the optype of the input ext fp number #
+# #
+# This routine is used by the 060FPLSP. #
+# #
+# XREF **************************************************************** #
+# None #
+# #
+# INPUT *************************************************************** #
+# a0 = pointer to extended precision operand #
+# #
+# OUTPUT ************************************************************** #
+# d0 = value of type tag #
+# one of: NORM, INF, QNAN, SNAN, DENORM, ZERO #
+# #
+# ALGORITHM *********************************************************** #
+# Simply test the exponent, j-bit, and mantissa values to #
+# determine the type of operand. #
+# If it's an unnormalized zero, alter the operand and force it #
+# to be a normal zero. #
+# #
+#########################################################################
+
+ global tag
+tag:
+ mov.w FTEMP_EX(%a0), %d0 # extract exponent
+ andi.w &0x7fff, %d0 # strip off sign
+ cmpi.w %d0, &0x7fff # is (EXP == MAX)?
+ beq.b inf_or_nan_x
+not_inf_or_nan_x:
+ btst &0x7,FTEMP_HI(%a0)
+ beq.b not_norm_x
+is_norm_x:
+ mov.b &NORM, %d0
+ rts
+not_norm_x:
+ tst.w %d0 # is exponent = 0?
+ bne.b is_unnorm_x
+not_unnorm_x:
+ tst.l FTEMP_HI(%a0)
+ bne.b is_denorm_x
+ tst.l FTEMP_LO(%a0)
+ bne.b is_denorm_x
+is_zero_x:
+ mov.b &ZERO, %d0
+ rts
+is_denorm_x:
+ mov.b &DENORM, %d0
+ rts
+is_unnorm_x:
+ bsr.l unnorm_fix # convert to norm,denorm,or zero
+ rts
+is_unnorm_reg_x:
+ mov.b &UNNORM, %d0
+ rts
+inf_or_nan_x:
+ tst.l FTEMP_LO(%a0)
+ bne.b is_nan_x
+ mov.l FTEMP_HI(%a0), %d0
+ and.l &0x7fffffff, %d0 # msb is a don't care!
+ bne.b is_nan_x
+is_inf_x:
+ mov.b &INF, %d0
+ rts
+is_nan_x:
+ mov.b &QNAN, %d0
+ rts
+
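+# The classification above, as a C sketch (assumes the usual FTEMP
+# layout: 15-bit biased exponent plus sign in the first word, explicit
+# j-bit in bit 31 of the high mantissa longword):
+#
+#	int tag(uint16_t ex, uint32_t hi, uint32_t lo)
+#	{
+#		int exp = ex & 0x7fff;			/* strip sign */
+#		if (exp == 0x7fff)			/* max exponent */
+#			return (lo || (hi & 0x7fffffff)) ? QNAN : INF;
+#		if (hi & 0x80000000)			/* j-bit set */
+#			return NORM;
+#		if (exp)				/* exp != 0, j-bit clear; */
+#			return UNNORM;			/* code calls unnorm_fix() */
+#		return (hi || lo) ? DENORM : ZERO;
+#	}
+#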
+#############################################################
+
+qnan: long 0x7fff0000, 0xffffffff, 0xffffffff
+
+#########################################################################
+# XDEF **************************************************************** #
+# t_dz(): Handle 060FPLSP dz exception for "flogn" emulation. #
+# t_dz2(): Handle 060FPLSP dz exception for "fatanh" emulation. #
+# #
+#	These routines are used by the 060FPLSP package.		#
+# #
+# XREF **************************************************************** #
+# None #
+# #
+# INPUT *************************************************************** #
+# a0 = pointer to extended precision source operand. #
+# #
+# OUTPUT ************************************************************** #
+# fp0 = default DZ result. #
+# #
+# ALGORITHM *********************************************************** #
+# Transcendental emulation for the 060FPLSP has detected that #
+# a DZ exception should occur for the instruction. If DZ is disabled, #
+# return the default result. #
+# If DZ is enabled, the dst operand should be returned unscathed #
+# in fp0 while fp1 is used to create a DZ exception so that the #
+# operating system can log that such an event occurred. #
+# #
+#########################################################################
+
+ global t_dz
+t_dz:
+ tst.b SRC_EX(%a0) # check sign for neg or pos
+ bpl.b dz_pinf # branch if pos sign
+
+ global t_dz2
+t_dz2:
+ ori.l &dzinf_mask+neg_mask,USER_FPSR(%a6) # set N/I/DZ/ADZ
+
+ btst &dz_bit,FPCR_ENABLE(%a6)
+ bne.b dz_minf_ena
+
+# dz is disabled. return a -INF.
+ fmov.s &0xff800000,%fp0 # return -INF
+ rts
+
+# dz is enabled. create a dz exception so the user can record it
+# but use fp1 instead. return the dst operand unscathed in fp0.
+dz_minf_ena:
+ fmovm.x EXC_FP0(%a6),&0x80 # return fp0 unscathed
+ fmov.l USER_FPCR(%a6),%fpcr
+ fmov.s &0xbf800000,%fp1 # load -1
+ fdiv.s &0x00000000,%fp1 # -1 / 0
+ rts
+
+dz_pinf:
+ ori.l &dzinf_mask,USER_FPSR(%a6) # set I/DZ/ADZ
+
+ btst &dz_bit,FPCR_ENABLE(%a6)
+ bne.b dz_pinf_ena
+
+# dz is disabled. return a +INF.
+ fmov.s &0x7f800000,%fp0 # return +INF
+ rts
+
+# dz is enabled. create a dz exception so the user can record it
+# but use fp1 instead. return the dst operand unscathed in fp0.
+dz_pinf_ena:
+ fmovm.x EXC_FP0(%a6),&0x80 # return fp0 unscathed
+ fmov.l USER_FPCR(%a6),%fpcr
+ fmov.s &0x3f800000,%fp1 # load +1
+ fdiv.s &0x00000000,%fp1 # +1 / 0
+ rts
+
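+# All of the "create the exception so the user can record it" code in
+# this file relies on one trick: perform a real FP operation whose
+# flags the hardware itself will raise.  In C with <fenv.h> the same
+# idea looks roughly like this (illustrative sketch only):
+#
+#	#include <fenv.h>
+#	#include <math.h>
+#	volatile double one = 1.0, zero = 0.0;
+#	double junk = one / zero;	/* dz: raises FE_DIVBYZERO */
+#	/* t_operr: INFINITY * 0.0 raises FE_INVALID		*/
+#	/* t_unfl:  tiny * tiny    raises FE_UNDERFLOW (pls_tiny) */
+#	/* t_ovfl:  huge * huge    raises FE_OVERFLOW  (pls_huge) */
+#	/* t_inx2:  1.0 + tiny     raises FE_INEXACT		*/
+#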
+#########################################################################
+# XDEF **************************************************************** #
+# t_operr(): Handle 060FPLSP OPERR exception during emulation. #
+# #
+# This routine is used by the 060FPLSP package. #
+# #
+# XREF **************************************************************** #
+# None. #
+# #
+# INPUT *************************************************************** #
+# fp1 = source operand #
+# #
+# OUTPUT ************************************************************** #
+# fp0 = default result #
+# fp1 = unchanged #
+# #
+# ALGORITHM *********************************************************** #
+# An operand error should occur as the result of transcendental #
+# emulation in the 060FPLSP. If OPERR is disabled, just return a NAN #
+# in fp0. If OPERR is enabled, return the dst operand unscathed in fp0 #
+# and the source operand in fp1. Use fp2 to create an OPERR exception #
+# so that the operating system can log the event. #
+# #
+#########################################################################
+
+ global t_operr
+t_operr:
+ ori.l &opnan_mask,USER_FPSR(%a6) # set NAN/OPERR/AIOP
+
+ btst &operr_bit,FPCR_ENABLE(%a6)
+ bne.b operr_ena
+
+# operr is disabled. return a QNAN in fp0
+ fmovm.x qnan(%pc),&0x80 # return QNAN
+ rts
+
+# operr is enabled. create an operr exception so the user can record it
+# but use fp2 instead. return the dst operand unscathed in fp0.
+operr_ena:
+ fmovm.x EXC_FP0(%a6),&0x80 # return fp0 unscathed
+ fmov.l USER_FPCR(%a6),%fpcr
+ fmovm.x &0x04,-(%sp) # save fp2
+ fmov.s &0x7f800000,%fp2 # load +INF
+ fmul.s &0x00000000,%fp2 # +INF x 0
+ fmovm.x (%sp)+,&0x20 # restore fp2
+ rts
+
+pls_huge:
+ long 0x7ffe0000,0xffffffff,0xffffffff
+mns_huge:
+ long 0xfffe0000,0xffffffff,0xffffffff
+pls_tiny:
+ long 0x00000000,0x80000000,0x00000000
+mns_tiny:
+ long 0x80000000,0x80000000,0x00000000
+
+#########################################################################
+# XDEF **************************************************************** #
+# t_unfl(): Handle 060FPLSP underflow exception during emulation. #
+# t_unfl2(): Handle 060FPLSP underflow exception during #
+# emulation. result always positive. #
+# #
+# This routine is used by the 060FPLSP package. #
+# #
+# XREF **************************************************************** #
+# None. #
+# #
+# INPUT *************************************************************** #
+# a0 = pointer to extended precision source operand #
+# #
+# OUTPUT ************************************************************** #
+# fp0 = default underflow result #
+# #
+# ALGORITHM *********************************************************** #
+# An underflow should occur as the result of transcendental #
+# emulation in the 060FPLSP. Create an underflow by using "fmul" #
+# and two very small numbers of appropriate sign so the operating #
+# system can log the event. #
+# #
+#########################################################################
+
+ global t_unfl
+t_unfl:
+ tst.b SRC_EX(%a0)
+ bpl.b unf_pos
+
+ global t_unfl2
+t_unfl2:
+ ori.l &unfinx_mask+neg_mask,USER_FPSR(%a6) # set N/UNFL/INEX2/AUNFL/AINEX
+
+ fmov.l USER_FPCR(%a6),%fpcr
+ fmovm.x mns_tiny(%pc),&0x80
+ fmul.x pls_tiny(%pc),%fp0
+
+ fmov.l %fpsr,%d0
+ rol.l &0x8,%d0
+ mov.b %d0,FPSR_CC(%a6)
+ rts
+unf_pos:
+ ori.w &unfinx_mask,FPSR_EXCEPT(%a6) # set UNFL/INEX2/AUNFL/AINEX
+
+ fmov.l USER_FPCR(%a6),%fpcr
+ fmovm.x pls_tiny(%pc),&0x80
+ fmul.x %fp0,%fp0
+
+ fmov.l %fpsr,%d0
+ rol.l &0x8,%d0
+ mov.b %d0,FPSR_CC(%a6)
+ rts
+
+#########################################################################
+# XDEF **************************************************************** #
+# t_ovfl(): Handle 060FPLSP overflow exception during emulation. #
+# (monadic) #
+# t_ovfl2(): Handle 060FPLSP overflow exception during #
+# emulation. result always positive. (dyadic) #
+# t_ovfl_sc(): Handle 060FPLSP overflow exception during #
+# emulation for "fscale". #
+# #
+# This routine is used by the 060FPLSP package. #
+# #
+# XREF **************************************************************** #
+# None. #
+# #
+# INPUT *************************************************************** #
+# a0 = pointer to extended precision source operand #
+# #
+# OUTPUT ************************************************************** #
+#	fp0 = default overflow result				#
+# #
+# ALGORITHM *********************************************************** #
+# An overflow should occur as the result of transcendental #
+# emulation in the 060FPLSP. Create an overflow by using "fmul" #
+#	and two very large numbers of appropriate sign so the operating	#
+# system can log the event. #
+# For t_ovfl_sc() we take special care not to lose the INEX2 bit. #
+# #
+#########################################################################
+
+ global t_ovfl_sc
+t_ovfl_sc:
+ ori.l &ovfl_inx_mask,USER_FPSR(%a6) # set OVFL/AOVFL/AINEX
+
+ mov.b %d0,%d1 # fetch rnd prec,mode
+ andi.b &0xc0,%d1 # extract prec
+ beq.w ovfl_work
+
+# dst op is a DENORM. we have to normalize the mantissa to see if the
+# result would be inexact for the given precision. make a copy of the
+# dst so we don't screw up the version passed to us.
+ mov.w LOCAL_EX(%a0),FP_SCR0_EX(%a6)
+ mov.l LOCAL_HI(%a0),FP_SCR0_HI(%a6)
+ mov.l LOCAL_LO(%a0),FP_SCR0_LO(%a6)
+ lea FP_SCR0(%a6),%a0 # pass ptr to FP_SCR0
+ movm.l &0xc080,-(%sp) # save d0-d1/a0
+ bsr.l norm # normalize mantissa
+ movm.l (%sp)+,&0x0103 # restore d0-d1/a0
+
+ cmpi.b %d1,&0x40 # is precision sgl?
+ bne.b ovfl_sc_dbl # no; dbl
+ovfl_sc_sgl:
+ tst.l LOCAL_LO(%a0) # is lo lw of sgl set?
+ bne.b ovfl_sc_inx # yes
+ tst.b 3+LOCAL_HI(%a0) # is lo byte of hi lw set?
+ bne.b ovfl_sc_inx # yes
+ bra.w ovfl_work # don't set INEX2
+ovfl_sc_dbl:
+ mov.l LOCAL_LO(%a0),%d1 # are any of lo 11 bits of
+ andi.l &0x7ff,%d1 # dbl mantissa set?
+ beq.w ovfl_work # no; don't set INEX2
+ovfl_sc_inx:
+ ori.l &inex2_mask,USER_FPSR(%a6) # set INEX2
+ bra.b ovfl_work # continue
+
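+# In C terms the single/double test above just masks the mantissa bits
+# that fall below the destination precision (sketch; 64-bit mantissa
+# held in hi:lo after normalization):
+#
+#	int inexact_for_prec(uint32_t hi, uint32_t lo, int is_dbl)
+#	{
+#		if (is_dbl)				/* dbl keeps 53 bits;  */
+#			return (lo & 0x7ff) != 0;	/* low 11 are lost     */
+#		return lo || (hi & 0xff);		/* sgl keeps 24 bits   */
+#	}
+#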
+ global t_ovfl
+t_ovfl:
+ ori.w &ovfinx_mask,FPSR_EXCEPT(%a6) # set OVFL/INEX2/AOVFL/AINEX
+ovfl_work:
+ tst.b SRC_EX(%a0)
+ bpl.b ovfl_p
+ovfl_m:
+ fmov.l USER_FPCR(%a6),%fpcr
+ fmovm.x mns_huge(%pc),&0x80
+ fmul.x pls_huge(%pc),%fp0
+
+ fmov.l %fpsr,%d0
+ rol.l &0x8,%d0
+ ori.b &neg_mask,%d0
+ mov.b %d0,FPSR_CC(%a6)
+ rts
+ovfl_p:
+ fmov.l USER_FPCR(%a6),%fpcr
+ fmovm.x pls_huge(%pc),&0x80
+ fmul.x pls_huge(%pc),%fp0
+
+ fmov.l %fpsr,%d0
+ rol.l &0x8,%d0
+ mov.b %d0,FPSR_CC(%a6)
+ rts
+
+ global t_ovfl2
+t_ovfl2:
+ ori.w &ovfinx_mask,FPSR_EXCEPT(%a6) # set OVFL/INEX2/AOVFL/AINEX
+ fmov.l USER_FPCR(%a6),%fpcr
+ fmovm.x pls_huge(%pc),&0x80
+ fmul.x pls_huge(%pc),%fp0
+
+ fmov.l %fpsr,%d0
+ rol.l &0x8,%d0
+ mov.b %d0,FPSR_CC(%a6)
+ rts
+
+#########################################################################
+# XDEF **************************************************************** #
+# t_catch(): Handle 060FPLSP OVFL,UNFL,or INEX2 exception during #
+# emulation. #
+# t_catch2(): Handle 060FPLSP OVFL,UNFL,or INEX2 exception during #
+# emulation. #
+# #
+# These routines are used by the 060FPLSP package. #
+# #
+# XREF **************************************************************** #
+# None. #
+# #
+# INPUT *************************************************************** #
+# fp0 = default underflow or overflow result #
+# #
+# OUTPUT ************************************************************** #
+# fp0 = default result #
+# #
+# ALGORITHM *********************************************************** #
+# If an overflow or underflow occurred during the last #
+# instruction of transcendental 060FPLSP emulation, then it has already #
+# occurred and has been logged. Now we need to see if an inexact #
+# exception should occur. #
+# #
+#########################################################################
+
+ global t_catch2
+t_catch2:
+ fmov.l %fpsr,%d0
+ or.l %d0,USER_FPSR(%a6)
+ bra.b inx2_work
+
+ global t_catch
+t_catch:
+ fmov.l %fpsr,%d0
+ or.l %d0,USER_FPSR(%a6)
+
+#########################################################################
+# XDEF **************************************************************** #
+# t_inx2(): Handle inexact 060FPLSP exception during emulation. #
+# t_pinx2(): Handle inexact 060FPLSP exception for "+" results. #
+# t_minx2(): Handle inexact 060FPLSP exception for "-" results. #
+# #
+# XREF **************************************************************** #
+# None. #
+# #
+# INPUT *************************************************************** #
+# fp0 = default result #
+# #
+# OUTPUT ************************************************************** #
+# fp0 = default result #
+# #
+# ALGORITHM *********************************************************** #
+# The last instruction of transcendental emulation for the #
+# 060FPLSP should be inexact. So, if inexact is enabled, then we create #
+# the event here by adding a large and very small number together #
+# so that the operating system can log the event. #
+# Must check, too, if the result was zero, in which case we just #
+# set the FPSR bits and return. #
+# #
+#########################################################################
+
+ global t_inx2
+t_inx2:
+ fblt.w t_minx2
+ fbeq.w inx2_zero
+
+ global t_pinx2
+t_pinx2:
+ ori.w &inx2a_mask,FPSR_EXCEPT(%a6) # set INEX2/AINEX
+ bra.b inx2_work
+
+ global t_minx2
+t_minx2:
+ ori.l &inx2a_mask+neg_mask,USER_FPSR(%a6)
+
+inx2_work:
+ btst &inex2_bit,FPCR_ENABLE(%a6) # is inexact enabled?
+ bne.b inx2_work_ena # yes
+ rts
+inx2_work_ena:
+ fmov.l USER_FPCR(%a6),%fpcr # insert user's exceptions
+ fmov.s &0x3f800000,%fp1 # load +1
+ fadd.x pls_tiny(%pc),%fp1 # cause exception
+ rts
+
+inx2_zero:
+ mov.b &z_bmask,FPSR_CC(%a6)
+ ori.w &inx2a_mask,2+USER_FPSR(%a6) # set INEX/AINEX
+ rts
+
+#########################################################################
+# XDEF **************************************************************** #
+# t_extdnrm(): Handle DENORM inputs in 060FPLSP. #
+# t_resdnrm(): Handle DENORM inputs in 060FPLSP for "fscale". #
+# #
+# This routine is used by the 060FPLSP package. #
+# #
+# XREF **************************************************************** #
+# None. #
+# #
+# INPUT *************************************************************** #
+# a0 = pointer to extended precision input operand #
+# #
+# OUTPUT ************************************************************** #
+# fp0 = default result #
+# #
+# ALGORITHM *********************************************************** #
+# For all functions that have a denormalized input and that #
+# f(x)=x, this is the entry point. #
+# DENORM value is moved using "fmove" which triggers an exception #
+# if enabled so the operating system can log the event. #
+# #
+#########################################################################
+
+ global t_extdnrm
+t_extdnrm:
+ fmov.l USER_FPCR(%a6),%fpcr
+ fmov.x SRC_EX(%a0),%fp0
+ fmov.l %fpsr,%d0
+ ori.l &unfinx_mask,%d0
+ or.l %d0,USER_FPSR(%a6)
+ rts
+
+ global t_resdnrm
+t_resdnrm:
+ fmov.l USER_FPCR(%a6),%fpcr
+ fmov.x SRC_EX(%a0),%fp0
+ fmov.l %fpsr,%d0
+ or.l %d0,USER_FPSR(%a6)
+ rts
+
+##########################################
+
+#
+# sto_cos:
+# This is used by fsincos library emulation. The correct
+# values are already in fp0 and fp1 so we do nothing here.
+#
+ global sto_cos
+sto_cos:
+ rts
+
+##########################################
+
+#
+# dst_qnan --- force result when destination is a NaN
+#
+ global dst_qnan
+dst_qnan:
+ fmov.x DST(%a1),%fp0
+ tst.b DST_EX(%a1)
+ bmi.b dst_qnan_m
+dst_qnan_p:
+ mov.b &nan_bmask,FPSR_CC(%a6)
+ rts
+dst_qnan_m:
+ mov.b &nan_bmask+neg_bmask,FPSR_CC(%a6)
+ rts
+
+#
+# src_qnan --- force result when source is a NaN
+#
+ global src_qnan
+src_qnan:
+ fmov.x SRC(%a0),%fp0
+ tst.b SRC_EX(%a0)
+ bmi.b src_qnan_m
+src_qnan_p:
+ mov.b &nan_bmask,FPSR_CC(%a6)
+ rts
+src_qnan_m:
+ mov.b &nan_bmask+neg_bmask,FPSR_CC(%a6)
+ rts
+
+##########################################
+
+#
+# Native instruction support
+#
+# Some systems may need entry points even for 68060 native
+# instructions. These routines are provided for
+# convenience.
+#
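+# Seen from C, these correspond roughly to the prototypes below. The
+# assumption here is a compiler that pushes arguments right-to-left on
+# the stack (so dst sits at 4(%sp) on entry) and returns floating
+# values in fp0; ABI details vary, so treat this as a sketch:
+#
+#	float  _fadds_(float dst, float src);	/* returns dst + src */
+#	double _faddd_(double dst, double src);	/* returns dst + src */
+#	float  _fsqrts_(float src);		/* returns sqrt(src) */
+#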
+ global _fadds_
+_fadds_:
+ fmov.l %fpcr,-(%sp) # save fpcr
+ fmov.l &0x00000000,%fpcr # clear fpcr for load
+ fmov.s 0x8(%sp),%fp0 # load sgl dst
+ fmov.l (%sp)+,%fpcr # restore fpcr
+ fadd.s 0x8(%sp),%fp0 # fadd w/ sgl src
+ rts
+
+ global _faddd_
+_faddd_:
+ fmov.l %fpcr,-(%sp) # save fpcr
+ fmov.l &0x00000000,%fpcr # clear fpcr for load
+ fmov.d 0x8(%sp),%fp0 # load dbl dst
+ fmov.l (%sp)+,%fpcr # restore fpcr
+ fadd.d 0xc(%sp),%fp0 # fadd w/ dbl src
+ rts
+
+ global _faddx_
+_faddx_:
+ fmovm.x 0x4(%sp),&0x80 # load ext dst
+ fadd.x 0x10(%sp),%fp0 # fadd w/ ext src
+ rts
+
+ global _fsubs_
+_fsubs_:
+ fmov.l %fpcr,-(%sp) # save fpcr
+ fmov.l &0x00000000,%fpcr # clear fpcr for load
+ fmov.s 0x8(%sp),%fp0 # load sgl dst
+ fmov.l (%sp)+,%fpcr # restore fpcr
+ fsub.s 0x8(%sp),%fp0 # fsub w/ sgl src
+ rts
+
+ global _fsubd_
+_fsubd_:
+ fmov.l %fpcr,-(%sp) # save fpcr
+ fmov.l &0x00000000,%fpcr # clear fpcr for load
+ fmov.d 0x8(%sp),%fp0 # load dbl dst
+ fmov.l (%sp)+,%fpcr # restore fpcr
+ fsub.d 0xc(%sp),%fp0 # fsub w/ dbl src
+ rts
+
+ global _fsubx_
+_fsubx_:
+ fmovm.x 0x4(%sp),&0x80 # load ext dst
+ fsub.x 0x10(%sp),%fp0 # fsub w/ ext src
+ rts
+
+ global _fmuls_
+_fmuls_:
+ fmov.l %fpcr,-(%sp) # save fpcr
+ fmov.l &0x00000000,%fpcr # clear fpcr for load
+ fmov.s 0x8(%sp),%fp0 # load sgl dst
+ fmov.l (%sp)+,%fpcr # restore fpcr
+ fmul.s 0x8(%sp),%fp0 # fmul w/ sgl src
+ rts
+
+ global _fmuld_
+_fmuld_:
+ fmov.l %fpcr,-(%sp) # save fpcr
+ fmov.l &0x00000000,%fpcr # clear fpcr for load
+ fmov.d 0x8(%sp),%fp0 # load dbl dst
+ fmov.l (%sp)+,%fpcr # restore fpcr
+ fmul.d 0xc(%sp),%fp0 # fmul w/ dbl src
+ rts
+
+ global _fmulx_
+_fmulx_:
+ fmovm.x 0x4(%sp),&0x80 # load ext dst
+ fmul.x 0x10(%sp),%fp0 # fmul w/ ext src
+ rts
+
+ global _fdivs_
+_fdivs_:
+ fmov.l %fpcr,-(%sp) # save fpcr
+ fmov.l &0x00000000,%fpcr # clear fpcr for load
+ fmov.s 0x8(%sp),%fp0 # load sgl dst
+ fmov.l (%sp)+,%fpcr # restore fpcr
+ fdiv.s 0x8(%sp),%fp0 # fdiv w/ sgl src
+ rts
+
+ global _fdivd_
+_fdivd_:
+ fmov.l %fpcr,-(%sp) # save fpcr
+ fmov.l &0x00000000,%fpcr # clear fpcr for load
+ fmov.d 0x8(%sp),%fp0 # load dbl dst
+ fmov.l (%sp)+,%fpcr # restore fpcr
+ fdiv.d 0xc(%sp),%fp0 # fdiv w/ dbl src
+ rts
+
+ global _fdivx_
+_fdivx_:
+ fmovm.x 0x4(%sp),&0x80 # load ext dst
+ fdiv.x 0x10(%sp),%fp0 # fdiv w/ ext src
+ rts
+
+ global _fabss_
+_fabss_:
+ fabs.s 0x4(%sp),%fp0 # fabs w/ sgl src
+ rts
+
+ global _fabsd_
+_fabsd_:
+ fabs.d 0x4(%sp),%fp0 # fabs w/ dbl src
+ rts
+
+ global _fabsx_
+_fabsx_:
+ fabs.x 0x4(%sp),%fp0 # fabs w/ ext src
+ rts
+
+ global _fnegs_
+_fnegs_:
+ fneg.s 0x4(%sp),%fp0 # fneg w/ sgl src
+ rts
+
+ global _fnegd_
+_fnegd_:
+ fneg.d 0x4(%sp),%fp0 # fneg w/ dbl src
+ rts
+
+ global _fnegx_
+_fnegx_:
+ fneg.x 0x4(%sp),%fp0 # fneg w/ ext src
+ rts
+
+ global _fsqrts_
+_fsqrts_:
+ fsqrt.s 0x4(%sp),%fp0 # fsqrt w/ sgl src
+ rts
+
+ global _fsqrtd_
+_fsqrtd_:
+ fsqrt.d 0x4(%sp),%fp0 # fsqrt w/ dbl src
+ rts
+
+ global _fsqrtx_
+_fsqrtx_:
+ fsqrt.x 0x4(%sp),%fp0 # fsqrt w/ ext src
+ rts
+
+ global _fints_
+_fints_:
+ fint.s 0x4(%sp),%fp0 # fint w/ sgl src
+ rts
+
+ global _fintd_
+_fintd_:
+ fint.d 0x4(%sp),%fp0 # fint w/ dbl src
+ rts
+
+ global _fintx_
+_fintx_:
+ fint.x 0x4(%sp),%fp0 # fint w/ ext src
+ rts
+
+ global _fintrzs_
+_fintrzs_:
+ fintrz.s 0x4(%sp),%fp0 # fintrz w/ sgl src
+ rts
+
+ global _fintrzd_
+_fintrzd_:
+	fintrz.d	0x4(%sp),%fp0	# fintrz w/ dbl src
+ rts
+
+ global _fintrzx_
+_fintrzx_:
+ fintrz.x 0x4(%sp),%fp0 # fintrz w/ ext src
+ rts
+
+########################################################################
+
+#########################################################################
+# src_zero(): Return signed zero according to sign of src operand. #
+#########################################################################
+ global src_zero
+src_zero:
+ tst.b SRC_EX(%a0) # get sign of src operand
+ bmi.b ld_mzero # if neg, load neg zero
+
+#
+# ld_pzero(): return a positive zero.
+#
+ global ld_pzero
+ld_pzero:
+ fmov.s &0x00000000,%fp0 # load +0
+ mov.b &z_bmask,FPSR_CC(%a6) # set 'Z' ccode bit
+ rts
+
+# ld_mzero(): return a negative zero.
+ global ld_mzero
+ld_mzero:
+ fmov.s &0x80000000,%fp0 # load -0
+ mov.b &neg_bmask+z_bmask,FPSR_CC(%a6) # set 'N','Z' ccode bits
+ rts
+
+#########################################################################
+# dst_zero(): Return signed zero according to sign of dst operand. #
+#########################################################################
+ global dst_zero
+dst_zero:
+ tst.b DST_EX(%a1) # get sign of dst operand
+ bmi.b ld_mzero # if neg, load neg zero
+ bra.b ld_pzero # load positive zero
+
+#########################################################################
+# src_inf(): Return signed inf according to sign of src operand. #
+#########################################################################
+ global src_inf
+src_inf:
+ tst.b SRC_EX(%a0) # get sign of src operand
+ bmi.b ld_minf # if negative branch
+
+#
+# ld_pinf(): return a positive infinity.
+#
+ global ld_pinf
+ld_pinf:
+ fmov.s &0x7f800000,%fp0 # load +INF
+ mov.b &inf_bmask,FPSR_CC(%a6) # set 'INF' ccode bit
+ rts
+
+#
+# ld_minf(): return a negative infinity.
+#
+ global ld_minf
+ld_minf:
+ fmov.s &0xff800000,%fp0 # load -INF
+ mov.b &neg_bmask+inf_bmask,FPSR_CC(%a6) # set 'N','I' ccode bits
+ rts
+
+#########################################################################
+# dst_inf(): Return signed inf according to sign of dst operand. #
+#########################################################################
+ global dst_inf
+dst_inf:
+ tst.b DST_EX(%a1) # get sign of dst operand
+ bmi.b ld_minf # if negative branch
+ bra.b ld_pinf
+
+ global szr_inf
+#################################################################
+# szr_inf(): Return +ZERO for a negative src operand or #
+# +INF for a positive src operand. #
+# Routine used for fetox, ftwotox, and ftentox. #
+#################################################################
+szr_inf:
+ tst.b SRC_EX(%a0) # check sign of source
+ bmi.b ld_pzero
+ bra.b ld_pinf
+
+#########################################################################
+# sopr_inf(): Return +INF for a positive src operand or #
+# jump to operand error routine for a negative src operand. #
+# Routine used for flogn, flognp1, flog10, and flog2. #
+#########################################################################
+ global sopr_inf
+sopr_inf:
+ tst.b SRC_EX(%a0) # check sign of source
+ bmi.w t_operr
+ bra.b ld_pinf
+
+#################################################################
+# setoxm1i(): Return minus one for a negative src operand or #
+# positive infinity for a positive src operand. #
+# Routine used for fetoxm1. #
+#################################################################
+ global setoxm1i
+setoxm1i:
+ tst.b SRC_EX(%a0) # check sign of source
+ bmi.b ld_mone
+ bra.b ld_pinf
+
+#########################################################################
+# src_one(): Return signed one according to sign of src operand. #
+#########################################################################
+ global src_one
+src_one:
+ tst.b SRC_EX(%a0) # check sign of source
+ bmi.b ld_mone
+
+#
+# ld_pone(): return positive one.
+#
+ global ld_pone
+ld_pone:
+ fmov.s &0x3f800000,%fp0 # load +1
+ clr.b FPSR_CC(%a6)
+ rts
+
+#
+# ld_mone(): return negative one.
+#
+ global ld_mone
+ld_mone:
+ fmov.s &0xbf800000,%fp0 # load -1
+ mov.b &neg_bmask,FPSR_CC(%a6) # set 'N' ccode bit
+ rts
+
+ppiby2: long 0x3fff0000, 0xc90fdaa2, 0x2168c235
+mpiby2: long 0xbfff0000, 0xc90fdaa2, 0x2168c235
+
+#################################################################
+# spi_2(): Return signed PI/2 according to sign of src operand. #
+#################################################################
+ global spi_2
+spi_2:
+ tst.b SRC_EX(%a0) # check sign of source
+ bmi.b ld_mpi2
+
+#
+# ld_ppi2(): return positive PI/2.
+#
+ global ld_ppi2
+ld_ppi2:
+ fmov.l %d0,%fpcr
+ fmov.x ppiby2(%pc),%fp0 # load +pi/2
+ bra.w t_pinx2 # set INEX2
+
+#
+# ld_mpi2(): return negative PI/2.
+#
+ global ld_mpi2
+ld_mpi2:
+ fmov.l %d0,%fpcr
+ fmov.x mpiby2(%pc),%fp0 # load -pi/2
+ bra.w t_minx2 # set INEX2
+
+####################################################
+# The following routines give support for fsincos. #
+####################################################
+
+#
+# ssincosz(): When the src operand is ZERO, store a one in the
+# cosine register and return a ZERO in fp0 w/ the same sign
+# as the src operand.
+#
+ global ssincosz
+ssincosz:
+ fmov.s &0x3f800000,%fp1
+ tst.b SRC_EX(%a0) # test sign
+ bpl.b sincoszp
+ fmov.s &0x80000000,%fp0 # return sin result in fp0
+ mov.b &z_bmask+neg_bmask,FPSR_CC(%a6)
+ rts
+sincoszp:
+ fmov.s &0x00000000,%fp0 # return sin result in fp0
+ mov.b &z_bmask,FPSR_CC(%a6)
+ rts
+
+#
+# ssincosi(): When the src operand is INF, store a QNAN in the cosine
+# register and jump to the operand error routine for negative
+# src operands.
+#
+ global ssincosi
+ssincosi:
+ fmov.x qnan(%pc),%fp1 # load NAN
+ bra.w t_operr
+
+#
+# ssincosqnan(): When the src operand is a QNAN, store the QNAN in the cosine
+# register and branch to the src QNAN routine.
+#
+ global ssincosqnan
+ssincosqnan:
+ fmov.x LOCAL_EX(%a0),%fp1
+ bra.w src_qnan
+
+########################################################################
+
+ global smod_sdnrm
+ global smod_snorm
+smod_sdnrm:
+smod_snorm:
+ mov.b DTAG(%a6),%d1
+ beq.l smod
+ cmpi.b %d1,&ZERO
+ beq.w smod_zro
+ cmpi.b %d1,&INF
+ beq.l t_operr
+ cmpi.b %d1,&DENORM
+ beq.l smod
+ bra.l dst_qnan
+
+ global smod_szero
+smod_szero:
+ mov.b DTAG(%a6),%d1
+ beq.l t_operr
+ cmpi.b %d1,&ZERO
+ beq.l t_operr
+ cmpi.b %d1,&INF
+ beq.l t_operr
+ cmpi.b %d1,&DENORM
+ beq.l t_operr
+ bra.l dst_qnan
+
+ global smod_sinf
+smod_sinf:
+ mov.b DTAG(%a6),%d1
+ beq.l smod_fpn
+ cmpi.b %d1,&ZERO
+ beq.l smod_zro
+ cmpi.b %d1,&INF
+ beq.l t_operr
+ cmpi.b %d1,&DENORM
+ beq.l smod_fpn
+ bra.l dst_qnan
+
+smod_zro:
+srem_zro:
+ mov.b SRC_EX(%a0),%d1 # get src sign
+ mov.b DST_EX(%a1),%d0 # get dst sign
+ eor.b %d0,%d1 # get qbyte sign
+ andi.b &0x80,%d1
+ mov.b %d1,FPSR_QBYTE(%a6)
+ tst.b %d0
+ bpl.w ld_pzero
+ bra.w ld_mzero
+
+smod_fpn:
+srem_fpn:
+ clr.b FPSR_QBYTE(%a6)
+ mov.l %d0,-(%sp)
+ mov.b SRC_EX(%a0),%d1 # get src sign
+ mov.b DST_EX(%a1),%d0 # get dst sign
+ eor.b %d0,%d1 # get qbyte sign
+ andi.b &0x80,%d1
+ mov.b %d1,FPSR_QBYTE(%a6)
+ cmpi.b DTAG(%a6),&DENORM
+ bne.b smod_nrm
+ lea DST(%a1),%a0
+ mov.l (%sp)+,%d0
+ bra t_resdnrm
+smod_nrm:
+ fmov.l (%sp)+,%fpcr
+ fmov.x DST(%a1),%fp0
+ tst.b DST_EX(%a1)
+ bmi.b smod_nrm_neg
+ rts
+
+smod_nrm_neg:
+ mov.b &neg_bmask,FPSR_CC(%a6) # set 'N' code
+ rts
+
+#########################################################################
+ global srem_snorm
+ global srem_sdnrm
+srem_sdnrm:
+srem_snorm:
+ mov.b DTAG(%a6),%d1
+ beq.l srem
+ cmpi.b %d1,&ZERO
+ beq.w srem_zro
+ cmpi.b %d1,&INF
+ beq.l t_operr
+ cmpi.b %d1,&DENORM
+ beq.l srem
+ bra.l dst_qnan
+
+ global srem_szero
+srem_szero:
+ mov.b DTAG(%a6),%d1
+ beq.l t_operr
+ cmpi.b %d1,&ZERO
+ beq.l t_operr
+ cmpi.b %d1,&INF
+ beq.l t_operr
+ cmpi.b %d1,&DENORM
+ beq.l t_operr
+ bra.l dst_qnan
+
+ global srem_sinf
+srem_sinf:
+ mov.b DTAG(%a6),%d1
+ beq.w srem_fpn
+ cmpi.b %d1,&ZERO
+ beq.w srem_zro
+ cmpi.b %d1,&INF
+ beq.l t_operr
+ cmpi.b %d1,&DENORM
+ beq.l srem_fpn
+ bra.l dst_qnan
+
+#########################################################################
+
+ global sscale_snorm
+ global sscale_sdnrm
+sscale_snorm:
+sscale_sdnrm:
+ mov.b DTAG(%a6),%d1
+ beq.l sscale
+ cmpi.b %d1,&ZERO
+ beq.l dst_zero
+ cmpi.b %d1,&INF
+ beq.l dst_inf
+ cmpi.b %d1,&DENORM
+ beq.l sscale
+ bra.l dst_qnan
+
+ global sscale_szero
+sscale_szero:
+ mov.b DTAG(%a6),%d1
+ beq.l sscale
+ cmpi.b %d1,&ZERO
+ beq.l dst_zero
+ cmpi.b %d1,&INF
+ beq.l dst_inf
+ cmpi.b %d1,&DENORM
+ beq.l sscale
+ bra.l dst_qnan
+
+ global sscale_sinf
+sscale_sinf:
+ mov.b DTAG(%a6),%d1
+ beq.l t_operr
+ cmpi.b %d1,&QNAN
+ beq.l dst_qnan
+ bra.l t_operr
+
+########################################################################
+
+ global sop_sqnan
+sop_sqnan:
+ mov.b DTAG(%a6),%d1
+ cmpi.b %d1,&QNAN
+ beq.l dst_qnan
+ bra.l src_qnan
+
+#########################################################################
+# norm(): normalize the mantissa of an extended precision input. the #
+# input operand should not be normalized already. #
+# #
+# XDEF **************************************************************** #
+# norm() #
+# #
+# XREF **************************************************************** #
+# none #
+# #
+# INPUT *************************************************************** #
+# a0 = pointer fp extended precision operand to normalize #
+# #
+# OUTPUT ************************************************************** #
+# d0 = number of bit positions the mantissa was shifted #
+# a0 = the input operand's mantissa is normalized; the exponent #
+# is unchanged. #
+# #
+#########################################################################
+ global norm
+norm:
+ mov.l %d2, -(%sp) # create some temp regs
+ mov.l %d3, -(%sp)
+
+ mov.l FTEMP_HI(%a0), %d0 # load hi(mantissa)
+ mov.l FTEMP_LO(%a0), %d1 # load lo(mantissa)
+
+ bfffo %d0{&0:&32}, %d2 # how many places to shift?
+ beq.b norm_lo # hi(man) is all zeroes!
+
+norm_hi:
+ lsl.l %d2, %d0 # left shift hi(man)
+ bfextu %d1{&0:%d2}, %d3 # extract lo bits
+
+ or.l %d3, %d0 # create hi(man)
+ lsl.l %d2, %d1 # create lo(man)
+
+ mov.l %d0, FTEMP_HI(%a0) # store new hi(man)
+ mov.l %d1, FTEMP_LO(%a0) # store new lo(man)
+
+ mov.l %d2, %d0 # return shift amount
+
+ mov.l (%sp)+, %d3 # restore temp regs
+ mov.l (%sp)+, %d2
+
+ rts
+
+norm_lo:
+ bfffo %d1{&0:&32}, %d2 # how many places to shift?
+ lsl.l %d2, %d1 # shift lo(man)
+ add.l &32, %d2 # add 32 to shft amount
+
+ mov.l %d1, FTEMP_HI(%a0) # store hi(man)
+ clr.l FTEMP_LO(%a0) # lo(man) is now zero
+
+ mov.l %d2, %d0 # return shift amount
+
+ mov.l (%sp)+, %d3 # restore temp regs
+ mov.l (%sp)+, %d2
+
+ rts
+
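+# norm() in C, with a count-leading-zeros primitive standing in for
+# bfffo (sketch; the mantissa must be nonzero and not already
+# normalized, as the header above requires):
+#
+#	int norm(uint32_t *hi, uint32_t *lo)
+#	{
+#		int shift;
+#		if (*hi) {
+#			shift = __builtin_clz(*hi);
+#			*hi = (*hi << shift) | (*lo >> (32 - shift));
+#			*lo <<= shift;
+#		} else {			/* hi(man) is all zeroes */
+#			shift = 32 + __builtin_clz(*lo);
+#			*hi = *lo << (shift - 32);
+#			*lo = 0;
+#		}
+#		return shift;		/* number of positions shifted */
+#	}
+#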
+#########################################################################
+# unnorm_fix(): - changes an UNNORM to one of NORM, DENORM, or ZERO #
+# - returns corresponding optype tag #
+# #
+# XDEF **************************************************************** #
+# unnorm_fix() #
+# #
+# XREF **************************************************************** #
+# norm() - normalize the mantissa #
+# #
+# INPUT *************************************************************** #
+# a0 = pointer to unnormalized extended precision number #
+# #
+# OUTPUT ************************************************************** #
+# d0 = optype tag - is corrected to one of NORM, DENORM, or ZERO #
+# a0 = input operand has been converted to a norm, denorm, or #
+# zero; both the exponent and mantissa are changed. #
+# #
+#########################################################################
+
+ global unnorm_fix
+unnorm_fix:
+ bfffo FTEMP_HI(%a0){&0:&32}, %d0 # how many shifts are needed?
+ bne.b unnorm_shift # hi(man) is not all zeroes
+
+#
+# hi(man) is all zeroes so see if any bits in lo(man) are set
+#
+unnorm_chk_lo:
+ bfffo FTEMP_LO(%a0){&0:&32}, %d0 # is operand really a zero?
+ beq.w unnorm_zero # yes
+
+ add.w &32, %d0 # no; fix shift distance
+
+#
+# d0 = # shifts needed for complete normalization
+#
+unnorm_shift:
+ clr.l %d1 # clear top word
+ mov.w FTEMP_EX(%a0), %d1 # extract exponent
+ and.w &0x7fff, %d1 # strip off sgn
+
+ cmp.w %d0, %d1 # will denorm push exp < 0?
+ bgt.b unnorm_nrm_zero # yes; denorm only until exp = 0
+
+#
+# exponent would not go < 0. therefore, number stays normalized
+#
+ sub.w %d0, %d1 # shift exponent value
+ mov.w FTEMP_EX(%a0), %d0 # load old exponent
+ and.w &0x8000, %d0 # save old sign
+ or.w %d0, %d1 # {sgn,new exp}
+ mov.w %d1, FTEMP_EX(%a0) # insert new exponent
+
+ bsr.l norm # normalize UNNORM
+
+ mov.b &NORM, %d0 # return new optype tag
+ rts
+
+#
+# exponent would go < 0, so only denormalize until exp = 0
+#
+unnorm_nrm_zero:
+ cmp.b %d1, &32 # is exp <= 32?
+ bgt.b unnorm_nrm_zero_lrg # no; go handle large exponent
+
+ bfextu FTEMP_HI(%a0){%d1:&32}, %d0 # extract new hi(man)
+ mov.l %d0, FTEMP_HI(%a0) # save new hi(man)
+
+ mov.l FTEMP_LO(%a0), %d0 # fetch old lo(man)
+ lsl.l %d1, %d0 # extract new lo(man)
+ mov.l %d0, FTEMP_LO(%a0) # save new lo(man)
+
+ and.w &0x8000, FTEMP_EX(%a0) # set exp = 0
+
+ mov.b &DENORM, %d0 # return new optype tag
+ rts
+
+#
+# only mantissa bits set are in lo(man)
+#
+unnorm_nrm_zero_lrg:
+ sub.w &32, %d1 # adjust shft amt by 32
+
+ mov.l FTEMP_LO(%a0), %d0 # fetch old lo(man)
+ lsl.l %d1, %d0 # left shift lo(man)
+
+ mov.l %d0, FTEMP_HI(%a0) # store new hi(man)
+ clr.l FTEMP_LO(%a0) # lo(man) = 0
+
+ and.w &0x8000, FTEMP_EX(%a0) # set exp = 0
+
+ mov.b &DENORM, %d0 # return new optype tag
+ rts
+
+#
+# whole mantissa is zero so this UNNORM is actually a zero
+#
+unnorm_zero:
+ and.w &0x8000, FTEMP_EX(%a0) # force exponent to zero
+
+ mov.b &ZERO, %d0 # fix optype tag
+ rts
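+# unnorm_fix() in C terms (sketch; the mantissa shifts elided here are
+# the same ones norm() performs):
+#
+#	int unnorm_fix(uint16_t *ex, uint32_t hi, uint32_t lo)
+#	{
+#		uint16_t sgn = *ex & 0x8000, exp = *ex & 0x7fff;
+#		if (!hi && !lo) {		/* really a zero */
+#			*ex = sgn;
+#			return ZERO;
+#		}
+#		int shift = hi ? __builtin_clz(hi) : 32 + __builtin_clz(lo);
+#		if (shift <= exp) {		/* exponent stays >= 0 */
+#			*ex = sgn | (exp - shift);
+#			/* ...shift mantissa left by 'shift' */
+#			return NORM;
+#		}
+#		*ex = sgn;			/* denorm only until exp = 0 */
+#		/* ...shift mantissa left by 'exp' */
+#		return DENORM;
+#	}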
diff --git a/arch/m68k/ifpsp060/src/fpsp.S b/arch/m68k/ifpsp060/src/fpsp.S
new file mode 100644
index 00000000000..3b597a9bbf4
--- /dev/null
+++ b/arch/m68k/ifpsp060/src/fpsp.S
@@ -0,0 +1,24785 @@
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+MOTOROLA MICROPROCESSOR & MEMORY TECHNOLOGY GROUP
+M68000 Hi-Performance Microprocessor Division
+M68060 Software Package
+Production Release P1.00 -- October 10, 1994
+
+M68060 Software Package Copyright © 1993, 1994 Motorola Inc. All rights reserved.
+
+THE SOFTWARE is provided on an "AS IS" basis and without warranty.
+To the maximum extent permitted by applicable law,
+MOTOROLA DISCLAIMS ALL WARRANTIES WHETHER EXPRESS OR IMPLIED,
+INCLUDING IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
+and any warranty against infringement with regard to the SOFTWARE
+(INCLUDING ANY MODIFIED VERSIONS THEREOF) and any accompanying written materials.
+
+To the maximum extent permitted by applicable law,
+IN NO EVENT SHALL MOTOROLA BE LIABLE FOR ANY DAMAGES WHATSOEVER
+(INCLUDING WITHOUT LIMITATION, DAMAGES FOR LOSS OF BUSINESS PROFITS,
+BUSINESS INTERRUPTION, LOSS OF BUSINESS INFORMATION, OR OTHER PECUNIARY LOSS)
+ARISING OF THE USE OR INABILITY TO USE THE SOFTWARE.
+Motorola assumes no responsibility for the maintenance and support of the SOFTWARE.
+
+You are hereby granted a copyright license to use, modify, and distribute the SOFTWARE
+so long as this entire notice is retained without alteration in any modified and/or
+redistributed versions, and that such modified versions are clearly identified as such.
+No licenses are granted by implication, estoppel or otherwise under any patents
+or trademarks of Motorola, Inc.
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+#
+# freal.s:
+# This file is appended to the top of the 060FPSP package
+# and contains the entry points into the package. The user, in
+# effect, branches to one of the branch table entries located
+# after _060FPSP_TABLE.
+# Also, subroutine stubs exist in this file (_fpsp_done for
+# example) that are referenced by the FPSP package itself in order
+# to call a given routine. The stub routine actually performs the
+# callout. The FPSP code does a "bsr" to the stub routine. This
+# extra layer of hierarchy adds a slight performance penalty but
+# it makes the FPSP code easier to read and more maintainable.
+#
+
+set _off_bsun, 0x00
+set _off_snan, 0x04
+set _off_operr, 0x08
+set _off_ovfl, 0x0c
+set _off_unfl, 0x10
+set _off_dz, 0x14
+set _off_inex, 0x18
+set _off_fline, 0x1c
+set _off_fpu_dis, 0x20
+set _off_trap, 0x24
+set _off_trace, 0x28
+set _off_access, 0x2c
+set _off_done, 0x30
+
+set _off_imr, 0x40
+set _off_dmr, 0x44
+set _off_dmw, 0x48
+set _off_irw, 0x4c
+set _off_irl, 0x50
+set _off_drb, 0x54
+set _off_drw, 0x58
+set _off_drl, 0x5c
+set _off_dwb, 0x60
+set _off_dww, 0x64
+set _off_dwl, 0x68
+
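+# The callout table itself lives in the 128 bytes just below
+# _060FPSP_TABLE; each entry is a longword offset, relative to that
+# base, of an OS-supplied routine.  A stub such as _fpsp_done below
+# effectively does the following (C sketch; 'base' and 'target' are
+# illustrative names):
+#
+#	int32_t *base = (int32_t *)(_060FPSP_TABLE - 0x80);
+#	void (*target)(void) =
+#		(void (*)(void))((char *)base + base[_off_done / 4]);
+#	target();		/* with d0 preserved across the dispatch */
+#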
+_060FPSP_TABLE:
+
+###############################################################
+
+# Here's the table of ENTRY POINTS for those linking the package.
+ bra.l _fpsp_snan
+ short 0x0000
+ bra.l _fpsp_operr
+ short 0x0000
+ bra.l _fpsp_ovfl
+ short 0x0000
+ bra.l _fpsp_unfl
+ short 0x0000
+ bra.l _fpsp_dz
+ short 0x0000
+ bra.l _fpsp_inex
+ short 0x0000
+ bra.l _fpsp_fline
+ short 0x0000
+ bra.l _fpsp_unsupp
+ short 0x0000
+ bra.l _fpsp_effadd
+ short 0x0000
+
+ space 56
+
+###############################################################
+ global _fpsp_done
+_fpsp_done:
+ mov.l %d0,-(%sp)
+ mov.l (_060FPSP_TABLE-0x80+_off_done,%pc),%d0
+ pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
+ mov.l 0x4(%sp),%d0
+ rtd &0x4
+
+ global _real_ovfl
+_real_ovfl:
+ mov.l %d0,-(%sp)
+ mov.l (_060FPSP_TABLE-0x80+_off_ovfl,%pc),%d0
+ pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
+ mov.l 0x4(%sp),%d0
+ rtd &0x4
+
+ global _real_unfl
+_real_unfl:
+ mov.l %d0,-(%sp)
+ mov.l (_060FPSP_TABLE-0x80+_off_unfl,%pc),%d0
+ pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
+ mov.l 0x4(%sp),%d0
+ rtd &0x4
+
+ global _real_inex
+_real_inex:
+ mov.l %d0,-(%sp)
+ mov.l (_060FPSP_TABLE-0x80+_off_inex,%pc),%d0
+ pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
+ mov.l 0x4(%sp),%d0
+ rtd &0x4
+
+ global _real_bsun
+_real_bsun:
+ mov.l %d0,-(%sp)
+ mov.l (_060FPSP_TABLE-0x80+_off_bsun,%pc),%d0
+ pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
+ mov.l 0x4(%sp),%d0
+ rtd &0x4
+
+ global _real_operr
+_real_operr:
+ mov.l %d0,-(%sp)
+ mov.l (_060FPSP_TABLE-0x80+_off_operr,%pc),%d0
+ pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
+ mov.l 0x4(%sp),%d0
+ rtd &0x4
+
+ global _real_snan
+_real_snan:
+ mov.l %d0,-(%sp)
+ mov.l (_060FPSP_TABLE-0x80+_off_snan,%pc),%d0
+ pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
+ mov.l 0x4(%sp),%d0
+ rtd &0x4
+
+ global _real_dz
+_real_dz:
+ mov.l %d0,-(%sp)
+ mov.l (_060FPSP_TABLE-0x80+_off_dz,%pc),%d0
+ pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
+ mov.l 0x4(%sp),%d0
+ rtd &0x4
+
+ global _real_fline
+_real_fline:
+ mov.l %d0,-(%sp)
+ mov.l (_060FPSP_TABLE-0x80+_off_fline,%pc),%d0
+ pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
+ mov.l 0x4(%sp),%d0
+ rtd &0x4
+
+ global _real_fpu_disabled
+_real_fpu_disabled:
+ mov.l %d0,-(%sp)
+ mov.l (_060FPSP_TABLE-0x80+_off_fpu_dis,%pc),%d0
+ pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
+ mov.l 0x4(%sp),%d0
+ rtd &0x4
+
+ global _real_trap
+_real_trap:
+ mov.l %d0,-(%sp)
+ mov.l (_060FPSP_TABLE-0x80+_off_trap,%pc),%d0
+ pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
+ mov.l 0x4(%sp),%d0
+ rtd &0x4
+
+ global _real_trace
+_real_trace:
+ mov.l %d0,-(%sp)
+ mov.l (_060FPSP_TABLE-0x80+_off_trace,%pc),%d0
+ pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
+ mov.l 0x4(%sp),%d0
+ rtd &0x4
+
+ global _real_access
+_real_access:
+ mov.l %d0,-(%sp)
+ mov.l (_060FPSP_TABLE-0x80+_off_access,%pc),%d0
+ pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
+ mov.l 0x4(%sp),%d0
+ rtd &0x4
+
+#######################################
+
+ global _imem_read
+_imem_read:
+ mov.l %d0,-(%sp)
+ mov.l (_060FPSP_TABLE-0x80+_off_imr,%pc),%d0
+ pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
+ mov.l 0x4(%sp),%d0
+ rtd &0x4
+
+ global _dmem_read
+_dmem_read:
+ mov.l %d0,-(%sp)
+ mov.l (_060FPSP_TABLE-0x80+_off_dmr,%pc),%d0
+ pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
+ mov.l 0x4(%sp),%d0
+ rtd &0x4
+
+ global _dmem_write
+_dmem_write:
+ mov.l %d0,-(%sp)
+ mov.l (_060FPSP_TABLE-0x80+_off_dmw,%pc),%d0
+ pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
+ mov.l 0x4(%sp),%d0
+ rtd &0x4
+
+ global _imem_read_word
+_imem_read_word:
+ mov.l %d0,-(%sp)
+ mov.l (_060FPSP_TABLE-0x80+_off_irw,%pc),%d0
+ pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
+ mov.l 0x4(%sp),%d0
+ rtd &0x4
+
+ global _imem_read_long
+_imem_read_long:
+ mov.l %d0,-(%sp)
+ mov.l (_060FPSP_TABLE-0x80+_off_irl,%pc),%d0
+ pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
+ mov.l 0x4(%sp),%d0
+ rtd &0x4
+
+ global _dmem_read_byte
+_dmem_read_byte:
+ mov.l %d0,-(%sp)
+ mov.l (_060FPSP_TABLE-0x80+_off_drb,%pc),%d0
+ pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
+ mov.l 0x4(%sp),%d0
+ rtd &0x4
+
+ global _dmem_read_word
+_dmem_read_word:
+ mov.l %d0,-(%sp)
+ mov.l (_060FPSP_TABLE-0x80+_off_drw,%pc),%d0
+ pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
+ mov.l 0x4(%sp),%d0
+ rtd &0x4
+
+ global _dmem_read_long
+_dmem_read_long:
+ mov.l %d0,-(%sp)
+ mov.l (_060FPSP_TABLE-0x80+_off_drl,%pc),%d0
+ pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
+ mov.l 0x4(%sp),%d0
+ rtd &0x4
+
+ global _dmem_write_byte
+_dmem_write_byte:
+ mov.l %d0,-(%sp)
+ mov.l (_060FPSP_TABLE-0x80+_off_dwb,%pc),%d0
+ pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
+ mov.l 0x4(%sp),%d0
+ rtd &0x4
+
+ global _dmem_write_word
+_dmem_write_word:
+ mov.l %d0,-(%sp)
+ mov.l (_060FPSP_TABLE-0x80+_off_dww,%pc),%d0
+ pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
+ mov.l 0x4(%sp),%d0
+ rtd &0x4
+
+ global _dmem_write_long
+_dmem_write_long:
+ mov.l %d0,-(%sp)
+ mov.l (_060FPSP_TABLE-0x80+_off_dwl,%pc),%d0
+ pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
+ mov.l 0x4(%sp),%d0
+ rtd &0x4
+
+#
+# This file contains a set of define statements for constants
+# in order to promote readability within the core code itself.
+#
+
+set LOCAL_SIZE, 192 # stack frame size(bytes)
+set LV, -LOCAL_SIZE # stack offset
+
+set EXC_SR, 0x4 # stack status register
+set EXC_PC, 0x6 # stack pc
+set EXC_VOFF, 0xa # stacked vector offset
+set EXC_EA, 0xc # stacked <ea>
+
+set EXC_FP, 0x0 # frame pointer
+
+set EXC_AREGS, -68 # offset of all address regs
+set EXC_DREGS, -100 # offset of all data regs
+set EXC_FPREGS, -36 # offset of all fp regs
+
+set EXC_A7, EXC_AREGS+(7*4) # offset of saved a7
+set OLD_A7, EXC_AREGS+(6*4) # extra copy of saved a7
+set EXC_A6, EXC_AREGS+(6*4) # offset of saved a6
+set EXC_A5, EXC_AREGS+(5*4)
+set EXC_A4, EXC_AREGS+(4*4)
+set EXC_A3, EXC_AREGS+(3*4)
+set EXC_A2, EXC_AREGS+(2*4)
+set EXC_A1, EXC_AREGS+(1*4)
+set EXC_A0, EXC_AREGS+(0*4)
+set EXC_D7, EXC_DREGS+(7*4)
+set EXC_D6, EXC_DREGS+(6*4)
+set EXC_D5, EXC_DREGS+(5*4)
+set EXC_D4, EXC_DREGS+(4*4)
+set EXC_D3, EXC_DREGS+(3*4)
+set EXC_D2, EXC_DREGS+(2*4)
+set EXC_D1, EXC_DREGS+(1*4)
+set EXC_D0, EXC_DREGS+(0*4)
+
+set EXC_FP0, EXC_FPREGS+(0*12) # offset of saved fp0
+set EXC_FP1, EXC_FPREGS+(1*12) # offset of saved fp1
+set EXC_FP2, EXC_FPREGS+(2*12) # offset of saved fp2 (not used)
+
+set FP_SCR1, LV+80 # fp scratch 1
+set FP_SCR1_EX, FP_SCR1+0
+set FP_SCR1_SGN, FP_SCR1+2
+set FP_SCR1_HI, FP_SCR1+4
+set FP_SCR1_LO, FP_SCR1+8
+
+set FP_SCR0, LV+68 # fp scratch 0
+set FP_SCR0_EX, FP_SCR0+0
+set FP_SCR0_SGN, FP_SCR0+2
+set FP_SCR0_HI, FP_SCR0+4
+set FP_SCR0_LO, FP_SCR0+8
+
+set FP_DST, LV+56 # fp destination operand
+set FP_DST_EX, FP_DST+0
+set FP_DST_SGN, FP_DST+2
+set FP_DST_HI, FP_DST+4
+set FP_DST_LO, FP_DST+8
+
+set FP_SRC, LV+44 # fp source operand
+set FP_SRC_EX, FP_SRC+0
+set FP_SRC_SGN, FP_SRC+2
+set FP_SRC_HI, FP_SRC+4
+set FP_SRC_LO, FP_SRC+8
+
+set USER_FPIAR, LV+40 # FP instr address register
+
+set USER_FPSR, LV+36 # FP status register
+set FPSR_CC, USER_FPSR+0 # FPSR condition codes
+set FPSR_QBYTE, USER_FPSR+1	# FPSR quotient byte
+set FPSR_EXCEPT, USER_FPSR+2 # FPSR exception status byte
+set FPSR_AEXCEPT, USER_FPSR+3 # FPSR accrued exception byte
+
+set USER_FPCR, LV+32 # FP control register
+set FPCR_ENABLE, USER_FPCR+2 # FPCR exception enable
+set FPCR_MODE, USER_FPCR+3 # FPCR rounding mode control
+
+set L_SCR3, LV+28 # integer scratch 3
+set L_SCR2, LV+24 # integer scratch 2
+set L_SCR1, LV+20 # integer scratch 1
+
+set STORE_FLG, LV+19		# flag: operand store (i.e. not fcmp/ftst)
+
+set EXC_TEMP2, LV+24 # temporary space
+set EXC_TEMP, LV+16 # temporary space
+
+set DTAG, LV+15 # destination operand type
+set STAG, LV+14 # source operand type
+
+set SPCOND_FLG, LV+10 # flag: special case (see below)
+
+set EXC_CC, LV+8 # saved condition codes
+set EXC_EXTWPTR, LV+4 # saved current PC (active)
+set EXC_EXTWORD, LV+2 # saved extension word
+set EXC_CMDREG, LV+2 # saved extension word
+set EXC_OPWORD, LV+0 # saved operation word
+
+################################
+
+# Helpful macros
+
+set FTEMP, 0 # offsets within an
+set FTEMP_EX, 0 # extended precision
+set FTEMP_SGN, 2 # value saved in memory.
+set FTEMP_HI, 4
+set FTEMP_LO, 8
+set FTEMP_GRS, 12
+
+set LOCAL, 0 # offsets within an
+set LOCAL_EX, 0 # extended precision
+set LOCAL_SGN, 2 # value saved in memory.
+set LOCAL_HI, 4
+set LOCAL_LO, 8
+set LOCAL_GRS, 12
+
+set DST, 0 # offsets within an
+set DST_EX, 0 # extended precision
+set DST_HI, 4 # value saved in memory.
+set DST_LO, 8
+
+set SRC, 0 # offsets within an
+set SRC_EX, 0 # extended precision
+set SRC_HI, 4 # value saved in memory.
+set SRC_LO, 8
+
+set SGL_LO, 0x3f81 # min sgl prec exponent
+set SGL_HI, 0x407e # max sgl prec exponent
+set DBL_LO, 0x3c01 # min dbl prec exponent
+set DBL_HI, 0x43fe # max dbl prec exponent
+set EXT_LO, 0x0 # min ext prec exponent
+set EXT_HI, 0x7ffe # max ext prec exponent
+
+set EXT_BIAS, 0x3fff # extended precision bias
+set SGL_BIAS, 0x007f # single precision bias
+set DBL_BIAS, 0x03ff # double precision bias
+
+set NORM, 0x00 # operand type for STAG/DTAG
+set ZERO, 0x01 # operand type for STAG/DTAG
+set INF, 0x02 # operand type for STAG/DTAG
+set QNAN, 0x03 # operand type for STAG/DTAG
+set DENORM, 0x04 # operand type for STAG/DTAG
+set SNAN, 0x05 # operand type for STAG/DTAG
+set UNNORM, 0x06 # operand type for STAG/DTAG
+
+##################
+# FPSR/FPCR bits #
+##################
+set neg_bit, 0x3 # negative result
+set z_bit, 0x2 # zero result
+set inf_bit, 0x1 # infinite result
+set nan_bit, 0x0 # NAN result
+
+set q_sn_bit, 0x7 # sign bit of quotient byte
+
+set bsun_bit, 7 # branch on unordered
+set snan_bit, 6 # signalling NAN
+set operr_bit, 5 # operand error
+set ovfl_bit, 4 # overflow
+set unfl_bit, 3 # underflow
+set dz_bit, 2 # divide by zero
+set inex2_bit, 1 # inexact result 2
+set inex1_bit, 0 # inexact result 1
+
+set aiop_bit, 7 # accrued inexact operation bit
+set aovfl_bit, 6 # accrued overflow bit
+set aunfl_bit, 5 # accrued underflow bit
+set adz_bit, 4 # accrued dz bit
+set ainex_bit, 3 # accrued inexact bit
+
+#############################
+# FPSR individual bit masks #
+#############################
+set neg_mask, 0x08000000 # negative bit mask (lw)
+set inf_mask, 0x02000000 # infinity bit mask (lw)
+set z_mask, 0x04000000 # zero bit mask (lw)
+set nan_mask, 0x01000000 # nan bit mask (lw)
+
+set neg_bmask, 0x08 # negative bit mask (byte)
+set inf_bmask, 0x02 # infinity bit mask (byte)
+set z_bmask, 0x04 # zero bit mask (byte)
+set nan_bmask, 0x01 # nan bit mask (byte)
+
+set bsun_mask, 0x00008000 # bsun exception mask
+set snan_mask, 0x00004000 # snan exception mask
+set operr_mask, 0x00002000 # operr exception mask
+set ovfl_mask, 0x00001000 # overflow exception mask
+set unfl_mask, 0x00000800 # underflow exception mask
+set dz_mask, 0x00000400 # dz exception mask
+set inex2_mask, 0x00000200 # inex2 exception mask
+set inex1_mask, 0x00000100 # inex1 exception mask
+
+set aiop_mask, 0x00000080 # accrued illegal operation
+set aovfl_mask, 0x00000040 # accrued overflow
+set aunfl_mask, 0x00000020 # accrued underflow
+set adz_mask, 0x00000010 # accrued divide by zero
+set ainex_mask, 0x00000008 # accrued inexact
+
+######################################
+# FPSR combinations used in the FPSP #
+######################################
+set dzinf_mask, inf_mask+dz_mask+adz_mask
+set opnan_mask, nan_mask+operr_mask+aiop_mask
+set nzi_mask, 0x01ffffff	# clears N, Z, and I
+set unfinx_mask, unfl_mask+inex2_mask+aunfl_mask+ainex_mask
+set unf2inx_mask, unfl_mask+inex2_mask+ainex_mask
+set ovfinx_mask, ovfl_mask+inex2_mask+aovfl_mask+ainex_mask
+set inx1a_mask, inex1_mask+ainex_mask
+set inx2a_mask, inex2_mask+ainex_mask
+set snaniop_mask, nan_mask+snan_mask+aiop_mask
+set snaniop2_mask, snan_mask+aiop_mask
+set naniop_mask, nan_mask+aiop_mask
+set neginf_mask, neg_mask+inf_mask
+set infaiop_mask, inf_mask+aiop_mask
+set negz_mask, neg_mask+z_mask
+set opaop_mask, operr_mask+aiop_mask
+set unfl_inx_mask, unfl_mask+aunfl_mask+ainex_mask
+set ovfl_inx_mask, ovfl_mask+aovfl_mask+ainex_mask
+
+#########
+# misc. #
+#########
+set rnd_stky_bit, 29 # stky bit pos in longword
+
+set sign_bit, 0x7 # sign bit
+set signan_bit, 0x6 # signalling nan bit
+
+set sgl_thresh, 0x3f81 # minimum sgl exponent
+set dbl_thresh, 0x3c01 # minimum dbl exponent
+
+set x_mode, 0x0 # extended precision
+set s_mode, 0x4 # single precision
+set d_mode, 0x8 # double precision
+
+set rn_mode, 0x0 # round-to-nearest
+set rz_mode, 0x1 # round-to-zero
+set rm_mode, 0x2		# round-to-minus-infinity
+set rp_mode, 0x3 # round-to-plus-infinity
+
+set mantissalen, 64 # length of mantissa in bits
+
+set BYTE, 1 # len(byte) == 1 byte
+set WORD, 2 # len(word) == 2 bytes
+set LONG, 4			# len(longword) == 4 bytes
+
+set BSUN_VEC, 0xc0 # bsun vector offset
+set INEX_VEC, 0xc4 # inexact vector offset
+set DZ_VEC, 0xc8 # dz vector offset
+set UNFL_VEC, 0xcc # unfl vector offset
+set OPERR_VEC, 0xd0 # operr vector offset
+set OVFL_VEC, 0xd4 # ovfl vector offset
+set SNAN_VEC, 0xd8 # snan vector offset
+
+###########################
+# SPecial CONDition FLaGs #
+###########################
+set ftrapcc_flg, 0x01 # flag bit: ftrapcc exception
+set fbsun_flg, 0x02 # flag bit: bsun exception
+set mia7_flg, 0x04 # flag bit: (a7)+ <ea>
+set mda7_flg, 0x08 # flag bit: -(a7) <ea>
+set fmovm_flg, 0x40 # flag bit: fmovm instruction
+set immed_flg, 0x80 # flag bit: &<data> <ea>
+
+set ftrapcc_bit, 0x0
+set fbsun_bit, 0x1
+set mia7_bit, 0x2
+set mda7_bit, 0x3
+set immed_bit, 0x7
+
+##################################
+# TRANSCENDENTAL "LAST-OP" FLAGS #
+##################################
+set FMUL_OP, 0x0 # fmul instr performed last
+set FDIV_OP, 0x1 # fdiv performed last
+set FADD_OP, 0x2 # fadd performed last
+set FMOV_OP, 0x3 # fmov performed last
+
+#############
+# CONSTANTS #
+#############
+T1: long 0x40C62D38,0xD3D64634 # 16381 LOG2 LEAD
+T2: long 0x3D6F90AE,0xB1E75CC7 # 16381 LOG2 TRAIL
+
+PI: long 0x40000000,0xC90FDAA2,0x2168C235,0x00000000
+PIBY2: long 0x3FFF0000,0xC90FDAA2,0x2168C235,0x00000000
+
+TWOBYPI:
+ long 0x3FE45F30,0x6DC9C883
+
+#########################################################################
+# XDEF **************************************************************** #
+# _fpsp_ovfl(): 060FPSP entry point for FP Overflow exception. #
+# #
+# This handler should be the first code executed upon taking the #
+# FP Overflow exception in an operating system. #
+# #
+# XREF **************************************************************** #
+# _imem_read_long() - read instruction longword #
+# fix_skewed_ops() - adjust src operand in fsave frame #
+# set_tag_x() - determine optype of src/dst operands #
+# store_fpreg() - store opclass 0 or 2 result to FP regfile #
+# unnorm_fix() - change UNNORM operands to NORM or ZERO #
+# load_fpn2() - load dst operand from FP regfile #
+# fout() - emulate an opclass 3 instruction #
+#	tbl_unsupp - addr of table of emulation routines for opclass 0,2 #
+# _fpsp_done() - "callout" for 060FPSP exit (all work done!) #
+# _real_ovfl() - "callout" for Overflow exception enabled code #
+# _real_inex() - "callout" for Inexact exception enabled code #
+# _real_trace() - "callout" for Trace exception code #
+# #
+# INPUT *************************************************************** #
+# - The system stack contains the FP Ovfl exception stack frame #
+# - The fsave frame contains the source operand #
+# #
+# OUTPUT ************************************************************** #
+# Overflow Exception enabled: #
+# - The system stack is unchanged #
+# - The fsave frame contains the adjusted src op for opclass 0,2 #
+# Overflow Exception disabled: #
+# - The system stack is unchanged #
+# - The "exception present" flag in the fsave frame is cleared #
+# #
+# ALGORITHM *********************************************************** #
+# On the 060, if an FP overflow is present as the result of any #
+# instruction, the 060 will take an overflow exception whether the #
+# exception is enabled or disabled in the FPCR. For the disabled case, #
+# this handler emulates the instruction to determine what the correct	#
+# default result should be for the operation. This default result is #
+# then stored in either the FP regfile, data regfile, or memory. #
+# Finally, the handler exits through the "callout" _fpsp_done() #
+# denoting that no exceptional conditions exist within the machine. #
+# If the exception is enabled, then this handler must create the #
+# exceptional operand and place it in the fsave state frame, and store	#
+# the default result (only if the instruction is opclass 3). For #
+# exceptions enabled, this handler must exit through the "callout" #
+# _real_ovfl() so that the operating system enabled overflow handler #
+# can handle this case. #
+# Two other conditions exist. First, if overflow was disabled #
+# but the inexact exception was enabled, this handler must exit #
+# through the "callout" _real_inex() regardless of whether the result #
+# was inexact. #
+# Also, in the case of an opclass three instruction where #
+# overflow was disabled and the trace exception was enabled, this #
+# handler must exit through the "callout" _real_trace(). #
+# #
+#########################################################################
+
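+# The handler's exit decision, restated as a C sketch (illustrative
+# names only; the real code tests ovfl_bit and inex2_bit in
+# FPCR_ENABLE):
+#
+#	if (fpcr_enable & OVFL)			/* EXOP left in fsave frame */
+#		exit_through(_real_ovfl);
+#	else if (fpcr_enable & INEX2)		/* even if result was exact */
+#		exit_through(_real_inex);
+#	else if (opclass3 && trace_enabled)
+#		exit_through(_real_trace);
+#	else
+#		exit_through(_fpsp_done);	/* nothing exceptional left */
+#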
+ global _fpsp_ovfl
+_fpsp_ovfl:
+
+#$# sub.l &24,%sp # make room for src/dst
+
+ link.w %a6,&-LOCAL_SIZE # init stack frame
+
+ fsave FP_SRC(%a6) # grab the "busy" frame
+
+ movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
+ fmovm.l %fpcr,%fpsr,%fpiar,USER_FPCR(%a6) # save ctrl regs
+ fmovm.x &0xc0,EXC_FPREGS(%a6) # save fp0-fp1 on stack
+
+# the FPIAR holds the "current PC" of the faulting instruction
+ mov.l USER_FPIAR(%a6),EXC_EXTWPTR(%a6)
+ mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
+ addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
+ bsr.l _imem_read_long # fetch the instruction words
+ mov.l %d0,EXC_OPWORD(%a6)
+
+##############################################################################
+
+ btst &0x5,EXC_CMDREG(%a6) # is instr an fmove out?
+ bne.w fovfl_out
+
+
+ lea FP_SRC(%a6),%a0 # pass: ptr to src op
+ bsr.l fix_skewed_ops # fix src op
+
+# since, I believe, only NORMs and DENORMs can come through here,
+# maybe we can avoid the subroutine call.
+ lea FP_SRC(%a6),%a0 # pass: ptr to src op
+ bsr.l set_tag_x # tag the operand type
+ mov.b %d0,STAG(%a6) # maybe NORM,DENORM
+
+# bit five of the fp extension word separates the monadic and dyadic operations
+# that can pass through fpsp_ovfl(). remember that fcmp, ftst, and fsincos
+# will never take this exception.
+ btst &0x5,1+EXC_CMDREG(%a6) # is operation monadic or dyadic?
+ beq.b fovfl_extract # monadic
+
+ bfextu EXC_CMDREG(%a6){&6:&3},%d0 # dyadic; load dst reg
+ bsr.l load_fpn2 # load dst into FP_DST
+
+ lea FP_DST(%a6),%a0 # pass: ptr to dst op
+ bsr.l set_tag_x # tag the operand type
+ cmpi.b %d0,&UNNORM # is operand an UNNORM?
+ bne.b fovfl_op2_done # no
+ bsr.l unnorm_fix # yes; convert to NORM,DENORM,or ZERO
+fovfl_op2_done:
+ mov.b %d0,DTAG(%a6) # save dst optype tag
+
+fovfl_extract:
+
+#$# mov.l FP_SRC_EX(%a6),TRAP_SRCOP_EX(%a6)
+#$# mov.l FP_SRC_HI(%a6),TRAP_SRCOP_HI(%a6)
+#$# mov.l FP_SRC_LO(%a6),TRAP_SRCOP_LO(%a6)
+#$# mov.l FP_DST_EX(%a6),TRAP_DSTOP_EX(%a6)
+#$# mov.l FP_DST_HI(%a6),TRAP_DSTOP_HI(%a6)
+#$# mov.l FP_DST_LO(%a6),TRAP_DSTOP_LO(%a6)
+
+ clr.l %d0
+ mov.b FPCR_MODE(%a6),%d0 # pass rnd prec/mode
+
+ mov.b 1+EXC_CMDREG(%a6),%d1
+ andi.w &0x007f,%d1 # extract extension
+
+	andi.l		&0x00ff01ff,USER_FPSR(%a6) # zero all but accrued field
+
+ fmov.l &0x0,%fpcr # zero current control regs
+ fmov.l &0x0,%fpsr
+
+ lea FP_SRC(%a6),%a0
+ lea FP_DST(%a6),%a1
+
+# maybe we can make these entry points ONLY the OVFL entry points of each routine.
+ mov.l (tbl_unsupp.l,%pc,%d1.w*4),%d1 # fetch routine addr
+ jsr (tbl_unsupp.l,%pc,%d1.l*1)
+
+# the operation has been emulated. the result is in fp0.
+# the EXOP, if an exception occurred, is in fp1.
+# we must save the default result regardless of whether
+# traps are enabled or disabled.
+ bfextu EXC_CMDREG(%a6){&6:&3},%d0
+ bsr.l store_fpreg
+
+# the exceptional possibilities we have left ourselves with are ONLY overflow
+# and inexact. and, the inexact is such that overflow occurred and was disabled
+# but inexact was enabled.
+ btst &ovfl_bit,FPCR_ENABLE(%a6)
+ bne.b fovfl_ovfl_on
+
+ btst &inex2_bit,FPCR_ENABLE(%a6)
+ bne.b fovfl_inex_on
+
+ fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+
+ unlk %a6
+#$# add.l &24,%sp
+ bra.l _fpsp_done
+
+# overflow is enabled AND overflow, of course, occurred. so, we have the EXOP
+# in fp1. now, simply jump to _real_ovfl()!
+fovfl_ovfl_on:
+ fmovm.x &0x40,FP_SRC(%a6) # save EXOP (fp1) to stack
+
+ mov.w &0xe005,2+FP_SRC(%a6) # save exc status
+
+ fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+
+ frestore FP_SRC(%a6) # do this after fmovm,other f<op>s!
+
+ unlk %a6
+
+ bra.l _real_ovfl
+
+# overflow occurred but is disabled. meanwhile, inexact is enabled. therefore,
+# we must jump to real_inex().
+fovfl_inex_on:
+
+ fmovm.x &0x40,FP_SRC(%a6) # save EXOP (fp1) to stack
+
+ mov.b &0xc4,1+EXC_VOFF(%a6) # vector offset = 0xc4
+ mov.w &0xe001,2+FP_SRC(%a6) # save exc status
+
+ fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+
+ frestore FP_SRC(%a6) # do this after fmovm,other f<op>s!
+
+ unlk %a6
+
+ bra.l _real_inex
+
+########################################################################
+fovfl_out:
+
+
+#$# mov.l FP_SRC_EX(%a6),TRAP_SRCOP_EX(%a6)
+#$# mov.l FP_SRC_HI(%a6),TRAP_SRCOP_HI(%a6)
+#$# mov.l FP_SRC_LO(%a6),TRAP_SRCOP_LO(%a6)
+
+# the src operand is definitely a NORM(!), so tag it as such
+ mov.b &NORM,STAG(%a6) # set src optype tag
+
+ clr.l %d0
+ mov.b FPCR_MODE(%a6),%d0 # pass rnd prec/mode
+
+	and.l		&0xffff00ff,USER_FPSR(%a6) # zero all but accrued field
+
+ fmov.l &0x0,%fpcr # zero current control regs
+ fmov.l &0x0,%fpsr
+
+ lea FP_SRC(%a6),%a0 # pass ptr to src operand
+
+ bsr.l fout
+
+ btst &ovfl_bit,FPCR_ENABLE(%a6)
+ bne.w fovfl_ovfl_on
+
+ btst &inex2_bit,FPCR_ENABLE(%a6)
+ bne.w fovfl_inex_on
+
+ fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+
+ unlk %a6
+#$# add.l &24,%sp
+
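+# note: the exception frame is converted to a trace frame in place here.
+# bit 7 of the SR word at (%sp) is the trace bit; if set, the frame's
+# format/vector word at 0x6(%sp) is rewritten to fmt 0x2/offset 0x024 and
+# the long at 0x8(%sp) becomes the "current PC" taken from the FPIAR.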
+ btst &0x7,(%sp) # is trace on?
+ beq.l _fpsp_done # no
+
+ fmov.l %fpiar,0x8(%sp) # "Current PC" is in FPIAR
+ mov.w &0x2024,0x6(%sp) # stk fmt = 0x2; voff = 0x024
+ bra.l _real_trace
+
+#########################################################################
+# XDEF **************************************************************** #
+# _fpsp_unfl(): 060FPSP entry point for FP Underflow exception. #
+# #
+# This handler should be the first code executed upon taking the #
+# FP Underflow exception in an operating system. #
+# #
+# XREF **************************************************************** #
+# _imem_read_long() - read instruction longword #
+# fix_skewed_ops() - adjust src operand in fsave frame #
+# set_tag_x() - determine optype of src/dst operands #
+# store_fpreg() - store opclass 0 or 2 result to FP regfile #
+# unnorm_fix() - change UNNORM operands to NORM or ZERO #
+# load_fpn2() - load dst operand from FP regfile #
+# fout() - emulate an opclass 3 instruction #
+#	tbl_unsupp - addr of table of emulation routines for opclass 0,2 #
+# _fpsp_done() - "callout" for 060FPSP exit (all work done!) #
+#	_real_unfl() - "callout" for Underflow exception enabled code	#
+# _real_inex() - "callout" for Inexact exception enabled code #
+# _real_trace() - "callout" for Trace exception code #
+# #
+# INPUT *************************************************************** #
+# - The system stack contains the FP Unfl exception stack frame #
+# - The fsave frame contains the source operand #
+# #
+# OUTPUT ************************************************************** #
+# Underflow Exception enabled: #
+# - The system stack is unchanged #
+# - The fsave frame contains the adjusted src op for opclass 0,2 #
+# Underflow Exception disabled: #
+# - The system stack is unchanged #
+# - The "exception present" flag in the fsave frame is cleared #
+# #
+# ALGORITHM *********************************************************** #
+# On the 060, if an FP underflow is present as the result of any #
+# instruction, the 060 will take an underflow exception whether the #
+# exception is enabled or disabled in the FPCR. For the disabled case, #
+# this handler emulates the instruction to determine what the correct #
+# default result should be for the operation. This default result is #
+# then stored in either the FP regfile, data regfile, or memory. #
+# Finally, the handler exits through the "callout" _fpsp_done() #
+# denoting that no exceptional conditions exist within the machine. #
+# If the exception is enabled, then this handler must create the #
+# exceptional operand and place it in the fsave state frame, and store #
+# the default result (only if the instruction is opclass 3). For #
+# exceptions enabled, this handler must exit through the "callout" #
+# _real_unfl() so that the operating system enabled underflow handler #
+# can handle this case. #
+# Two other conditions exist. First, if underflow was disabled #
+# but the inexact exception was enabled and the result was inexact, #
+# this handler must exit through the "callout" _real_inex(). #
+# Also, in the case of an opclass three instruction where #
+# underflow was disabled and the trace exception was enabled, this #
+# handler must exit through the "callout" _real_trace(). #
+# #
+#########################################################################
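+
+# as an informal summary of the flow below (same pseudocode style as the
+# overflow handler above; names in comments are labels, not routines):
+#
+# if (opclass == 3)                     /* fmove out; see funfl_out */
+#     emulate via fout();
+# else
+#     emulate via tbl_unsupp and store the default result;
+# if (FPCR underflow enabled && emulation says UNFL really occurred)
+#     exit through _real_unfl();        /* funfl_unfl_on */
+# else if (FPCR inexact enabled && emulation set INEX)
+#     exit through _real_inex();        /* funfl_inex_on */
+# else
+#     exit through _fpsp_done();        /* funfl_exit */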
+
+ global _fpsp_unfl
+_fpsp_unfl:
+
+#$# sub.l &24,%sp # make room for src/dst
+
+ link.w %a6,&-LOCAL_SIZE # init stack frame
+
+ fsave FP_SRC(%a6) # grab the "busy" frame
+
+ movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
+ fmovm.l %fpcr,%fpsr,%fpiar,USER_FPCR(%a6) # save ctrl regs
+ fmovm.x &0xc0,EXC_FPREGS(%a6) # save fp0-fp1 on stack
+
+# the FPIAR holds the "current PC" of the faulting instruction
+ mov.l USER_FPIAR(%a6),EXC_EXTWPTR(%a6)
+ mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
+ addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
+ bsr.l _imem_read_long # fetch the instruction words
+ mov.l %d0,EXC_OPWORD(%a6)
+
+##############################################################################
+
+ btst &0x5,EXC_CMDREG(%a6) # is instr an fmove out?
+ bne.w funfl_out
+
+
+ lea FP_SRC(%a6),%a0 # pass: ptr to src op
+ bsr.l fix_skewed_ops # fix src op
+
+ lea FP_SRC(%a6),%a0 # pass: ptr to src op
+ bsr.l set_tag_x # tag the operand type
+ mov.b %d0,STAG(%a6) # maybe NORM,DENORM
+
+# bit five of the fp ext word separates the monadic and dyadic operations
+# that can pass through fpsp_unfl(). remember that fcmp and ftst
+# will never take this exception.
+ btst &0x5,1+EXC_CMDREG(%a6) # is op monadic or dyadic?
+ beq.b funfl_extract # monadic
+
+# now, what's left that's not dyadic is fsincos. we can distinguish it
+# from all dyadics by the '0110xxx' pattern
+ btst &0x4,1+EXC_CMDREG(%a6) # is op an fsincos?
+ bne.b funfl_extract # yes
+
+ bfextu EXC_CMDREG(%a6){&6:&3},%d0 # dyadic; load dst reg
+ bsr.l load_fpn2 # load dst into FP_DST
+
+ lea FP_DST(%a6),%a0 # pass: ptr to dst op
+ bsr.l set_tag_x # tag the operand type
+ cmpi.b %d0,&UNNORM # is operand an UNNORM?
+ bne.b funfl_op2_done # no
+ bsr.l unnorm_fix # yes; convert to NORM,DENORM,or ZERO
+funfl_op2_done:
+ mov.b %d0,DTAG(%a6) # save dst optype tag
+
+funfl_extract:
+
+#$# mov.l FP_SRC_EX(%a6),TRAP_SRCOP_EX(%a6)
+#$# mov.l FP_SRC_HI(%a6),TRAP_SRCOP_HI(%a6)
+#$# mov.l FP_SRC_LO(%a6),TRAP_SRCOP_LO(%a6)
+#$# mov.l FP_DST_EX(%a6),TRAP_DSTOP_EX(%a6)
+#$# mov.l FP_DST_HI(%a6),TRAP_DSTOP_HI(%a6)
+#$# mov.l FP_DST_LO(%a6),TRAP_DSTOP_LO(%a6)
+
+ clr.l %d0
+ mov.b FPCR_MODE(%a6),%d0 # pass rnd prec/mode
+
+ mov.b 1+EXC_CMDREG(%a6),%d1
+ andi.w &0x007f,%d1 # extract extension
+
+	andi.l		&0x00ff01ff,USER_FPSR(%a6) # zero all but accrued field
+
+ fmov.l &0x0,%fpcr # zero current control regs
+ fmov.l &0x0,%fpsr
+
+ lea FP_SRC(%a6),%a0
+ lea FP_DST(%a6),%a1
+
+# maybe we can make these entry points ONLY the UNFL entry points of each routine.
+ mov.l (tbl_unsupp.l,%pc,%d1.w*4),%d1 # fetch routine addr
+ jsr (tbl_unsupp.l,%pc,%d1.l*1)
+
+ bfextu EXC_CMDREG(%a6){&6:&3},%d0
+ bsr.l store_fpreg
+
+# The `060 FPU multiplier hardware is such that if the result of a
+# multiply operation is the smallest possible normalized number
+# (0x00000000_80000000_00000000), then the machine will take an
+# underflow exception. Since this is incorrect, we need to check
+# if our emulation, after re-doing the operation, decided that
+# no underflow was called for. We do these checks only in
+# funfl_{unfl,inex}_on() because w/ both exceptions disabled, this
+# special case will simply exit gracefully with the correct result.
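+#
+# roughly (an informal sketch of the checks at funfl_unfl_on and
+# funfl_inex_on below):
+#
+# if (UNFL enabled in FPCR && !(UNFL set in FPSR by the emulation))
+#     fall through to the inexact check;  /* hw underflow was bogus */
+# if (INEX2 enabled in FPCR && !(INEX2 set in FPSR by the emulation))
+#     exit normally through funfl_exit;   /* result was exact */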
+
+# the exceptional possibilities we have left ourselves with are ONLY underflow
+# and inexact. and, the inexact is such that underflow occurred and was disabled
+# but inexact was enabled.
+ btst &unfl_bit,FPCR_ENABLE(%a6)
+ bne.b funfl_unfl_on
+
+funfl_chkinex:
+ btst &inex2_bit,FPCR_ENABLE(%a6)
+ bne.b funfl_inex_on
+
+funfl_exit:
+ fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+
+ unlk %a6
+#$# add.l &24,%sp
+ bra.l _fpsp_done
+
+# underflow is enabled AND underflow, of course, occurred. so, we have the EXOP
+# in fp1 (don't forget to save fp0). what to do now?
+# well, we simply have to go to _real_unfl()!
+funfl_unfl_on:
+
+# The `060 FPU multiplier hardware is such that if the result of a
+# multiply operation is the smallest possible normalized number
+# (0x00000000_80000000_00000000), then the machine will take an
+# underflow exception. Since this is incorrect, we check here to see
+# if our emulation, after re-doing the operation, decided that
+# no underflow was called for.
+ btst &unfl_bit,FPSR_EXCEPT(%a6)
+ beq.w funfl_chkinex
+
+funfl_unfl_on2:
+ fmovm.x &0x40,FP_SRC(%a6) # save EXOP (fp1) to stack
+
+ mov.w &0xe003,2+FP_SRC(%a6) # save exc status
+
+ fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+
+ frestore FP_SRC(%a6) # do this after fmovm,other f<op>s!
+
+ unlk %a6
+
+ bra.l _real_unfl
+
+# underflow occurred but is disabled. meanwhile, inexact is enabled. therefore,
+# we must jump to real_inex().
+funfl_inex_on:
+
+# The `060 FPU multiplier hardware is such that if the result of a
+# multiply operation is the smallest possible normalized number
+# (0x00000000_80000000_00000000), then the machine will take an
+# underflow exception.
+# But, whether bogus or not, if inexact is enabled AND it occurred,
+# then we have to branch to real_inex.
+
+ btst &inex2_bit,FPSR_EXCEPT(%a6)
+ beq.w funfl_exit
+
+funfl_inex_on2:
+
+ fmovm.x &0x40,FP_SRC(%a6) # save EXOP to stack
+
+ mov.b &0xc4,1+EXC_VOFF(%a6) # vector offset = 0xc4
+ mov.w &0xe001,2+FP_SRC(%a6) # save exc status
+
+ fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+
+ frestore FP_SRC(%a6) # do this after fmovm,other f<op>s!
+
+ unlk %a6
+
+ bra.l _real_inex
+
+#######################################################################
+funfl_out:
+
+
+#$# mov.l FP_SRC_EX(%a6),TRAP_SRCOP_EX(%a6)
+#$# mov.l FP_SRC_HI(%a6),TRAP_SRCOP_HI(%a6)
+#$# mov.l FP_SRC_LO(%a6),TRAP_SRCOP_LO(%a6)
+
+# the src operand is definitely a NORM(!), so tag it as such
+ mov.b &NORM,STAG(%a6) # set src optype tag
+
+ clr.l %d0
+ mov.b FPCR_MODE(%a6),%d0 # pass rnd prec/mode
+
+	and.l		&0xffff00ff,USER_FPSR(%a6) # zero all but accrued field
+
+ fmov.l &0x0,%fpcr # zero current control regs
+ fmov.l &0x0,%fpsr
+
+ lea FP_SRC(%a6),%a0 # pass ptr to src operand
+
+ bsr.l fout
+
+ btst &unfl_bit,FPCR_ENABLE(%a6)
+ bne.w funfl_unfl_on2
+
+ btst &inex2_bit,FPCR_ENABLE(%a6)
+ bne.w funfl_inex_on2
+
+ fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+
+ unlk %a6
+#$# add.l &24,%sp
+
+ btst &0x7,(%sp) # is trace on?
+ beq.l _fpsp_done # no
+
+ fmov.l %fpiar,0x8(%sp) # "Current PC" is in FPIAR
+ mov.w &0x2024,0x6(%sp) # stk fmt = 0x2; voff = 0x024
+ bra.l _real_trace
+
+#########################################################################
+# XDEF **************************************************************** #
+# _fpsp_unsupp(): 060FPSP entry point for FP "Unimplemented #
+# Data Type" exception. #
+# #
+# This handler should be the first code executed upon taking the #
+# FP Unimplemented Data Type exception in an operating system. #
+# #
+# XREF **************************************************************** #
+# _imem_read_{word,long}() - read instruction word/longword #
+# fix_skewed_ops() - adjust src operand in fsave frame #
+# set_tag_x() - determine optype of src/dst operands #
+# store_fpreg() - store opclass 0 or 2 result to FP regfile #
+# unnorm_fix() - change UNNORM operands to NORM or ZERO #
+# load_fpn2() - load dst operand from FP regfile #
+# load_fpn1() - load src operand from FP regfile #
+# fout() - emulate an opclass 3 instruction #
+#	tbl_unsupp - addr of table of emulation routines for opclass 0,2 #
+# _real_inex() - "callout" to operating system inexact handler #
+# _fpsp_done() - "callout" for exit; work all done #
+# _real_trace() - "callout" for Trace enabled exception #
+# funimp_skew() - adjust fsave src ops to "incorrect" value #
+# _real_snan() - "callout" for SNAN exception #
+# _real_operr() - "callout" for OPERR exception #
+# _real_ovfl() - "callout" for OVFL exception #
+# _real_unfl() - "callout" for UNFL exception #
+# get_packed() - fetch packed operand from memory #
+# #
+# INPUT *************************************************************** #
+# - The system stack contains the "Unimp Data Type" stk frame #
+# - The fsave frame contains the ssrc op (for UNNORM/DENORM) #
+# #
+# OUTPUT ************************************************************** #
+# If Inexact exception (opclass 3): #
+# - The system stack is changed to an Inexact exception stk frame #
+# If SNAN exception (opclass 3): #
+# - The system stack is changed to an SNAN exception stk frame #
+# If OPERR exception (opclass 3): #
+# - The system stack is changed to an OPERR exception stk frame #
+# If OVFL exception (opclass 3): #
+# - The system stack is changed to an OVFL exception stk frame #
+# If UNFL exception (opclass 3): #
+# - The system stack is changed to an UNFL exception stack frame #
+# If Trace exception enabled: #
+# - The system stack is changed to a Trace exception stack frame #
+# Else: (normal case) #
+# - Correct result has been stored as appropriate #
+# #
+# ALGORITHM *********************************************************** #
+# Two main instruction types can enter here: (1) instructions w/ #
+# DENORM or UNNORM (unimplemented data type) operands, which can be #
+# opclass 0,2, or 3, and (2) PACKED (unimplemented data format) #
+# instructions, also of opclass 0,2, or 3. #
+# For UNNORM/DENORM opclass 0 and 2, the handler fetches the src #
+# operand from the fsave state frame and the dst operand (if dyadic) #
+# from the FP register file. The instruction is then emulated by #
+# choosing an emulation routine from a table of routines indexed by #
+# instruction type. Once the instruction has been emulated and result #
+# saved, then we check to see if any enabled exceptions resulted from #
+# instruction emulation. If none, then we exit through the "callout" #
+# _fpsp_done(). If there is an enabled FP exception, then we insert #
+# this exception into the FPU in the fsave state frame and then exit #
+# through _fpsp_done(). #
+# PACKED opclasses 0 and 2 are similar in how the instruction is #
+# emulated and exceptions handled. The differences occur in how the #
+# handler loads the packed op (by calling get_packed() routine) and #
+# by the fact that a Trace exception could be pending for PACKED ops. #
+# If a Trace exception is pending, then the current exception stack #
+# frame is changed to a Trace exception stack frame and an exit is #
+# made through _real_trace(). #
+# For UNNORM/DENORM opclass 3, the actual move out to memory is #
+# performed by calling the routine fout(). If no exception should occur #
+# as the result of emulation, then an exit either occurs through #
+# _fpsp_done() or through _real_trace() if a Trace exception is pending #
+# (a Trace stack frame must be created here, too). If an FP exception #
+# should occur, then we must create an exception stack frame of that #
+# type and jump to either _real_snan(), _real_operr(), _real_inex(), #
+# _real_unfl(), or _real_ovfl() as appropriate. PACKED opclass 3 #
+# emulation is performed in a similar manner. #
+# #
+#########################################################################
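+
+# an informal sketch of the top-level dispatch below (pseudocode only;
+# the names in comments are labels within this handler):
+#
+# if (opclass == 3)               goto fmove-out path;   /* fu_out */
+# else if (src format == packed)  goto packed-in path;   /* fu_in_pack */
+# else   emulate the UNNORM/DENORM opclass 0,2 op in-line below;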
+
+#
+# (1) DENORM and UNNORM (unimplemented) data types:
+#
+# post-instruction
+# *****************
+# * EA *
+# pre-instruction * *
+# ***************** *****************
+# * 0x0 * 0x0dc * * 0x3 * 0x0dc *
+# ***************** *****************
+# * Next * * Next *
+# * PC * * PC *
+# ***************** *****************
+# * SR * * SR *
+# ***************** *****************
+#
+# (2) PACKED format (unsupported) opclasses two and three:
+# *****************
+# * EA *
+# * *
+# *****************
+# * 0x2 * 0x0dc *
+# *****************
+# * Next *
+# * PC *
+# *****************
+# * SR *
+# *****************
+#
+ global _fpsp_unsupp
+_fpsp_unsupp:
+
+ link.w %a6,&-LOCAL_SIZE # init stack frame
+
+ fsave FP_SRC(%a6) # save fp state
+
+ movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
+ fmovm.l %fpcr,%fpsr,%fpiar,USER_FPCR(%a6) # save ctrl regs
+ fmovm.x &0xc0,EXC_FPREGS(%a6) # save fp0-fp1 on stack
+
+ btst &0x5,EXC_SR(%a6) # user or supervisor mode?
+ bne.b fu_s
+fu_u:
+ mov.l %usp,%a0 # fetch user stack pointer
+ mov.l %a0,EXC_A7(%a6) # save on stack
+ bra.b fu_cont
+# if the exception is an opclass zero or two unimplemented data type
+# exception, then the a7' calculated here is wrong since it doesn't
+# stack an ea. however, we don't need an a7' for this case anyways.
+fu_s:
+ lea 0x4+EXC_EA(%a6),%a0 # load old a7'
+ mov.l %a0,EXC_A7(%a6) # save on stack
+
+fu_cont:
+
+# the FPIAR holds the "current PC" of the faulting instruction
+# the FPIAR should be set correctly for ALL exceptions passing through
+# this point.
+ mov.l USER_FPIAR(%a6),EXC_EXTWPTR(%a6)
+ mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
+ addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
+ bsr.l _imem_read_long # fetch the instruction words
+ mov.l %d0,EXC_OPWORD(%a6) # store OPWORD and EXTWORD
+
+############################
+
+ clr.b SPCOND_FLG(%a6) # clear special condition flag
+
+# Separate opclass three (fpn-to-mem) ops since they have a different
+# stack frame and protocol.
+ btst &0x5,EXC_CMDREG(%a6) # is it an fmove out?
+ bne.w fu_out # yes
+
+# Separate packed opclass two instructions.
+ bfextu EXC_CMDREG(%a6){&0:&6},%d0
+ cmpi.b %d0,&0x13
+ beq.w fu_in_pack
+
+
+# I'm not sure at this point what FPSR bits are valid for this instruction.
+# so, since the emulation routines re-create them anyways, zero exception field
+ andi.l &0x00ff00ff,USER_FPSR(%a6) # zero exception field
+
+ fmov.l &0x0,%fpcr # zero current control regs
+ fmov.l &0x0,%fpsr
+
+# Opclass two w/ memory-to-fpn operation will have an incorrect extended
+# precision format if the src format was single or double and the
+# source data type was an INF, NAN, DENORM, or UNNORM
+ lea FP_SRC(%a6),%a0 # pass ptr to input
+ bsr.l fix_skewed_ops
+
+# we don't know whether the src operand or the dst operand (or both) is the
+# UNNORM or DENORM. call the function that tags the operand type. if the
+# input is an UNNORM, then convert it to a NORM, DENORM, or ZERO.
+ lea FP_SRC(%a6),%a0 # pass: ptr to src op
+ bsr.l set_tag_x # tag the operand type
+ cmpi.b %d0,&UNNORM # is operand an UNNORM?
+ bne.b fu_op2 # no
+ bsr.l unnorm_fix # yes; convert to NORM,DENORM,or ZERO
+
+fu_op2:
+ mov.b %d0,STAG(%a6) # save src optype tag
+
+ bfextu EXC_CMDREG(%a6){&6:&3},%d0 # dyadic; load dst reg
+
+# bit five of the fp extension word separates the monadic and dyadic operations
+# at this point
+ btst &0x5,1+EXC_CMDREG(%a6) # is operation monadic or dyadic?
+ beq.b fu_extract # monadic
+ cmpi.b 1+EXC_CMDREG(%a6),&0x3a # is operation an ftst?
+ beq.b fu_extract # yes, so it's monadic, too
+
+ bsr.l load_fpn2 # load dst into FP_DST
+
+ lea FP_DST(%a6),%a0 # pass: ptr to dst op
+ bsr.l set_tag_x # tag the operand type
+ cmpi.b %d0,&UNNORM # is operand an UNNORM?
+ bne.b fu_op2_done # no
+ bsr.l unnorm_fix # yes; convert to NORM,DENORM,or ZERO
+fu_op2_done:
+ mov.b %d0,DTAG(%a6) # save dst optype tag
+
+fu_extract:
+ clr.l %d0
+ mov.b FPCR_MODE(%a6),%d0 # fetch rnd mode/prec
+
+ bfextu 1+EXC_CMDREG(%a6){&1:&7},%d1 # extract extension
+
+ lea FP_SRC(%a6),%a0
+ lea FP_DST(%a6),%a1
+
+ mov.l (tbl_unsupp.l,%pc,%d1.l*4),%d1 # fetch routine addr
+ jsr (tbl_unsupp.l,%pc,%d1.l*1)
+
+#
+# Exceptions in order of precedence:
+# BSUN : none
+# SNAN : all dyadic ops
+# OPERR : fsqrt(-NORM)
+# OVFL : all except ftst,fcmp
+# UNFL : all except ftst,fcmp
+# DZ : fdiv
+# INEX2 : all except ftst,fcmp
+# INEX1 : none (packed doesn't go through here)
+#
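+# a rough sketch of the priority search that follows: the low byte of
+# FPCR_ENABLE is ANDed with FPSR_EXCEPT, then bfffo scans that byte from
+# its msb down (bit offsets 24-31 of d0), so the first set bit found is
+# the highest-priority enabled exception: 24=BSUN, 25=SNAN, 26=OPERR,
+# 27=OVFL, 28=UNFL, 29=DZ, 30=INEX2, 31=INEX1. the later "subi.l &24,%d0"
+# maps this to 0-7 for indexing tbl_except.
+#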
+
+# we determine the highest priority exception (if any) set by the
+# emulation routine that has also been enabled by the user.
+	mov.b		FPCR_ENABLE(%a6),%d0 # fetch exceptions enabled
+ bne.b fu_in_ena # some are enabled
+
+fu_in_cont:
+# fcmp and ftst do not store any result.
+ mov.b 1+EXC_CMDREG(%a6),%d0 # fetch extension
+ andi.b &0x38,%d0 # extract bits 3-5
+ cmpi.b %d0,&0x38 # is instr fcmp or ftst?
+ beq.b fu_in_exit # yes
+
+ bfextu EXC_CMDREG(%a6){&6:&3},%d0 # dyadic; load dst reg
+ bsr.l store_fpreg # store the result
+
+fu_in_exit:
+
+ fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0/fp1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+
+ unlk %a6
+
+ bra.l _fpsp_done
+
+fu_in_ena:
+ and.b FPSR_EXCEPT(%a6),%d0 # keep only ones enabled
+ bfffo %d0{&24:&8},%d0 # find highest priority exception
+ bne.b fu_in_exc # there is at least one set
+
+#
+# No exceptions occurred that were also enabled. Now:
+#
+# if (OVFL && ovfl_disabled && inexact_enabled) {
+# branch to _real_inex() (even if the result was exact!);
+# } else {
+# save the result in the proper fp reg (unless the op is fcmp or ftst);
+# return;
+# }
+#
+ btst &ovfl_bit,FPSR_EXCEPT(%a6) # was overflow set?
+ beq.b fu_in_cont # no
+
+fu_in_ovflchk:
+ btst &inex2_bit,FPCR_ENABLE(%a6) # was inexact enabled?
+ beq.b fu_in_cont # no
+ bra.w fu_in_exc_ovfl # go insert overflow frame
+
+#
+# An exception occurred and that exception was enabled:
+#
+# shift enabled exception field into lo byte of d0;
+# if (((INEX2 || INEX1) && inex_enabled && OVFL && ovfl_disabled) ||
+# ((INEX2 || INEX1) && inex_enabled && UNFL && unfl_disabled)) {
+# /*
+# * this is the case where we must call _real_inex() now or else
+# * there will be no other way to pass it the exceptional operand
+# */
+# call _real_inex();
+# } else {
+# restore exc state (SNAN||OPERR||OVFL||UNFL||DZ||INEX) into the FPU;
+# }
+#
+fu_in_exc:
+	subi.l		&24,%d0		# fix offset to be 0-7
+ cmpi.b %d0,&0x6 # is exception INEX? (6)
+ bne.b fu_in_exc_exit # no
+
+# the enabled exception was inexact
+ btst &unfl_bit,FPSR_EXCEPT(%a6) # did disabled underflow occur?
+ bne.w fu_in_exc_unfl # yes
+ btst &ovfl_bit,FPSR_EXCEPT(%a6) # did disabled overflow occur?
+ bne.w fu_in_exc_ovfl # yes
+
+# here, we insert the correct fsave status value into the fsave frame for the
+# corresponding exception. the operand in the fsave frame should be the original
+# src operand.
+fu_in_exc_exit:
+ mov.l %d0,-(%sp) # save d0
+ bsr.l funimp_skew # skew sgl or dbl inputs
+ mov.l (%sp)+,%d0 # restore d0
+
+ mov.w (tbl_except.b,%pc,%d0.w*2),2+FP_SRC(%a6) # create exc status
+
+ fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0/fp1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+
+ frestore FP_SRC(%a6) # restore src op
+
+ unlk %a6
+
+ bra.l _fpsp_done
+
+tbl_except:
+ short 0xe000,0xe006,0xe004,0xe005
+ short 0xe003,0xe002,0xe001,0xe001
+
+fu_in_exc_unfl:
+ mov.w &0x4,%d0
+ bra.b fu_in_exc_exit
+fu_in_exc_ovfl:
+ mov.w &0x03,%d0
+ bra.b fu_in_exc_exit
+
+# If the input operand to this operation was opclass two and a single
+# or double precision denorm, inf, or nan, the operand needs to be
+# "corrected" in order to have the proper equivalent extended precision
+# number.
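+# informally, the fix-ups below map as follows (constants taken from the
+# code; a "skewed" exponent is one still carrying the sgl/dbl bias):
+#   sgl: |exp| == 0x3f80 -> denorm/zero; renormalize w/ bias 0x3f81
+#        |exp| == 0x407f -> inf/nan;     force exponent to 0x7fff
+#   dbl: |exp| == 0x3c00 -> denorm/zero; renormalize w/ bias 0x3c01
+#        |exp| == 0x43ff -> inf/nan;     force exponent to 0x7fff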
+ global fix_skewed_ops
+fix_skewed_ops:
+ bfextu EXC_CMDREG(%a6){&0:&6},%d0 # extract opclass,src fmt
+ cmpi.b %d0,&0x11 # is class = 2 & fmt = sgl?
+ beq.b fso_sgl # yes
+ cmpi.b %d0,&0x15 # is class = 2 & fmt = dbl?
+ beq.b fso_dbl # yes
+ rts # no
+
+fso_sgl:
+ mov.w LOCAL_EX(%a0),%d0 # fetch src exponent
+ andi.w &0x7fff,%d0 # strip sign
+ cmpi.w %d0,&0x3f80 # is |exp| == $3f80?
+ beq.b fso_sgl_dnrm_zero # yes
+ cmpi.w %d0,&0x407f # no; is |exp| == $407f?
+ beq.b fso_infnan # yes
+ rts # no
+
+fso_sgl_dnrm_zero:
+ andi.l &0x7fffffff,LOCAL_HI(%a0) # clear j-bit
+ beq.b fso_zero # it's a skewed zero
+fso_sgl_dnrm:
+# here, we count on norm not to alter a0...
+ bsr.l norm # normalize mantissa
+ neg.w %d0 # -shft amt
+ addi.w &0x3f81,%d0 # adjust new exponent
+ andi.w &0x8000,LOCAL_EX(%a0) # clear old exponent
+ or.w %d0,LOCAL_EX(%a0) # insert new exponent
+ rts
+
+fso_zero:
+ andi.w &0x8000,LOCAL_EX(%a0) # clear bogus exponent
+ rts
+
+fso_infnan:
+ andi.b &0x7f,LOCAL_HI(%a0) # clear j-bit
+ ori.w &0x7fff,LOCAL_EX(%a0) # make exponent = $7fff
+ rts
+
+fso_dbl:
+ mov.w LOCAL_EX(%a0),%d0 # fetch src exponent
+ andi.w &0x7fff,%d0 # strip sign
+ cmpi.w %d0,&0x3c00 # is |exp| == $3c00?
+ beq.b fso_dbl_dnrm_zero # yes
+ cmpi.w %d0,&0x43ff # no; is |exp| == $43ff?
+ beq.b fso_infnan # yes
+ rts # no
+
+fso_dbl_dnrm_zero:
+ andi.l &0x7fffffff,LOCAL_HI(%a0) # clear j-bit
+ bne.b fso_dbl_dnrm # it's a skewed denorm
+ tst.l LOCAL_LO(%a0) # is it a zero?
+ beq.b fso_zero # yes
+fso_dbl_dnrm:
+# here, we count on norm not to alter a0...
+ bsr.l norm # normalize mantissa
+ neg.w %d0 # -shft amt
+ addi.w &0x3c01,%d0 # adjust new exponent
+ andi.w &0x8000,LOCAL_EX(%a0) # clear old exponent
+ or.w %d0,LOCAL_EX(%a0) # insert new exponent
+ rts
+
+#################################################################
+
+# fmove out took an unimplemented data type exception.
+# the src operand is in FP_SRC. Call _fout() to write out the result and
+# to determine which exceptions, if any, to take.
+fu_out:
+
+# Separate packed move outs from the UNNORM and DENORM move outs.
+ bfextu EXC_CMDREG(%a6){&3:&3},%d0
+ cmpi.b %d0,&0x3
+ beq.w fu_out_pack
+ cmpi.b %d0,&0x7
+ beq.w fu_out_pack
+
+
+# I'm not sure at this point what FPSR bits are valid for this instruction.
+# so, since the emulation routines re-create them anyways, zero exception field.
+# fmove out doesn't affect ccodes.
+ and.l &0xffff00ff,USER_FPSR(%a6) # zero exception field
+
+ fmov.l &0x0,%fpcr # zero current control regs
+ fmov.l &0x0,%fpsr
+
+# the src can ONLY be a DENORM or an UNNORM! so, don't make any big subroutine
+# call here. just figure out what it is...
+ mov.w FP_SRC_EX(%a6),%d0 # get exponent
+ andi.w &0x7fff,%d0 # strip sign
+ beq.b fu_out_denorm # it's a DENORM
+
+ lea FP_SRC(%a6),%a0
+ bsr.l unnorm_fix # yes; fix it
+
+ mov.b %d0,STAG(%a6)
+
+ bra.b fu_out_cont
+fu_out_denorm:
+ mov.b &DENORM,STAG(%a6)
+fu_out_cont:
+
+ clr.l %d0
+ mov.b FPCR_MODE(%a6),%d0 # fetch rnd mode/prec
+
+ lea FP_SRC(%a6),%a0 # pass ptr to src operand
+
+ mov.l (%a6),EXC_A6(%a6) # in case a6 changes
+ bsr.l fout # call fmove out routine
+
+# Exceptions in order of precedence:
+# BSUN : none
+# SNAN : none
+# OPERR : fmove.{b,w,l} out of large UNNORM
+# OVFL : fmove.{s,d}
+# UNFL : fmove.{s,d,x}
+# DZ : none
+# INEX2 : all
+# INEX1 : none (packed doesn't travel through here)
+
+# determine the highest priority exception (if any) set by the
+# emulation routine that has also been enabled by the user.
+ mov.b FPCR_ENABLE(%a6),%d0 # fetch exceptions enabled
+ bne.w fu_out_ena # some are enabled
+
+fu_out_done:
+
+ mov.l EXC_A6(%a6),(%a6) # in case a6 changed
+
+# on extended precision opclass three instructions using pre-decrement or
+# post-increment addressing mode, the address register is not updated. is the
+# address register was the stack pointer used from user mode, then let's update
+# it here. if it was used from supervisor mode, then we have to handle this
+# as a special case.
+ btst &0x5,EXC_SR(%a6)
+ bne.b fu_out_done_s
+
+ mov.l EXC_A7(%a6),%a0 # restore a7
+ mov.l %a0,%usp
+
+fu_out_done_cont:
+ fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0/fp1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+
+ unlk %a6
+
+ btst &0x7,(%sp) # is trace on?
+ bne.b fu_out_trace # yes
+
+ bra.l _fpsp_done
+
+# is the ea mode pre-decrement of the stack pointer from supervisor mode?
+# ("fmov.x fpm,-(a7)") if so,
+fu_out_done_s:
+ cmpi.b SPCOND_FLG(%a6),&mda7_flg
+ bne.b fu_out_done_cont
+
+# the extended precision result is still in fp0. but, we need to save it
+# somewhere on the stack until we can copy it to its final resting place.
+# here, we're counting on the top of the stack to be the old place-holders
+# for fp0/fp1 which have already been restored. that way, we can write
+# over those destinations with the shifted stack frame.
+ fmovm.x &0x80,FP_SRC(%a6) # put answer on stack
+
+ fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0/fp1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+
+ mov.l (%a6),%a6 # restore frame pointer
+
+ mov.l LOCAL_SIZE+EXC_SR(%sp),LOCAL_SIZE+EXC_SR-0xc(%sp)
+ mov.l LOCAL_SIZE+2+EXC_PC(%sp),LOCAL_SIZE+2+EXC_PC-0xc(%sp)
+
+# now, copy the result to the proper place on the stack
+ mov.l LOCAL_SIZE+FP_SRC_EX(%sp),LOCAL_SIZE+EXC_SR+0x0(%sp)
+ mov.l LOCAL_SIZE+FP_SRC_HI(%sp),LOCAL_SIZE+EXC_SR+0x4(%sp)
+ mov.l LOCAL_SIZE+FP_SRC_LO(%sp),LOCAL_SIZE+EXC_SR+0x8(%sp)
+
+ add.l &LOCAL_SIZE-0x8,%sp
+
+ btst &0x7,(%sp)
+ bne.b fu_out_trace
+
+ bra.l _fpsp_done
+
+fu_out_ena:
+ and.b FPSR_EXCEPT(%a6),%d0 # keep only ones enabled
+ bfffo %d0{&24:&8},%d0 # find highest priority exception
+ bne.b fu_out_exc # there is at least one set
+
+# no exceptions were set.
+# if a disabled overflow occurred and inexact was enabled, then a branch
+# to _real_inex() is made (even if the result was exact).
+ btst &ovfl_bit,FPSR_EXCEPT(%a6) # was overflow set?
+ beq.w fu_out_done # no
+
+fu_out_ovflchk:
+ btst &inex2_bit,FPCR_ENABLE(%a6) # was inexact enabled?
+ beq.w fu_out_done # no
+ bra.w fu_inex # yes
+
+#
+# The fp move out that took the "Unimplemented Data Type" exception was
+# being traced. Since the stack frames are similar, get the "current" PC
+# from FPIAR and put it in the trace stack frame then jump to _real_trace().
+#
+# UNSUPP FRAME TRACE FRAME
+# ***************** *****************
+# * EA * * Current *
+# * * * PC *
+# ***************** *****************
+# * 0x3 * 0x0dc * * 0x2 * 0x024 *
+# ***************** *****************
+# * Next * * Next *
+# * PC * * PC *
+# ***************** *****************
+# * SR * * SR *
+# ***************** *****************
+#
+fu_out_trace:
+ mov.w &0x2024,0x6(%sp)
+ fmov.l %fpiar,0x8(%sp)
+ bra.l _real_trace
+
+# an exception occurred and that exception was enabled.
+fu_out_exc:
+	subi.l		&24,%d0		# fix offset to be 0-7
+
+# we don't mess with the existing fsave frame. just re-insert it and
+# jump to the "_real_{}()" handler...
+ mov.w (tbl_fu_out.b,%pc,%d0.w*2),%d0
+ jmp (tbl_fu_out.b,%pc,%d0.w*1)
+
+ swbeg &0x8
+tbl_fu_out:
+ short tbl_fu_out - tbl_fu_out # BSUN can't happen
+ short tbl_fu_out - tbl_fu_out # SNAN can't happen
+ short fu_operr - tbl_fu_out # OPERR
+ short fu_ovfl - tbl_fu_out # OVFL
+ short fu_unfl - tbl_fu_out # UNFL
+ short tbl_fu_out - tbl_fu_out # DZ can't happen
+ short fu_inex - tbl_fu_out # INEX2
+ short tbl_fu_out - tbl_fu_out # INEX1 won't make it here
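+
+# the table above is a pc-relative dispatch: each entry is a 16-bit
+# offset from tbl_fu_out, fetched by exception number (0-7) and then
+# jumped through at fu_out_exc. zero entries ("tbl_fu_out - tbl_fu_out")
+# mark exceptions that can't reach this path. roughly:
+#
+#   off = tbl_fu_out[d0];       /* 16-bit offset */
+#   goto tbl_fu_out + off;      /* fu_operr, fu_ovfl, fu_unfl, or fu_inex */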
+
+# for snan,operr,ovfl,unfl, src op is still in FP_SRC so just
+# frestore it.
+fu_snan:
+ fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0/fp1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+
+ mov.w &0x30d8,EXC_VOFF(%a6) # vector offset = 0xd8
+ mov.w &0xe006,2+FP_SRC(%a6)
+
+ frestore FP_SRC(%a6)
+
+ unlk %a6
+
+
+ bra.l _real_snan
+
+fu_operr:
+ fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0/fp1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+
+ mov.w &0x30d0,EXC_VOFF(%a6) # vector offset = 0xd0
+ mov.w &0xe004,2+FP_SRC(%a6)
+
+ frestore FP_SRC(%a6)
+
+ unlk %a6
+
+
+ bra.l _real_operr
+
+fu_ovfl:
+ fmovm.x &0x40,FP_SRC(%a6) # save EXOP to the stack
+
+ fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0/fp1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+
+ mov.w &0x30d4,EXC_VOFF(%a6) # vector offset = 0xd4
+ mov.w &0xe005,2+FP_SRC(%a6)
+
+ frestore FP_SRC(%a6) # restore EXOP
+
+ unlk %a6
+
+ bra.l _real_ovfl
+
+# underflow can happen for extended precision. extended precision opclass
+# three instruction exceptions don't update the stack pointer. so, if the
+# exception occurred from user mode, then simply update a7 and exit normally.
+# if the exception occurred from supervisor mode, check if the <ea> mode
+# was -(a7); that case must shift the exception frame and is handled at
+# fu_unfl_s below.
+fu_unfl:
+ mov.l EXC_A6(%a6),(%a6) # restore a6
+
+ btst &0x5,EXC_SR(%a6)
+ bne.w fu_unfl_s
+
+ mov.l EXC_A7(%a6),%a0 # restore a7 whether we need
+ mov.l %a0,%usp # to or not...
+
+fu_unfl_cont:
+ fmovm.x &0x40,FP_SRC(%a6) # save EXOP to the stack
+
+ fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0/fp1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+
+ mov.w &0x30cc,EXC_VOFF(%a6) # vector offset = 0xcc
+ mov.w &0xe003,2+FP_SRC(%a6)
+
+ frestore FP_SRC(%a6) # restore EXOP
+
+ unlk %a6
+
+ bra.l _real_unfl
+
+fu_unfl_s:
+ cmpi.b SPCOND_FLG(%a6),&mda7_flg # was the <ea> mode -(sp)?
+ bne.b fu_unfl_cont
+
+# the extended precision result is still in fp0. but, we need to save it
+# somewhere on the stack until we can copy it to its final resting place
+# (where the exc frame is currently). make sure it's not at the top of the
+# frame or it will get overwritten when the exc stack frame is shifted "down".
+ fmovm.x &0x80,FP_SRC(%a6) # put answer on stack
+ fmovm.x &0x40,FP_DST(%a6) # put EXOP on stack
+
+ fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0/fp1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+
+ mov.w &0x30cc,EXC_VOFF(%a6) # vector offset = 0xcc
+ mov.w &0xe003,2+FP_DST(%a6)
+
+ frestore FP_DST(%a6) # restore EXOP
+
+ mov.l (%a6),%a6 # restore frame pointer
+
+ mov.l LOCAL_SIZE+EXC_SR(%sp),LOCAL_SIZE+EXC_SR-0xc(%sp)
+ mov.l LOCAL_SIZE+2+EXC_PC(%sp),LOCAL_SIZE+2+EXC_PC-0xc(%sp)
+ mov.l LOCAL_SIZE+EXC_EA(%sp),LOCAL_SIZE+EXC_EA-0xc(%sp)
+
+# now, copy the result to the proper place on the stack
+ mov.l LOCAL_SIZE+FP_SRC_EX(%sp),LOCAL_SIZE+EXC_SR+0x0(%sp)
+ mov.l LOCAL_SIZE+FP_SRC_HI(%sp),LOCAL_SIZE+EXC_SR+0x4(%sp)
+ mov.l LOCAL_SIZE+FP_SRC_LO(%sp),LOCAL_SIZE+EXC_SR+0x8(%sp)
+
+ add.l &LOCAL_SIZE-0x8,%sp
+
+ bra.l _real_unfl
+
+# fmove in and out enter here.
+fu_inex:
+ fmovm.x &0x40,FP_SRC(%a6) # save EXOP to the stack
+
+ fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0/fp1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+
+ mov.w &0x30c4,EXC_VOFF(%a6) # vector offset = 0xc4
+ mov.w &0xe001,2+FP_SRC(%a6)
+
+ frestore FP_SRC(%a6) # restore EXOP
+
+ unlk %a6
+
+
+ bra.l _real_inex
+
+#########################################################################
+#########################################################################
+fu_in_pack:
+
+
+# I'm not sure at this point what FPSR bits are valid for this instruction.
+# so, since the emulation routines re-create them anyways, zero exception field
+ andi.l &0x0ff00ff,USER_FPSR(%a6) # zero exception field
+
+ fmov.l &0x0,%fpcr # zero current control regs
+ fmov.l &0x0,%fpsr
+
+ bsr.l get_packed # fetch packed src operand
+
+ lea FP_SRC(%a6),%a0 # pass ptr to src
+ bsr.l set_tag_x # set src optype tag
+
+ mov.b %d0,STAG(%a6) # save src optype tag
+
+ bfextu EXC_CMDREG(%a6){&6:&3},%d0 # dyadic; load dst reg
+
+# bit five of the fp extension word separates the monadic and dyadic operations
+# at this point
+ btst &0x5,1+EXC_CMDREG(%a6) # is operation monadic or dyadic?
+ beq.b fu_extract_p # monadic
+ cmpi.b 1+EXC_CMDREG(%a6),&0x3a # is operation an ftst?
+ beq.b fu_extract_p # yes, so it's monadic, too
+
+ bsr.l load_fpn2 # load dst into FP_DST
+
+ lea FP_DST(%a6),%a0 # pass: ptr to dst op
+ bsr.l set_tag_x # tag the operand type
+ cmpi.b %d0,&UNNORM # is operand an UNNORM?
+ bne.b fu_op2_done_p # no
+ bsr.l unnorm_fix # yes; convert to NORM,DENORM,or ZERO
+fu_op2_done_p:
+ mov.b %d0,DTAG(%a6) # save dst optype tag
+
+fu_extract_p:
+ clr.l %d0
+ mov.b FPCR_MODE(%a6),%d0 # fetch rnd mode/prec
+
+ bfextu 1+EXC_CMDREG(%a6){&1:&7},%d1 # extract extension
+
+ lea FP_SRC(%a6),%a0
+ lea FP_DST(%a6),%a1
+
+ mov.l (tbl_unsupp.l,%pc,%d1.l*4),%d1 # fetch routine addr
+ jsr (tbl_unsupp.l,%pc,%d1.l*1)
+
+#
+# Exceptions in order of precedence:
+# BSUN : none
+# SNAN : all dyadic ops
+# OPERR : fsqrt(-NORM)
+# OVFL : all except ftst,fcmp
+# UNFL : all except ftst,fcmp
+# DZ : fdiv
+# INEX2 : all except ftst,fcmp
+# INEX1 : all
+#
+
+# we determine the highest priority exception (if any) set by the
+# emulation routine that has also been enabled by the user.
+ mov.b FPCR_ENABLE(%a6),%d0 # fetch exceptions enabled
+ bne.w fu_in_ena_p # some are enabled
+
+fu_in_cont_p:
+# fcmp and ftst do not store any result.
+ mov.b 1+EXC_CMDREG(%a6),%d0 # fetch extension
+ andi.b &0x38,%d0 # extract bits 3-5
+ cmpi.b %d0,&0x38 # is instr fcmp or ftst?
+ beq.b fu_in_exit_p # yes
+
+ bfextu EXC_CMDREG(%a6){&6:&3},%d0 # dyadic; load dst reg
+ bsr.l store_fpreg # store the result
+
+fu_in_exit_p:
+
+ btst &0x5,EXC_SR(%a6) # user or supervisor?
+ bne.w fu_in_exit_s_p # supervisor
+
+ mov.l EXC_A7(%a6),%a0 # update user a7
+ mov.l %a0,%usp
+
+fu_in_exit_cont_p:
+ fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0/fp1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+
+ unlk %a6 # unravel stack frame
+
+ btst &0x7,(%sp) # is trace on?
+ bne.w fu_trace_p # yes
+
+ bra.l _fpsp_done # exit to os
+
+# the exception occurred in supervisor mode. check to see if the
+# addressing mode was (a7)+. if so, we'll need to shift the
+# stack frame "up".
+fu_in_exit_s_p:
+ btst &mia7_bit,SPCOND_FLG(%a6) # was ea mode (a7)+
+ beq.b fu_in_exit_cont_p # no
+
+ fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0/fp1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+
+ unlk %a6 # unravel stack frame
+
+# shift the stack frame "up". we don't really care about the <ea> field.
+ mov.l 0x4(%sp),0x10(%sp)
+ mov.l 0x0(%sp),0xc(%sp)
+ add.l &0xc,%sp
+
+ btst &0x7,(%sp) # is trace on?
+ bne.w fu_trace_p # yes
+
+ bra.l _fpsp_done # exit to os
+
+fu_in_ena_p:
+ and.b FPSR_EXCEPT(%a6),%d0 # keep only ones enabled & set
+ bfffo %d0{&24:&8},%d0 # find highest priority exception
+ bne.b fu_in_exc_p # at least one was set
+
+#
+# No exceptions occurred that were also enabled. Now:
+#
+# if (OVFL && ovfl_disabled && inexact_enabled) {
+# branch to _real_inex() (even if the result was exact!);
+# } else {
+# save the result in the proper fp reg (unless the op is fcmp or ftst);
+# return;
+# }
+#
+ btst &ovfl_bit,FPSR_EXCEPT(%a6) # was overflow set?
+ beq.w fu_in_cont_p # no
+
+fu_in_ovflchk_p:
+ btst &inex2_bit,FPCR_ENABLE(%a6) # was inexact enabled?
+ beq.w fu_in_cont_p # no
+ bra.w fu_in_exc_ovfl_p # do _real_inex() now
+
+#
+# An exception occurred and that exception was enabled:
+#
+# shift enabled exception field into lo byte of d0;
+# if (((INEX2 || INEX1) && inex_enabled && OVFL && ovfl_disabled) ||
+# ((INEX2 || INEX1) && inex_enabled && UNFL && unfl_disabled)) {
+# /*
+# * this is the case where we must call _real_inex() now or else
+# * there will be no other way to pass it the exceptional operand
+# */
+# call _real_inex();
+# } else {
+# restore exc state (SNAN||OPERR||OVFL||UNFL||DZ||INEX) into the FPU;
+# }
+#
+fu_in_exc_p:
+	subi.l		&24,%d0		# fix offset to be 0-7
+ cmpi.b %d0,&0x6 # is exception INEX? (6 or 7)
+ blt.b fu_in_exc_exit_p # no
+
+# the enabled exception was inexact
+ btst &unfl_bit,FPSR_EXCEPT(%a6) # did disabled underflow occur?
+ bne.w fu_in_exc_unfl_p # yes
+ btst &ovfl_bit,FPSR_EXCEPT(%a6) # did disabled overflow occur?
+ bne.w fu_in_exc_ovfl_p # yes
+
+# here, we insert the correct fsave status value into the fsave frame for the
+# corresponding exception. the operand in the fsave frame should be the original
+# src operand.
+# as a reminder for future predicted pain and agony, we are passing in fsave the
+# "non-skewed" operand for cases of sgl and dbl src INFs,NANs, and DENORMs.
+# this is INCORRECT for enabled SNAN which would give to the user the skewed SNAN!!!
+fu_in_exc_exit_p:
+ btst &0x5,EXC_SR(%a6) # user or supervisor?
+ bne.w fu_in_exc_exit_s_p # supervisor
+
+ mov.l EXC_A7(%a6),%a0 # update user a7
+ mov.l %a0,%usp
+
+fu_in_exc_exit_cont_p:
+ mov.w (tbl_except_p.b,%pc,%d0.w*2),2+FP_SRC(%a6)
+
+ fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0/fp1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+
+ frestore FP_SRC(%a6) # restore src op
+
+ unlk %a6
+
+ btst &0x7,(%sp) # is trace enabled?
+ bne.w fu_trace_p # yes
+
+ bra.l _fpsp_done
+
+tbl_except_p:
+ short 0xe000,0xe006,0xe004,0xe005
+ short 0xe003,0xe002,0xe001,0xe001
+
+fu_in_exc_ovfl_p:
+ mov.w &0x3,%d0
+ bra.w fu_in_exc_exit_p
+
+fu_in_exc_unfl_p:
+ mov.w &0x4,%d0
+ bra.w fu_in_exc_exit_p
+
+fu_in_exc_exit_s_p:
+ btst &mia7_bit,SPCOND_FLG(%a6)
+ beq.b fu_in_exc_exit_cont_p
+
+ mov.w (tbl_except_p.b,%pc,%d0.w*2),2+FP_SRC(%a6)
+
+ fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0/fp1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+
+ frestore FP_SRC(%a6) # restore src op
+
+ unlk %a6 # unravel stack frame
+
+# shift stack frame "up". who cares about <ea> field.
+ mov.l 0x4(%sp),0x10(%sp)
+ mov.l 0x0(%sp),0xc(%sp)
+ add.l &0xc,%sp
+
+ btst &0x7,(%sp) # is trace on?
+ bne.b fu_trace_p # yes
+
+ bra.l _fpsp_done # exit to os
+
+#
+# The opclass two PACKED instruction that took an "Unimplemented Data Type"
+# exception was being traced. Take the "current" PC from the FPIAR, put it in the
+# trace stack frame then jump to _real_trace().
+#
+# UNSUPP FRAME TRACE FRAME
+# ***************** *****************
+# * EA * * Current *
+# * * * PC *
+# ***************** *****************
+# * 0x2 * 0x0dc * * 0x2 * 0x024 *
+# ***************** *****************
+# * Next * * Next *
+# * PC * * PC *
+# ***************** *****************
+# * SR * * SR *
+# ***************** *****************
+fu_trace_p:
+ mov.w &0x2024,0x6(%sp)
+ fmov.l %fpiar,0x8(%sp)
+
+ bra.l _real_trace
+
+#########################################################
+#########################################################
+fu_out_pack:
+
+
+# I'm not sure at this point what FPSR bits are valid for this instruction.
+# so, since the emulation routines re-create them anyways, zero exception field.
+# fmove out doesn't affect ccodes.
+ and.l &0xffff00ff,USER_FPSR(%a6) # zero exception field
+
+ fmov.l &0x0,%fpcr # zero current control regs
+ fmov.l &0x0,%fpsr
+
+ bfextu EXC_CMDREG(%a6){&6:&3},%d0
+ bsr.l load_fpn1
+
+# unlike other opclass 3 unimplemented data type exceptions, packed must be
+# able to detect all operand types.
+ lea FP_SRC(%a6),%a0
+ bsr.l set_tag_x # tag the operand type
+ cmpi.b %d0,&UNNORM # is operand an UNNORM?
+ bne.b fu_op2_p # no
+ bsr.l unnorm_fix # yes; convert to NORM,DENORM,or ZERO
+
+fu_op2_p:
+ mov.b %d0,STAG(%a6) # save src optype tag
+
+ clr.l %d0
+ mov.b FPCR_MODE(%a6),%d0 # fetch rnd mode/prec
+
+ lea FP_SRC(%a6),%a0 # pass ptr to src operand
+
+ mov.l (%a6),EXC_A6(%a6) # in case a6 changes
+ bsr.l fout # call fmove out routine
+
+# Exceptions in order of precedence:
+# BSUN : no
+# SNAN : yes
+# OPERR : if ((k_factor > +17) || (dec. exp exceeds 3 digits))
+# OVFL : no
+# UNFL : no
+# DZ : no
+# INEX2 : yes
+# INEX1 : no
+
+# determine the highest priority exception (if any) set by the
+# emulation routine that has also been enabled by the user.
+ mov.b FPCR_ENABLE(%a6),%d0 # fetch exceptions enabled
+ bne.w fu_out_ena_p # some are enabled
+
+fu_out_exit_p:
+ mov.l EXC_A6(%a6),(%a6) # restore a6
+
+ btst &0x5,EXC_SR(%a6) # user or supervisor?
+ bne.b fu_out_exit_s_p # supervisor
+
+ mov.l EXC_A7(%a6),%a0 # update user a7
+ mov.l %a0,%usp
+
+fu_out_exit_cont_p:
+ fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0/fp1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+
+ unlk %a6 # unravel stack frame
+
+ btst &0x7,(%sp) # is trace on?
+ bne.w fu_trace_p # yes
+
+ bra.l _fpsp_done # exit to os
+
+# the exception occurred in supervisor mode. check to see if the
+# addressing mode was -(a7). if so, we'll need to shift the
+# stack frame "down".
+fu_out_exit_s_p:
+ btst &mda7_bit,SPCOND_FLG(%a6) # was ea mode -(a7)
+ beq.b fu_out_exit_cont_p # no
+
+ fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0/fp1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+
+ mov.l (%a6),%a6 # restore frame pointer
+
+ mov.l LOCAL_SIZE+EXC_SR(%sp),LOCAL_SIZE+EXC_SR-0xc(%sp)
+ mov.l LOCAL_SIZE+2+EXC_PC(%sp),LOCAL_SIZE+2+EXC_PC-0xc(%sp)
+
+# now, copy the result to the proper place on the stack
+ mov.l LOCAL_SIZE+FP_DST_EX(%sp),LOCAL_SIZE+EXC_SR+0x0(%sp)
+ mov.l LOCAL_SIZE+FP_DST_HI(%sp),LOCAL_SIZE+EXC_SR+0x4(%sp)
+ mov.l LOCAL_SIZE+FP_DST_LO(%sp),LOCAL_SIZE+EXC_SR+0x8(%sp)
+
+ add.l &LOCAL_SIZE-0x8,%sp
+
+ btst &0x7,(%sp)
+ bne.w fu_trace_p
+
+ bra.l _fpsp_done
+
+fu_out_ena_p:
+ and.b FPSR_EXCEPT(%a6),%d0 # keep only ones enabled
+ bfffo %d0{&24:&8},%d0 # find highest priority exception
+ beq.w fu_out_exit_p
+
+ mov.l EXC_A6(%a6),(%a6) # restore a6
+
+# an exception occurred and that exception was enabled.
+# the only exceptions possible on packed move out are INEX, OPERR, and SNAN.
+fu_out_exc_p:
+ cmpi.b %d0,&0x1a
+ bgt.w fu_inex_p2
+ beq.w fu_operr_p
+
+fu_snan_p:
+ btst &0x5,EXC_SR(%a6)
+ bne.b fu_snan_s_p
+
+ mov.l EXC_A7(%a6),%a0
+ mov.l %a0,%usp
+ bra.w fu_snan
+
+fu_snan_s_p:
+ cmpi.b SPCOND_FLG(%a6),&mda7_flg
+ bne.w fu_snan
+
+# the instruction was "fmove.p fpn,-(a7)" from supervisor mode.
+# the strategy is to move the exception frame "down" 12 bytes. then, we
+# can store the default result where the exception frame was.
+ fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0/fp1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+
+	mov.w		&0x30d8,EXC_VOFF(%a6) # vector offset = 0xd8
+ mov.w &0xe006,2+FP_SRC(%a6) # set fsave status
+
+ frestore FP_SRC(%a6) # restore src operand
+
+ mov.l (%a6),%a6 # restore frame pointer
+
+ mov.l LOCAL_SIZE+EXC_SR(%sp),LOCAL_SIZE+EXC_SR-0xc(%sp)
+ mov.l LOCAL_SIZE+2+EXC_PC(%sp),LOCAL_SIZE+2+EXC_PC-0xc(%sp)
+ mov.l LOCAL_SIZE+EXC_EA(%sp),LOCAL_SIZE+EXC_EA-0xc(%sp)
+
+# now, we copy the default result to its proper location
+ mov.l LOCAL_SIZE+FP_DST_EX(%sp),LOCAL_SIZE+0x4(%sp)
+ mov.l LOCAL_SIZE+FP_DST_HI(%sp),LOCAL_SIZE+0x8(%sp)
+ mov.l LOCAL_SIZE+FP_DST_LO(%sp),LOCAL_SIZE+0xc(%sp)
+
+ add.l &LOCAL_SIZE-0x8,%sp
+
+
+ bra.l _real_snan
+
+fu_operr_p:
+ btst &0x5,EXC_SR(%a6)
+ bne.w fu_operr_p_s
+
+ mov.l EXC_A7(%a6),%a0
+ mov.l %a0,%usp
+ bra.w fu_operr
+
+fu_operr_p_s:
+ cmpi.b SPCOND_FLG(%a6),&mda7_flg
+ bne.w fu_operr
+
+# the instruction was "fmove.p fpn,-(a7)" from supervisor mode.
+# the strategy is to move the exception frame "down" 12 bytes. then, we
+# can store the default result where the exception frame was.
+ fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0/fp1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+
+ mov.w &0x30d0,EXC_VOFF(%a6) # vector offset = 0xd0
+ mov.w &0xe004,2+FP_SRC(%a6) # set fsave status
+
+ frestore FP_SRC(%a6) # restore src operand
+
+ mov.l (%a6),%a6 # restore frame pointer
+
+ mov.l LOCAL_SIZE+EXC_SR(%sp),LOCAL_SIZE+EXC_SR-0xc(%sp)
+ mov.l LOCAL_SIZE+2+EXC_PC(%sp),LOCAL_SIZE+2+EXC_PC-0xc(%sp)
+ mov.l LOCAL_SIZE+EXC_EA(%sp),LOCAL_SIZE+EXC_EA-0xc(%sp)
+
+# now, we copy the default result to its proper location
+ mov.l LOCAL_SIZE+FP_DST_EX(%sp),LOCAL_SIZE+0x4(%sp)
+ mov.l LOCAL_SIZE+FP_DST_HI(%sp),LOCAL_SIZE+0x8(%sp)
+ mov.l LOCAL_SIZE+FP_DST_LO(%sp),LOCAL_SIZE+0xc(%sp)
+
+ add.l &LOCAL_SIZE-0x8,%sp
+
+
+ bra.l _real_operr
+
+fu_inex_p2:
+ btst &0x5,EXC_SR(%a6)
+ bne.w fu_inex_s_p2
+
+ mov.l EXC_A7(%a6),%a0
+ mov.l %a0,%usp
+ bra.w fu_inex
+
+fu_inex_s_p2:
+ cmpi.b SPCOND_FLG(%a6),&mda7_flg
+ bne.w fu_inex
+
+# the instruction was "fmove.p fpn,-(a7)" from supervisor mode.
+# the strategy is to move the exception frame "down" 12 bytes. then, we
+# can store the default result where the exception frame was.
+ fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0/fp1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+
+ mov.w &0x30c4,EXC_VOFF(%a6) # vector offset = 0xc4
+ mov.w &0xe001,2+FP_SRC(%a6) # set fsave status
+
+ frestore FP_SRC(%a6) # restore src operand
+
+ mov.l (%a6),%a6 # restore frame pointer
+
+ mov.l LOCAL_SIZE+EXC_SR(%sp),LOCAL_SIZE+EXC_SR-0xc(%sp)
+ mov.l LOCAL_SIZE+2+EXC_PC(%sp),LOCAL_SIZE+2+EXC_PC-0xc(%sp)
+ mov.l LOCAL_SIZE+EXC_EA(%sp),LOCAL_SIZE+EXC_EA-0xc(%sp)
+
+# now, we copy the default result to its proper location
+ mov.l LOCAL_SIZE+FP_DST_EX(%sp),LOCAL_SIZE+0x4(%sp)
+ mov.l LOCAL_SIZE+FP_DST_HI(%sp),LOCAL_SIZE+0x8(%sp)
+ mov.l LOCAL_SIZE+FP_DST_LO(%sp),LOCAL_SIZE+0xc(%sp)
+
+ add.l &LOCAL_SIZE-0x8,%sp
+
+
+ bra.l _real_inex
+
+#########################################################################
+
+#
+# if we're stuffing a source operand back into an fsave frame then we
+# have to make sure that for single or double source operands that the
+# format stuffed is as weird as the hardware usually makes it.
+#
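+# informally (constants taken from the code below): a sgl src whose
+# stripped exponent lies in (0x0,0x3f80] gets its hi mantissa shifted
+# right by (0x3f81 - exp), the j-bit set, and the exponent forced to
+# 0x3f80; the dbl case re-denormalizes toward a 0x3c00 exponent via
+# dnrm_lp. either way the operand goes back to the "skewed" form the
+# hardware itself would have left in the fsave frame.
+#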
+ global funimp_skew
+funimp_skew:
+ bfextu EXC_EXTWORD(%a6){&3:&3},%d0 # extract src specifier
+ cmpi.b %d0,&0x1 # was src sgl?
+ beq.b funimp_skew_sgl # yes
+ cmpi.b %d0,&0x5 # was src dbl?
+ beq.b funimp_skew_dbl # yes
+ rts
+
+funimp_skew_sgl:
+ mov.w FP_SRC_EX(%a6),%d0 # fetch DENORM exponent
+ andi.w &0x7fff,%d0 # strip sign
+ beq.b funimp_skew_sgl_not
+ cmpi.w %d0,&0x3f80
+ bgt.b funimp_skew_sgl_not
+ neg.w %d0 # make exponent negative
+ addi.w &0x3f81,%d0 # find amt to shift
+ mov.l FP_SRC_HI(%a6),%d1 # fetch DENORM hi(man)
+ lsr.l %d0,%d1 # shift it
+ bset &31,%d1 # set j-bit
+ mov.l %d1,FP_SRC_HI(%a6) # insert new hi(man)
+ andi.w &0x8000,FP_SRC_EX(%a6) # clear old exponent
+ ori.w &0x3f80,FP_SRC_EX(%a6) # insert new "skewed" exponent
+funimp_skew_sgl_not:
+ rts
+
+funimp_skew_dbl:
+ mov.w FP_SRC_EX(%a6),%d0 # fetch DENORM exponent
+ andi.w &0x7fff,%d0 # strip sign
+ beq.b funimp_skew_dbl_not
+ cmpi.w %d0,&0x3c00
+ bgt.b funimp_skew_dbl_not
+
+ tst.b FP_SRC_EX(%a6) # make "internal format"
+ smi.b 0x2+FP_SRC(%a6)
+ mov.w %d0,FP_SRC_EX(%a6) # insert exponent with cleared sign
+ clr.l %d0 # clear g,r,s
+ lea FP_SRC(%a6),%a0 # pass ptr to src op
+ mov.w &0x3c01,%d1 # pass denorm threshold
+ bsr.l dnrm_lp # denorm it
+ mov.w &0x3c00,%d0 # new exponent
+ tst.b 0x2+FP_SRC(%a6) # is sign set?
+ beq.b fss_dbl_denorm_done # no
+ bset &15,%d0 # set sign
+fss_dbl_denorm_done:
+ bset &0x7,FP_SRC_HI(%a6) # set j-bit
+ mov.w %d0,FP_SRC_EX(%a6) # insert new exponent
+funimp_skew_dbl_not:
+ rts
+
+#########################################################################
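+# _mem_write2() stores a 12-byte result for opclass 3. from user mode it
+# simply hands the write to the _dmem_write() "callout"; from supervisor
+# mode (the -(a7) special case) it parks the result in the FP_DST scratch
+# area so the caller can splice it into the shifted stack frame itself.
+# d1 is cleared on the supervisor path, which appears to mirror the
+# success convention of the _dmem_write() callout.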
+ global _mem_write2
+_mem_write2:
+ btst &0x5,EXC_SR(%a6)
+ beq.l _dmem_write
+ mov.l 0x0(%a0),FP_DST_EX(%a6)
+ mov.l 0x4(%a0),FP_DST_HI(%a6)
+ mov.l 0x8(%a0),FP_DST_LO(%a6)
+ clr.l %d1
+ rts
+
+#########################################################################
+# XDEF **************************************************************** #
+# _fpsp_effadd(): 060FPSP entry point for FP "Unimplemented #
+# effective address" exception. #
+# #
+# This handler should be the first code executed upon taking the #
+# FP Unimplemented Effective Address exception in an operating #
+# system. #
+# #
+# XREF **************************************************************** #
+# _imem_read_long() - read instruction longword #
+# fix_skewed_ops() - adjust src operand in fsave frame #
+# set_tag_x() - determine optype of src/dst operands #
+# store_fpreg() - store opclass 0 or 2 result to FP regfile #
+# unnorm_fix() - change UNNORM operands to NORM or ZERO #
+# load_fpn2() - load dst operand from FP regfile #
+#	tbl_unsupp - addr of table of emulation routines for opclass 0,2 #
+# decbin() - convert packed data to FP binary data #
+# _real_fpu_disabled() - "callout" for "FPU disabled" exception #
+# _real_access() - "callout" for access error exception #
+# _mem_read() - read extended immediate operand from memory #
+# _fpsp_done() - "callout" for exit; work all done #
+# _real_trace() - "callout" for Trace enabled exception #
+# fmovm_dynamic() - emulate dynamic fmovm instruction #
+# fmovm_ctrl() - emulate fmovm control instruction #
+# #
+# INPUT *************************************************************** #
+# - The system stack contains the "Unimplemented <ea>" stk frame #
+# #
+# OUTPUT ************************************************************** #
+# If access error: #
+# - The system stack is changed to an access error stack frame #
+# If FPU disabled: #
+# - The system stack is changed to an FPU disabled stack frame #
+# If Trace exception enabled: #
+# - The system stack is changed to a Trace exception stack frame #
+# Else: (normal case) #
+# - None (correct result has been stored as appropriate) #
+# #
+# ALGORITHM *********************************************************** #
+# This exception handles 3 types of operations: #
+# (1) FP Instructions using extended precision or packed immediate #
+# addressing mode. #
+# (2) The "fmovm.x" instruction w/ dynamic register specification. #
+# (3) The "fmovm.l" instruction w/ 2 or 3 control registers. #
+# #
+# For immediate data operations, the data is read in w/ a #
+# _mem_read() "callout", converted to FP binary (if packed), and used #
+# as the source operand to the instruction specified by the instruction #
+# word. If no FP exception should be reported as a result of the	#
+# emulation, then the result is stored to the destination register and #
+# the handler exits through _fpsp_done(). If an enabled exc has been #
+# signalled as a result of emulation, then an fsave state frame #
+# corresponding to the FP exception type must be entered into the 060 #
+# FPU before exiting. In either the enabled or disabled cases, we #
+# must also check if a Trace exception is pending, in which case, we #
+# must create a Trace exception stack frame from the current exception #
+# stack frame. If no Trace is pending, we simply exit through #
+# _fpsp_done(). #
+# For "fmovm.x", call the routine fmovm_dynamic() which will #
+# decode and emulate the instruction. No FP exceptions can be pending #
+# as a result of this operation emulation. A Trace exception can be #
+# pending, though, which means the current stack frame must be changed #
+# to a Trace stack frame and an exit made through _real_trace(). #
+# For the case of "fmovm.x Dn,-(a7)", where the offending instruction #
+# was executed from supervisor mode, this handler must store the FP #
+# register file values to the system stack by itself since #
+# fmovm_dynamic() can't handle this. A normal exit is made through #
+# _fpsp_done().							#
+# For "fmovm.l", fmovm_ctrl() is used to emulate the instruction. #
+# Again, a Trace exception may be pending and an exit made through #
+# _real_trace(). Else, a normal exit is made through _fpsp_done(). #
+# #
+# Before any of the above is attempted, it must be checked to #
+# see if the FPU is disabled. Since the "Unimp <ea>" exception is taken #
+# before the "FPU disabled" exception, but the "FPU disabled" exception #
+# has higher priority, we check the disabled bit in the PCR. If set, #
+# then we must create an 8 word "FPU disabled" exception stack frame #
+# from the current 4 word exception stack frame. This includes #
+# reproducing the effective address of the instruction to put on the #
+# new stack frame. #
+# #
+# In the process of all emulation work, if a _mem_read() #
+# "callout" returns a failing result indicating an access error, then #
+# we must create an access error stack frame from the current stack #
+# frame. This information includes a faulting address and a fault- #
+# status-longword. These are created within this handler. #
+# #
+#########################################################################
+
+ global _fpsp_effadd
+_fpsp_effadd:
+
+# This exception type takes priority over the "Line F Emulator"
+# exception. Therefore, the FPU could be disabled when entering here.
+# So, we must check to see if it's disabled and handle that case separately.
+ mov.l %d0,-(%sp) # save d0
+ movc %pcr,%d0 # load proc cr
+ btst &0x1,%d0 # is FPU disabled?
+ bne.w iea_disabled # yes
+ mov.l (%sp)+,%d0 # restore d0
+
+ link %a6,&-LOCAL_SIZE # init stack frame
+
+ movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
+ fmovm.l %fpcr,%fpsr,%fpiar,USER_FPCR(%a6) # save ctrl regs
+ fmovm.x &0xc0,EXC_FPREGS(%a6) # save fp0-fp1 on stack
+
+# PC of instruction that took the exception is the PC in the frame
+ mov.l EXC_PC(%a6),EXC_EXTWPTR(%a6)
+
+ mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
+ addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
+ bsr.l _imem_read_long # fetch the instruction words
+ mov.l %d0,EXC_OPWORD(%a6) # store OPWORD and EXTWORD
+
+#########################################################################
+
+ tst.w %d0 # is operation fmovem?
+ bmi.w iea_fmovm # yes
+
+#
+# here, we will have:
+# fabs fdabs fsabs facos fmod
+# fadd fdadd fsadd fasin frem
+# fcmp fatan fscale
+# fdiv fddiv fsdiv fatanh fsin
+# fint fcos fsincos
+# fintrz fcosh fsinh
+# fmove fdmove fsmove fetox ftan
+# fmul fdmul fsmul fetoxm1 ftanh
+# fneg fdneg fsneg fgetexp ftentox
+# fsgldiv fgetman ftwotox
+# fsglmul flog10
+# fsqrt flog2
+# fsub fdsub fssub flogn
+# ftst flognp1
+# which can all use f<op>.{x,p}
+# so, now it's immediate data extended precision AND PACKED FORMAT!
+#
+iea_op:
+ andi.l &0x00ff00ff,USER_FPSR(%a6)
+
+ btst &0xa,%d0 # is src fmt x or p?
+ bne.b iea_op_pack # packed
+
+
+ mov.l EXC_EXTWPTR(%a6),%a0 # pass: ptr to #<data>
+ lea FP_SRC(%a6),%a1 # pass: ptr to super addr
+ mov.l &0xc,%d0 # pass: 12 bytes
+ bsr.l _imem_read # read extended immediate
+
+ tst.l %d1 # did ifetch fail?
+ bne.w iea_iacc # yes
+
+ bra.b iea_op_setsrc
+
+iea_op_pack:
+
+ mov.l EXC_EXTWPTR(%a6),%a0 # pass: ptr to #<data>
+ lea FP_SRC(%a6),%a1 # pass: ptr to super dst
+ mov.l &0xc,%d0 # pass: 12 bytes
+ bsr.l _imem_read # read packed operand
+
+ tst.l %d1 # did ifetch fail?
+ bne.w iea_iacc # yes
+
+# The packed operand is an INF or a NAN if the exponent field is all ones.
+ bfextu FP_SRC(%a6){&1:&15},%d0 # get exp
+ cmpi.w %d0,&0x7fff # INF or NAN?
+ beq.b iea_op_setsrc # operand is an INF or NAN
+
+# The packed operand is a zero if the mantissa is all zero, else it's
+# a normal packed op.
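+#
+# packed-decimal layout, as assumed by the checks below (a sketch):
+#   1st lw: [31]=SM [30]=SE [29:28]=yy [27:16]=3-digit BCD exponent
+#           [3:0]=1-digit BCD integer part
+#   2nd lw: 8 BCD fraction digits   3rd lw: 8 more fraction digits
+# so {&1:&15} all ones means SE,yy,exp are all set => INF or NAN, and
+# a zero integer digit plus two zero fraction longwords means ZERO.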
+ mov.b 3+FP_SRC(%a6),%d0 # get byte 4
+ andi.b &0x0f,%d0 # clear all but last nybble
+ bne.b iea_op_gp_not_spec # not a zero
+ tst.l FP_SRC_HI(%a6) # is lw 2 zero?
+ bne.b iea_op_gp_not_spec # not a zero
+ tst.l FP_SRC_LO(%a6) # is lw 3 zero?
+ beq.b iea_op_setsrc # operand is a ZERO
+iea_op_gp_not_spec:
+ lea FP_SRC(%a6),%a0 # pass: ptr to packed op
+ bsr.l decbin # convert to extended
+ fmovm.x &0x80,FP_SRC(%a6) # make this the srcop
+
+iea_op_setsrc:
+ addi.l &0xc,EXC_EXTWPTR(%a6) # update extension word pointer
+
+# FP_SRC now holds the src operand.
+ lea FP_SRC(%a6),%a0 # pass: ptr to src op
+ bsr.l set_tag_x # tag the operand type
+ mov.b %d0,STAG(%a6) # could be ANYTHING!!!
+ cmpi.b %d0,&UNNORM # is operand an UNNORM?
+ bne.b iea_op_getdst # no
+ bsr.l unnorm_fix # yes; convert to NORM/DENORM/ZERO
+ mov.b %d0,STAG(%a6) # set new optype tag
+iea_op_getdst:
+ clr.b STORE_FLG(%a6) # clear "store result" boolean
+
+ btst &0x5,1+EXC_CMDREG(%a6) # is operation monadic or dyadic?
+ beq.b iea_op_extract # monadic
+ btst &0x4,1+EXC_CMDREG(%a6) # is operation fsincos,ftst,fcmp?
+ bne.b iea_op_spec # yes
+
+iea_op_loaddst:
+ bfextu EXC_CMDREG(%a6){&6:&3},%d0 # fetch dst regno
+ bsr.l load_fpn2 # load dst operand
+
+ lea FP_DST(%a6),%a0 # pass: ptr to dst op
+ bsr.l set_tag_x # tag the operand type
+ mov.b %d0,DTAG(%a6) # could be ANYTHING!!!
+ cmpi.b %d0,&UNNORM # is operand an UNNORM?
+ bne.b iea_op_extract # no
+ bsr.l unnorm_fix # yes; convert to NORM/DENORM/ZERO
+ mov.b %d0,DTAG(%a6) # set new optype tag
+ bra.b iea_op_extract
+
+# the operation is fsincos, ftst, or fcmp. only fcmp is dyadic
+iea_op_spec:
+ btst &0x3,1+EXC_CMDREG(%a6) # is operation fsincos?
+ beq.b iea_op_extract # yes
+# now, we're left with ftst and fcmp. so, first let's tag them so that they don't
+# store a result. then, only fcmp will branch back and pick up a dst operand.
+ st STORE_FLG(%a6) # don't store a final result
+ btst &0x1,1+EXC_CMDREG(%a6) # is operation fcmp?
+ beq.b iea_op_loaddst # yes
+
+iea_op_extract:
+ clr.l %d0
+ mov.b FPCR_MODE(%a6),%d0 # pass: rnd mode,prec
+
+ mov.b 1+EXC_CMDREG(%a6),%d1
+ andi.w &0x007f,%d1 # extract extension
+
+ fmov.l &0x0,%fpcr
+ fmov.l &0x0,%fpsr
+
+ lea FP_SRC(%a6),%a0
+ lea FP_DST(%a6),%a1
+
+ mov.l (tbl_unsupp.l,%pc,%d1.w*4),%d1 # fetch routine addr
+ jsr (tbl_unsupp.l,%pc,%d1.l*1)
+
+#
+# Exceptions in order of precedence:
+# BSUN : none
+# SNAN : all operations
+# OPERR : all reg-reg or mem-reg operations that can normally operr
+# OVFL : same as OPERR
+# UNFL : same as OPERR
+# DZ : same as OPERR
+# INEX2 : same as OPERR
+# INEX1 : all packed immediate operations
+#
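+# (sketch) the FPCR enable byte runs bit7=BSUN ... bit0=INEX1, so the
+# bfffo over {&24:&8} below returns offset 24 (BSUN) through 31
+# (INEX1); subtracting 24 yields a 0-7 index into tbl_iea_except in
+# the priority order listed above.
+#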
+
+# we determine the highest priority exception (if any) set by the
+# emulation routine that has also been enabled by the user.
+ mov.b FPCR_ENABLE(%a6),%d0 # fetch exceptions enabled
+ bne.b iea_op_ena # some are enabled
+
+# now, we save the result, unless, of course, the operation was ftst or fcmp.
+# these don't save results.
+iea_op_save:
+ tst.b STORE_FLG(%a6) # does this op store a result?
+ bne.b iea_op_exit1 # exit with no frestore
+
+iea_op_store:
+ bfextu EXC_CMDREG(%a6){&6:&3},%d0 # fetch dst regno
+ bsr.l store_fpreg # store the result
+
+iea_op_exit1:
+ mov.l EXC_PC(%a6),USER_FPIAR(%a6) # set FPIAR to "Current PC"
+ mov.l EXC_EXTWPTR(%a6),EXC_PC(%a6) # set "Next PC" in exc frame
+
+ fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+
+ unlk %a6 # unravel the frame
+
+ btst &0x7,(%sp) # is trace on?
+ bne.w iea_op_trace # yes
+
+ bra.l _fpsp_done # exit to os
+
+iea_op_ena:
+	and.b FPSR_EXCEPT(%a6),%d0 # keep only ones enabled and set
+ bfffo %d0{&24:&8},%d0 # find highest priority exception
+ bne.b iea_op_exc # at least one was set
+
+# no exception occurred. now, did a disabled, exact overflow occur with inexact
+# enabled? if so, then we have to stuff an overflow frame into the FPU.
+ btst &ovfl_bit,FPSR_EXCEPT(%a6) # did overflow occur?
+ beq.b iea_op_save
+
+iea_op_ovfl:
+ btst &inex2_bit,FPCR_ENABLE(%a6) # is inexact enabled?
+ beq.b iea_op_store # no
+ bra.b iea_op_exc_ovfl # yes
+
+# an enabled exception occurred. we have to insert the exception type back into
+# the machine.
+iea_op_exc:
+	subi.l &24,%d0 # fix offset to be 0-7
+ cmpi.b %d0,&0x6 # is exception INEX?
+ bne.b iea_op_exc_force # no
+
+# the enabled exception was inexact. so, if it occurs with an overflow
+# or underflow that was disabled, then we have to force an overflow or
+# underflow frame.
+ btst &ovfl_bit,FPSR_EXCEPT(%a6) # did overflow occur?
+ bne.b iea_op_exc_ovfl # yes
+ btst &unfl_bit,FPSR_EXCEPT(%a6) # did underflow occur?
+ bne.b iea_op_exc_unfl # yes
+
+iea_op_exc_force:
+ mov.w (tbl_iea_except.b,%pc,%d0.w*2),2+FP_SRC(%a6)
+ bra.b iea_op_exit2 # exit with frestore
+
+tbl_iea_except:
+ short 0xe002, 0xe006, 0xe004, 0xe005
+ short 0xe003, 0xe002, 0xe001, 0xe001
+
+iea_op_exc_ovfl:
+ mov.w &0xe005,2+FP_SRC(%a6)
+ bra.b iea_op_exit2
+
+iea_op_exc_unfl:
+ mov.w &0xe003,2+FP_SRC(%a6)
+
+iea_op_exit2:
+ mov.l EXC_PC(%a6),USER_FPIAR(%a6) # set FPIAR to "Current PC"
+ mov.l EXC_EXTWPTR(%a6),EXC_PC(%a6) # set "Next PC" in exc frame
+
+ fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+
+ frestore FP_SRC(%a6) # restore exceptional state
+
+ unlk %a6 # unravel the frame
+
+ btst &0x7,(%sp) # is trace on?
+ bne.b iea_op_trace # yes
+
+ bra.l _fpsp_done # exit to os
+
+#
+# The opclass two instruction that took an "Unimplemented Effective Address"
+# exception was being traced. Make the "current" PC the FPIAR and put it in
+# the trace stack frame then jump to _real_trace().
+#
+# UNIMP EA FRAME TRACE FRAME
+# ***************** *****************
+# * 0x0 * 0x0f0 * * Current *
+# ***************** * PC *
+# * Current * *****************
+# * PC * * 0x2 * 0x024 *
+# ***************** *****************
+# * SR * * Next *
+# ***************** * PC *
+# *****************
+# * SR *
+# *****************
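+#
+# (sketch) the 8-byte fmt-0x0 frame grows into the 12-byte fmt-0x2
+# frame: SR and the already-updated "Next PC" slide down one longword,
+# 0x2024 becomes the new fmt/voff word (vector 0x24 = Trace), and
+# FPIAR supplies the "Current PC" address field at 0x8(%sp).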
+iea_op_trace:
+ mov.l (%sp),-(%sp) # shift stack frame "down"
+ mov.w 0x8(%sp),0x4(%sp)
+ mov.w &0x2024,0x6(%sp) # stk fmt = 0x2; voff = 0x024
+ fmov.l %fpiar,0x8(%sp) # "Current PC" is in FPIAR
+
+ bra.l _real_trace
+
+#########################################################################
+iea_fmovm:
+ btst &14,%d0 # ctrl or data reg
+ beq.w iea_fmovm_ctrl
+
+iea_fmovm_data:
+
+ btst &0x5,EXC_SR(%a6) # user or supervisor mode
+ bne.b iea_fmovm_data_s
+
+iea_fmovm_data_u:
+ mov.l %usp,%a0
+ mov.l %a0,EXC_A7(%a6) # store current a7
+ bsr.l fmovm_dynamic # do dynamic fmovm
+ mov.l EXC_A7(%a6),%a0 # load possibly new a7
+ mov.l %a0,%usp # update usp
+ bra.w iea_fmovm_exit
+
+iea_fmovm_data_s:
+ clr.b SPCOND_FLG(%a6)
+ lea 0x2+EXC_VOFF(%a6),%a0
+ mov.l %a0,EXC_A7(%a6)
+ bsr.l fmovm_dynamic # do dynamic fmovm
+
+ cmpi.b SPCOND_FLG(%a6),&mda7_flg
+ beq.w iea_fmovm_data_predec
+ cmpi.b SPCOND_FLG(%a6),&mia7_flg
+ bne.w iea_fmovm_exit
+
+# right now, d0 = the size.
+# the data has been fetched from the supervisor stack, but we have not
+# incremented the stack pointer by the appropriate number of bytes.
+# do it here.
+iea_fmovm_data_postinc:
+ btst &0x7,EXC_SR(%a6)
+ bne.b iea_fmovm_data_pi_trace
+
+ mov.w EXC_SR(%a6),(EXC_SR,%a6,%d0)
+ mov.l EXC_EXTWPTR(%a6),(EXC_PC,%a6,%d0)
+ mov.w &0x00f0,(EXC_VOFF,%a6,%d0)
+
+ lea (EXC_SR,%a6,%d0),%a0
+ mov.l %a0,EXC_SR(%a6)
+
+ fmovm.x EXC_FP0(%a6),&0xc0 # restore fp0-fp1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+
+ unlk %a6
+ mov.l (%sp)+,%sp
+ bra.l _fpsp_done
+
+iea_fmovm_data_pi_trace:
+ mov.w EXC_SR(%a6),(EXC_SR-0x4,%a6,%d0)
+ mov.l EXC_EXTWPTR(%a6),(EXC_PC-0x4,%a6,%d0)
+ mov.w &0x2024,(EXC_VOFF-0x4,%a6,%d0)
+ mov.l EXC_PC(%a6),(EXC_VOFF+0x2-0x4,%a6,%d0)
+
+ lea (EXC_SR-0x4,%a6,%d0),%a0
+ mov.l %a0,EXC_SR(%a6)
+
+ fmovm.x EXC_FP0(%a6),&0xc0 # restore fp0-fp1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+
+ unlk %a6
+ mov.l (%sp)+,%sp
+ bra.l _real_trace
+
+# right now, d1 = the strg and d0 = the size.
+iea_fmovm_data_predec:
+ mov.b %d1,EXC_VOFF(%a6) # store strg
+ mov.b %d0,0x1+EXC_VOFF(%a6) # store size
+
+ fmovm.x EXC_FP0(%a6),&0xc0 # restore fp0-fp1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+
+ mov.l (%a6),-(%sp) # make a copy of a6
+ mov.l %d0,-(%sp) # save d0
+ mov.l %d1,-(%sp) # save d1
+ mov.l EXC_EXTWPTR(%a6),-(%sp) # make a copy of Next PC
+
+ clr.l %d0
+ mov.b 0x1+EXC_VOFF(%a6),%d0 # fetch size
+ neg.l %d0 # get negative of size
+
+ btst &0x7,EXC_SR(%a6) # is trace enabled?
+ beq.b iea_fmovm_data_p2
+
+ mov.w EXC_SR(%a6),(EXC_SR-0x4,%a6,%d0)
+ mov.l EXC_PC(%a6),(EXC_VOFF-0x2,%a6,%d0)
+ mov.l (%sp)+,(EXC_PC-0x4,%a6,%d0)
+ mov.w &0x2024,(EXC_VOFF-0x4,%a6,%d0)
+
+ pea (%a6,%d0) # create final sp
+ bra.b iea_fmovm_data_p3
+
+iea_fmovm_data_p2:
+ mov.w EXC_SR(%a6),(EXC_SR,%a6,%d0)
+ mov.l (%sp)+,(EXC_PC,%a6,%d0)
+ mov.w &0x00f0,(EXC_VOFF,%a6,%d0)
+
+ pea (0x4,%a6,%d0) # create final sp
+
+iea_fmovm_data_p3:
+ clr.l %d1
+ mov.b EXC_VOFF(%a6),%d1 # fetch strg
+
+ tst.b %d1
+ bpl.b fm_1
+ fmovm.x &0x80,(0x4+0x8,%a6,%d0)
+ addi.l &0xc,%d0
+fm_1:
+ lsl.b &0x1,%d1
+ bpl.b fm_2
+ fmovm.x &0x40,(0x4+0x8,%a6,%d0)
+ addi.l &0xc,%d0
+fm_2:
+ lsl.b &0x1,%d1
+ bpl.b fm_3
+ fmovm.x &0x20,(0x4+0x8,%a6,%d0)
+ addi.l &0xc,%d0
+fm_3:
+ lsl.b &0x1,%d1
+ bpl.b fm_4
+ fmovm.x &0x10,(0x4+0x8,%a6,%d0)
+ addi.l &0xc,%d0
+fm_4:
+ lsl.b &0x1,%d1
+ bpl.b fm_5
+ fmovm.x &0x08,(0x4+0x8,%a6,%d0)
+ addi.l &0xc,%d0
+fm_5:
+ lsl.b &0x1,%d1
+ bpl.b fm_6
+ fmovm.x &0x04,(0x4+0x8,%a6,%d0)
+ addi.l &0xc,%d0
+fm_6:
+ lsl.b &0x1,%d1
+ bpl.b fm_7
+ fmovm.x &0x02,(0x4+0x8,%a6,%d0)
+ addi.l &0xc,%d0
+fm_7:
+ lsl.b &0x1,%d1
+ bpl.b fm_end
+ fmovm.x &0x01,(0x4+0x8,%a6,%d0)
+fm_end:
+ mov.l 0x4(%sp),%d1
+ mov.l 0x8(%sp),%d0
+ mov.l 0xc(%sp),%a6
+ mov.l (%sp)+,%sp
+
+ btst &0x7,(%sp) # is trace enabled?
+ beq.l _fpsp_done
+ bra.l _real_trace
+
+#########################################################################
+iea_fmovm_ctrl:
+
+ bsr.l fmovm_ctrl # load ctrl regs
+
+iea_fmovm_exit:
+ fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+
+ btst &0x7,EXC_SR(%a6) # is trace on?
+ bne.b iea_fmovm_trace # yes
+
+ mov.l EXC_EXTWPTR(%a6),EXC_PC(%a6) # set Next PC
+
+ unlk %a6 # unravel the frame
+
+ bra.l _fpsp_done # exit to os
+
+#
+# The control reg instruction that took an "Unimplemented Effective Address"
+# exception was being traced. The "Current PC" for the trace frame is the
+# PC stacked for Unimp EA. The "Next PC" is in EXC_EXTWPTR.
+# After fixing the stack frame, jump to _real_trace().
+#
+# UNIMP EA FRAME TRACE FRAME
+# ***************** *****************
+# * 0x0 * 0x0f0 * * Current *
+# ***************** * PC *
+# * Current * *****************
+# * PC * * 0x2 * 0x024 *
+# ***************** *****************
+# * SR * * Next *
+# ***************** * PC *
+# *****************
+# * SR *
+# *****************
+# this ain't a pretty solution, but it works:
+# -restore a6 (not with unlk)
+# -shift stack frame down over where old a6 used to be
+# -add LOCAL_SIZE to stack pointer
+iea_fmovm_trace:
+ mov.l (%a6),%a6 # restore frame pointer
+ mov.w EXC_SR+LOCAL_SIZE(%sp),0x0+LOCAL_SIZE(%sp)
+ mov.l EXC_PC+LOCAL_SIZE(%sp),0x8+LOCAL_SIZE(%sp)
+ mov.l EXC_EXTWPTR+LOCAL_SIZE(%sp),0x2+LOCAL_SIZE(%sp)
+ mov.w &0x2024,0x6+LOCAL_SIZE(%sp) # stk fmt = 0x2; voff = 0x024
+ add.l &LOCAL_SIZE,%sp # clear stack frame
+
+ bra.l _real_trace
+
+#########################################################################
+# The FPU is disabled and so we should really have taken the "Line
+# F Emulator" exception. So, here we create an 8-word stack frame
+# from our 4-word stack frame. This means we must calculate the length
+# of the faulting instruction to get the "next PC". This is trivial for
+# immediate operands but requires some extra work for fmovm dynamic
+# which can use most addressing modes.
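+# (sketch of the length arithmetic below) extended or packed immediate:
+# opword(2) + extword(2) + immediate(12) = 0x10 bytes; fmovm.l #imm
+# with two ctrl regs: 2 + 2 + 2*4 = 0xc bytes, plus 4 more when all
+# three regs are moved.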
+iea_disabled:
+ mov.l (%sp)+,%d0 # restore d0
+
+ link %a6,&-LOCAL_SIZE # init stack frame
+
+ movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
+
+# PC of instruction that took the exception is the PC in the frame
+ mov.l EXC_PC(%a6),EXC_EXTWPTR(%a6)
+ mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
+ addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
+ bsr.l _imem_read_long # fetch the instruction words
+ mov.l %d0,EXC_OPWORD(%a6) # store OPWORD and EXTWORD
+
+ tst.w %d0 # is instr fmovm?
+ bmi.b iea_dis_fmovm # yes
+# instruction is using an extended precision immediate operand. therefore,
+# the total instruction length is 16 bytes.
+iea_dis_immed:
+ mov.l &0x10,%d0 # 16 bytes of instruction
+ bra.b iea_dis_cont
+iea_dis_fmovm:
+ btst &0xe,%d0 # is instr fmovm ctrl
+ bne.b iea_dis_fmovm_data # no
+# the instruction is a fmovm.l with 2 or 3 registers.
+ bfextu %d0{&19:&3},%d1
+ mov.l &0xc,%d0
+ cmpi.b %d1,&0x7 # move all regs?
+ bne.b iea_dis_cont
+ addq.l &0x4,%d0
+ bra.b iea_dis_cont
+# the instruction is an fmovm.x dynamic which can use many addressing
+# modes and thus can have several different total instruction lengths.
+# call fmovm_calc_ea which will go through the ea calc process and,
+# as a by-product, will tell us how long the instruction is.
+iea_dis_fmovm_data:
+ clr.l %d0
+ bsr.l fmovm_calc_ea
+ mov.l EXC_EXTWPTR(%a6),%d0
+ sub.l EXC_PC(%a6),%d0
+iea_dis_cont:
+ mov.w %d0,EXC_VOFF(%a6) # store stack shift value
+
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+
+ unlk %a6
+
+# here, we actually create the 8-word frame from the 4-word frame,
+# with the "next PC" as additional info.
+# the <ea> field is left undefined.
+ subq.l &0x8,%sp # make room for new stack
+ mov.l %d0,-(%sp) # save d0
+ mov.w 0xc(%sp),0x4(%sp) # move SR
+ mov.l 0xe(%sp),0x6(%sp) # move Current PC
+ clr.l %d0
+ mov.w 0x12(%sp),%d0
+ mov.l 0x6(%sp),0x10(%sp) # move Current PC
+ add.l %d0,0x6(%sp) # make Next PC
+ mov.w &0x402c,0xa(%sp) # insert offset,frame format
+ mov.l (%sp)+,%d0 # restore d0
+
+ bra.l _real_fpu_disabled
+
+##########
+
+iea_iacc:
+ movc %pcr,%d0
+ btst &0x1,%d0
+ bne.b iea_iacc_cont
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+ fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1 on stack
+iea_iacc_cont:
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+
+ unlk %a6
+
+ subq.w &0x8,%sp # make stack frame bigger
+ mov.l 0x8(%sp),(%sp) # store SR,hi(PC)
+ mov.w 0xc(%sp),0x4(%sp) # store lo(PC)
+ mov.w &0x4008,0x6(%sp) # store voff
+ mov.l 0x2(%sp),0x8(%sp) # store ea
+ mov.l &0x09428001,0xc(%sp) # store fslw
+
+iea_acc_done:
+ btst &0x5,(%sp) # user or supervisor mode?
+ beq.b iea_acc_done2 # user
+ bset &0x2,0xd(%sp) # set supervisor TM bit
+
+iea_acc_done2:
+ bra.l _real_access
+
+iea_dacc:
+ lea -LOCAL_SIZE(%a6),%sp
+
+ movc %pcr,%d1
+ btst &0x1,%d1
+ bne.b iea_dacc_cont
+ fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1 on stack
+ fmovm.l LOCAL_SIZE+USER_FPCR(%sp),%fpcr,%fpsr,%fpiar # restore ctrl regs
+iea_dacc_cont:
+ mov.l (%a6),%a6
+
+ mov.l 0x4+LOCAL_SIZE(%sp),-0x8+0x4+LOCAL_SIZE(%sp)
+ mov.w 0x8+LOCAL_SIZE(%sp),-0x8+0x8+LOCAL_SIZE(%sp)
+ mov.w &0x4008,-0x8+0xa+LOCAL_SIZE(%sp)
+ mov.l %a0,-0x8+0xc+LOCAL_SIZE(%sp)
+ mov.w %d0,-0x8+0x10+LOCAL_SIZE(%sp)
+ mov.w &0x0001,-0x8+0x12+LOCAL_SIZE(%sp)
+
+ movm.l LOCAL_SIZE+EXC_DREGS(%sp),&0x0303 # restore d0-d1/a0-a1
+ add.w &LOCAL_SIZE-0x4,%sp
+
+ bra.b iea_acc_done
+
+#########################################################################
+# XDEF **************************************************************** #
+# _fpsp_operr(): 060FPSP entry point for FP Operr exception. #
+# #
+# This handler should be the first code executed upon taking the #
+# FP Operand Error exception in an operating system. #
+# #
+# XREF **************************************************************** #
+# _imem_read_long() - read instruction longword #
+# fix_skewed_ops() - adjust src operand in fsave frame #
+# _real_operr() - "callout" to operating system operr handler #
+# _dmem_write_{byte,word,long}() - store data to mem (opclass 3) #
+# store_dreg_{b,w,l}() - store data to data regfile (opclass 3) #
+# facc_out_{b,w,l}() - store to memory took access error (opcl 3) #
+# #
+# INPUT *************************************************************** #
+# - The system stack contains the FP Operr exception frame #
+# - The fsave frame contains the source operand #
+# #
+# OUTPUT ************************************************************** #
+# No access error: #
+# - The system stack is unchanged #
+# - The fsave frame contains the adjusted src op for opclass 0,2 #
+# #
+# ALGORITHM *********************************************************** #
+# In a system where the FP Operr exception is enabled, the goal #
+# is to get to the handler specified at _real_operr(). But, on the 060, #
+# for opclass zero and two instructions taking this exception, the	#
+# input operand in the fsave frame may be incorrect for some cases #
+# and needs to be corrected. This handler calls fix_skewed_ops() to #
+# do just this and then exits through _real_operr(). #
+# For opclass 3 instructions, the 060 doesn't store the default #
+# operr result out to memory or data register file as it should. #
+# This code must emulate the move out before finally exiting through #
+# _real_operr(). The move out, if to memory, is performed using	#
+# _mem_write() "callout" routines that may return a failing result. #
+# In this special case, the handler must exit through facc_out() #
+# which creates an access error stack frame from the current operr #
+# stack frame. #
+# #
+#########################################################################
+
+ global _fpsp_operr
+_fpsp_operr:
+
+ link.w %a6,&-LOCAL_SIZE # init stack frame
+
+ fsave FP_SRC(%a6) # grab the "busy" frame
+
+ movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
+ fmovm.l %fpcr,%fpsr,%fpiar,USER_FPCR(%a6) # save ctrl regs
+ fmovm.x &0xc0,EXC_FPREGS(%a6) # save fp0-fp1 on stack
+
+# the FPIAR holds the "current PC" of the faulting instruction
+ mov.l USER_FPIAR(%a6),EXC_EXTWPTR(%a6)
+
+ mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
+ addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
+ bsr.l _imem_read_long # fetch the instruction words
+ mov.l %d0,EXC_OPWORD(%a6)
+
+##############################################################################
+
+ btst &13,%d0 # is instr an fmove out?
+ bne.b foperr_out # fmove out
+
+
+# here, we simply see if the operand in the fsave frame needs to be "unskewed".
+# this would be the case for opclass two operations with a source infinity or
+# denorm operand in the sgl or dbl format. NANs also become skewed, but can't
+# cause an operr so we don't need to check for them here.
+ lea FP_SRC(%a6),%a0 # pass: ptr to src op
+ bsr.l fix_skewed_ops # fix src op
+
+foperr_exit:
+ fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+
+ frestore FP_SRC(%a6)
+
+ unlk %a6
+ bra.l _real_operr
+
+########################################################################
+
+#
+# the hardware does not save the default result to memory on enabled
+# operand error exceptions. we do this here before passing control to
+# the user operand error handler.
+#
+# byte, word, and long destination format operations can pass
+# through here. we simply need to test the sign of the src
+# operand and save the appropriate minimum or maximum integer value
+# to the effective address as pointed to by the stacked effective address.
+#
+# although packed opclass three operations can take operand error
+# exceptions, they won't pass through here since they are caught
+# first by the unsupported data format exception handler. that handler
+# sends them directly to _real_operr() if necessary.
+#
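+# (sketch of the defaults built below) for an INF or any non-NAN src,
+# L_SCR1 gets 0x7fffffff if positive or 0x80000000 if negative; for a
+# QNAN it gets the high mantissa longword. since the data is stored
+# big-endian, writing the most significant byte/word/longword of
+# L_SCR1 yields byte 0x7f/0x80, word 0x7fff/0x8000, or the longword
+# itself.
+#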
+foperr_out:
+
+ mov.w FP_SRC_EX(%a6),%d1 # fetch exponent
+ andi.w &0x7fff,%d1
+ cmpi.w %d1,&0x7fff
+ bne.b foperr_out_not_qnan
+# the operand is either an infinity or a QNAN.
+ tst.l FP_SRC_LO(%a6)
+ bne.b foperr_out_qnan
+ mov.l FP_SRC_HI(%a6),%d1
+ andi.l &0x7fffffff,%d1
+ beq.b foperr_out_not_qnan
+foperr_out_qnan:
+ mov.l FP_SRC_HI(%a6),L_SCR1(%a6)
+ bra.b foperr_out_jmp
+
+foperr_out_not_qnan:
+ mov.l &0x7fffffff,%d1
+ tst.b FP_SRC_EX(%a6)
+ bpl.b foperr_out_not_qnan2
+ addq.l &0x1,%d1
+foperr_out_not_qnan2:
+ mov.l %d1,L_SCR1(%a6)
+
+foperr_out_jmp:
+ bfextu %d0{&19:&3},%d0 # extract dst format field
+ mov.b 1+EXC_OPWORD(%a6),%d1 # extract <ea> mode,reg
+ mov.w (tbl_operr.b,%pc,%d0.w*2),%a0
+ jmp (tbl_operr.b,%pc,%a0)
+
+tbl_operr:
+ short foperr_out_l - tbl_operr # long word integer
+ short tbl_operr - tbl_operr # sgl prec shouldn't happen
+ short tbl_operr - tbl_operr # ext prec shouldn't happen
+ short foperr_exit - tbl_operr # packed won't enter here
+ short foperr_out_w - tbl_operr # word integer
+ short tbl_operr - tbl_operr # dbl prec shouldn't happen
+ short foperr_out_b - tbl_operr # byte integer
+ short tbl_operr - tbl_operr # packed won't enter here
+
+foperr_out_b:
+ mov.b L_SCR1(%a6),%d0 # load positive default result
+ cmpi.b %d1,&0x7 # is <ea> mode a data reg?
+ ble.b foperr_out_b_save_dn # yes
+ mov.l EXC_EA(%a6),%a0 # pass: <ea> of default result
+ bsr.l _dmem_write_byte # write the default result
+
+ tst.l %d1 # did dstore fail?
+ bne.l facc_out_b # yes
+
+ bra.w foperr_exit
+foperr_out_b_save_dn:
+ andi.w &0x0007,%d1
+ bsr.l store_dreg_b # store result to regfile
+ bra.w foperr_exit
+
+foperr_out_w:
+ mov.w L_SCR1(%a6),%d0 # load positive default result
+ cmpi.b %d1,&0x7 # is <ea> mode a data reg?
+ ble.b foperr_out_w_save_dn # yes
+ mov.l EXC_EA(%a6),%a0 # pass: <ea> of default result
+ bsr.l _dmem_write_word # write the default result
+
+ tst.l %d1 # did dstore fail?
+ bne.l facc_out_w # yes
+
+ bra.w foperr_exit
+foperr_out_w_save_dn:
+ andi.w &0x0007,%d1
+ bsr.l store_dreg_w # store result to regfile
+ bra.w foperr_exit
+
+foperr_out_l:
+ mov.l L_SCR1(%a6),%d0 # load positive default result
+ cmpi.b %d1,&0x7 # is <ea> mode a data reg?
+ ble.b foperr_out_l_save_dn # yes
+ mov.l EXC_EA(%a6),%a0 # pass: <ea> of default result
+ bsr.l _dmem_write_long # write the default result
+
+ tst.l %d1 # did dstore fail?
+ bne.l facc_out_l # yes
+
+ bra.w foperr_exit
+foperr_out_l_save_dn:
+ andi.w &0x0007,%d1
+ bsr.l store_dreg_l # store result to regfile
+ bra.w foperr_exit
+
+#########################################################################
+# XDEF **************************************************************** #
+# _fpsp_snan(): 060FPSP entry point for FP SNAN exception. #
+# #
+# This handler should be the first code executed upon taking the #
+# FP Signalling NAN exception in an operating system. #
+# #
+# XREF **************************************************************** #
+# _imem_read_long() - read instruction longword #
+# fix_skewed_ops() - adjust src operand in fsave frame #
+# _real_snan() - "callout" to operating system SNAN handler #
+# _dmem_write_{byte,word,long}() - store data to mem (opclass 3) #
+# store_dreg_{b,w,l}() - store data to data regfile (opclass 3) #
+# facc_out_{b,w,l,d,x}() - store to mem took acc error (opcl 3) #
+# _calc_ea_fout() - fix An if <ea> is -() or ()+; also get <ea> #
+# #
+# INPUT *************************************************************** #
+# - The system stack contains the FP SNAN exception frame #
+# - The fsave frame contains the source operand #
+# #
+# OUTPUT ************************************************************** #
+# No access error: #
+# - The system stack is unchanged #
+# - The fsave frame contains the adjusted src op for opclass 0,2 #
+# #
+# ALGORITHM *********************************************************** #
+# In a system where the FP SNAN exception is enabled, the goal #
+# is to get to the handler specified at _real_snan(). But, on the 060, #
+# for opclass zero and two instructions taking this exception, the #
+# input operand in the fsave frame may be incorrect for some cases #
+# and needs to be corrected. This handler calls fix_skewed_ops() to #
+# do just this and then exits through _real_snan(). #
+# For opclass 3 instructions, the 060 doesn't store the default #
+# SNAN result out to memory or data register file as it should. #
+# This code must emulate the move out before finally exiting through #
+# _real_snan(). The move out, if to memory, is performed using #
+# _mem_write() "callout" routines that may return a failing result. #
+# In this special case, the handler must exit through facc_out() #
+# which creates an access error stack frame from the current SNAN #
+# stack frame. #
+# For the case of an extended precision opclass 3 instruction, #
+# if the effective addressing mode was -() or ()+, then the address #
+# register must get updated by calling _calc_ea_fout(). If the <ea> #
+# was -(a7) from supervisor mode, then the exception frame currently #
+# on the system stack must be carefully moved "down" to make room #
+# for the operand being moved. #
+# #
+#########################################################################
+
+ global _fpsp_snan
+_fpsp_snan:
+
+ link.w %a6,&-LOCAL_SIZE # init stack frame
+
+ fsave FP_SRC(%a6) # grab the "busy" frame
+
+ movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
+ fmovm.l %fpcr,%fpsr,%fpiar,USER_FPCR(%a6) # save ctrl regs
+ fmovm.x &0xc0,EXC_FPREGS(%a6) # save fp0-fp1 on stack
+
+# the FPIAR holds the "current PC" of the faulting instruction
+ mov.l USER_FPIAR(%a6),EXC_EXTWPTR(%a6)
+
+ mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
+ addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
+ bsr.l _imem_read_long # fetch the instruction words
+ mov.l %d0,EXC_OPWORD(%a6)
+
+##############################################################################
+
+ btst &13,%d0 # is instr an fmove out?
+ bne.w fsnan_out # fmove out
+
+
+# here, we simply see if the operand in the fsave frame needs to be "unskewed".
+# this would be the case for opclass two operations with a source infinity or
+# denorm operand in the sgl or dbl format. NANs also become skewed and must be
+# fixed here.
+ lea FP_SRC(%a6),%a0 # pass: ptr to src op
+ bsr.l fix_skewed_ops # fix src op
+
+fsnan_exit:
+ fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+
+ frestore FP_SRC(%a6)
+
+ unlk %a6
+ bra.l _real_snan
+
+########################################################################
+
+#
+# the hardware does not save the default result to memory on enabled
+# snan exceptions. we do this here before passing control to
+# the user snan handler.
+#
+# byte, word, long, and packed destination format operations can pass
+# through here. since packed format operations already were handled by
+# fpsp_unsupp(), we need to do nothing else for them here.
+# for byte, word, and long, we store the most significant byte, word,
+# or longword of the SNAN mantissa, with the SNAN (quiet) bit set,
+# to the effective address as pointed to by the stacked effective address.
+#
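+# (sketch) the "quiet" bit is the msb of the NAN fraction, ext mantissa
+# bit 62; it lands at bit 6 of the stored byte, bit 14 of the word,
+# and bit 30 of the longword -- exactly the bset positions below.
+#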
+fsnan_out:
+
+ bfextu %d0{&19:&3},%d0 # extract dst format field
+ mov.b 1+EXC_OPWORD(%a6),%d1 # extract <ea> mode,reg
+ mov.w (tbl_snan.b,%pc,%d0.w*2),%a0
+ jmp (tbl_snan.b,%pc,%a0)
+
+tbl_snan:
+ short fsnan_out_l - tbl_snan # long word integer
+	short fsnan_out_s - tbl_snan # single precision
+	short fsnan_out_x - tbl_snan # extended precision
+ short tbl_snan - tbl_snan # packed needs no help
+ short fsnan_out_w - tbl_snan # word integer
+	short fsnan_out_d - tbl_snan # double precision
+ short fsnan_out_b - tbl_snan # byte integer
+ short tbl_snan - tbl_snan # packed needs no help
+
+fsnan_out_b:
+ mov.b FP_SRC_HI(%a6),%d0 # load upper byte of SNAN
+ bset &6,%d0 # set SNAN bit
+ cmpi.b %d1,&0x7 # is <ea> mode a data reg?
+ ble.b fsnan_out_b_dn # yes
+ mov.l EXC_EA(%a6),%a0 # pass: <ea> of default result
+ bsr.l _dmem_write_byte # write the default result
+
+ tst.l %d1 # did dstore fail?
+ bne.l facc_out_b # yes
+
+ bra.w fsnan_exit
+fsnan_out_b_dn:
+ andi.w &0x0007,%d1
+ bsr.l store_dreg_b # store result to regfile
+ bra.w fsnan_exit
+
+fsnan_out_w:
+ mov.w FP_SRC_HI(%a6),%d0 # load upper word of SNAN
+ bset &14,%d0 # set SNAN bit
+ cmpi.b %d1,&0x7 # is <ea> mode a data reg?
+ ble.b fsnan_out_w_dn # yes
+ mov.l EXC_EA(%a6),%a0 # pass: <ea> of default result
+ bsr.l _dmem_write_word # write the default result
+
+ tst.l %d1 # did dstore fail?
+ bne.l facc_out_w # yes
+
+ bra.w fsnan_exit
+fsnan_out_w_dn:
+ andi.w &0x0007,%d1
+ bsr.l store_dreg_w # store result to regfile
+ bra.w fsnan_exit
+
+fsnan_out_l:
+ mov.l FP_SRC_HI(%a6),%d0 # load upper longword of SNAN
+ bset &30,%d0 # set SNAN bit
+ cmpi.b %d1,&0x7 # is <ea> mode a data reg?
+ ble.b fsnan_out_l_dn # yes
+ mov.l EXC_EA(%a6),%a0 # pass: <ea> of default result
+ bsr.l _dmem_write_long # write the default result
+
+ tst.l %d1 # did dstore fail?
+ bne.l facc_out_l # yes
+
+ bra.w fsnan_exit
+fsnan_out_l_dn:
+ andi.w &0x0007,%d1
+ bsr.l store_dreg_l # store result to regfile
+ bra.w fsnan_exit
+
+fsnan_out_s:
+ cmpi.b %d1,&0x7 # is <ea> mode a data reg?
+ ble.b fsnan_out_d_dn # yes
+ mov.l FP_SRC_EX(%a6),%d0 # fetch SNAN sign
+ andi.l &0x80000000,%d0 # keep sign
+ ori.l &0x7fc00000,%d0 # insert new exponent,SNAN bit
+ mov.l FP_SRC_HI(%a6),%d1 # load mantissa
+ lsr.l &0x8,%d1 # shift mantissa for sgl
+ or.l %d1,%d0 # create sgl SNAN
+ mov.l EXC_EA(%a6),%a0 # pass: <ea> of default result
+ bsr.l _dmem_write_long # write the default result
+
+ tst.l %d1 # did dstore fail?
+ bne.l facc_out_l # yes
+
+ bra.w fsnan_exit
+fsnan_out_d_dn:
+ mov.l FP_SRC_EX(%a6),%d0 # fetch SNAN sign
+ andi.l &0x80000000,%d0 # keep sign
+ ori.l &0x7fc00000,%d0 # insert new exponent,SNAN bit
+ mov.l %d1,-(%sp)
+ mov.l FP_SRC_HI(%a6),%d1 # load mantissa
+ lsr.l &0x8,%d1 # shift mantissa for sgl
+ or.l %d1,%d0 # create sgl SNAN
+ mov.l (%sp)+,%d1
+ andi.w &0x0007,%d1
+ bsr.l store_dreg_l # store result to regfile
+ bra.w fsnan_exit
+
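+# (sketch) the ext->dbl repacking below keeps the top 52 fraction bits:
+# the shift amount 11 = 63 - 52, so the dbl fraction is ext mantissa
+# bits 62..11, spliced across the two double longwords.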
+fsnan_out_d:
+ mov.l FP_SRC_EX(%a6),%d0 # fetch SNAN sign
+ andi.l &0x80000000,%d0 # keep sign
+ ori.l &0x7ff80000,%d0 # insert new exponent,SNAN bit
+ mov.l FP_SRC_HI(%a6),%d1 # load hi mantissa
+ mov.l %d0,FP_SCR0_EX(%a6) # store to temp space
+ mov.l &11,%d0 # load shift amt
+ lsr.l %d0,%d1
+ or.l %d1,FP_SCR0_EX(%a6) # create dbl hi
+ mov.l FP_SRC_HI(%a6),%d1 # load hi mantissa
+ andi.l &0x000007ff,%d1
+ ror.l %d0,%d1
+ mov.l %d1,FP_SCR0_HI(%a6) # store to temp space
+ mov.l FP_SRC_LO(%a6),%d1 # load lo mantissa
+ lsr.l %d0,%d1
+ or.l %d1,FP_SCR0_HI(%a6) # create dbl lo
+ lea FP_SCR0(%a6),%a0 # pass: ptr to operand
+ mov.l EXC_EA(%a6),%a1 # pass: dst addr
+ movq.l &0x8,%d0 # pass: size of 8 bytes
+ bsr.l _dmem_write # write the default result
+
+ tst.l %d1 # did dstore fail?
+ bne.l facc_out_d # yes
+
+ bra.w fsnan_exit
+
+# for extended precision, if the addressing mode is pre-decrement or
+# post-increment, then the address register did not get updated.
+# in addition, for pre-decrement, the stacked <ea> is incorrect.
+fsnan_out_x:
+ clr.b SPCOND_FLG(%a6) # clear special case flag
+
+ mov.w FP_SRC_EX(%a6),FP_SCR0_EX(%a6)
+ clr.w 2+FP_SCR0(%a6)
+ mov.l FP_SRC_HI(%a6),%d0
+ bset &30,%d0
+ mov.l %d0,FP_SCR0_HI(%a6)
+ mov.l FP_SRC_LO(%a6),FP_SCR0_LO(%a6)
+
+ btst &0x5,EXC_SR(%a6) # supervisor mode exception?
+ bne.b fsnan_out_x_s # yes
+
+ mov.l %usp,%a0 # fetch user stack pointer
+ mov.l %a0,EXC_A7(%a6) # save on stack for calc_ea()
+ mov.l (%a6),EXC_A6(%a6)
+
+ bsr.l _calc_ea_fout # find the correct ea,update An
+ mov.l %a0,%a1
+ mov.l %a0,EXC_EA(%a6) # stack correct <ea>
+
+ mov.l EXC_A7(%a6),%a0
+ mov.l %a0,%usp # restore user stack pointer
+ mov.l EXC_A6(%a6),(%a6)
+
+fsnan_out_x_save:
+ lea FP_SCR0(%a6),%a0 # pass: ptr to operand
+ movq.l &0xc,%d0 # pass: size of extended
+ bsr.l _dmem_write # write the default result
+
+ tst.l %d1 # did dstore fail?
+ bne.l facc_out_x # yes
+
+ bra.w fsnan_exit
+
+fsnan_out_x_s:
+ mov.l (%a6),EXC_A6(%a6)
+
+ bsr.l _calc_ea_fout # find the correct ea,update An
+ mov.l %a0,%a1
+ mov.l %a0,EXC_EA(%a6) # stack correct <ea>
+
+ mov.l EXC_A6(%a6),(%a6)
+
+ cmpi.b SPCOND_FLG(%a6),&mda7_flg # is <ea> mode -(a7)?
+ bne.b fsnan_out_x_save # no
+
+# the operation was "fmove.x SNAN,-(a7)" from supervisor mode.
+ fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+
+ frestore FP_SRC(%a6)
+
+ mov.l EXC_A6(%a6),%a6 # restore frame pointer
+
+ mov.l LOCAL_SIZE+EXC_SR(%sp),LOCAL_SIZE+EXC_SR-0xc(%sp)
+ mov.l LOCAL_SIZE+EXC_PC+0x2(%sp),LOCAL_SIZE+EXC_PC+0x2-0xc(%sp)
+ mov.l LOCAL_SIZE+EXC_EA(%sp),LOCAL_SIZE+EXC_EA-0xc(%sp)
+
+ mov.l LOCAL_SIZE+FP_SCR0_EX(%sp),LOCAL_SIZE+EXC_SR(%sp)
+ mov.l LOCAL_SIZE+FP_SCR0_HI(%sp),LOCAL_SIZE+EXC_PC+0x2(%sp)
+ mov.l LOCAL_SIZE+FP_SCR0_LO(%sp),LOCAL_SIZE+EXC_EA(%sp)
+
+ add.l &LOCAL_SIZE-0x8,%sp
+
+ bra.l _real_snan
+
+#########################################################################
+# XDEF **************************************************************** #
+# _fpsp_inex(): 060FPSP entry point for FP Inexact exception. #
+# #
+# This handler should be the first code executed upon taking the #
+# FP Inexact exception in an operating system. #
+# #
+# XREF **************************************************************** #
+# _imem_read_long() - read instruction longword #
+# fix_skewed_ops() - adjust src operand in fsave frame #
+# set_tag_x() - determine optype of src/dst operands #
+# store_fpreg() - store opclass 0 or 2 result to FP regfile #
+# unnorm_fix() - change UNNORM operands to NORM or ZERO #
+# load_fpn2() - load dst operand from FP regfile #
+# smovcr() - emulate an "fmovcr" instruction #
+# fout() - emulate an opclass 3 instruction #
+#	tbl_unsupp - addr of table of emulation routines for opclass 0,2 #
+# _real_inex() - "callout" to operating system inexact handler #
+# #
+# INPUT *************************************************************** #
+# - The system stack contains the FP Inexact exception frame #
+# - The fsave frame contains the source operand #
+# #
+# OUTPUT ************************************************************** #
+# - The system stack is unchanged #
+# - The fsave frame contains the adjusted src op for opclass 0,2 #
+# #
+# ALGORITHM *********************************************************** #
+# In a system where the FP Inexact exception is enabled, the goal #
+# is to get to the handler specified at _real_inex(). But, on the 060, #
+# for opclass zero and two instructions taking this exception, the	#
+# hardware doesn't store the correct result to the destination FP #
+# register as did the '040 and '881/2. This handler must emulate the #
+# instruction in order to get this value and then store it to the #
+# correct register before calling _real_inex(). #
+# For opclass 3 instructions, the 060 doesn't store the default #
+# inexact result out to memory or data register file as it should. #
+# This code must emulate the move out by calling fout() before finally #
+# exiting through _real_inex(). #
+# #
+#########################################################################
+
+ global _fpsp_inex
+_fpsp_inex:
+
+ link.w %a6,&-LOCAL_SIZE # init stack frame
+
+ fsave FP_SRC(%a6) # grab the "busy" frame
+
+ movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
+ fmovm.l %fpcr,%fpsr,%fpiar,USER_FPCR(%a6) # save ctrl regs
+ fmovm.x &0xc0,EXC_FPREGS(%a6) # save fp0-fp1 on stack
+
+# the FPIAR holds the "current PC" of the faulting instruction
+ mov.l USER_FPIAR(%a6),EXC_EXTWPTR(%a6)
+
+ mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
+ addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
+ bsr.l _imem_read_long # fetch the instruction words
+ mov.l %d0,EXC_OPWORD(%a6)
+
+##############################################################################
+
+ btst &13,%d0 # is instr an fmove out?
+ bne.w finex_out # fmove out
+
+
+# the hardware, for "fabs" and "fneg" w/ a long source format, puts the
+# longword integer directly into the upper longword of the mantissa along
+# w/ an exponent value of 0x401e. we convert this to extended precision here.
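+# (0x401e = 0x3fff + 0x1f, i.e. the extended bias plus 31 -- the
+# scaling that places a 32-bit integer in the upper mantissa longword.)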
+ bfextu %d0{&19:&3},%d0 # fetch instr size
+ bne.b finex_cont # instr size is not long
+ cmpi.w FP_SRC_EX(%a6),&0x401e # is exponent 0x401e?
+ bne.b finex_cont # no
+ fmov.l &0x0,%fpcr
+ fmov.l FP_SRC_HI(%a6),%fp0 # load integer src
+ fmov.x %fp0,FP_SRC(%a6) # store integer as extended precision
+ mov.w &0xe001,0x2+FP_SRC(%a6)
+
+finex_cont:
+ lea FP_SRC(%a6),%a0 # pass: ptr to src op
+ bsr.l fix_skewed_ops # fix src op
+
+# Here, we zero the ccode and exception byte field since we're going to
+# emulate the whole instruction. Notice, though, that we don't kill the
+# INEX1 bit. This is because a packed op has long since been converted
+# to extended before arriving here. Therefore, we need to retain the
+# INEX1 bit from when the operand was first converted.
+	andi.l &0x00ff01ff,USER_FPSR(%a6) # zero all but accrued field, INEX1
+
+ fmov.l &0x0,%fpcr # zero current control regs
+ fmov.l &0x0,%fpsr
+
+ bfextu EXC_EXTWORD(%a6){&0:&6},%d1 # extract upper 6 of cmdreg
+ cmpi.b %d1,&0x17 # is op an fmovecr?
+ beq.w finex_fmovcr # yes
+
+ lea FP_SRC(%a6),%a0 # pass: ptr to src op
+ bsr.l set_tag_x # tag the operand type
+ mov.b %d0,STAG(%a6) # maybe NORM,DENORM
+
+# bits four and five of the fp extension word separate the monadic and dyadic
+# operations that can pass through fpsp_inex(). remember that fcmp and ftst
+# will never take this exception, but fsincos will.
+ btst &0x5,1+EXC_CMDREG(%a6) # is operation monadic or dyadic?
+ beq.b finex_extract # monadic
+
+ btst &0x4,1+EXC_CMDREG(%a6) # is operation an fsincos?
+ bne.b finex_extract # yes
+
+ bfextu EXC_CMDREG(%a6){&6:&3},%d0 # dyadic; load dst reg
+ bsr.l load_fpn2 # load dst into FP_DST
+
+ lea FP_DST(%a6),%a0 # pass: ptr to dst op
+ bsr.l set_tag_x # tag the operand type
+ cmpi.b %d0,&UNNORM # is operand an UNNORM?
+ bne.b finex_op2_done # no
+ bsr.l unnorm_fix # yes; convert to NORM,DENORM,or ZERO
+finex_op2_done:
+ mov.b %d0,DTAG(%a6) # save dst optype tag
+
+finex_extract:
+ clr.l %d0
+ mov.b FPCR_MODE(%a6),%d0 # pass rnd prec/mode
+
+ mov.b 1+EXC_CMDREG(%a6),%d1
+ andi.w &0x007f,%d1 # extract extension
+
+ lea FP_SRC(%a6),%a0
+ lea FP_DST(%a6),%a1
+
+ mov.l (tbl_unsupp.l,%pc,%d1.w*4),%d1 # fetch routine addr
+ jsr (tbl_unsupp.l,%pc,%d1.l*1)
+
+# the operation has been emulated. the result is in fp0.
+finex_save:
+ bfextu EXC_CMDREG(%a6){&6:&3},%d0
+ bsr.l store_fpreg
+
+finex_exit:
+ fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+
+ frestore FP_SRC(%a6)
+
+ unlk %a6
+ bra.l _real_inex
+
+finex_fmovcr:
+ clr.l %d0
+ mov.b FPCR_MODE(%a6),%d0 # pass rnd prec,mode
+ mov.b 1+EXC_CMDREG(%a6),%d1
+ andi.l &0x0000007f,%d1 # pass rom offset
+ bsr.l smovcr
+ bra.b finex_save
+
+########################################################################
+
+#
+# the hardware does not save the default result to memory on enabled
+# inexact exceptions. we do this here before passing control to
+# the user inexact handler.
+#
+# byte, word, and long destination format operations can pass
+# through here. so can double and single precision.
+# although packed opclass three operations can take inexact
+# exceptions, they won't pass through here since they are caught
+# first by the unsupported data format exception handler. that handler
+# sends them directly to _real_inex() if necessary.
+#
+finex_out:
+
+ mov.b &NORM,STAG(%a6) # src is a NORM
+
+ clr.l %d0
+ mov.b FPCR_MODE(%a6),%d0 # pass rnd prec,mode
+
+ andi.l &0xffff00ff,USER_FPSR(%a6) # zero exception field
+
+ lea FP_SRC(%a6),%a0 # pass ptr to src operand
+
+ bsr.l fout # store the default result
+
+ bra.b finex_exit
+
+#########################################################################
+# XDEF **************************************************************** #
+# _fpsp_dz(): 060FPSP entry point for FP DZ exception. #
+# #
+# This handler should be the first code executed upon taking #
+# the FP DZ exception in an operating system. #
+# #
+# XREF **************************************************************** #
+# _imem_read_long() - read instruction longword from memory #
+# fix_skewed_ops() - adjust fsave operand #
+# _real_dz() - "callout" exit point from FP DZ handler #
+# #
+# INPUT *************************************************************** #
+# - The system stack contains the FP DZ exception stack. #
+# - The fsave frame contains the source operand. #
+# #
+# OUTPUT ************************************************************** #
+# - The system stack contains the FP DZ exception stack. #
+# - The fsave frame contains the adjusted source operand. #
+# #
+# ALGORITHM *********************************************************** #
+# In a system where the DZ exception is enabled, the goal is to #
+# get to the handler specified at _real_dz(). But, on the 060, when the #
+# exception is taken, the input operand in the fsave state frame may #
+# be incorrect for some cases and need to be adjusted. So, this package #
+# adjusts the operand using fix_skewed_ops() and then branches to #
+# _real_dz(). #
+# #
+#########################################################################
+
+ global _fpsp_dz
+_fpsp_dz:
+
+ link.w %a6,&-LOCAL_SIZE # init stack frame
+
+ fsave FP_SRC(%a6) # grab the "busy" frame
+
+ movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
+ fmovm.l %fpcr,%fpsr,%fpiar,USER_FPCR(%a6) # save ctrl regs
+ fmovm.x &0xc0,EXC_FPREGS(%a6) # save fp0-fp1 on stack
+
+# the FPIAR holds the "current PC" of the faulting instruction
+ mov.l USER_FPIAR(%a6),EXC_EXTWPTR(%a6)
+
+ mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
+ addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
+ bsr.l _imem_read_long # fetch the instruction words
+ mov.l %d0,EXC_OPWORD(%a6)
+
+##############################################################################
+
+
+# here, we simply see if the operand in the fsave frame needs to be "unskewed".
+# this would be the case for opclass two operations with a source zero
+# in the sgl or dbl format.
+ lea FP_SRC(%a6),%a0 # pass: ptr to src op
+ bsr.l fix_skewed_ops # fix src op
+
+fdz_exit:
+ fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+
+ frestore FP_SRC(%a6)
+
+ unlk %a6
+ bra.l _real_dz
+
+#########################################################################
+# XDEF **************************************************************** #
+# _fpsp_fline(): 060FPSP entry point for "Line F emulator" exc. #
+# #
+# This handler should be the first code executed upon taking the #
+# "Line F Emulator" exception in an operating system. #
+# #
+# XREF **************************************************************** #
+# _fpsp_unimp() - handle "FP Unimplemented" exceptions #
+# _real_fpu_disabled() - handle "FPU disabled" exceptions #
+# _real_fline() - handle "FLINE" exceptions #
+# _imem_read_long() - read instruction longword #
+# #
+# INPUT *************************************************************** #
+# - The system stack contains a "Line F Emulator" exception #
+# stack frame. #
+# #
+# OUTPUT ************************************************************** #
+# - The system stack is unchanged #
+# #
+# ALGORITHM *********************************************************** #
+# When a "Line F Emulator" exception occurs, there are 3 possible #
+# exception types, denoted by the exception stack frame format number: #
+# (1) FPU unimplemented instruction (6 word stack frame) #
+# (2) FPU disabled (8 word stack frame) #
+# (3) Line F (4 word stack frame) #
+# #
+# This module determines which and forks the flow off to the #
+# appropriate "callout" (for "disabled" and "Line F") or to the #
+# correct emulation code (for "FPU unimplemented"). #
+# This code also must check for "fmovecr" instructions w/ a #
+# non-zero <ea> field. These may get flagged as "Line F" but should #
+# really be flagged as "FPU Unimplemented". (This is a "feature" on #
+# the '060.)							#
+# #
+#########################################################################
+
+ global _fpsp_fline
+_fpsp_fline:
+
+# check to see if this exception is a "FP Unimplemented Instruction"
+# exception. if so, branch directly to that handler's entry point.
+ cmpi.w 0x6(%sp),&0x202c
+ beq.l _fpsp_unimp
+
+# check to see if the FPU is disabled. if so, jump to the OS entry
+# point for that condition.
+ cmpi.w 0x6(%sp),&0x402c
+ beq.l _real_fpu_disabled
+
+# the exception was an "F-Line Illegal" exception. we check to see
+# if the F-Line instruction is an "fmovecr" w/ a non-zero <ea>. if
+# so, convert the F-Line exception stack frame to an FP Unimplemented
+# Instruction exception stack frame else branch to the OS entry
+# point for the F-Line exception handler.
+ link.w %a6,&-LOCAL_SIZE # init stack frame
+
+ movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
+
+ mov.l EXC_PC(%a6),EXC_EXTWPTR(%a6)
+ mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
+ addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
+ bsr.l _imem_read_long # fetch instruction words
+
+ bfextu %d0{&0:&10},%d1 # is it an fmovecr?
+ cmpi.w %d1,&0x03c8
+ bne.b fline_fline # no
+
+ bfextu %d0{&16:&6},%d1 # is it an fmovecr?
+ cmpi.b %d1,&0x17
+ bne.b fline_fline # no
+
+# it's an fmovecr w/ a non-zero <ea> that has entered through
+# the F-Line Illegal exception.
+# so, we need to convert the F-Line exception stack frame into an
+# FP Unimplemented Instruction stack frame and jump to that entry
+# point.
+#
+# but, if the FPU is disabled, then we need to jump to the FPU disabled
+# entry point.
+ movc %pcr,%d0
+ btst &0x1,%d0
+ beq.b fline_fmovcr
+
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+
+ unlk %a6
+
+ sub.l &0x8,%sp # make room for "Next PC", <ea>
+ mov.w 0x8(%sp),(%sp)
+ mov.l 0xa(%sp),0x2(%sp) # move "Current PC"
+ mov.w &0x402c,0x6(%sp)
+ mov.l 0x2(%sp),0xc(%sp)
+ addq.l &0x4,0x2(%sp) # set "Next PC"
+
+ bra.l _real_fpu_disabled
+
+fline_fmovcr:
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+
+ unlk %a6
+
+ fmov.l 0x2(%sp),%fpiar # set current PC
+ addq.l &0x4,0x2(%sp) # set Next PC
+
+ mov.l (%sp),-(%sp)
+ mov.l 0x8(%sp),0x4(%sp)
+ mov.b &0x20,0x6(%sp)
+
+ bra.l _fpsp_unimp
+
+fline_fline:
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+
+ unlk %a6
+
+ bra.l _real_fline
+
+#########################################################################
+# XDEF **************************************************************** #
+# _fpsp_unimp(): 060FPSP entry point for FP "Unimplemented #
+# Instruction" exception. #
+# #
+# This handler should be the first code executed upon taking the #
+# FP Unimplemented Instruction exception in an operating system. #
+# #
+# XREF **************************************************************** #
+# _imem_read_{word,long}() - read instruction word/longword #
+# load_fop() - load src/dst ops from memory and/or FP regfile #
+# store_fpreg() - store opclass 0 or 2 result to FP regfile #
+# tbl_trans - addr of table of emulation routines for trnscndls #
+# _real_access() - "callout" for access error exception #
+# _fpsp_done() - "callout" for exit; work all done #
+# _real_trace() - "callout" for Trace enabled exception #
+# smovcr() - emulate "fmovecr" instruction #
+# funimp_skew() - adjust fsave src ops to "incorrect" value #
+# _ftrapcc() - emulate an "ftrapcc" instruction #
+# _fdbcc() - emulate an "fdbcc" instruction #
+# _fscc() - emulate an "fscc" instruction #
+# _real_trap() - "callout" for Trap exception #
+# _real_bsun() - "callout" for enabled Bsun exception #
+# #
+# INPUT *************************************************************** #
+# - The system stack contains the "Unimplemented Instr" stk frame #
+# #
+# OUTPUT ************************************************************** #
+# If access error: #
+# - The system stack is changed to an access error stack frame #
+# If Trace exception enabled: #
+# - The system stack is changed to a Trace exception stack frame #
+# Else: (normal case) #
+# - Correct result has been stored as appropriate #
+# #
+# ALGORITHM *********************************************************** #
+# There are two main cases of instructions that may enter here to #
+# be emulated: (1) the FPgen instructions, most of which were also #
+# unimplemented on the 040, and (2) "ftrapcc", "fscc", and "fdbcc". #
+# For the first set, this handler calls the routine load_fop() #
+# to load the source and destination (for dyadic) operands to be used #
+# for instruction emulation. The correct emulation routine is then #
+# chosen by decoding the instruction type and indexing into an #
+# emulation subroutine index table. After emulation returns, this #
+# handler checks to see if an exception should occur as a result of the #
+# FP instruction emulation. If so, then an FP exception of the correct #
+# type is inserted into the FPU state frame using the "frestore" #
+# instruction before exiting through _fpsp_done(). In either the #
+# exceptional or non-exceptional cases, we must check to see if the #
+# Trace exception is enabled. If so, then we must create a Trace #
+# exception frame from the current exception frame and exit through #
+# _real_trace(). #
+# For "fdbcc", "ftrapcc", and "fscc", the emulation subroutines #
+# _fdbcc(), _ftrapcc(), and _fscc() respectively are used. All three #
+# may flag that a BSUN exception should be taken. If so, then the #
+# current exception stack frame is converted into a BSUN exception #
+# stack frame and an exit is made through _real_bsun(). If the #
+# instruction was "ftrapcc" and a Trap exception should result, a Trap #
+# exception stack frame is created from the current frame and an exit #
+# is made through _real_trap(). If a Trace exception is pending, then #
+# a Trace exception frame is created from the current frame and a jump #
+# is made to _real_trace(). Finally, if none of these conditions exist, #
+# then the handler exits through the callout _fpsp_done().		#
+# #
+# In any of the above scenarios, if a _mem_read() or _mem_write() #
+# "callout" returns a failing value, then an access error stack frame #
+# is created from the current stack frame and an exit is made through #
+# _real_access(). #
+# #
+#########################################################################
+
+#
+# FP UNIMPLEMENTED INSTRUCTION STACK FRAME:
+#
+# *****************
+# * * => <ea> of fp unimp instr.
+# - EA -
+# * *
+# *****************
+# * 0x2 * 0x02c * => frame format and vector offset (vector #11)
+# *****************
+# * *
+# - Next PC - => PC of instr to execute after exc handling
+# * *
+# *****************
+# * SR * => SR at the time the exception was taken
+# *****************
+#
+# Note: the !NULL bit does not get set in the fsave frame when the
+# machine encounters an fp unimp exception. Therefore, it must be set
+# before leaving this handler.
+#
+ global _fpsp_unimp
+_fpsp_unimp:
+
+ link.w %a6,&-LOCAL_SIZE # init stack frame
+
+ movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
+ fmovm.l %fpcr,%fpsr,%fpiar,USER_FPCR(%a6) # save ctrl regs
+ fmovm.x &0xc0,EXC_FPREGS(%a6) # save fp0-fp1
+
+ btst &0x5,EXC_SR(%a6) # user mode exception?
+ bne.b funimp_s # no; supervisor mode
+
+# save the value of the user stack pointer onto the stack frame
+funimp_u:
+ mov.l %usp,%a0 # fetch user stack pointer
+ mov.l %a0,EXC_A7(%a6) # store in stack frame
+ bra.b funimp_cont
+
+# store the value of the supervisor stack pointer BEFORE the exc occurred.
+# old_sp is address just above stacked effective address.
+funimp_s:
+ lea 4+EXC_EA(%a6),%a0 # load old a7'
+ mov.l %a0,EXC_A7(%a6) # store a7'
+ mov.l %a0,OLD_A7(%a6) # make a copy
+
+funimp_cont:
+
+# the FPIAR holds the "current PC" of the faulting instruction.
+ mov.l USER_FPIAR(%a6),EXC_EXTWPTR(%a6)
+
+ mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
+ addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
+ bsr.l _imem_read_long # fetch the instruction words
+ mov.l %d0,EXC_OPWORD(%a6)
+
+############################################################################
+
+ fmov.l &0x0,%fpcr # clear FPCR
+ fmov.l &0x0,%fpsr # clear FPSR
+
+ clr.b SPCOND_FLG(%a6) # clear "special case" flag
+
+# Divide the fp instructions into 8 types based on the TYPE field in
+# bits 6-8 of the opword (classes 6 and 7 are undefined).
+# (for the '060, only two types can take this exception)
+# bftst %d0{&7:&3} # test TYPE
+ btst &22,%d0 # type 0 or 1 ?
+ bne.w funimp_misc # type 1
+
+#########################################
+# TYPE == 0: General instructions #
+#########################################
+funimp_gen:
+
+ clr.b STORE_FLG(%a6) # clear "store result" flag
+
+# clear the ccode byte and exception status byte
+ andi.l &0x00ff00ff,USER_FPSR(%a6)
+
+ bfextu %d0{&16:&6},%d1 # extract upper 6 of cmdreg
+ cmpi.b %d1,&0x17 # is op an fmovecr?
+ beq.w funimp_fmovcr # yes
+
+funimp_gen_op:
+ bsr.l _load_fop # load
+
+ clr.l %d0
+ mov.b FPCR_MODE(%a6),%d0 # fetch rnd mode
+
+ mov.b 1+EXC_CMDREG(%a6),%d1
+ andi.w &0x003f,%d1 # extract extension bits
+ lsl.w &0x3,%d1 # shift left 3 bits
+ or.b STAG(%a6),%d1 # insert src optag bits
+
+ lea FP_DST(%a6),%a1 # pass dst ptr in a1
+ lea FP_SRC(%a6),%a0 # pass src ptr in a0
+
+ mov.w (tbl_trans.w,%pc,%d1.w*2),%d1
+ jsr (tbl_trans.w,%pc,%d1.w*1) # emulate
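+
+# the double-indexed dispatch above is, in C terms (a sketch; tbl_trans
+# holds 16-bit offsets relative to its own base):
+#
+#	uint16_t off = tbl_trans[((cmdreg & 0x3f) << 3) | stag];
+#	((void (*)(void))((char *)tbl_trans + off))();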
+
+funimp_fsave:
+ mov.b FPCR_ENABLE(%a6),%d0 # fetch exceptions enabled
+ bne.w funimp_ena # some are enabled
+
+funimp_store:
+ bfextu EXC_CMDREG(%a6){&6:&3},%d0 # fetch Dn
+ bsr.l store_fpreg # store result to fp regfile
+
+funimp_gen_exit:
+ fmovm.x EXC_FP0(%a6),&0xc0 # restore fp0-fp1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+
+funimp_gen_exit_cmp:
+ cmpi.b SPCOND_FLG(%a6),&mia7_flg # was the ea mode (sp)+ ?
+ beq.b funimp_gen_exit_a7 # yes
+
+ cmpi.b SPCOND_FLG(%a6),&mda7_flg # was the ea mode -(sp) ?
+ beq.b funimp_gen_exit_a7 # yes
+
+funimp_gen_exit_cont:
+ unlk %a6
+
+funimp_gen_exit_cont2:
+ btst &0x7,(%sp) # is trace on?
+ beq.l _fpsp_done # no
+
+# this catches a problem with the case where an exception will be re-inserted
+# into the machine. the frestore has already been executed...so, the fmov.l
+# alone of the control register would trigger an unwanted exception.
+# until I feel like fixing this, we'll sidestep the exception.
+ fsave -(%sp)
+ fmov.l %fpiar,0x14(%sp) # "Current PC" is in FPIAR
+ frestore (%sp)+
+ mov.w &0x2024,0x6(%sp) # stk fmt = 0x2; voff = 0x24
+ bra.l _real_trace
+
+funimp_gen_exit_a7:
+ btst &0x5,EXC_SR(%a6) # supervisor or user mode?
+ bne.b funimp_gen_exit_a7_s # supervisor
+
+ mov.l %a0,-(%sp)
+ mov.l EXC_A7(%a6),%a0
+ mov.l %a0,%usp
+ mov.l (%sp)+,%a0
+ bra.b funimp_gen_exit_cont
+
+# if the instruction was executed from supervisor mode and the addressing
+# mode was (a7)+, then the stack frame for the rte must be shifted "up"
+# "n" bytes where "n" is the size of the src operand type.
+# f<op>.{b,w,l,s,d,x,p}
+funimp_gen_exit_a7_s:
+ mov.l %d0,-(%sp) # save d0
+ mov.l EXC_A7(%a6),%d0 # load new a7'
+ sub.l OLD_A7(%a6),%d0 # subtract old a7'
+ mov.l 0x2+EXC_PC(%a6),(0x2+EXC_PC,%a6,%d0) # shift stack frame
+ mov.l EXC_SR(%a6),(EXC_SR,%a6,%d0) # shift stack frame
+ mov.w %d0,EXC_SR(%a6) # store incr number
+ mov.l (%sp)+,%d0 # restore d0
+
+ unlk %a6
+
+ add.w (%sp),%sp # stack frame shifted
+ bra.b funimp_gen_exit_cont2
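+
+# in C terms the shift amounts to (a sketch; "frame" and "n" are invented
+# names, n = new a7' minus old a7' = size of the consumed source operand):
+#
+#	memmove(frame + n, frame, 8);  /* slide SR,PC,fmt/voff up n bytes */
+#	sp += n;                       /* rte then uses the shifted frame */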
+
+######################
+# fmovecr.x #ccc,fpn #
+######################
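+# the 7-bit ROM offset selects one of the on-chip constants of the
+# '881/'882 (offset $00 = pi, $0b-$0e = log conversion constants,
+# $0f = 0.0, $30 = ln(2), $31 = ln(10), $32-$3f = powers of ten);
+# smovcr() recreates that table in software.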
+funimp_fmovcr:
+ clr.l %d0
+ mov.b FPCR_MODE(%a6),%d0
+ mov.b 1+EXC_CMDREG(%a6),%d1
+ andi.l &0x0000007f,%d1 # pass rom offset in d1
+ bsr.l smovcr
+ bra.w funimp_fsave
+
+#########################################################################
+
+#
+# the user has enabled some exceptions. we figure not to see this too
+# often so that's why it gets lower priority.
+#
+funimp_ena:
+
+# was an exception set that was also enabled?
+ and.b FPSR_EXCEPT(%a6),%d0 # keep only ones enabled and set
+ bfffo %d0{&24:&8},%d0 # find highest priority exception
+ bne.b funimp_exc # at least one was set
+
+# no exception that was enabled was set BUT if we got an exact overflow
+# and overflow wasn't enabled but inexact was (yech!) then this is
+# an inexact exception; otherwise, return to normal non-exception flow.
+ btst &ovfl_bit,FPSR_EXCEPT(%a6) # did overflow occur?
+ beq.w funimp_store # no; return to normal flow
+
+# the overflow w/ exact result happened; but is inexact enabled in the FPCR?
+funimp_ovfl:
+ btst &inex2_bit,FPCR_ENABLE(%a6) # is inexact enabled?
+ beq.w funimp_store # no; return to normal flow
+ bra.b funimp_exc_ovfl # yes
+
+# some exception happened that was actually enabled.
+# we'll insert this new exception into the FPU and then return.
+funimp_exc:
+ subi.l &24,%d0 # fix offset to be 0-7
+ cmpi.b %d0,&0x6 # is exception INEX?
+ bne.b funimp_exc_force # no
+
+# the enabled exception was inexact. so, if it occurs with an overflow
+# or underflow that was disabled, then we have to force an overflow or
+# underflow frame. the eventual overflow or underflow handler will see that
+# it's actually an inexact and act appropriately. this is the only easy
+# way to have the EXOP available for the enabled inexact handler when
+# a disabled overflow or underflow has also happened.
+ btst &ovfl_bit,FPSR_EXCEPT(%a6) # did overflow occur?
+ bne.b funimp_exc_ovfl # yes
+ btst &unfl_bit,FPSR_EXCEPT(%a6) # did underflow occur?
+ bne.b funimp_exc_unfl # yes
+
+# force the fsave exception status bits to signal an exception of the
+# appropriate type. don't forget to "skew" the source operand in case we
+# "unskewed" the one the hardware initially gave us.
+funimp_exc_force:
+ mov.l %d0,-(%sp) # save d0
+ bsr.l funimp_skew # check for special case
+ mov.l (%sp)+,%d0 # restore d0
+ mov.w (tbl_funimp_except.b,%pc,%d0.w*2),2+FP_SRC(%a6)
+ bra.b funimp_gen_exit2 # exit with frestore
+
+tbl_funimp_except:
+ short 0xe002, 0xe006, 0xe004, 0xe005
+ short 0xe003, 0xe002, 0xe001, 0xe001
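+# (index 0-7 above = bsun,snan,operr,ovfl,unfl,dz,inex2,inex1, the order
+# bfffo finds them in the enable byte. each word is the exceptional status
+# inserted at 2+FP_SRC for the frestore, e.g. $e005 = ovfl, $e003 = unfl;
+# an arithmetic op can never post bsun, so entry 0 is just a placeholder.)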
+
+# insert an overflow frame
+funimp_exc_ovfl:
+ bsr.l funimp_skew # check for special case
+ mov.w &0xe005,2+FP_SRC(%a6)
+ bra.b funimp_gen_exit2
+
+# insert an underflow frame
+funimp_exc_unfl:
+ bsr.l funimp_skew # check for special case
+ mov.w &0xe003,2+FP_SRC(%a6)
+
+# this is the general exit point for an enabled exception that will be
+# restored into the machine for the instruction just emulated.
+funimp_gen_exit2:
+ fmovm.x EXC_FP0(%a6),&0xc0 # restore fp0-fp1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+
+ frestore FP_SRC(%a6) # insert exceptional status
+
+ bra.w funimp_gen_exit_cmp
+
+############################################################################
+
+#
+# TYPE == 1: FDB<cc>, FS<cc>, FTRAP<cc>
+#
+# These instructions were implemented on the '881/2 and '040 in hardware but
+# are emulated in software on the '060.
+#
+funimp_misc:
+ bfextu %d0{&10:&3},%d1 # extract mode field
+ cmpi.b %d1,&0x1 # is it an fdb<cc>?
+ beq.w funimp_fdbcc # yes
+ cmpi.b %d1,&0x7 # is it an fs<cc>?
+ bne.w funimp_fscc # yes
+ bfextu %d0{&13:&3},%d1
+ cmpi.b %d1,&0x2 # is it an fs<cc>?
+ blt.w funimp_fscc # yes
+
+#########################
+# ftrap<cc> #
+# ftrap<cc>.w #<data> #
+# ftrap<cc>.l #<data> #
+#########################
+funimp_ftrapcc:
+
+ bsr.l _ftrapcc # FTRAP<cc>()
+
+ cmpi.b SPCOND_FLG(%a6),&fbsun_flg # is enabled bsun occurring?
+ beq.w funimp_bsun # yes
+
+ cmpi.b SPCOND_FLG(%a6),&ftrapcc_flg # should a trap occur?
+ bne.w funimp_done # no
+
+# FP UNIMP FRAME TRAP FRAME
+# ***************** *****************
+# ** <EA> ** ** Current PC **
+# ***************** *****************
+# * 0x2 * 0x02c * * 0x2 * 0x01c *
+# ***************** *****************
+# ** Next PC ** ** Next PC **
+# ***************** *****************
+# * SR * * SR *
+# ***************** *****************
+# (6 words) (6 words)
+#
+# the ftrapcc instruction should take a trap. so, here we must create a
+# trap stack frame from an unimplemented fp instruction stack frame and
+# jump to the user supplied entry point for the trap exception
+funimp_ftrapcc_tp:
+ mov.l USER_FPIAR(%a6),EXC_EA(%a6) # Address = Current PC
+ mov.w &0x201c,EXC_VOFF(%a6) # Vector Offset = 0x01c
+
+ fmovm.x EXC_FP0(%a6),&0xc0 # restore fp0-fp1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+
+ unlk %a6
+ bra.l _real_trap
+
+#########################
+# fdb<cc> Dn,<label> #
+#########################
+funimp_fdbcc:
+
+ mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
+ addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
+ bsr.l _imem_read_word # read displacement
+
+ tst.l %d1 # did ifetch fail?
+ bne.w funimp_iacc # yes
+
+ ext.l %d0 # sign extend displacement
+
+ bsr.l _fdbcc # FDB<cc>()
+
+ cmpi.b SPCOND_FLG(%a6),&fbsun_flg # is enabled bsun occurring?
+ beq.w funimp_bsun
+
+ bra.w funimp_done # branch to finish
+
+#################
+# fs<cc>.b <ea> #
+#################
+funimp_fscc:
+
+ bsr.l _fscc # FS<cc>()
+
+# I am assuming here that an "fs<cc>.b -(An)" or "fs<cc>.b (An)+" instruction
+# does not need to update "An" before taking a bsun exception.
+ cmpi.b SPCOND_FLG(%a6),&fbsun_flg # is enabled bsun occurring?
+ beq.w funimp_bsun
+
+ btst &0x5,EXC_SR(%a6) # yes; is it a user mode exception?
+ bne.b funimp_fscc_s # no
+
+funimp_fscc_u:
+ mov.l EXC_A7(%a6),%a0 # yes; set new USP
+ mov.l %a0,%usp
+ bra.w funimp_done # branch to finish
+
+# remember, I'm assuming that post-increment is bogus...(it IS!!!)
+# so, the least significant WORD of the stacked effective address got
+# overwritten by the "fs<cc> -(An)". We must shift the stack frame "down"
+# so that the rte will work correctly without destroying the result.
+# even though the operation size is byte, the stack ptr is decr by 2.
+#
+# remember, also, this instruction may be traced.
+funimp_fscc_s:
+ cmpi.b SPCOND_FLG(%a6),&mda7_flg # was a7 modified?
+ bne.w funimp_done # no
+
+ fmovm.x EXC_FP0(%a6),&0xc0 # restore fp0-fp1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+
+ unlk %a6
+
+ btst &0x7,(%sp) # is trace enabled?
+ bne.b funimp_fscc_s_trace # yes
+
+ subq.l &0x2,%sp
+ mov.l 0x2(%sp),(%sp) # shift SR,hi(PC) "down"
+ mov.l 0x6(%sp),0x4(%sp) # shift lo(PC),voff "down"
+ bra.l _fpsp_done
+
+funimp_fscc_s_trace:
+ subq.l &0x2,%sp
+ mov.l 0x2(%sp),(%sp) # shift SR,hi(PC) "down"
+ mov.w 0x6(%sp),0x4(%sp) # shift lo(PC)
+ mov.w &0x2024,0x6(%sp) # fmt/voff = $2024
+ fmov.l %fpiar,0x8(%sp) # insert "current PC"
+
+ bra.l _real_trace
+
+#
+# The ftrap<cc>, fs<cc>, or fdb<cc> is to take an enabled bsun. we must convert
+# the fp unimplemented instruction exception stack frame into a bsun stack frame,
+# restore a bsun exception into the machine, and branch to the user
+# supplied bsun hook.
+#
+# FP UNIMP FRAME BSUN FRAME
+# ***************** *****************
+# ** <EA> ** * 0x0 * 0x0c0 *
+# ***************** *****************
+# * 0x2 * 0x02c * ** Current PC **
+# ***************** *****************
+# ** Next PC ** * SR *
+# ***************** *****************
+# * SR * (4 words)
+# *****************
+# (6 words)
+#
+funimp_bsun:
+ mov.w &0x00c0,2+EXC_EA(%a6) # Fmt = 0x0; Vector Offset = 0x0c0
+ mov.l USER_FPIAR(%a6),EXC_VOFF(%a6) # PC = Current PC
+ mov.w EXC_SR(%a6),2+EXC_PC(%a6) # shift SR "up"
+
+ mov.w &0xe000,2+FP_SRC(%a6) # bsun exception enabled
+
+ fmovm.x EXC_FP0(%a6),&0xc0 # restore fp0-fp1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+
+ frestore FP_SRC(%a6) # restore bsun exception
+
+ unlk %a6
+
+ addq.l &0x4,%sp # erase sludge
+
+ bra.l _real_bsun # branch to user bsun hook
+
+#
+# all ftrapcc/fscc/fdbcc processing has been completed. unwind the stack frame
+# and return.
+#
+# as usual, we have to check for trace mode being on here. since instructions
+# modifying the supervisor stack frame don't pass through here, this is a
+# relatively easy task.
+#
+funimp_done:
+ fmovm.x EXC_FP0(%a6),&0xc0 # restore fp0-fp1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+
+ unlk %a6
+
+ btst &0x7,(%sp) # is trace enabled?
+ bne.b funimp_trace # yes
+
+ bra.l _fpsp_done
+
+# FP UNIMP FRAME TRACE FRAME
+# ***************** *****************
+# ** <EA> ** ** Current PC **
+# ***************** *****************
+# * 0x2 * 0x02c * * 0x2 * 0x024 *
+# ***************** *****************
+# ** Next PC ** ** Next PC **
+# ***************** *****************
+# * SR * * SR *
+# ***************** *****************
+# (6 words) (6 words)
+#
+# the emulated instruction should take a trace trap. so, here we must create a
+# trace stack frame from an unimplemented fp instruction stack frame and
+# jump to the user supplied entry point for the trace exception
+funimp_trace:
+ fmov.l %fpiar,0x8(%sp) # current PC is in fpiar
+ mov.b &0x24,0x7(%sp) # vector offset = 0x024
+
+ bra.l _real_trace
+
+################################################################
+
+ global tbl_trans
+ swbeg &0x1c0
+tbl_trans:
+ short tbl_trans - tbl_trans # $00-0 fmovecr all
+ short tbl_trans - tbl_trans # $00-1 fmovecr all
+ short tbl_trans - tbl_trans # $00-2 fmovecr all
+ short tbl_trans - tbl_trans # $00-3 fmovecr all
+ short tbl_trans - tbl_trans # $00-4 fmovecr all
+ short tbl_trans - tbl_trans # $00-5 fmovecr all
+ short tbl_trans - tbl_trans # $00-6 fmovecr all
+ short tbl_trans - tbl_trans # $00-7 fmovecr all
+
+ short tbl_trans - tbl_trans # $01-0 fint norm
+ short tbl_trans - tbl_trans # $01-1 fint zero
+ short tbl_trans - tbl_trans # $01-2 fint inf
+ short tbl_trans - tbl_trans # $01-3 fint qnan
+ short tbl_trans - tbl_trans # $01-4 fint denorm
+ short tbl_trans - tbl_trans # $01-5 fint snan
+ short tbl_trans - tbl_trans # $01-6 fint unnorm
+ short tbl_trans - tbl_trans # $01-7 ERROR
+
+ short ssinh - tbl_trans # $02-0 fsinh norm
+ short src_zero - tbl_trans # $02-1 fsinh zero
+ short src_inf - tbl_trans # $02-2 fsinh inf
+ short src_qnan - tbl_trans # $02-3 fsinh qnan
+ short ssinhd - tbl_trans # $02-4 fsinh denorm
+ short src_snan - tbl_trans # $02-5 fsinh snan
+ short tbl_trans - tbl_trans # $02-6 fsinh unnorm
+ short tbl_trans - tbl_trans # $02-7 ERROR
+
+ short tbl_trans - tbl_trans # $03-0 fintrz norm
+ short tbl_trans - tbl_trans # $03-1 fintrz zero
+ short tbl_trans - tbl_trans # $03-2 fintrz inf
+ short tbl_trans - tbl_trans # $03-3 fintrz qnan
+ short tbl_trans - tbl_trans # $03-4 fintrz denorm
+ short tbl_trans - tbl_trans # $03-5 fintrz snan
+ short tbl_trans - tbl_trans # $03-6 fintrz unnorm
+ short tbl_trans - tbl_trans # $03-7 ERROR
+
+ short tbl_trans - tbl_trans # $04-0 fsqrt norm
+ short tbl_trans - tbl_trans # $04-1 fsqrt zero
+ short tbl_trans - tbl_trans # $04-2 fsqrt inf
+ short tbl_trans - tbl_trans # $04-3 fsqrt qnan
+ short tbl_trans - tbl_trans # $04-4 fsqrt denorm
+ short tbl_trans - tbl_trans # $04-5 fsqrt snan
+ short tbl_trans - tbl_trans # $04-6 fsqrt unnorm
+ short tbl_trans - tbl_trans # $04-7 ERROR
+
+ short tbl_trans - tbl_trans # $05-0 ERROR
+ short tbl_trans - tbl_trans # $05-1 ERROR
+ short tbl_trans - tbl_trans # $05-2 ERROR
+ short tbl_trans - tbl_trans # $05-3 ERROR
+ short tbl_trans - tbl_trans # $05-4 ERROR
+ short tbl_trans - tbl_trans # $05-5 ERROR
+ short tbl_trans - tbl_trans # $05-6 ERROR
+ short tbl_trans - tbl_trans # $05-7 ERROR
+
+ short slognp1 - tbl_trans # $06-0 flognp1 norm
+ short src_zero - tbl_trans # $06-1 flognp1 zero
+ short sopr_inf - tbl_trans # $06-2 flognp1 inf
+ short src_qnan - tbl_trans # $06-3 flognp1 qnan
+ short slognp1d - tbl_trans # $06-4 flognp1 denorm
+ short src_snan - tbl_trans # $06-5 flognp1 snan
+ short tbl_trans - tbl_trans # $06-6 flognp1 unnorm
+ short tbl_trans - tbl_trans # $06-7 ERROR
+
+ short tbl_trans - tbl_trans # $07-0 ERROR
+ short tbl_trans - tbl_trans # $07-1 ERROR
+ short tbl_trans - tbl_trans # $07-2 ERROR
+ short tbl_trans - tbl_trans # $07-3 ERROR
+ short tbl_trans - tbl_trans # $07-4 ERROR
+ short tbl_trans - tbl_trans # $07-5 ERROR
+ short tbl_trans - tbl_trans # $07-6 ERROR
+ short tbl_trans - tbl_trans # $07-7 ERROR
+
+ short setoxm1 - tbl_trans # $08-0 fetoxm1 norm
+ short src_zero - tbl_trans # $08-1 fetoxm1 zero
+ short setoxm1i - tbl_trans # $08-2 fetoxm1 inf
+ short src_qnan - tbl_trans # $08-3 fetoxm1 qnan
+ short setoxm1d - tbl_trans # $08-4 fetoxm1 denorm
+ short src_snan - tbl_trans # $08-5 fetoxm1 snan
+ short tbl_trans - tbl_trans # $08-6 fetoxm1 unnorm
+ short tbl_trans - tbl_trans # $08-7 ERROR
+
+ short stanh - tbl_trans # $09-0 ftanh norm
+ short src_zero - tbl_trans # $09-1 ftanh zero
+ short src_one - tbl_trans # $09-2 ftanh inf
+ short src_qnan - tbl_trans # $09-3 ftanh qnan
+ short stanhd - tbl_trans # $09-4 ftanh denorm
+ short src_snan - tbl_trans # $09-5 ftanh snan
+ short tbl_trans - tbl_trans # $09-6 ftanh unnorm
+ short tbl_trans - tbl_trans # $09-7 ERROR
+
+ short satan - tbl_trans # $0a-0 fatan norm
+ short src_zero - tbl_trans # $0a-1 fatan zero
+ short spi_2 - tbl_trans # $0a-2 fatan inf
+ short src_qnan - tbl_trans # $0a-3 fatan qnan
+ short satand - tbl_trans # $0a-4 fatan denorm
+ short src_snan - tbl_trans # $0a-5 fatan snan
+ short tbl_trans - tbl_trans # $0a-6 fatan unnorm
+ short tbl_trans - tbl_trans # $0a-7 ERROR
+
+ short tbl_trans - tbl_trans # $0b-0 ERROR
+ short tbl_trans - tbl_trans # $0b-1 ERROR
+ short tbl_trans - tbl_trans # $0b-2 ERROR
+ short tbl_trans - tbl_trans # $0b-3 ERROR
+ short tbl_trans - tbl_trans # $0b-4 ERROR
+ short tbl_trans - tbl_trans # $0b-5 ERROR
+ short tbl_trans - tbl_trans # $0b-6 ERROR
+ short tbl_trans - tbl_trans # $0b-7 ERROR
+
+ short sasin - tbl_trans # $0c-0 fasin norm
+ short src_zero - tbl_trans # $0c-1 fasin zero
+ short t_operr - tbl_trans # $0c-2 fasin inf
+ short src_qnan - tbl_trans # $0c-3 fasin qnan
+ short sasind - tbl_trans # $0c-4 fasin denorm
+ short src_snan - tbl_trans # $0c-5 fasin snan
+ short tbl_trans - tbl_trans # $0c-6 fasin unnorm
+ short tbl_trans - tbl_trans # $0c-7 ERROR
+
+ short satanh - tbl_trans # $0d-0 fatanh norm
+ short src_zero - tbl_trans # $0d-1 fatanh zero
+ short t_operr - tbl_trans # $0d-2 fatanh inf
+ short src_qnan - tbl_trans # $0d-3 fatanh qnan
+ short satanhd - tbl_trans # $0d-4 fatanh denorm
+ short src_snan - tbl_trans # $0d-5 fatanh snan
+ short tbl_trans - tbl_trans # $0d-6 fatanh unnorm
+ short tbl_trans - tbl_trans # $0d-7 ERROR
+
+ short ssin - tbl_trans # $0e-0 fsin norm
+ short src_zero - tbl_trans # $0e-1 fsin zero
+ short t_operr - tbl_trans # $0e-2 fsin inf
+ short src_qnan - tbl_trans # $0e-3 fsin qnan
+ short ssind - tbl_trans # $0e-4 fsin denorm
+ short src_snan - tbl_trans # $0e-5 fsin snan
+ short tbl_trans - tbl_trans # $0e-6 fsin unnorm
+ short tbl_trans - tbl_trans # $0e-7 ERROR
+
+ short stan - tbl_trans # $0f-0 ftan norm
+ short src_zero - tbl_trans # $0f-1 ftan zero
+ short t_operr - tbl_trans # $0f-2 ftan inf
+ short src_qnan - tbl_trans # $0f-3 ftan qnan
+ short stand - tbl_trans # $0f-4 ftan denorm
+ short src_snan - tbl_trans # $0f-5 ftan snan
+ short tbl_trans - tbl_trans # $0f-6 ftan unnorm
+ short tbl_trans - tbl_trans # $0f-7 ERROR
+
+ short setox - tbl_trans # $10-0 fetox norm
+ short ld_pone - tbl_trans # $10-1 fetox zero
+ short szr_inf - tbl_trans # $10-2 fetox inf
+ short src_qnan - tbl_trans # $10-3 fetox qnan
+ short setoxd - tbl_trans # $10-4 fetox denorm
+ short src_snan - tbl_trans # $10-5 fetox snan
+ short tbl_trans - tbl_trans # $10-6 fetox unnorm
+ short tbl_trans - tbl_trans # $10-7 ERROR
+
+ short stwotox - tbl_trans # $11-0 ftwotox norm
+ short ld_pone - tbl_trans # $11-1 ftwotox zero
+ short szr_inf - tbl_trans # $11-2 ftwotox inf
+ short src_qnan - tbl_trans # $11-3 ftwotox qnan
+ short stwotoxd - tbl_trans # $11-4 ftwotox denorm
+ short src_snan - tbl_trans # $11-5 ftwotox snan
+ short tbl_trans - tbl_trans # $11-6 ftwotox unnorm
+ short tbl_trans - tbl_trans # $11-7 ERROR
+
+ short stentox - tbl_trans # $12-0 ftentox norm
+ short ld_pone - tbl_trans # $12-1 ftentox zero
+ short szr_inf - tbl_trans # $12-2 ftentox inf
+ short src_qnan - tbl_trans # $12-3 ftentox qnan
+ short stentoxd - tbl_trans # $12-4 ftentox denorm
+ short src_snan - tbl_trans # $12-5 ftentox snan
+ short tbl_trans - tbl_trans # $12-6 ftentox unnorm
+ short tbl_trans - tbl_trans # $12-7 ERROR
+
+ short tbl_trans - tbl_trans # $13-0 ERROR
+ short tbl_trans - tbl_trans # $13-1 ERROR
+ short tbl_trans - tbl_trans # $13-2 ERROR
+ short tbl_trans - tbl_trans # $13-3 ERROR
+ short tbl_trans - tbl_trans # $13-4 ERROR
+ short tbl_trans - tbl_trans # $13-5 ERROR
+ short tbl_trans - tbl_trans # $13-6 ERROR
+ short tbl_trans - tbl_trans # $13-7 ERROR
+
+ short slogn - tbl_trans # $14-0 flogn norm
+ short t_dz2 - tbl_trans # $14-1 flogn zero
+ short sopr_inf - tbl_trans # $14-2 flogn inf
+ short src_qnan - tbl_trans # $14-3 flogn qnan
+ short slognd - tbl_trans # $14-4 flogn denorm
+ short src_snan - tbl_trans # $14-5 flogn snan
+ short tbl_trans - tbl_trans # $14-6 flogn unnorm
+ short tbl_trans - tbl_trans # $14-7 ERROR
+
+ short slog10 - tbl_trans # $15-0 flog10 norm
+ short t_dz2 - tbl_trans # $15-1 flog10 zero
+ short sopr_inf - tbl_trans # $15-2 flog10 inf
+ short src_qnan - tbl_trans # $15-3 flog10 qnan
+ short slog10d - tbl_trans # $15-4 flog10 denorm
+ short src_snan - tbl_trans # $15-5 flog10 snan
+ short tbl_trans - tbl_trans # $15-6 flog10 unnorm
+ short tbl_trans - tbl_trans # $15-7 ERROR
+
+ short slog2 - tbl_trans # $16-0 flog2 norm
+ short t_dz2 - tbl_trans # $16-1 flog2 zero
+ short sopr_inf - tbl_trans # $16-2 flog2 inf
+ short src_qnan - tbl_trans # $16-3 flog2 qnan
+ short slog2d - tbl_trans # $16-4 flog2 denorm
+ short src_snan - tbl_trans # $16-5 flog2 snan
+ short tbl_trans - tbl_trans # $16-6 flog2 unnorm
+ short tbl_trans - tbl_trans # $16-7 ERROR
+
+ short tbl_trans - tbl_trans # $17-0 ERROR
+ short tbl_trans - tbl_trans # $17-1 ERROR
+ short tbl_trans - tbl_trans # $17-2 ERROR
+ short tbl_trans - tbl_trans # $17-3 ERROR
+ short tbl_trans - tbl_trans # $17-4 ERROR
+ short tbl_trans - tbl_trans # $17-5 ERROR
+ short tbl_trans - tbl_trans # $17-6 ERROR
+ short tbl_trans - tbl_trans # $17-7 ERROR
+
+ short tbl_trans - tbl_trans # $18-0 fabs norm
+ short tbl_trans - tbl_trans # $18-1 fabs zero
+ short tbl_trans - tbl_trans # $18-2 fabs inf
+ short tbl_trans - tbl_trans # $18-3 fabs qnan
+ short tbl_trans - tbl_trans # $18-4 fabs denorm
+ short tbl_trans - tbl_trans # $18-5 fabs snan
+ short tbl_trans - tbl_trans # $18-6 fabs unnorm
+ short tbl_trans - tbl_trans # $18-7 ERROR
+
+ short scosh - tbl_trans # $19-0 fcosh norm
+ short ld_pone - tbl_trans # $19-1 fcosh zero
+ short ld_pinf - tbl_trans # $19-2 fcosh inf
+ short src_qnan - tbl_trans # $19-3 fcosh qnan
+ short scoshd - tbl_trans # $19-4 fcosh denorm
+ short src_snan - tbl_trans # $19-5 fcosh snan
+ short tbl_trans - tbl_trans # $19-6 fcosh unnorm
+ short tbl_trans - tbl_trans # $19-7 ERROR
+
+ short tbl_trans - tbl_trans # $1a-0 fneg norm
+ short tbl_trans - tbl_trans # $1a-1 fneg zero
+ short tbl_trans - tbl_trans # $1a-2 fneg inf
+ short tbl_trans - tbl_trans # $1a-3 fneg qnan
+ short tbl_trans - tbl_trans # $1a-4 fneg denorm
+ short tbl_trans - tbl_trans # $1a-5 fneg snan
+ short tbl_trans - tbl_trans # $1a-6 fneg unnorm
+ short tbl_trans - tbl_trans # $1a-7 ERROR
+
+ short tbl_trans - tbl_trans # $1b-0 ERROR
+ short tbl_trans - tbl_trans # $1b-1 ERROR
+ short tbl_trans - tbl_trans # $1b-2 ERROR
+ short tbl_trans - tbl_trans # $1b-3 ERROR
+ short tbl_trans - tbl_trans # $1b-4 ERROR
+ short tbl_trans - tbl_trans # $1b-5 ERROR
+ short tbl_trans - tbl_trans # $1b-6 ERROR
+ short tbl_trans - tbl_trans # $1b-7 ERROR
+
+ short sacos - tbl_trans # $1c-0 facos norm
+ short ld_ppi2 - tbl_trans # $1c-1 facos zero
+ short t_operr - tbl_trans # $1c-2 facos inf
+ short src_qnan - tbl_trans # $1c-3 facos qnan
+ short sacosd - tbl_trans # $1c-4 facos denorm
+ short src_snan - tbl_trans # $1c-5 facos snan
+ short tbl_trans - tbl_trans # $1c-6 facos unnorm
+ short tbl_trans - tbl_trans # $1c-7 ERROR
+
+ short scos - tbl_trans # $1d-0 fcos norm
+ short ld_pone - tbl_trans # $1d-1 fcos zero
+ short t_operr - tbl_trans # $1d-2 fcos inf
+ short src_qnan - tbl_trans # $1d-3 fcos qnan
+ short scosd - tbl_trans # $1d-4 fcos denorm
+ short src_snan - tbl_trans # $1d-5 fcos snan
+ short tbl_trans - tbl_trans # $1d-6 fcos unnorm
+ short tbl_trans - tbl_trans # $1d-7 ERROR
+
+ short sgetexp - tbl_trans # $1e-0 fgetexp norm
+ short src_zero - tbl_trans # $1e-1 fgetexp zero
+ short t_operr - tbl_trans # $1e-2 fgetexp inf
+ short src_qnan - tbl_trans # $1e-3 fgetexp qnan
+ short sgetexpd - tbl_trans # $1e-4 fgetexp denorm
+ short src_snan - tbl_trans # $1e-5 fgetexp snan
+ short tbl_trans - tbl_trans # $1e-6 fgetexp unnorm
+ short tbl_trans - tbl_trans # $1e-7 ERROR
+
+ short sgetman - tbl_trans # $1f-0 fgetman norm
+ short src_zero - tbl_trans # $1f-1 fgetman zero
+ short t_operr - tbl_trans # $1f-2 fgetman inf
+ short src_qnan - tbl_trans # $1f-3 fgetman qnan
+ short sgetmand - tbl_trans # $1f-4 fgetman denorm
+ short src_snan - tbl_trans # $1f-5 fgetman snan
+ short tbl_trans - tbl_trans # $1f-6 fgetman unnorm
+ short tbl_trans - tbl_trans # $1f-7 ERROR
+
+ short tbl_trans - tbl_trans # $20-0 fdiv norm
+ short tbl_trans - tbl_trans # $20-1 fdiv zero
+ short tbl_trans - tbl_trans # $20-2 fdiv inf
+ short tbl_trans - tbl_trans # $20-3 fdiv qnan
+ short tbl_trans - tbl_trans # $20-4 fdiv denorm
+ short tbl_trans - tbl_trans # $20-5 fdiv snan
+ short tbl_trans - tbl_trans # $20-6 fdiv unnorm
+ short tbl_trans - tbl_trans # $20-7 ERROR
+
+ short smod_snorm - tbl_trans # $21-0 fmod norm
+ short smod_szero - tbl_trans # $21-1 fmod zero
+ short smod_sinf - tbl_trans # $21-2 fmod inf
+ short sop_sqnan - tbl_trans # $21-3 fmod qnan
+ short smod_sdnrm - tbl_trans # $21-4 fmod denorm
+ short sop_ssnan - tbl_trans # $21-5 fmod snan
+ short tbl_trans - tbl_trans # $21-6 fmod unnorm
+ short tbl_trans - tbl_trans # $21-7 ERROR
+
+ short tbl_trans - tbl_trans # $22-0 fadd norm
+ short tbl_trans - tbl_trans # $22-1 fadd zero
+ short tbl_trans - tbl_trans # $22-2 fadd inf
+ short tbl_trans - tbl_trans # $22-3 fadd qnan
+ short tbl_trans - tbl_trans # $22-4 fadd denorm
+ short tbl_trans - tbl_trans # $22-5 fadd snan
+ short tbl_trans - tbl_trans # $22-6 fadd unnorm
+ short tbl_trans - tbl_trans # $22-7 ERROR
+
+ short tbl_trans - tbl_trans # $23-0 fmul norm
+ short tbl_trans - tbl_trans # $23-1 fmul zero
+ short tbl_trans - tbl_trans # $23-2 fmul inf
+ short tbl_trans - tbl_trans # $23-3 fmul qnan
+ short tbl_trans - tbl_trans # $23-4 fmul denorm
+ short tbl_trans - tbl_trans # $23-5 fmul snan
+ short tbl_trans - tbl_trans # $23-6 fmul unnorm
+ short tbl_trans - tbl_trans # $23-7 ERROR
+
+ short tbl_trans - tbl_trans # $24-0 fsgldiv norm
+ short tbl_trans - tbl_trans # $24-1 fsgldiv zero
+ short tbl_trans - tbl_trans # $24-2 fsgldiv inf
+ short tbl_trans - tbl_trans # $24-3 fsgldiv qnan
+ short tbl_trans - tbl_trans # $24-4 fsgldiv denorm
+ short tbl_trans - tbl_trans # $24-5 fsgldiv snan
+ short tbl_trans - tbl_trans # $24-6 fsgldiv unnorm
+ short tbl_trans - tbl_trans # $24-7 ERROR
+
+ short srem_snorm - tbl_trans # $25-0 frem norm
+ short srem_szero - tbl_trans # $25-1 frem zero
+ short srem_sinf - tbl_trans # $25-2 frem inf
+ short sop_sqnan - tbl_trans # $25-3 frem qnan
+ short srem_sdnrm - tbl_trans # $25-4 frem denorm
+ short sop_ssnan - tbl_trans # $25-5 frem snan
+ short tbl_trans - tbl_trans # $25-6 frem unnorm
+ short tbl_trans - tbl_trans # $25-7 ERROR
+
+ short sscale_snorm - tbl_trans # $26-0 fscale norm
+ short sscale_szero - tbl_trans # $26-1 fscale zero
+ short sscale_sinf - tbl_trans # $26-2 fscale inf
+ short sop_sqnan - tbl_trans # $26-3 fscale qnan
+ short sscale_sdnrm - tbl_trans # $26-4 fscale denorm
+ short sop_ssnan - tbl_trans # $26-5 fscale snan
+ short tbl_trans - tbl_trans # $26-6 fscale unnorm
+ short tbl_trans - tbl_trans # $26-7 ERROR
+
+ short tbl_trans - tbl_trans # $27-0 fsglmul norm
+ short tbl_trans - tbl_trans # $27-1 fsglmul zero
+ short tbl_trans - tbl_trans # $27-2 fsglmul inf
+ short tbl_trans - tbl_trans # $27-3 fsglmul qnan
+ short tbl_trans - tbl_trans # $27-4 fsglmul denorm
+ short tbl_trans - tbl_trans # $27-5 fsglmul snan
+ short tbl_trans - tbl_trans # $27-6 fsglmul unnorm
+ short tbl_trans - tbl_trans # $27-7 ERROR
+
+ short tbl_trans - tbl_trans # $28-0 fsub norm
+ short tbl_trans - tbl_trans # $28-1 fsub zero
+ short tbl_trans - tbl_trans # $28-2 fsub inf
+ short tbl_trans - tbl_trans # $28-3 fsub qnan
+ short tbl_trans - tbl_trans # $28-4 fsub denorm
+ short tbl_trans - tbl_trans # $28-5 fsub snan
+ short tbl_trans - tbl_trans # $28-6 fsub unnorm
+ short tbl_trans - tbl_trans # $28-7 ERROR
+
+ short tbl_trans - tbl_trans # $29-0 ERROR
+ short tbl_trans - tbl_trans # $29-1 ERROR
+ short tbl_trans - tbl_trans # $29-2 ERROR
+ short tbl_trans - tbl_trans # $29-3 ERROR
+ short tbl_trans - tbl_trans # $29-4 ERROR
+ short tbl_trans - tbl_trans # $29-5 ERROR
+ short tbl_trans - tbl_trans # $29-6 ERROR
+ short tbl_trans - tbl_trans # $29-7 ERROR
+
+ short tbl_trans - tbl_trans # $2a-0 ERROR
+ short tbl_trans - tbl_trans # $2a-1 ERROR
+ short tbl_trans - tbl_trans # $2a-2 ERROR
+ short tbl_trans - tbl_trans # $2a-3 ERROR
+ short tbl_trans - tbl_trans # $2a-4 ERROR
+ short tbl_trans - tbl_trans # $2a-5 ERROR
+ short tbl_trans - tbl_trans # $2a-6 ERROR
+ short tbl_trans - tbl_trans # $2a-7 ERROR
+
+ short tbl_trans - tbl_trans # $2b-0 ERROR
+ short tbl_trans - tbl_trans # $2b-1 ERROR
+ short tbl_trans - tbl_trans # $2b-2 ERROR
+ short tbl_trans - tbl_trans # $2b-3 ERROR
+ short tbl_trans - tbl_trans # $2b-4 ERROR
+ short tbl_trans - tbl_trans # $2b-5 ERROR
+ short tbl_trans - tbl_trans # $2b-6 ERROR
+ short tbl_trans - tbl_trans # $2b-7 ERROR
+
+ short tbl_trans - tbl_trans # $2c-0 ERROR
+ short tbl_trans - tbl_trans # $2c-1 ERROR
+ short tbl_trans - tbl_trans # $2c-2 ERROR
+ short tbl_trans - tbl_trans # $2c-3 ERROR
+ short tbl_trans - tbl_trans # $2c-4 ERROR
+ short tbl_trans - tbl_trans # $2c-5 ERROR
+ short tbl_trans - tbl_trans # $2c-6 ERROR
+ short tbl_trans - tbl_trans # $2c-7 ERROR
+
+ short tbl_trans - tbl_trans # $2d-0 ERROR
+ short tbl_trans - tbl_trans # $2d-1 ERROR
+ short tbl_trans - tbl_trans # $2d-2 ERROR
+ short tbl_trans - tbl_trans # $2d-3 ERROR
+ short tbl_trans - tbl_trans # $2d-4 ERROR
+ short tbl_trans - tbl_trans # $2d-5 ERROR
+ short tbl_trans - tbl_trans # $2d-6 ERROR
+ short tbl_trans - tbl_trans # $2d-7 ERROR
+
+ short tbl_trans - tbl_trans # $2e-0 ERROR
+ short tbl_trans - tbl_trans # $2e-1 ERROR
+ short tbl_trans - tbl_trans # $2e-2 ERROR
+ short tbl_trans - tbl_trans # $2e-3 ERROR
+ short tbl_trans - tbl_trans # $2e-4 ERROR
+ short tbl_trans - tbl_trans # $2e-5 ERROR
+ short tbl_trans - tbl_trans # $2e-6 ERROR
+ short tbl_trans - tbl_trans # $2e-7 ERROR
+
+ short tbl_trans - tbl_trans # $2f-0 ERROR
+ short tbl_trans - tbl_trans # $2f-1 ERROR
+ short tbl_trans - tbl_trans # $2f-2 ERROR
+ short tbl_trans - tbl_trans # $2f-3 ERROR
+ short tbl_trans - tbl_trans # $2f-4 ERROR
+ short tbl_trans - tbl_trans # $2f-5 ERROR
+ short tbl_trans - tbl_trans # $2f-6 ERROR
+ short tbl_trans - tbl_trans # $2f-7 ERROR
+
+ short ssincos - tbl_trans # $30-0 fsincos norm
+ short ssincosz - tbl_trans # $30-1 fsincos zero
+ short ssincosi - tbl_trans # $30-2 fsincos inf
+ short ssincosqnan - tbl_trans # $30-3 fsincos qnan
+ short ssincosd - tbl_trans # $30-4 fsincos denorm
+ short ssincossnan - tbl_trans # $30-5 fsincos snan
+ short tbl_trans - tbl_trans # $30-6 fsincos unnorm
+ short tbl_trans - tbl_trans # $30-7 ERROR
+
+ short ssincos - tbl_trans # $31-0 fsincos norm
+ short ssincosz - tbl_trans # $31-1 fsincos zero
+ short ssincosi - tbl_trans # $31-2 fsincos inf
+ short ssincosqnan - tbl_trans # $31-3 fsincos qnan
+ short ssincosd - tbl_trans # $31-4 fsincos denorm
+ short ssincossnan - tbl_trans # $31-5 fsincos snan
+ short tbl_trans - tbl_trans # $31-6 fsincos unnorm
+ short tbl_trans - tbl_trans # $31-7 ERROR
+
+ short ssincos - tbl_trans # $32-0 fsincos norm
+ short ssincosz - tbl_trans # $32-1 fsincos zero
+ short ssincosi - tbl_trans # $32-2 fsincos inf
+ short ssincosqnan - tbl_trans # $32-3 fsincos qnan
+ short ssincosd - tbl_trans # $32-4 fsincos denorm
+ short ssincossnan - tbl_trans # $32-5 fsincos snan
+ short tbl_trans - tbl_trans # $32-6 fsincos unnorm
+ short tbl_trans - tbl_trans # $32-7 ERROR
+
+ short ssincos - tbl_trans # $33-0 fsincos norm
+ short ssincosz - tbl_trans # $33-1 fsincos zero
+ short ssincosi - tbl_trans # $33-2 fsincos inf
+ short ssincosqnan - tbl_trans # $33-3 fsincos qnan
+ short ssincosd - tbl_trans # $33-4 fsincos denorm
+ short ssincossnan - tbl_trans # $33-5 fsincos snan
+ short tbl_trans - tbl_trans # $33-6 fsincos unnorm
+ short tbl_trans - tbl_trans # $33-7 ERROR
+
+ short ssincos - tbl_trans # $34-0 fsincos norm
+ short ssincosz - tbl_trans # $34-1 fsincos zero
+ short ssincosi - tbl_trans # $34-2 fsincos inf
+ short ssincosqnan - tbl_trans # $34-3 fsincos qnan
+ short ssincosd - tbl_trans # $34-4 fsincos denorm
+ short ssincossnan - tbl_trans # $34-5 fsincos snan
+ short tbl_trans - tbl_trans # $34-6 fsincos unnorm
+ short tbl_trans - tbl_trans # $34-7 ERROR
+
+ short ssincos - tbl_trans # $35-0 fsincos norm
+ short ssincosz - tbl_trans # $35-1 fsincos zero
+ short ssincosi - tbl_trans # $35-2 fsincos inf
+ short ssincosqnan - tbl_trans # $35-3 fsincos qnan
+ short ssincosd - tbl_trans # $35-4 fsincos denorm
+ short ssincossnan - tbl_trans # $35-5 fsincos snan
+ short tbl_trans - tbl_trans # $35-6 fsincos unnorm
+ short tbl_trans - tbl_trans # $35-7 ERROR
+
+ short ssincos - tbl_trans # $36-0 fsincos norm
+ short ssincosz - tbl_trans # $36-1 fsincos zero
+ short ssincosi - tbl_trans # $36-2 fsincos inf
+ short ssincosqnan - tbl_trans # $36-3 fsincos qnan
+ short ssincosd - tbl_trans # $36-4 fsincos denorm
+ short ssincossnan - tbl_trans # $36-5 fsincos snan
+ short tbl_trans - tbl_trans # $36-6 fsincos unnorm
+ short tbl_trans - tbl_trans # $36-7 ERROR
+
+ short ssincos - tbl_trans # $37-0 fsincos norm
+ short ssincosz - tbl_trans # $37-1 fsincos zero
+ short ssincosi - tbl_trans # $37-2 fsincos inf
+ short ssincosqnan - tbl_trans # $37-3 fsincos qnan
+ short ssincosd - tbl_trans # $37-4 fsincos denorm
+ short ssincossnan - tbl_trans # $37-5 fsincos snan
+ short tbl_trans - tbl_trans # $37-6 fsincos unnorm
+ short tbl_trans - tbl_trans # $37-7 ERROR
+
+##########
+
+# the instruction fetch access for the displacement word for the
+# fdbcc emulation failed. here, we create an access error frame
+# from the current frame and branch to _real_access().
+funimp_iacc:
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+ fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1
+
+ mov.l USER_FPIAR(%a6),EXC_PC(%a6) # store current PC
+
+ unlk %a6
+
+ mov.l (%sp),-(%sp) # store SR,hi(PC)
+ mov.w 0x8(%sp),0x4(%sp) # store lo(PC)
+ mov.w &0x4008,0x6(%sp) # store voff
+ mov.l 0x2(%sp),0x8(%sp) # store EA
+ mov.l &0x09428001,0xc(%sp) # store FSLW
+
+ btst &0x5,(%sp) # user or supervisor mode?
+ beq.b funimp_iacc_end # user
+ bset &0x2,0xd(%sp) # set supervisor TM bit
+
+funimp_iacc_end:
+ bra.l _real_access
+
+#########################################################################
+# ssin(): computes the sine of a normalized input #
+# ssind(): computes the sine of a denormalized input #
+# scos(): computes the cosine of a normalized input #
+# scosd(): computes the cosine of a denormalized input #
+# ssincos(): computes the sine and cosine of a normalized input #
+# ssincosd(): computes the sine and cosine of a denormalized input #
+# #
+# INPUT *************************************************************** #
+# a0 = pointer to extended precision input #
+# d0 = round precision,mode #
+# #
+# OUTPUT ************************************************************** #
+# fp0 = sin(X) or cos(X) #
+# #
+# For ssincos(X): #
+# fp0 = sin(X) #
+# fp1 = cos(X) #
+# #
+# ACCURACY and MONOTONICITY ******************************************* #
+# The returned result is within 1 ulp in 64 significant bits, i.e. #
+# within 0.5001 ulp to 53 bits if the result is subsequently #
+# rounded to double precision. The result is provably monotonic #
+# in double precision. #
+# #
+# ALGORITHM *********************************************************** #
+# #
+# SIN and COS: #
+# 1. If SIN is invoked, set AdjN := 0; otherwise, set AdjN := 1. #
+# #
+# 2. If |X| >= 15Pi or |X| < 2**(-40), go to 7. #
+# #
+# 3. Decompose X as X = N(Pi/2) + r where |r| <= Pi/4. Let #
+# k = N mod 4, so in particular, k = 0,1,2,or 3. #
+# Overwrite k by k := k + AdjN. #
+# #
+# 4. If k is even, go to 6. #
+# #
+# 5. (k is odd) Set j := (k-1)/2, sgn := (-1)**j. #
+# Return sgn*cos(r) where cos(r) is approximated by an #
+# even polynomial in r, 1 + r*r*(B1+s*(B2+ ... + s*B8)), #
+# s = r*r. #
+# Exit. #
+# #
+# 6. (k is even) Set j := k/2, sgn := (-1)**j. Return sgn*sin(r) #
+# where sin(r) is approximated by an odd polynomial in r #
+# r + r*s*(A1+s*(A2+ ... + s*A7)), s = r*r. #
+# Exit. #
+# #
+# 7. If |X| > 1, go to 9. #
+# #
+# 8. (|X|<2**(-40)) If SIN is invoked, return X; #
+# otherwise return 1. #
+# #
+# 9. Overwrite X by X := X rem 2Pi. Now that |X| <= Pi, #
+# go back to 3. #
+# #
+# SINCOS: #
+# 1. If |X| >= 15Pi or |X| < 2**(-40), go to 6. #
+# #
+# 2. Decompose X as X = N(Pi/2) + r where |r| <= Pi/4. Let #
+# k = N mod 4, so in particular, k = 0,1,2,or 3. #
+# #
+# 3. If k is even, go to 5. #
+# #
+# 4. (k is odd) Set j1 := (k-1)/2, j2 := j1 (EOR) (k mod 2), i.e. #
+# j1 exclusive or with the l.s.b. of k. #
+# sgn1 := (-1)**j1, sgn2 := (-1)**j2. #
+# SIN(X) = sgn1 * cos(r) and COS(X) = sgn2*sin(r) where #
+# sin(r) and cos(r) are computed as odd and even #
+# polynomials in r, respectively. Exit #
+# #
+# 5. (k is even) Set j1 := k/2, sgn1 := (-1)**j1. #
+# SIN(X) = sgn1 * sin(r) and COS(X) = sgn1*cos(r) where #
+# sin(r) and cos(r) are computed as odd and even #
+# polynomials in r, respectively. Exit #
+# #
+# 6. If |X| > 1, go to 8. #
+# #
+# 7. (|X|<2**(-40)) SIN(X) = X and COS(X) = 1. Exit. #
+# #
+# 8. Overwrite X by X := X rem 2Pi. Now that |X| <= Pi, #
+# go back to 2. #
+# #
+#########################################################################
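+#
+# the algorithm above, transcribed into a C sketch (double stands in for
+# the extended arithmetic, and remainder()/the libm kernels stand in for
+# the two-piece reduction and polynomials implemented below):
+#
+#	#include <math.h>
+#	double fpsp_sincos(double x, int adjn)    /* adjn: 0 = sin, 1 = cos */
+#	{
+#	    if (fabs(x) < 0x1p-40)                /* steps 7-8 */
+#	        return adjn ? 1.0 : x;
+#	    if (fabs(x) >= 15 * M_PI)             /* step 9: general reduction */
+#	        x = remainder(x, 2 * M_PI);
+#	    double n = nearbyint(x * (2 / M_PI)); /* step 3 */
+#	    double r = x - n * (M_PI / 2);        /* done in two pieces below */
+#	    int k = ((int)n + adjn) & 3;
+#	    double p = (k & 1) ? cos(r) : sin(r); /* steps 5-6: poly kernels */
+#	    return (k & 2) ? -p : p;              /* sgn = (-1)**j */
+#	}
+#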
+
+SINA7: long 0xBD6AAA77,0xCCC994F5
+SINA6: long 0x3DE61209,0x7AAE8DA1
+SINA5: long 0xBE5AE645,0x2A118AE4
+SINA4: long 0x3EC71DE3,0xA5341531
+SINA3: long 0xBF2A01A0,0x1A018B59,0x00000000,0x00000000
+SINA2: long 0x3FF80000,0x88888888,0x888859AF,0x00000000
+SINA1: long 0xBFFC0000,0xAAAAAAAA,0xAAAAAA99,0x00000000
+
+COSB8: long 0x3D2AC4D0,0xD6011EE3
+COSB7: long 0xBDA9396F,0x9F45AC19
+COSB6: long 0x3E21EED9,0x0612C972
+COSB5: long 0xBE927E4F,0xB79D9FCF
+COSB4: long 0x3EFA01A0,0x1A01D423,0x00000000,0x00000000
+COSB3: long 0xBFF50000,0xB60B60B6,0x0B61D438,0x00000000
+COSB2: long 0x3FFA0000,0xAAAAAAAA,0xAAAAAB5E
+COSB1: long 0xBF000000
+
+ set INARG,FP_SCR0
+
+ set X,FP_SCR0
+# set XDCARE,X+2
+ set XFRAC,X+4
+
+ set RPRIME,FP_SCR0
+ set SPRIME,FP_SCR1
+
+ set POSNEG1,L_SCR1
+ set TWOTO63,L_SCR1
+
+ set ENDFLAG,L_SCR2
+ set INT,L_SCR2
+
+ set ADJN,L_SCR3
+
+############################################
+ global ssin
+ssin:
+ mov.l &0,ADJN(%a6) # SET ADJN TO 0
+ bra.b SINBGN
+
+############################################
+ global scos
+scos:
+ mov.l &1,ADJN(%a6) # SET ADJN TO 1
+
+############################################
+SINBGN:
+#--SAVE FPCR, FP1. CHECK IF |X| IS TOO SMALL OR LARGE
+
+ fmov.x (%a0),%fp0 # LOAD INPUT
+ fmov.x %fp0,X(%a6) # save input at X
+
+# "COMPACTIFY" X
+ mov.l (%a0),%d1 # put exp in hi word
+ mov.w 4(%a0),%d1 # fetch hi(man)
+ and.l &0x7FFFFFFF,%d1 # strip sign
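+# the packed word d1 = [0 | exp | hi(man)] is monotone in |X|, so the range
+# checks below reduce to single unsigned compares. in C (names invented):
+#
+#	uint32_t d = ((uint32_t)exp15 << 16) | hi_man16;
+#	int is_tiny = d <  0x3FD78000;   /* |X| <  2**(-40) */
+#	int is_huge = d >= 0x4004BC7E;   /* |X| >= 15*pi    */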
+
+ cmpi.l %d1,&0x3FD78000 # is |X| >= 2**(-40)?
+ bge.b SOK1 # yes
+ bra.w SINSM # no; input is very small
+
+SOK1:
+ cmp.l %d1,&0x4004BC7E # is |X| < 15 PI?
+ blt.b SINMAIN # yes
+ bra.w SREDUCEX # no; input is very large
+
+#--THIS IS THE USUAL CASE, |X| <= 15 PI.
+#--THE ARGUMENT REDUCTION IS DONE BY TABLE LOOK UP.
+SINMAIN:
+ fmov.x %fp0,%fp1
+ fmul.d TWOBYPI(%pc),%fp1 # X*2/PI
+
+ lea PITBL+0x200(%pc),%a1 # TABLE OF N*PI/2, N = -32,...,32
+
+ fmov.l %fp1,INT(%a6) # CONVERT TO INTEGER
+
+ mov.l INT(%a6),%d1 # make a copy of N
+ asl.l &4,%d1 # N *= 16
+ add.l %d1,%a1 # tbl_addr = a1 + (N*16)
+
+# A1 IS THE ADDRESS OF N*PIBY2
+# ...WHICH IS IN TWO PIECES Y1 & Y2
+ fsub.x (%a1)+,%fp0 # X-Y1
+ fsub.s (%a1),%fp0 # fp0 = R = (X-Y1)-Y2
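+
+# each PITBL entry stores N*pi/2 as a 64-bit extended head Y1 plus a
+# single-precision tail Y2, so the pair of subtractions keeps ~69 bits
+# of pi/2. in C terms (a sketch; pi_tbl is a stand-in for PITBL):
+#
+#	double r = (x - pi_tbl[n].y1) - pi_tbl[n].y2;  /* head cancels exactly */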
+
+SINCONT:
+#--continuation from REDUCEX
+
+#--GET N+ADJN AND SEE IF SIN(R) OR COS(R) IS NEEDED
+ mov.l INT(%a6),%d1
+ add.l ADJN(%a6),%d1 # SEE IF D1 IS ODD OR EVEN
+ ror.l &1,%d1 # D1 WAS ODD IFF D1 IS NEGATIVE
+ cmp.l %d1,&0
+ blt.w COSPOLY
+
+#--LET J BE THE LEAST SIG. BIT OF D1, LET SGN := (-1)**J.
+#--THEN WE RETURN SGN*SIN(R). SGN*SIN(R) IS COMPUTED BY
+#--R' + R'*S*(A1 + S(A2 + S(A3 + S(A4 + ... + SA7)))), WHERE
+#--R' = SGN*R, S=R*R. THIS CAN BE REWRITTEN AS
+#--R' + R'*S*( [A1+T(A3+T(A5+TA7))] + [S(A2+T(A4+TA6))])
+#--WHERE T=S*S.
+#--NOTE THAT A3 THROUGH A7 ARE STORED IN DOUBLE PRECISION
+#--WHILE A1 AND A2 ARE IN DOUBLE-EXTENDED FORMAT.
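+#
+# the bracketed regrouping gives two independent Horner chains that the
+# FPU pipeline can overlap. the same regrouping as a C sketch (sign of
+# r folded into R' as above):
+#
+#	double s = r * r, t = s * s;
+#	double odd  = A1 + t * (A3 + t * (A5 + t * A7));
+#	double even = s * (A2 + t * (A4 + t * A6));
+#	double sin_r = r + (r * s) * (odd + even);
+#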
+SINPOLY:
+ fmovm.x &0x0c,-(%sp) # save fp2/fp3
+
+ fmov.x %fp0,X(%a6) # X IS R
+ fmul.x %fp0,%fp0 # FP0 IS S
+
+ fmov.d SINA7(%pc),%fp3
+ fmov.d SINA6(%pc),%fp2
+
+ fmov.x %fp0,%fp1
+ fmul.x %fp1,%fp1 # FP1 IS T
+
+ ror.l &1,%d1
+ and.l &0x80000000,%d1
+# ...LEAST SIG. BIT OF D1 IN SIGN POSITION
+ eor.l %d1,X(%a6) # X IS NOW R'= SGN*R
+
+ fmul.x %fp1,%fp3 # TA7
+ fmul.x %fp1,%fp2 # TA6
+
+ fadd.d SINA5(%pc),%fp3 # A5+TA7
+ fadd.d SINA4(%pc),%fp2 # A4+TA6
+
+ fmul.x %fp1,%fp3 # T(A5+TA7)
+ fmul.x %fp1,%fp2 # T(A4+TA6)
+
+ fadd.d SINA3(%pc),%fp3 # A3+T(A5+TA7)
+ fadd.x SINA2(%pc),%fp2 # A2+T(A4+TA6)
+
+ fmul.x %fp3,%fp1 # T(A3+T(A5+TA7))
+
+ fmul.x %fp0,%fp2 # S(A2+T(A4+TA6))
+ fadd.x SINA1(%pc),%fp1 # A1+T(A3+T(A5+TA7))
+ fmul.x X(%a6),%fp0 # R'*S
+
+ fadd.x %fp2,%fp1 # [A1+T(A3+T(A5+TA7))]+[S(A2+T(A4+TA6))]
+
+ fmul.x %fp1,%fp0 # SIN(R')-R'
+
+ fmovm.x (%sp)+,&0x30 # restore fp2/fp3
+
+ fmov.l %d0,%fpcr # restore users round mode,prec
+ fadd.x X(%a6),%fp0 # last inst - possible exception set
+ bra t_inx2
+
+#--LET J BE THE LEAST SIG. BIT OF D1, LET SGN := (-1)**J.
+#--THEN WE RETURN SGN*COS(R). SGN*COS(R) IS COMPUTED BY
+#--SGN + S'*(B1 + S(B2 + S(B3 + S(B4 + ... + SB8)))), WHERE
+#--S=R*R AND S'=SGN*S. THIS CAN BE REWRITTEN AS
+#--SGN + S'*([B1+T(B3+T(B5+TB7))] + [S(B2+T(B4+T(B6+TB8)))])
+#--WHERE T=S*S.
+#--NOTE THAT B4 THROUGH B8 ARE STORED IN DOUBLE PRECISION
+#--WHILE B2 AND B3 ARE IN DOUBLE-EXTENDED FORMAT, B1 IS -1/2
+#--AND IS THEREFORE STORED AS SINGLE PRECISION.
+COSPOLY:
+ fmovm.x &0x0c,-(%sp) # save fp2/fp3
+
+ fmul.x %fp0,%fp0 # FP0 IS S
+
+ fmov.d COSB8(%pc),%fp2
+ fmov.d COSB7(%pc),%fp3
+
+ fmov.x %fp0,%fp1
+ fmul.x %fp1,%fp1 # FP1 IS T
+
+ fmov.x %fp0,X(%a6) # X IS S
+ ror.l &1,%d1
+ and.l &0x80000000,%d1
+# ...LEAST SIG. BIT OF D1 IN SIGN POSITION
+
+ fmul.x %fp1,%fp2 # TB8
+
+ eor.l %d1,X(%a6) # X IS NOW S'= SGN*S
+ and.l &0x80000000,%d1
+
+ fmul.x %fp1,%fp3 # TB7
+
+ or.l &0x3F800000,%d1 # D1 IS SGN IN SINGLE
+ mov.l %d1,POSNEG1(%a6)
+
+ fadd.d COSB6(%pc),%fp2 # B6+TB8
+ fadd.d COSB5(%pc),%fp3 # B5+TB7
+
+ fmul.x %fp1,%fp2 # T(B6+TB8)
+ fmul.x %fp1,%fp3 # T(B5+TB7)
+
+ fadd.d COSB4(%pc),%fp2 # B4+T(B6+TB8)
+ fadd.x COSB3(%pc),%fp3 # B3+T(B5+TB7)
+
+ fmul.x %fp1,%fp2 # T(B4+T(B6+TB8))
+ fmul.x %fp3,%fp1 # T(B3+T(B5+TB7))
+
+ fadd.x COSB2(%pc),%fp2 # B2+T(B4+T(B6+TB8))
+ fadd.s COSB1(%pc),%fp1 # B1+T(B3+T(B5+TB7))
+
+ fmul.x %fp2,%fp0 # S(B2+T(B4+T(B6+TB8)))
+
+ fadd.x %fp1,%fp0
+
+ fmul.x X(%a6),%fp0
+
+ fmovm.x (%sp)+,&0x30 # restore fp2/fp3
+
+ fmov.l %d0,%fpcr # restore users round mode,prec
+ fadd.s POSNEG1(%a6),%fp0 # last inst - possible exception set
+ bra t_inx2
+
+##############################################
+
+# SINe: Big OR Small?
+#--IF |X| > 15PI, WE USE THE GENERAL ARGUMENT REDUCTION.
+#--IF |X| < 2**(-40), RETURN X OR 1.
+SINBORS:
+ cmp.l %d1,&0x3FFF8000
+ bgt.l SREDUCEX
+
+SINSM:
+ mov.l ADJN(%a6),%d1
+ cmp.l %d1,&0
+ bgt.b COSTINY
+
+# here, the operation may underflow iff the precision is sgl or dbl.
+# extended denorms are handled through another entry point.
+SINTINY:
+# mov.w &0x0000,XDCARE(%a6) # JUST IN CASE
+
+ fmov.l %d0,%fpcr # restore users round mode,prec
+ mov.b &FMOV_OP,%d1 # last inst is MOVE
+ fmov.x X(%a6),%fp0 # last inst - possible exception set
+ bra t_catch
+
+COSTINY:
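+# adding -(2^-126) (single-precision $80800000) to 1.0 is inexact by
+# construction: cos(tiny) comes back as 1.0 or the value just below it,
+# exactly as the user's rounding mode directs, with inexact posted.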
+ fmov.s &0x3F800000,%fp0 # fp0 = 1.0
+ fmov.l %d0,%fpcr # restore users round mode,prec
+ fadd.s &0x80800000,%fp0 # last inst - possible exception set
+ bra t_pinx2
+
+################################################
+ global ssind
+#--SIN(X) = X FOR DENORMALIZED X
+ssind:
+ bra t_extdnrm
+
+############################################
+ global scosd
+#--COS(X) = 1 FOR DENORMALIZED X
+scosd:
+ fmov.s &0x3F800000,%fp0 # fp0 = 1.0
+ bra t_pinx2
+
+##################################################
+
+ global ssincos
+ssincos:
+#--SET ADJN TO 4
+ mov.l &4,ADJN(%a6)
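+# (ADJN >= 4 also tells SRESTORE, at the bottom of SREDUCEX, to resume
+# at SCCONT rather than SINCONT.)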
+
+ fmov.x (%a0),%fp0 # LOAD INPUT
+ fmov.x %fp0,X(%a6)
+
+ mov.l (%a0),%d1
+ mov.w 4(%a0),%d1
+ and.l &0x7FFFFFFF,%d1 # COMPACTIFY X
+
+ cmp.l %d1,&0x3FD78000 # |X| >= 2**(-40)?
+ bge.b SCOK1
+ bra.w SCSM
+
+SCOK1:
+ cmp.l %d1,&0x4004BC7E # |X| < 15 PI?
+ blt.b SCMAIN
+ bra.w SREDUCEX
+
+
+#--THIS IS THE USUAL CASE, |X| <= 15 PI.
+#--THE ARGUMENT REDUCTION IS DONE BY TABLE LOOK UP.
+SCMAIN:
+ fmov.x %fp0,%fp1
+
+ fmul.d TWOBYPI(%pc),%fp1 # X*2/PI
+
+ lea PITBL+0x200(%pc),%a1 # TABLE OF N*PI/2, N = -32,...,32
+
+ fmov.l %fp1,INT(%a6) # CONVERT TO INTEGER
+
+ mov.l INT(%a6),%d1
+ asl.l &4,%d1
+ add.l %d1,%a1 # ADDRESS OF N*PIBY2, IN Y1, Y2
+
+ fsub.x (%a1)+,%fp0 # X-Y1
+ fsub.s (%a1),%fp0 # FP0 IS R = (X-Y1)-Y2
+
+SCCONT:
+#--continuation point from REDUCEX
+
+ mov.l INT(%a6),%d1
+ ror.l &1,%d1
+ cmp.l %d1,&0 # D1 < 0 IFF N IS ODD
+ bge.w NEVEN
+
+SNODD:
+#--REGISTERS SAVED SO FAR: D0, A0, FP2.
+ fmovm.x &0x04,-(%sp) # save fp2
+
+ fmov.x %fp0,RPRIME(%a6)
+ fmul.x %fp0,%fp0 # FP0 IS S = R*R
+ fmov.d SINA7(%pc),%fp1 # A7
+ fmov.d COSB8(%pc),%fp2 # B8
+ fmul.x %fp0,%fp1 # SA7
+ fmul.x %fp0,%fp2 # SB8
+
+ mov.l %d2,-(%sp)
+ mov.l %d1,%d2
+ ror.l &1,%d2
+ and.l &0x80000000,%d2
+ eor.l %d1,%d2
+ and.l &0x80000000,%d2
+
+ fadd.d SINA6(%pc),%fp1 # A6+SA7
+ fadd.d COSB7(%pc),%fp2 # B7+SB8
+
+ fmul.x %fp0,%fp1 # S(A6+SA7)
+ eor.l %d2,RPRIME(%a6)
+ mov.l (%sp)+,%d2
+ fmul.x %fp0,%fp2 # S(B7+SB8)
+ ror.l &1,%d1
+ and.l &0x80000000,%d1
+ mov.l &0x3F800000,POSNEG1(%a6)
+ eor.l %d1,POSNEG1(%a6)
+
+ fadd.d SINA5(%pc),%fp1 # A5+S(A6+SA7)
+ fadd.d COSB6(%pc),%fp2 # B6+S(B7+SB8)
+
+ fmul.x %fp0,%fp1 # S(A5+S(A6+SA7))
+ fmul.x %fp0,%fp2 # S(B6+S(B7+SB8))
+ fmov.x %fp0,SPRIME(%a6)
+
+ fadd.d SINA4(%pc),%fp1 # A4+S(A5+S(A6+SA7))
+ eor.l %d1,SPRIME(%a6)
+ fadd.d COSB5(%pc),%fp2 # B5+S(B6+S(B7+SB8))
+
+ fmul.x %fp0,%fp1 # S(A4+...)
+ fmul.x %fp0,%fp2 # S(B5+...)
+
+ fadd.d SINA3(%pc),%fp1 # A3+S(A4+...)
+ fadd.d COSB4(%pc),%fp2 # B4+S(B5+...)
+
+ fmul.x %fp0,%fp1 # S(A3+...)
+ fmul.x %fp0,%fp2 # S(B4+...)
+
+ fadd.x SINA2(%pc),%fp1 # A2+S(A3+...)
+ fadd.x COSB3(%pc),%fp2 # B3+S(B4+...)
+
+ fmul.x %fp0,%fp1 # S(A2+...)
+ fmul.x %fp0,%fp2 # S(B3+...)
+
+ fadd.x SINA1(%pc),%fp1 # A1+S(A2+...)
+ fadd.x COSB2(%pc),%fp2 # B2+S(B3+...)
+
+ fmul.x %fp0,%fp1 # S(A1+...)
+ fmul.x %fp2,%fp0 # S(B2+...)
+
+ fmul.x RPRIME(%a6),%fp1 # R'S(A1+...)
+ fadd.s COSB1(%pc),%fp0 # B1+S(B2...)
+ fmul.x SPRIME(%a6),%fp0 # S'(B1+S(B2+...))
+
+ fmovm.x (%sp)+,&0x20 # restore fp2
+
+ fmov.l %d0,%fpcr
+ fadd.x RPRIME(%a6),%fp1 # COS(X)
+ bsr sto_cos # store cosine result
+ fadd.s POSNEG1(%a6),%fp0 # SIN(X)
+ bra t_inx2
+
+NEVEN:
+#--REGISTERS SAVED SO FAR: FP2.
+ fmovm.x &0x04,-(%sp) # save fp2
+
+ fmov.x %fp0,RPRIME(%a6)
+ fmul.x %fp0,%fp0 # FP0 IS S = R*R
+
+ fmov.d COSB8(%pc),%fp1 # B8
+ fmov.d SINA7(%pc),%fp2 # A7
+
+ fmul.x %fp0,%fp1 # SB8
+ fmov.x %fp0,SPRIME(%a6)
+ fmul.x %fp0,%fp2 # SA7
+
+ ror.l &1,%d1
+ and.l &0x80000000,%d1
+
+ fadd.d COSB7(%pc),%fp1 # B7+SB8
+ fadd.d SINA6(%pc),%fp2 # A6+SA7
+
+ eor.l %d1,RPRIME(%a6)
+ eor.l %d1,SPRIME(%a6)
+
+ fmul.x %fp0,%fp1 # S(B7+SB8)
+
+ or.l &0x3F800000,%d1
+ mov.l %d1,POSNEG1(%a6)
+
+ fmul.x %fp0,%fp2 # S(A6+SA7)
+
+ fadd.d COSB6(%pc),%fp1 # B6+S(B7+SB8)
+ fadd.d SINA5(%pc),%fp2 # A5+S(A6+SA7)
+
+ fmul.x %fp0,%fp1 # S(B6+S(B7+SB8))
+ fmul.x %fp0,%fp2 # S(A5+S(A6+SA7))
+
+ fadd.d COSB5(%pc),%fp1 # B5+S(B6+S(B7+SB8))
+ fadd.d SINA4(%pc),%fp2 # A4+S(A5+S(A6+SA7))
+
+ fmul.x %fp0,%fp1 # S(B5+...)
+ fmul.x %fp0,%fp2 # S(A4+...)
+
+ fadd.d COSB4(%pc),%fp1 # B4+S(B5+...)
+ fadd.d SINA3(%pc),%fp2 # A3+S(A4+...)
+
+ fmul.x %fp0,%fp1 # S(B4+...)
+ fmul.x %fp0,%fp2 # S(A3+...)
+
+ fadd.x COSB3(%pc),%fp1 # B3+S(B4+...)
+ fadd.x SINA2(%pc),%fp2 # A2+S(A3+...)
+
+ fmul.x %fp0,%fp1 # S(B3+...)
+ fmul.x %fp0,%fp2 # S(A2+...)
+
+ fadd.x COSB2(%pc),%fp1 # B2+S(B3+...)
+ fadd.x SINA1(%pc),%fp2 # A1+S(A2+...)
+
+ fmul.x %fp0,%fp1 # S(B2+...)
+ fmul.x %fp2,%fp0 # S(A1+...)
+
+ fadd.s COSB1(%pc),%fp1 # B1+S(B2...)
+ fmul.x RPRIME(%a6),%fp0 # R'S(A1+...)
+ fmul.x SPRIME(%a6),%fp1 # S'(B1+S(B2+...))
+
+ fmovm.x (%sp)+,&0x20 # restore fp2
+
+ fmov.l %d0,%fpcr
+ fadd.s POSNEG1(%a6),%fp1 # COS(X)
+ bsr sto_cos # store cosine result
+ fadd.x RPRIME(%a6),%fp0 # SIN(X)
+ bra t_inx2
+
+################################################
+
+SCBORS:
+ cmp.l %d1,&0x3FFF8000
+ bgt.w SREDUCEX
+
+################################################
+
+SCSM:
+# mov.w &0x0000,XDCARE(%a6)
+ fmov.s &0x3F800000,%fp1
+
+ fmov.l %d0,%fpcr
+ fsub.s &0x00800000,%fp1
+ bsr sto_cos # store cosine result
+ fmov.l %fpcr,%d0 # d0 must have fpcr, too
+ mov.b &FMOV_OP,%d1 # last inst is MOVE
+ fmov.x X(%a6),%fp0
+ bra t_catch
+
+##############################################
+
+ global ssincosd
+#--SIN AND COS OF X FOR DENORMALIZED X
+ssincosd:
+ mov.l %d0,-(%sp) # save d0
+ fmov.s &0x3F800000,%fp1
+ bsr sto_cos # store cosine result
+ mov.l (%sp)+,%d0 # restore d0
+ bra t_extdnrm
+
+############################################
+
+#--WHEN REDUCEX IS USED, THE CODE WILL INEVITABLY BE SLOW.
+#--THIS REDUCTION METHOD, HOWEVER, IS MUCH FASTER THAN USING
+#--THE REMAINDER INSTRUCTION WHICH IS NOW IN SOFTWARE.
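+#
+# one C sketch of the loop below (expof()/round_int() and the constant
+# names are invented; the asm keeps the remainder as a 66-bit (R,r) pair
+# and pi/2 in the two pieces Piby2_1/Piby2_2):
+#
+#	for (;;) {
+#	    int K = expof(x), L = (K > 28) ? K - 27 : 0;
+#	    double n = round_int(x * TWO_BY_PI * exp2(-L));  /* N < 2**29 */
+#	    x = (x - n * exp2(L) * PIBY2_HI)     /* head subtraction, exact */
+#	          - n * exp2(L) * PIBY2_LO;      /* tail */
+#	    if (L == 0) break;
+#	}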
+SREDUCEX:
+ fmovm.x &0x3c,-(%sp) # save {fp2-fp5}
+ mov.l %d2,-(%sp) # save d2
+ fmov.s &0x00000000,%fp1 # fp1 = 0
+
+#--If compact form of abs(arg) in d1=$7ffeffff, argument is so large that
+#--there is a danger of unwanted overflow in first LOOP iteration. In this
+#--case, reduce argument by one remainder step to make subsequent reduction
+#--safe.
+ cmp.l %d1,&0x7ffeffff # is arg dangerously large?
+ bne.b SLOOP # no
+
+# yes; create 2**16383*PI/2
+ mov.w &0x7ffe,FP_SCR0_EX(%a6)
+ mov.l &0xc90fdaa2,FP_SCR0_HI(%a6)
+ clr.l FP_SCR0_LO(%a6)
+
+# create low half of 2**16383*PI/2 at FP_SCR1
+ mov.w &0x7fdc,FP_SCR1_EX(%a6)
+ mov.l &0x85a308d3,FP_SCR1_HI(%a6)
+ clr.l FP_SCR1_LO(%a6)
+
+ ftest.x %fp0 # test sign of argument
+ fblt.w sred_neg
+
+ or.b &0x80,FP_SCR0_EX(%a6) # positive arg
+ or.b &0x80,FP_SCR1_EX(%a6)
+sred_neg:
+ fadd.x FP_SCR0(%a6),%fp0 # high part of reduction is exact
+ fmov.x %fp0,%fp1 # save high result in fp1
+ fadd.x FP_SCR1(%a6),%fp0 # low part of reduction
+ fsub.x %fp0,%fp1 # determine low component of result
+ fadd.x FP_SCR1(%a6),%fp1 # fp0/fp1 are reduced argument.
+
+#--ON ENTRY, FP0 IS X, ON RETURN, FP0 IS X REM PI/2, |X| <= PI/4.
+#--integer quotient will be stored in N
+#--Intermediate remainder is 66 bits long; (R,r) in (FP0,FP1)
+SLOOP:
+ fmov.x %fp0,INARG(%a6) # +-2**K * F, 1 <= F < 2
+ mov.w INARG(%a6),%d1
+ mov.l %d1,%a1 # save a copy of D1
+ and.l &0x00007FFF,%d1
+ sub.l &0x00003FFF,%d1 # d1 = K
+ cmp.l %d1,&28
+ ble.b SLASTLOOP
+SCONTLOOP:
+ sub.l &27,%d1 # d1 = L := K-27
+ mov.b &0,ENDFLAG(%a6)
+ bra.b SWORK
+SLASTLOOP:
+ clr.l %d1 # d1 = L := 0
+ mov.b &1,ENDFLAG(%a6)
+
+SWORK:
+#--FIND THE REMAINDER OF (R,r) W.R.T. 2**L * (PI/2). L IS SO CHOSEN
+#--THAT INT( X * (2/PI) / 2**(L) ) < 2**29.
+
+#--CREATE 2**(-L) * (2/PI), SIGN(INARG)*2**(63),
+#--2**L * (PIby2_1), 2**L * (PIby2_2)
+
+ mov.l &0x00003FFE,%d2 # BIASED EXP OF 2/PI
+ sub.l %d1,%d2 # BIASED EXP OF 2**(-L)*(2/PI)
+
+ mov.l &0xA2F9836E,FP_SCR0_HI(%a6)
+ mov.l &0x4E44152A,FP_SCR0_LO(%a6)
+ mov.w %d2,FP_SCR0_EX(%a6) # FP_SCR0 = 2**(-L)*(2/PI)
+
+ fmov.x %fp0,%fp2
+ fmul.x FP_SCR0(%a6),%fp2 # fp2 = X * 2**(-L)*(2/PI)
+
+#--WE MUST NOW FIND INT(FP2). SINCE WE NEED THIS VALUE IN
+#--FLOATING POINT FORMAT, THE TWO FMOVE'S FMOVE.L FP <--> N
+#--WILL BE TOO INEFFICIENT. THE WAY AROUND IT IS THAT
+#--(SIGN(INARG)*2**63 + FP2) - SIGN(INARG)*2**63 WILL GIVE
+#--US THE DESIRED VALUE IN FLOATING POINT.
+ mov.l %a1,%d2
+ swap %d2
+ and.l &0x80000000,%d2
+ or.l &0x5F000000,%d2 # d2 = SIGN(INARG)*2**63 IN SGL
+ mov.l %d2,TWOTO63(%a6)
+ fadd.s TWOTO63(%a6),%fp2 # THE FRACTIONAL PART OF FP2 IS ROUNDED
+ fsub.s TWOTO63(%a6),%fp2 # fp2 = N
+# fint.x %fp2
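+#
+# in C terms the trick reads (a sketch; relies on round-to-nearest being
+# in effect, as set at the top of the handler, and no algebraic reordering):
+#
+#	volatile double big = copysign(0x1p63, f);  /* SIGN(INARG)*2**63 */
+#	double n = (f + big) - big;                 /* f rounded to an integer */
+#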
+
+#--CREATING 2**(L)*Piby2_1 and 2**(L)*Piby2_2
+ mov.l %d1,%d2 # d2 = L
+
+ add.l &0x00003FFF,%d2 # BIASED EXP OF 2**L * (PI/2)
+ mov.w %d2,FP_SCR0_EX(%a6)
+ mov.l &0xC90FDAA2,FP_SCR0_HI(%a6)
+ clr.l FP_SCR0_LO(%a6) # FP_SCR0 = 2**(L) * Piby2_1
+
+ add.l &0x00003FDD,%d1
+ mov.w %d1,FP_SCR1_EX(%a6)
+ mov.l &0x85A308D3,FP_SCR1_HI(%a6)
+ clr.l FP_SCR1_LO(%a6) # FP_SCR1 = 2**(L) * Piby2_2
+
+ mov.b ENDFLAG(%a6),%d1
+
+#--We are now ready to perform (R+r) - N*P1 - N*P2, P1 = 2**(L) * Piby2_1 and
+#--P2 = 2**(L) * Piby2_2
+ fmov.x %fp2,%fp4 # fp4 = N
+ fmul.x FP_SCR0(%a6),%fp4 # fp4 = W = N*P1
+ fmov.x %fp2,%fp5 # fp5 = N
+ fmul.x FP_SCR1(%a6),%fp5 # fp5 = w = N*P2
+ fmov.x %fp4,%fp3 # fp3 = W = N*P1
+
+#--we want P+p = W+w but |p| <= half ulp of P
+#--Then, we need to compute A := R-P and a := r-p
+ fadd.x %fp5,%fp3 # fp3 = P
+ fsub.x %fp3,%fp4 # fp4 = W-P
+
+ fsub.x %fp3,%fp0 # fp0 = A := R - P
+ fadd.x %fp5,%fp4 # fp4 = p = (W-P)+w
+
+ fmov.x %fp0,%fp3 # fp3 = A
+ fsub.x %fp4,%fp1 # fp1 = a := r - p
+
+#--Now we need to normalize (A,a) to "new (R,r)" where R+r = A+a but
+#--|r| <= half ulp of R.
+ fadd.x %fp1,%fp0 # fp0 = R := A+a
+#--No need to calculate r if this is the last loop
+ cmp.b %d1,&0
+ bgt.w SRESTORE
+
+#--Need to calculate r
+ fsub.x %fp0,%fp3 # fp3 = A-R
+ fadd.x %fp3,%fp1 # fp1 = r := (A-R)+a
+ bra.w SLOOP
+
+SRESTORE:
+ fmov.l %fp2,INT(%a6)
+ mov.l (%sp)+,%d2 # restore d2
+ fmovm.x (%sp)+,&0x3c # restore {fp2-fp5}
+
+ mov.l ADJN(%a6),%d1
+ cmp.l %d1,&4
+
+ blt.w SINCONT
+ bra.w SCCONT
+
+#########################################################################
+# stan(): computes the tangent of a normalized input #
+# stand(): computes the tangent of a denormalized input #
+# #
+# INPUT *************************************************************** #
+# a0 = pointer to extended precision input #
+# d0 = round precision,mode #
+# #
+# OUTPUT ************************************************************** #
+# fp0 = tan(X) #
+# #
+# ACCURACY and MONOTONICITY ******************************************* #
+# The returned result is within 3 ulp in 64 significant bits, i.e. #
+# within 0.5001 ulp to 53 bits if the result is subsequently #
+# rounded to double precision. The result is provably monotonic #
+# in double precision. #
+# #
+# ALGORITHM *********************************************************** #
+# #
+# 1. If |X| >= 15Pi or |X| < 2**(-40), go to 6. #
+# #
+# 2. Decompose X as X = N(Pi/2) + r where |r| <= Pi/4. Let #
+# k = N mod 2, so in particular, k = 0 or 1. #
+# #
+# 3. If k is odd, go to 5. #
+# #
+# 4. (k is even) Tan(X) = tan(r) and tan(r) is approximated by a #
+# rational function U/V where #
+# U = r + r*s*(P1 + s*(P2 + s*P3)), and #
+# V = 1 + s*(Q1 + s*(Q2 + s*(Q3 + s*Q4))), s = r*r. #
+# Exit. #
+# #
+# 5. (k is odd) Tan(X) = -cot(r). Since tan(r) is approximated by #
+# a rational function U/V where #
+# U = r + r*s*(P1 + s*(P2 + s*P3)), and #
+# V = 1 + s*(Q1 + s*(Q2 + s*(Q3 + s*Q4))), s = r*r, #
+# -Cot(r) = -V/U. Exit. #
+# #
+# 6. If |X| > 1, go to 8. #
+# #
+# 7. (|X|<2**(-40)) Tan(X) = X. Exit. #
+# #
+# 8. Overwrite X by X := X rem 2Pi. Now that |X| <= Pi, go back #
+# to 2. #
+# #
+#########################################################################
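+#--A C-level sketch of the algorithm above (illustrative only;
+#--reduce_pio2(), poly_p() and poly_q() are hypothetical stand-ins for
+#--the argument reduction and the P/Q polynomials coded below):
+#
+#	double tan_sketch(double x) {
+#	    if (fabs(x) < 0x1p-40)               /* step 7: tan(x) ~ x */
+#	        return x;
+#	    long n;
+#	    double r = reduce_pio2(x, &n);       /* x = n*(pi/2) + r   */
+#	    double s = r*r;
+#	    double u = r + r*s*poly_p(s);        /* numerator U        */
+#	    double v = 1.0 + s*poly_q(s);        /* denominator V      */
+#	    return (n & 1) ? -v/u : u/v;         /* -cot(r) if n odd   */
+#	}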
+
+TANQ4:
+ long 0x3EA0B759,0xF50F8688
+TANP3:
+ long 0xBEF2BAA5,0xA8924F04
+
+TANQ3:
+ long 0xBF346F59,0xB39BA65F,0x00000000,0x00000000
+
+TANP2:
+ long 0x3FF60000,0xE073D3FC,0x199C4A00,0x00000000
+
+TANQ2:
+ long 0x3FF90000,0xD23CD684,0x15D95FA1,0x00000000
+
+TANP1:
+ long 0xBFFC0000,0x8895A6C5,0xFB423BCA,0x00000000
+
+TANQ1:
+ long 0xBFFD0000,0xEEF57E0D,0xA84BC8CE,0x00000000
+
+INVTWOPI:
+ long 0x3FFC0000,0xA2F9836E,0x4E44152A,0x00000000
+
+TWOPI1:
+ long 0x40010000,0xC90FDAA2,0x00000000,0x00000000
+TWOPI2:
+ long 0x3FDF0000,0x85A308D4,0x00000000,0x00000000
+
+#--N*PI/2, -32 <= N <= 32, IN A LEADING TERM IN EXT. AND TRAILING
+#--TERM IN SGL. NOTE THAT PI IS 64-BIT LONG, THUS N*PI/2 IS AT
+#--MOST 69 BITS LONG.
+# global PITBL
+PITBL:
+ long 0xC0040000,0xC90FDAA2,0x2168C235,0x21800000
+ long 0xC0040000,0xC2C75BCD,0x105D7C23,0xA0D00000
+ long 0xC0040000,0xBC7EDCF7,0xFF523611,0xA1E80000
+ long 0xC0040000,0xB6365E22,0xEE46F000,0x21480000
+ long 0xC0040000,0xAFEDDF4D,0xDD3BA9EE,0xA1200000
+ long 0xC0040000,0xA9A56078,0xCC3063DD,0x21FC0000
+ long 0xC0040000,0xA35CE1A3,0xBB251DCB,0x21100000
+ long 0xC0040000,0x9D1462CE,0xAA19D7B9,0xA1580000
+ long 0xC0040000,0x96CBE3F9,0x990E91A8,0x21E00000
+ long 0xC0040000,0x90836524,0x88034B96,0x20B00000
+ long 0xC0040000,0x8A3AE64F,0x76F80584,0xA1880000
+ long 0xC0040000,0x83F2677A,0x65ECBF73,0x21C40000
+ long 0xC0030000,0xFB53D14A,0xA9C2F2C2,0x20000000
+ long 0xC0030000,0xEEC2D3A0,0x87AC669F,0x21380000
+ long 0xC0030000,0xE231D5F6,0x6595DA7B,0xA1300000
+ long 0xC0030000,0xD5A0D84C,0x437F4E58,0x9FC00000
+ long 0xC0030000,0xC90FDAA2,0x2168C235,0x21000000
+ long 0xC0030000,0xBC7EDCF7,0xFF523611,0xA1680000
+ long 0xC0030000,0xAFEDDF4D,0xDD3BA9EE,0xA0A00000
+ long 0xC0030000,0xA35CE1A3,0xBB251DCB,0x20900000
+ long 0xC0030000,0x96CBE3F9,0x990E91A8,0x21600000
+ long 0xC0030000,0x8A3AE64F,0x76F80584,0xA1080000
+ long 0xC0020000,0xFB53D14A,0xA9C2F2C2,0x1F800000
+ long 0xC0020000,0xE231D5F6,0x6595DA7B,0xA0B00000
+ long 0xC0020000,0xC90FDAA2,0x2168C235,0x20800000
+ long 0xC0020000,0xAFEDDF4D,0xDD3BA9EE,0xA0200000
+ long 0xC0020000,0x96CBE3F9,0x990E91A8,0x20E00000
+ long 0xC0010000,0xFB53D14A,0xA9C2F2C2,0x1F000000
+ long 0xC0010000,0xC90FDAA2,0x2168C235,0x20000000
+ long 0xC0010000,0x96CBE3F9,0x990E91A8,0x20600000
+ long 0xC0000000,0xC90FDAA2,0x2168C235,0x1F800000
+ long 0xBFFF0000,0xC90FDAA2,0x2168C235,0x1F000000
+ long 0x00000000,0x00000000,0x00000000,0x00000000
+ long 0x3FFF0000,0xC90FDAA2,0x2168C235,0x9F000000
+ long 0x40000000,0xC90FDAA2,0x2168C235,0x9F800000
+ long 0x40010000,0x96CBE3F9,0x990E91A8,0xA0600000
+ long 0x40010000,0xC90FDAA2,0x2168C235,0xA0000000
+ long 0x40010000,0xFB53D14A,0xA9C2F2C2,0x9F000000
+ long 0x40020000,0x96CBE3F9,0x990E91A8,0xA0E00000
+ long 0x40020000,0xAFEDDF4D,0xDD3BA9EE,0x20200000
+ long 0x40020000,0xC90FDAA2,0x2168C235,0xA0800000
+ long 0x40020000,0xE231D5F6,0x6595DA7B,0x20B00000
+ long 0x40020000,0xFB53D14A,0xA9C2F2C2,0x9F800000
+ long 0x40030000,0x8A3AE64F,0x76F80584,0x21080000
+ long 0x40030000,0x96CBE3F9,0x990E91A8,0xA1600000
+ long 0x40030000,0xA35CE1A3,0xBB251DCB,0xA0900000
+ long 0x40030000,0xAFEDDF4D,0xDD3BA9EE,0x20A00000
+ long 0x40030000,0xBC7EDCF7,0xFF523611,0x21680000
+ long 0x40030000,0xC90FDAA2,0x2168C235,0xA1000000
+ long 0x40030000,0xD5A0D84C,0x437F4E58,0x1FC00000
+ long 0x40030000,0xE231D5F6,0x6595DA7B,0x21300000
+ long 0x40030000,0xEEC2D3A0,0x87AC669F,0xA1380000
+ long 0x40030000,0xFB53D14A,0xA9C2F2C2,0xA0000000
+ long 0x40040000,0x83F2677A,0x65ECBF73,0xA1C40000
+ long 0x40040000,0x8A3AE64F,0x76F80584,0x21880000
+ long 0x40040000,0x90836524,0x88034B96,0xA0B00000
+ long 0x40040000,0x96CBE3F9,0x990E91A8,0xA1E00000
+ long 0x40040000,0x9D1462CE,0xAA19D7B9,0x21580000
+ long 0x40040000,0xA35CE1A3,0xBB251DCB,0xA1100000
+ long 0x40040000,0xA9A56078,0xCC3063DD,0xA1FC0000
+ long 0x40040000,0xAFEDDF4D,0xDD3BA9EE,0x21200000
+ long 0x40040000,0xB6365E22,0xEE46F000,0xA1480000
+ long 0x40040000,0xBC7EDCF7,0xFF523611,0x21E80000
+ long 0x40040000,0xC2C75BCD,0x105D7C23,0x20D00000
+ long 0x40040000,0xC90FDAA2,0x2168C235,0xA1800000
+
+ set INARG,FP_SCR0
+
+ set TWOTO63,L_SCR1
+ set INT,L_SCR1
+ set ENDFLAG,L_SCR2
+
+ global stan
+stan:
+ fmov.x (%a0),%fp0 # LOAD INPUT
+
+ mov.l (%a0),%d1
+ mov.w 4(%a0),%d1
+ and.l &0x7FFFFFFF,%d1
+
+ cmp.l %d1,&0x3FD78000 # |X| >= 2**(-40)?
+ bge.b TANOK1
+ bra.w TANSM
+TANOK1:
+ cmp.l %d1,&0x4004BC7E # |X| < 15 PI?
+ blt.b TANMAIN
+ bra.w REDUCEX
+
+TANMAIN:
+#--THIS IS THE USUAL CASE, |X| <= 15 PI.
+#--THE ARGUMENT REDUCTION IS DONE BY TABLE LOOK UP.
+ fmov.x %fp0,%fp1
+ fmul.d TWOBYPI(%pc),%fp1 # X*2/PI
+
+ lea.l PITBL+0x200(%pc),%a1 # TABLE OF N*PI/2, N = -32,...,32
+
+ fmov.l %fp1,%d1 # CONVERT TO INTEGER
+
+ asl.l &4,%d1
+ add.l %d1,%a1 # ADDRESS N*PIBY2 IN Y1, Y2
+
+ fsub.x (%a1)+,%fp0 # X-Y1
+
+ fsub.s (%a1),%fp0 # FP0 IS R = (X-Y1)-Y2
+
+ ror.l &5,%d1
+ and.l &0x80000000,%d1 # D1 WAS ODD IFF D1 < 0
+
+TANCONT:
+ fmovm.x &0x0c,-(%sp) # save fp2,fp3
+
+ cmp.l %d1,&0
+ blt.w NODD
+
+ fmov.x %fp0,%fp1
+ fmul.x %fp1,%fp1 # S = R*R
+
+ fmov.d TANQ4(%pc),%fp3
+ fmov.d TANP3(%pc),%fp2
+
+ fmul.x %fp1,%fp3 # SQ4
+ fmul.x %fp1,%fp2 # SP3
+
+ fadd.d TANQ3(%pc),%fp3 # Q3+SQ4
+ fadd.x TANP2(%pc),%fp2 # P2+SP3
+
+ fmul.x %fp1,%fp3 # S(Q3+SQ4)
+ fmul.x %fp1,%fp2 # S(P2+SP3)
+
+ fadd.x TANQ2(%pc),%fp3 # Q2+S(Q3+SQ4)
+ fadd.x TANP1(%pc),%fp2 # P1+S(P2+SP3)
+
+ fmul.x %fp1,%fp3 # S(Q2+S(Q3+SQ4))
+ fmul.x %fp1,%fp2 # S(P1+S(P2+SP3))
+
+ fadd.x TANQ1(%pc),%fp3 # Q1+S(Q2+S(Q3+SQ4))
+ fmul.x %fp0,%fp2 # RS(P1+S(P2+SP3))
+
+ fmul.x %fp3,%fp1 # S(Q1+S(Q2+S(Q3+SQ4)))
+
+ fadd.x %fp2,%fp0 # R+RS(P1+S(P2+SP3))
+
+ fadd.s &0x3F800000,%fp1 # 1+S(Q1+...)
+
+ fmovm.x (%sp)+,&0x30 # restore fp2,fp3
+
+ fmov.l %d0,%fpcr # restore users round mode,prec
+ fdiv.x %fp1,%fp0 # last inst - possible exception set
+ bra t_inx2
+
+NODD:
+ fmov.x %fp0,%fp1
+ fmul.x %fp0,%fp0 # S = R*R
+
+ fmov.d TANQ4(%pc),%fp3
+ fmov.d TANP3(%pc),%fp2
+
+ fmul.x %fp0,%fp3 # SQ4
+ fmul.x %fp0,%fp2 # SP3
+
+ fadd.d TANQ3(%pc),%fp3 # Q3+SQ4
+ fadd.x TANP2(%pc),%fp2 # P2+SP3
+
+ fmul.x %fp0,%fp3 # S(Q3+SQ4)
+ fmul.x %fp0,%fp2 # S(P2+SP3)
+
+ fadd.x TANQ2(%pc),%fp3 # Q2+S(Q3+SQ4)
+ fadd.x TANP1(%pc),%fp2 # P1+S(P2+SP3)
+
+ fmul.x %fp0,%fp3 # S(Q2+S(Q3+SQ4))
+ fmul.x %fp0,%fp2 # S(P1+S(P2+SP3))
+
+ fadd.x TANQ1(%pc),%fp3 # Q1+S(Q2+S(Q3+SQ4))
+ fmul.x %fp1,%fp2 # RS(P1+S(P2+SP3))
+
+ fmul.x %fp3,%fp0 # S(Q1+S(Q2+S(Q3+SQ4)))
+
+ fadd.x %fp2,%fp1 # R+RS(P1+S(P2+SP3))
+ fadd.s &0x3F800000,%fp0 # 1+S(Q1+...)
+
+ fmovm.x (%sp)+,&0x30 # restore fp2,fp3
+
+ fmov.x %fp1,-(%sp)
+ eor.l &0x80000000,(%sp)
+
+ fmov.l %d0,%fpcr # restore users round mode,prec
+ fdiv.x (%sp)+,%fp0 # last inst - possible exception set
+ bra t_inx2
+
+TANBORS:
+#--IF |X| > 15PI, WE USE THE GENERAL ARGUMENT REDUCTION.
+#--IF |X| < 2**(-40), RETURN X.
+ cmp.l %d1,&0x3FFF8000
+ bgt.b REDUCEX
+
+TANSM:
+ fmov.x %fp0,-(%sp)
+ fmov.l %d0,%fpcr # restore users round mode,prec
+ mov.b &FMOV_OP,%d1 # last inst is MOVE
+ fmov.x (%sp)+,%fp0 # last inst - possible exception set
+ bra t_catch
+
+ global stand
+#--TAN(X) = X FOR DENORMALIZED X
+stand:
+ bra t_extdnrm
+
+#--WHEN REDUCEX IS USED, THE CODE WILL INEVITABLY BE SLOW.
+#--THIS REDUCTION METHOD, HOWEVER, IS MUCH FASTER THAN USING
+#--THE REMAINDER INSTRUCTION WHICH IS NOW IN SOFTWARE.
+REDUCEX:
+ fmovm.x &0x3c,-(%sp) # save {fp2-fp5}
+ mov.l %d2,-(%sp) # save d2
+ fmov.s &0x00000000,%fp1 # fp1 = 0
+
+#--If compact form of abs(arg) in d1=$7ffeffff, argument is so large that
+#--there is a danger of unwanted overflow in first LOOP iteration. In this
+#--case, reduce argument by one remainder step to make subsequent reduction
+#--safe.
+ cmp.l %d1,&0x7ffeffff # is arg dangerously large?
+ bne.b LOOP # no
+
+# yes; create 2**16383*PI/2
+ mov.w &0x7ffe,FP_SCR0_EX(%a6)
+ mov.l &0xc90fdaa2,FP_SCR0_HI(%a6)
+ clr.l FP_SCR0_LO(%a6)
+
+# create low half of 2**16383*PI/2 at FP_SCR1
+ mov.w &0x7fdc,FP_SCR1_EX(%a6)
+ mov.l &0x85a308d3,FP_SCR1_HI(%a6)
+ clr.l FP_SCR1_LO(%a6)
+
+ ftest.x %fp0 # test sign of argument
+ fblt.w red_neg
+
+ or.b &0x80,FP_SCR0_EX(%a6) # positive arg
+ or.b &0x80,FP_SCR1_EX(%a6)
+red_neg:
+ fadd.x FP_SCR0(%a6),%fp0 # high part of reduction is exact
+ fmov.x %fp0,%fp1 # save high result in fp1
+ fadd.x FP_SCR1(%a6),%fp0 # low part of reduction
+ fsub.x %fp0,%fp1 # determine low component of result
+ fadd.x FP_SCR1(%a6),%fp1 # fp0/fp1 are reduced argument.
+
+#--ON ENTRY, FP0 IS X, ON RETURN, FP0 IS X REM PI/2, |X| <= PI/4.
+#--integer quotient will be stored in N
+#--Intermediate remainder is 66-bit long; (R,r) in (FP0,FP1)
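+#--One pass of the loop below does compensated ("double-double")
+#--arithmetic on the pair (R,r); in C terms the step is (sketch only,
+#--p1/p2 standing for the split 2**L * pi/2 built in FP_SCR0/FP_SCR1):
+#
+#	W = n*p1;  w = n*p2;          /* p1 + p2 ~ 2^L * pi/2         */
+#	P = W + w; p = (W - P) + w;   /* P+p == W+w, |p| <= ulp(P)/2  */
+#	A = R - P; a = r - p;
+#	R = A + a; r = (A - R) + a;   /* renormalize so |r| is tiny   */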
+LOOP:
+ fmov.x %fp0,INARG(%a6) # +-2**K * F, 1 <= F < 2
+ mov.w INARG(%a6),%d1
+ mov.l %d1,%a1 # save a copy of D1
+ and.l &0x00007FFF,%d1
+ sub.l &0x00003FFF,%d1 # d1 = K
+ cmp.l %d1,&28
+ ble.b LASTLOOP
+CONTLOOP:
+ sub.l &27,%d1 # d1 = L := K-27
+ mov.b &0,ENDFLAG(%a6)
+ bra.b WORK
+LASTLOOP:
+ clr.l %d1 # d1 = L := 0
+ mov.b &1,ENDFLAG(%a6)
+
+WORK:
+#--FIND THE REMAINDER OF (R,r) W.R.T. 2**L * (PI/2). L IS SO CHOSEN
+#--THAT INT( X * (2/PI) / 2**(L) ) < 2**29.
+
+#--CREATE 2**(-L) * (2/PI), SIGN(INARG)*2**(63),
+#--2**L * (PIby2_1), 2**L * (PIby2_2)
+
+ mov.l &0x00003FFE,%d2 # BIASED EXP OF 2/PI
+ sub.l %d1,%d2 # BIASED EXP OF 2**(-L)*(2/PI)
+
+ mov.l &0xA2F9836E,FP_SCR0_HI(%a6)
+ mov.l &0x4E44152A,FP_SCR0_LO(%a6)
+ mov.w %d2,FP_SCR0_EX(%a6) # FP_SCR0 = 2**(-L)*(2/PI)
+
+ fmov.x %fp0,%fp2
+ fmul.x FP_SCR0(%a6),%fp2 # fp2 = X * 2**(-L)*(2/PI)
+
+#--WE MUST NOW FIND INT(FP2). SINCE WE NEED THIS VALUE IN
+#--FLOATING POINT FORMAT, THE TWO FMOVE'S FMOVE.L FP <--> N
+#--WILL BE TOO INEFFICIENT. THE WAY AROUND IT IS THAT
+#--(SIGN(INARG)*2**63 + FP2) - SIGN(INARG)*2**63 WILL GIVE
+#--US THE DESIRED VALUE IN FLOATING POINT.
+ mov.l %a1,%d2
+ swap %d2
+ and.l &0x80000000,%d2
+ or.l &0x5F000000,%d2 # d2 = SIGN(INARG)*2**63 IN SGL
+ mov.l %d2,TWOTO63(%a6)
+ fadd.s TWOTO63(%a6),%fp2 # THE FRACTIONAL PART OF FP2 IS ROUNDED
+ fsub.s TWOTO63(%a6),%fp2 # fp2 = N
+# fintrz.x %fp2,%fp2
+
+#--CREATING 2**(L)*Piby2_1 and 2**(L)*Piby2_2
+ mov.l %d1,%d2 # d2 = L
+
+ add.l &0x00003FFF,%d2 # BIASED EXP OF 2**L * (PI/2)
+ mov.w %d2,FP_SCR0_EX(%a6)
+ mov.l &0xC90FDAA2,FP_SCR0_HI(%a6)
+ clr.l FP_SCR0_LO(%a6) # FP_SCR0 = 2**(L) * Piby2_1
+
+ add.l &0x00003FDD,%d1
+ mov.w %d1,FP_SCR1_EX(%a6)
+ mov.l &0x85A308D3,FP_SCR1_HI(%a6)
+ clr.l FP_SCR1_LO(%a6) # FP_SCR1 = 2**(L) * Piby2_2
+
+ mov.b ENDFLAG(%a6),%d1
+
+#--We are now ready to perform (R+r) - N*P1 - N*P2, P1 = 2**(L) * Piby2_1 and
+#--P2 = 2**(L) * Piby2_2
+ fmov.x %fp2,%fp4 # fp4 = N
+ fmul.x FP_SCR0(%a6),%fp4 # fp4 = W = N*P1
+ fmov.x %fp2,%fp5 # fp5 = N
+ fmul.x FP_SCR1(%a6),%fp5 # fp5 = w = N*P2
+ fmov.x %fp4,%fp3 # fp3 = W = N*P1
+
+#--we want P+p = W+w but |p| <= half ulp of P
+#--Then, we need to compute A := R-P and a := r-p
+ fadd.x %fp5,%fp3 # fp3 = P
+ fsub.x %fp3,%fp4 # fp4 = W-P
+
+ fsub.x %fp3,%fp0 # fp0 = A := R - P
+ fadd.x %fp5,%fp4 # fp4 = p = (W-P)+w
+
+ fmov.x %fp0,%fp3 # fp3 = A
+ fsub.x %fp4,%fp1 # fp1 = a := r - p
+
+#--Now we need to normalize (A,a) to "new (R,r)" where R+r = A+a but
+#--|r| <= half ulp of R.
+ fadd.x %fp1,%fp0 # fp0 = R := A+a
+#--No need to calculate r if this is the last loop
+ cmp.b %d1,&0
+ bgt.w RESTORE
+
+#--Need to calculate r
+ fsub.x %fp0,%fp3 # fp3 = A-R
+ fadd.x %fp3,%fp1 # fp1 = r := (A-R)+a
+ bra.w LOOP
+
+RESTORE:
+ fmov.l %fp2,INT(%a6)
+ mov.l (%sp)+,%d2 # restore d2
+ fmovm.x (%sp)+,&0x3c # restore {fp2-fp5}
+
+ mov.l INT(%a6),%d1
+ ror.l &1,%d1
+
+ bra.w TANCONT
+
+#########################################################################
+# satan(): computes the arctangent of a normalized number #
+# satand(): computes the arctangent of a denormalized number #
+# #
+# INPUT *************************************************************** #
+# a0 = pointer to extended precision input #
+# d0 = round precision,mode #
+# #
+# OUTPUT ************************************************************** #
+# fp0 = arctan(X) #
+# #
+# ACCURACY and MONOTONICITY ******************************************* #
+# The returned result is within 2 ulps in 64 significant bits, #
+# i.e. within 0.5001 ulp to 53 bits if the result is subsequently #
+# rounded to double precision. The result is provably monotonic #
+# in double precision. #
+# #
+# ALGORITHM *********************************************************** #
+# Step 1. If |X| >= 16 or |X| < 1/16, go to Step 5. #
+# #
+# Step 2. Let X = sgn * 2**k * 1.xxxxxxxx...x. #
+# Note that k = -4, -3,..., or 3. #
+# Define F = sgn * 2**k * 1.xxxx1, i.e. the first 5 #
+# significant bits of X with a bit-1 attached at the 6-th #
+# bit position. Define u to be u = (X-F) / (1 + X*F). #
+# #
+# Step 3. Approximate arctan(u) by a polynomial poly. #
+# #
+# Step 4. Return arctan(F) + poly, arctan(F) is fetched from a #
+# table of values calculated beforehand. Exit. #
+# #
+# Step 5. If |X| >= 16, go to Step 7. #
+# #
+# Step 6. Approximate arctan(X) by an odd polynomial in X. Exit. #
+# #
+# Step 7. Define X' = -1/X. Approximate arctan(X') by an odd #
+# polynomial in X'. #
+# Arctan(X) = sign(X)*Pi/2 + arctan(X'). Exit. #
+# #
+#########################################################################
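+#--A C-level sketch of the table method for the main range
+#--(illustrative only; make_f(), atan_f() and atan_poly() are
+#--hypothetical stand-ins for the bit manipulation, the ATANTBL
+#--lookup, and the A1..A3 polynomial coded below):
+#
+#	double atan_sketch(double x) {           /* 1/16 <= |x| < 16  */
+#	    double f = make_f(x);                /* expo + 5 bits of x,
+#	                                            6th bit forced on */
+#	    double u = (x - f) / (1.0 + x*f);    /* small since f ~ x */
+#	    return atan_f(f) + atan_poly(u);     /* table + short poly */
+#	}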
+
+ATANA3: long 0xBFF6687E,0x314987D8
+ATANA2: long 0x4002AC69,0x34A26DB3
+ATANA1: long 0xBFC2476F,0x4E1DA28E
+
+ATANB6: long 0x3FB34444,0x7F876989
+ATANB5: long 0xBFB744EE,0x7FAF45DB
+ATANB4: long 0x3FBC71C6,0x46940220
+ATANB3: long 0xBFC24924,0x921872F9
+ATANB2: long 0x3FC99999,0x99998FA9
+ATANB1: long 0xBFD55555,0x55555555
+
+ATANC5: long 0xBFB70BF3,0x98539E6A
+ATANC4: long 0x3FBC7187,0x962D1D7D
+ATANC3: long 0xBFC24924,0x827107B8
+ATANC2: long 0x3FC99999,0x9996263E
+ATANC1: long 0xBFD55555,0x55555536
+
+PPIBY2: long 0x3FFF0000,0xC90FDAA2,0x2168C235,0x00000000
+NPIBY2: long 0xBFFF0000,0xC90FDAA2,0x2168C235,0x00000000
+
+PTINY: long 0x00010000,0x80000000,0x00000000,0x00000000
+NTINY: long 0x80010000,0x80000000,0x00000000,0x00000000
+
+ATANTBL:
+ long 0x3FFB0000,0x83D152C5,0x060B7A51,0x00000000
+ long 0x3FFB0000,0x8BC85445,0x65498B8B,0x00000000
+ long 0x3FFB0000,0x93BE4060,0x17626B0D,0x00000000
+ long 0x3FFB0000,0x9BB3078D,0x35AEC202,0x00000000
+ long 0x3FFB0000,0xA3A69A52,0x5DDCE7DE,0x00000000
+ long 0x3FFB0000,0xAB98E943,0x62765619,0x00000000
+ long 0x3FFB0000,0xB389E502,0xF9C59862,0x00000000
+ long 0x3FFB0000,0xBB797E43,0x6B09E6FB,0x00000000
+ long 0x3FFB0000,0xC367A5C7,0x39E5F446,0x00000000
+ long 0x3FFB0000,0xCB544C61,0xCFF7D5C6,0x00000000
+ long 0x3FFB0000,0xD33F62F8,0x2488533E,0x00000000
+ long 0x3FFB0000,0xDB28DA81,0x62404C77,0x00000000
+ long 0x3FFB0000,0xE310A407,0x8AD34F18,0x00000000
+ long 0x3FFB0000,0xEAF6B0A8,0x188EE1EB,0x00000000
+ long 0x3FFB0000,0xF2DAF194,0x9DBE79D5,0x00000000
+ long 0x3FFB0000,0xFABD5813,0x61D47E3E,0x00000000
+ long 0x3FFC0000,0x8346AC21,0x0959ECC4,0x00000000
+ long 0x3FFC0000,0x8B232A08,0x304282D8,0x00000000
+ long 0x3FFC0000,0x92FB70B8,0xD29AE2F9,0x00000000
+ long 0x3FFC0000,0x9ACF476F,0x5CCD1CB4,0x00000000
+ long 0x3FFC0000,0xA29E7630,0x4954F23F,0x00000000
+ long 0x3FFC0000,0xAA68C5D0,0x8AB85230,0x00000000
+ long 0x3FFC0000,0xB22DFFFD,0x9D539F83,0x00000000
+ long 0x3FFC0000,0xB9EDEF45,0x3E900EA5,0x00000000
+ long 0x3FFC0000,0xC1A85F1C,0xC75E3EA5,0x00000000
+ long 0x3FFC0000,0xC95D1BE8,0x28138DE6,0x00000000
+ long 0x3FFC0000,0xD10BF300,0x840D2DE4,0x00000000
+ long 0x3FFC0000,0xD8B4B2BA,0x6BC05E7A,0x00000000
+ long 0x3FFC0000,0xE0572A6B,0xB42335F6,0x00000000
+ long 0x3FFC0000,0xE7F32A70,0xEA9CAA8F,0x00000000
+ long 0x3FFC0000,0xEF888432,0x64ECEFAA,0x00000000
+ long 0x3FFC0000,0xF7170A28,0xECC06666,0x00000000
+ long 0x3FFD0000,0x812FD288,0x332DAD32,0x00000000
+ long 0x3FFD0000,0x88A8D1B1,0x218E4D64,0x00000000
+ long 0x3FFD0000,0x9012AB3F,0x23E4AEE8,0x00000000
+ long 0x3FFD0000,0x976CC3D4,0x11E7F1B9,0x00000000
+ long 0x3FFD0000,0x9EB68949,0x3889A227,0x00000000
+ long 0x3FFD0000,0xA5EF72C3,0x4487361B,0x00000000
+ long 0x3FFD0000,0xAD1700BA,0xF07A7227,0x00000000
+ long 0x3FFD0000,0xB42CBCFA,0xFD37EFB7,0x00000000
+ long 0x3FFD0000,0xBB303A94,0x0BA80F89,0x00000000
+ long 0x3FFD0000,0xC22115C6,0xFCAEBBAF,0x00000000
+ long 0x3FFD0000,0xC8FEF3E6,0x86331221,0x00000000
+ long 0x3FFD0000,0xCFC98330,0xB4000C70,0x00000000
+ long 0x3FFD0000,0xD6807AA1,0x102C5BF9,0x00000000
+ long 0x3FFD0000,0xDD2399BC,0x31252AA3,0x00000000
+ long 0x3FFD0000,0xE3B2A855,0x6B8FC517,0x00000000
+ long 0x3FFD0000,0xEA2D764F,0x64315989,0x00000000
+ long 0x3FFD0000,0xF3BF5BF8,0xBAD1A21D,0x00000000
+ long 0x3FFE0000,0x801CE39E,0x0D205C9A,0x00000000
+ long 0x3FFE0000,0x8630A2DA,0xDA1ED066,0x00000000
+ long 0x3FFE0000,0x8C1AD445,0xF3E09B8C,0x00000000
+ long 0x3FFE0000,0x91DB8F16,0x64F350E2,0x00000000
+ long 0x3FFE0000,0x97731420,0x365E538C,0x00000000
+ long 0x3FFE0000,0x9CE1C8E6,0xA0B8CDBA,0x00000000
+ long 0x3FFE0000,0xA22832DB,0xCADAAE09,0x00000000
+ long 0x3FFE0000,0xA746F2DD,0xB7602294,0x00000000
+ long 0x3FFE0000,0xAC3EC0FB,0x997DD6A2,0x00000000
+ long 0x3FFE0000,0xB110688A,0xEBDC6F6A,0x00000000
+ long 0x3FFE0000,0xB5BCC490,0x59ECC4B0,0x00000000
+ long 0x3FFE0000,0xBA44BC7D,0xD470782F,0x00000000
+ long 0x3FFE0000,0xBEA94144,0xFD049AAC,0x00000000
+ long 0x3FFE0000,0xC2EB4ABB,0x661628B6,0x00000000
+ long 0x3FFE0000,0xC70BD54C,0xE602EE14,0x00000000
+ long 0x3FFE0000,0xCD000549,0xADEC7159,0x00000000
+ long 0x3FFE0000,0xD48457D2,0xD8EA4EA3,0x00000000
+ long 0x3FFE0000,0xDB948DA7,0x12DECE3B,0x00000000
+ long 0x3FFE0000,0xE23855F9,0x69E8096A,0x00000000
+ long 0x3FFE0000,0xE8771129,0xC4353259,0x00000000
+ long 0x3FFE0000,0xEE57C16E,0x0D379C0D,0x00000000
+ long 0x3FFE0000,0xF3E10211,0xA87C3779,0x00000000
+ long 0x3FFE0000,0xF919039D,0x758B8D41,0x00000000
+ long 0x3FFE0000,0xFE058B8F,0x64935FB3,0x00000000
+ long 0x3FFF0000,0x8155FB49,0x7B685D04,0x00000000
+ long 0x3FFF0000,0x83889E35,0x49D108E1,0x00000000
+ long 0x3FFF0000,0x859CFA76,0x511D724B,0x00000000
+ long 0x3FFF0000,0x87952ECF,0xFF8131E7,0x00000000
+ long 0x3FFF0000,0x89732FD1,0x9557641B,0x00000000
+ long 0x3FFF0000,0x8B38CAD1,0x01932A35,0x00000000
+ long 0x3FFF0000,0x8CE7A8D8,0x301EE6B5,0x00000000
+ long 0x3FFF0000,0x8F46A39E,0x2EAE5281,0x00000000
+ long 0x3FFF0000,0x922DA7D7,0x91888487,0x00000000
+ long 0x3FFF0000,0x94D19FCB,0xDEDF5241,0x00000000
+ long 0x3FFF0000,0x973AB944,0x19D2A08B,0x00000000
+ long 0x3FFF0000,0x996FF00E,0x08E10B96,0x00000000
+ long 0x3FFF0000,0x9B773F95,0x12321DA7,0x00000000
+ long 0x3FFF0000,0x9D55CC32,0x0F935624,0x00000000
+ long 0x3FFF0000,0x9F100575,0x006CC571,0x00000000
+ long 0x3FFF0000,0xA0A9C290,0xD97CC06C,0x00000000
+ long 0x3FFF0000,0xA22659EB,0xEBC0630A,0x00000000
+ long 0x3FFF0000,0xA388B4AF,0xF6EF0EC9,0x00000000
+ long 0x3FFF0000,0xA4D35F10,0x61D292C4,0x00000000
+ long 0x3FFF0000,0xA60895DC,0xFBE3187E,0x00000000
+ long 0x3FFF0000,0xA72A51DC,0x7367BEAC,0x00000000
+ long 0x3FFF0000,0xA83A5153,0x0956168F,0x00000000
+ long 0x3FFF0000,0xA93A2007,0x7539546E,0x00000000
+ long 0x3FFF0000,0xAA9E7245,0x023B2605,0x00000000
+ long 0x3FFF0000,0xAC4C84BA,0x6FE4D58F,0x00000000
+ long 0x3FFF0000,0xADCE4A4A,0x606B9712,0x00000000
+ long 0x3FFF0000,0xAF2A2DCD,0x8D263C9C,0x00000000
+ long 0x3FFF0000,0xB0656F81,0xF22265C7,0x00000000
+ long 0x3FFF0000,0xB1846515,0x0F71496A,0x00000000
+ long 0x3FFF0000,0xB28AAA15,0x6F9ADA35,0x00000000
+ long 0x3FFF0000,0xB37B44FF,0x3766B895,0x00000000
+ long 0x3FFF0000,0xB458C3DC,0xE9630433,0x00000000
+ long 0x3FFF0000,0xB525529D,0x562246BD,0x00000000
+ long 0x3FFF0000,0xB5E2CCA9,0x5F9D88CC,0x00000000
+ long 0x3FFF0000,0xB692CADA,0x7ACA1ADA,0x00000000
+ long 0x3FFF0000,0xB736AEA7,0xA6925838,0x00000000
+ long 0x3FFF0000,0xB7CFAB28,0x7E9F7B36,0x00000000
+ long 0x3FFF0000,0xB85ECC66,0xCB219835,0x00000000
+ long 0x3FFF0000,0xB8E4FD5A,0x20A593DA,0x00000000
+ long 0x3FFF0000,0xB99F41F6,0x4AFF9BB5,0x00000000
+ long 0x3FFF0000,0xBA7F1E17,0x842BBE7B,0x00000000
+ long 0x3FFF0000,0xBB471285,0x7637E17D,0x00000000
+ long 0x3FFF0000,0xBBFABE8A,0x4788DF6F,0x00000000
+ long 0x3FFF0000,0xBC9D0FAD,0x2B689D79,0x00000000
+ long 0x3FFF0000,0xBD306A39,0x471ECD86,0x00000000
+ long 0x3FFF0000,0xBDB6C731,0x856AF18A,0x00000000
+ long 0x3FFF0000,0xBE31CAC5,0x02E80D70,0x00000000
+ long 0x3FFF0000,0xBEA2D55C,0xE33194E2,0x00000000
+ long 0x3FFF0000,0xBF0B10B7,0xC03128F0,0x00000000
+ long 0x3FFF0000,0xBF6B7A18,0xDACB778D,0x00000000
+ long 0x3FFF0000,0xBFC4EA46,0x63FA18F6,0x00000000
+ long 0x3FFF0000,0xC0181BDE,0x8B89A454,0x00000000
+ long 0x3FFF0000,0xC065B066,0xCFBF6439,0x00000000
+ long 0x3FFF0000,0xC0AE345F,0x56340AE6,0x00000000
+ long 0x3FFF0000,0xC0F22291,0x9CB9E6A7,0x00000000
+
+ set X,FP_SCR0
+ set XDCARE,X+2
+ set XFRAC,X+4
+ set XFRACLO,X+8
+
+ set ATANF,FP_SCR1
+ set ATANFHI,ATANF+4
+ set ATANFLO,ATANF+8
+
+ global satan
+#--ENTRY POINT FOR ATAN(X), HERE X IS FINITE, NON-ZERO, AND NOT NAN'S
+satan:
+ fmov.x (%a0),%fp0 # LOAD INPUT
+
+ mov.l (%a0),%d1
+ mov.w 4(%a0),%d1
+ fmov.x %fp0,X(%a6)
+ and.l &0x7FFFFFFF,%d1
+
+ cmp.l %d1,&0x3FFB8000 # |X| >= 1/16?
+ bge.b ATANOK1
+ bra.w ATANSM
+
+ATANOK1:
+ cmp.l %d1,&0x4002FFFF # |X| < 16 ?
+ ble.b ATANMAIN
+ bra.w ATANBIG
+
+#--THE MOST LIKELY CASE, |X| IN [1/16, 16). WE USE TABLE TECHNIQUE
+#--THE IDEA IS ATAN(X) = ATAN(F) + ATAN( [X-F] / [1+XF] ).
+#--SO IF F IS CHOSEN TO BE CLOSE TO X AND ATAN(F) IS STORED IN
+#--A TABLE, ALL WE NEED IS TO APPROXIMATE ATAN(U) WHERE
+#--U = (X-F)/(1+XF) IS SMALL (REMEMBER F IS CLOSE TO X). IT IS
+#--TRUE THAT A DIVIDE IS NOW NEEDED, BUT THE APPROXIMATION FOR
+#--ATAN(U) IS A VERY SHORT POLYNOMIAL AND THE INDEXING TO
+#--FETCH F AND SAVING OF REGISTERS CAN ALL BE HIDDEN UNDER THE
+#--DIVIDE. IN THE END THIS METHOD IS MUCH FASTER THAN A TRADITIONAL
+#--ONE. NOTE ALSO THAT THE TRADITIONAL SCHEME THAT APPROXIMATES
+#--ATAN(X) DIRECTLY WILL NEED TO USE A RATIONAL APPROXIMATION
+#--(DIVISION NEEDED) ANYWAY BECAUSE A POLYNOMIAL APPROXIMATION
+#--WILL INVOLVE A VERY LONG POLYNOMIAL.
+
+#--NOW WE SEE X AS +-2^K * 1.BBBBBBB....B <- 1. + 63 BITS
+#--WE CHOOSE F TO BE +-2^K * 1.BBBB1
+#--THAT IS, IT MATCHES THE EXPONENT AND FIRST 5 BITS OF X, THE
+#--SIXTH BIT IS SET TO 1. SINCE K = -4, -3, ..., 3, THERE
+#--ARE ONLY 8 TIMES 16 = 2^7 = 128 |F|'S. SINCE ATAN(-|F|) IS
+#-- -ATAN(|F|), WE NEED TO STORE ONLY ATAN(|F|).
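+#--In C terms the construction of F from the upper fraction longword
+#--performed just below would read (sketch only; frac_hi is a
+#--hypothetical name for the 32-bit word at XFRAC):
+#
+#	frac_hi = (frac_hi & 0xF8000000u)  /* keep first 5 fraction bits */
+#	        |  0x04000000u;            /* force the 6th bit to 1     */
+#	frac_lo = 0;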
+
+ATANMAIN:
+
+ and.l &0xF8000000,XFRAC(%a6) # FIRST 5 BITS
+ or.l &0x04000000,XFRAC(%a6) # SET 6-TH BIT TO 1
+ mov.l &0x00000000,XFRACLO(%a6) # LOCATION OF X IS NOW F
+
+ fmov.x %fp0,%fp1 # FP1 IS X
+ fmul.x X(%a6),%fp1 # FP1 IS X*F, NOTE THAT X*F > 0
+ fsub.x X(%a6),%fp0 # FP0 IS X-F
+ fadd.s &0x3F800000,%fp1 # FP1 IS 1 + X*F
+ fdiv.x %fp1,%fp0 # FP0 IS U = (X-F)/(1+X*F)
+
+#--WHILE THE DIVISION IS TAKING ITS TIME, WE FETCH ATAN(|F|)
+#--CREATE ATAN(F) AND STORE IT IN ATANF, AND
+#--SAVE REGISTERS FP2.
+
+ mov.l %d2,-(%sp) # SAVE d2 TEMPORARILY
+ mov.l %d1,%d2 # THE EXP AND 16 BITS OF X
+ and.l &0x00007800,%d1 # 4 VARYING BITS OF F'S FRACTION
+ and.l &0x7FFF0000,%d2 # EXPONENT OF F
+ sub.l &0x3FFB0000,%d2 # K+4
+ asr.l &1,%d2
+ add.l %d2,%d1 # THE 7 BITS IDENTIFYING F
+ asr.l &7,%d1 # INDEX INTO TBL OF ATAN(|F|)
+ lea ATANTBL(%pc),%a1
+ add.l %d1,%a1 # ADDRESS OF ATAN(|F|)
+ mov.l (%a1)+,ATANF(%a6)
+ mov.l (%a1)+,ATANFHI(%a6)
+ mov.l (%a1)+,ATANFLO(%a6) # ATANF IS NOW ATAN(|F|)
+ mov.l X(%a6),%d1 # LOAD SIGN AND EXPO. AGAIN
+ and.l &0x80000000,%d1 # SIGN(F)
+ or.l %d1,ATANF(%a6) # ATANF IS NOW SIGN(F)*ATAN(|F|)
+ mov.l (%sp)+,%d2 # RESTORE d2
+
+#--THAT'S ALL I HAVE TO DO FOR NOW,
+#--BUT ALAS, THE DIVIDE IS STILL CRANKING!
+
+#--U IN FP0, WE ARE NOW READY TO COMPUTE ATAN(U) AS
+#--U + A1*U*V*(A2 + V*(A3 + V)), V = U*U
+#--THE POLYNOMIAL MAY LOOK STRANGE, BUT IS NEVERTHELESS CORRECT.
+#--THE NATURAL FORM IS U + U*V*(a1 + V*(a2 + V*a3))
+#--WHAT WE HAVE HERE IS MERELY A1 = a3, A2 = a1/a3, A3 = a2/a3.
+#--THE REASON FOR THIS REARRANGEMENT IS TO MAKE THE INDEPENDENT
+#--PARTS A1*U*V AND (A2 + ... STUFF) MORE LOAD-BALANCED
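+#--To see the equivalence, expand the rearranged form:
+#--    U + A1*U*V*(A2 + V*(A3 + V))
+#--      = U + U*V*(A1*A2 + V*(A1*A3 + V*A1))
+#--so matching the natural coefficients term by term requires
+#--A1*A2 = a1, A1*A3 = a2 and A1 = a3, i.e. the identities above.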
+
+ fmovm.x &0x04,-(%sp) # save fp2
+
+ fmov.x %fp0,%fp1
+ fmul.x %fp1,%fp1
+ fmov.d ATANA3(%pc),%fp2
+ fadd.x %fp1,%fp2 # A3+V
+ fmul.x %fp1,%fp2 # V*(A3+V)
+ fmul.x %fp0,%fp1 # U*V
+ fadd.d ATANA2(%pc),%fp2 # A2+V*(A3+V)
+ fmul.d ATANA1(%pc),%fp1 # A1*U*V
+ fmul.x %fp2,%fp1 # A1*U*V*(A2+V*(A3+V))
+ fadd.x %fp1,%fp0 # ATAN(U), FP1 RELEASED
+
+ fmovm.x (%sp)+,&0x20 # restore fp2
+
+ fmov.l %d0,%fpcr # restore users rnd mode,prec
+ fadd.x ATANF(%a6),%fp0 # ATAN(X)
+ bra t_inx2
+
+ATANBORS:
+#--|X| IS IN d1 IN COMPACT FORM. FP1, d0 SAVED.
+#--FP0 IS X AND |X| <= 1/16 OR |X| >= 16.
+ cmp.l %d1,&0x3FFF8000
+ bgt.w ATANBIG # I.E. |X| >= 16
+
+ATANSM:
+#--|X| <= 1/16
+#--IF |X| < 2^(-40), RETURN X AS ANSWER. OTHERWISE, APPROXIMATE
+#--ATAN(X) BY X + X*Y*(B1+Y*(B2+Y*(B3+Y*(B4+Y*(B5+Y*B6)))))
+#--WHICH IS X + X*Y*( [B1+Z*(B3+Z*B5)] + [Y*(B2+Z*(B4+Z*B6))] )
+#--WHERE Y = X*X, AND Z = Y*Y.
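+#--The split evaluation, written out in C (sketch only):
+#
+#	y = x*x;  z = y*y;
+#	odd  = B1 + z*(B3 + z*B5);      /* two independent Horner    */
+#	even = y*(B2 + z*(B4 + z*B6));  /* chains fill the pipeline  */
+#	atan = x + x*y*(odd + even);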
+
+ cmp.l %d1,&0x3FD78000
+ blt.w ATANTINY
+
+#--COMPUTE POLYNOMIAL
+ fmovm.x &0x0c,-(%sp) # save fp2/fp3
+
+ fmul.x %fp0,%fp0 # FP0 IS Y = X*X
+
+ fmov.x %fp0,%fp1
+ fmul.x %fp1,%fp1 # FP1 IS Z = Y*Y
+
+ fmov.d ATANB6(%pc),%fp2
+ fmov.d ATANB5(%pc),%fp3
+
+ fmul.x %fp1,%fp2 # Z*B6
+ fmul.x %fp1,%fp3 # Z*B5
+
+ fadd.d ATANB4(%pc),%fp2 # B4+Z*B6
+ fadd.d ATANB3(%pc),%fp3 # B3+Z*B5
+
+ fmul.x %fp1,%fp2 # Z*(B4+Z*B6)
+ fmul.x %fp3,%fp1 # Z*(B3+Z*B5)
+
+ fadd.d ATANB2(%pc),%fp2 # B2+Z*(B4+Z*B6)
+ fadd.d ATANB1(%pc),%fp1 # B1+Z*(B3+Z*B5)
+
+ fmul.x %fp0,%fp2 # Y*(B2+Z*(B4+Z*B6))
+ fmul.x X(%a6),%fp0 # X*Y
+
+ fadd.x %fp2,%fp1 # [B1+Z*(B3+Z*B5)]+[Y*(B2+Z*(B4+Z*B6))]
+
+ fmul.x %fp1,%fp0 # X*Y*([B1+Z*(B3+Z*B5)]+[Y*(B2+Z*(B4+Z*B6))])
+
+ fmovm.x (%sp)+,&0x30 # restore fp2/fp3
+
+ fmov.l %d0,%fpcr # restore users rnd mode,prec
+ fadd.x X(%a6),%fp0
+ bra t_inx2
+
+ATANTINY:
+#--|X| < 2^(-40), ATAN(X) = X
+
+ fmov.l %d0,%fpcr # restore users rnd mode,prec
+ mov.b &FMOV_OP,%d1 # last inst is MOVE
+ fmov.x X(%a6),%fp0 # last inst - possible exception set
+
+ bra t_catch
+
+ATANBIG:
+#--IF |X| > 2^(100), RETURN SIGN(X)*(PI/2 - TINY). OTHERWISE,
+#--RETURN SIGN(X)*PI/2 + ATAN(-1/X).
+ cmp.l %d1,&0x40638000
+ bgt.w ATANHUGE
+
+#--APPROXIMATE ATAN(-1/X) BY
+#--X'+X'*Y*(C1+Y*(C2+Y*(C3+Y*(C4+Y*C5)))), X' = -1/X, Y = X'*X'
+#--THIS CAN BE RE-WRITTEN AS
+#--X'+X'*Y*( [C1+Z*(C3+Z*C5)] + [Y*(C2+Z*C4)] ), Z = Y*Y.
+
+ fmovm.x &0x0c,-(%sp) # save fp2/fp3
+
+ fmov.s &0xBF800000,%fp1 # LOAD -1
+ fdiv.x %fp0,%fp1 # FP1 IS -1/X
+
+#--DIVIDE IS STILL CRANKING
+
+ fmov.x %fp1,%fp0 # FP0 IS X'
+ fmul.x %fp0,%fp0 # FP0 IS Y = X'*X'
+ fmov.x %fp1,X(%a6) # X IS REALLY X'
+
+ fmov.x %fp0,%fp1
+ fmul.x %fp1,%fp1 # FP1 IS Z = Y*Y
+
+ fmov.d ATANC5(%pc),%fp3
+ fmov.d ATANC4(%pc),%fp2
+
+ fmul.x %fp1,%fp3 # Z*C5
+ fmul.x %fp1,%fp2 # Z*C4
+
+ fadd.d ATANC3(%pc),%fp3 # C3+Z*C5
+ fadd.d ATANC2(%pc),%fp2 # C2+Z*C4
+
+ fmul.x %fp3,%fp1 # Z*(C3+Z*C5), FP3 RELEASED
+ fmul.x %fp0,%fp2 # Y*(C2+Z*C4)
+
+ fadd.d ATANC1(%pc),%fp1 # C1+Z*(C3+Z*C5)
+ fmul.x X(%a6),%fp0 # X'*Y
+
+ fadd.x %fp2,%fp1 # [Y*(C2+Z*C4)]+[C1+Z*(C3+Z*C5)]
+
+ fmul.x %fp1,%fp0 # X'*Y*([C1+Z*(C3+Z*C5)]
+# ... +[Y*(C2+Z*C4)])
+ fadd.x X(%a6),%fp0
+
+ fmovm.x (%sp)+,&0x30 # restore fp2/fp3
+
+ fmov.l %d0,%fpcr # restore users rnd mode,prec
+ tst.b (%a0)
+ bpl.b pos_big
+
+neg_big:
+ fadd.x NPIBY2(%pc),%fp0
+ bra t_minx2
+
+pos_big:
+ fadd.x PPIBY2(%pc),%fp0
+ bra t_pinx2
+
+ATANHUGE:
+#--RETURN SIGN(X)*(PIBY2 - TINY) = SIGN(X)*PIBY2 - SIGN(X)*TINY
+ tst.b (%a0)
+ bpl.b pos_huge
+
+neg_huge:
+ fmov.x NPIBY2(%pc),%fp0
+ fmov.l %d0,%fpcr
+ fadd.x PTINY(%pc),%fp0
+ bra t_minx2
+
+pos_huge:
+ fmov.x PPIBY2(%pc),%fp0
+ fmov.l %d0,%fpcr
+ fadd.x NTINY(%pc),%fp0
+ bra t_pinx2
+
+ global satand
+#--ENTRY POINT FOR ATAN(X) FOR DENORMALIZED ARGUMENT
+satand:
+ bra t_extdnrm
+
+#########################################################################
+# sasin(): computes the inverse sine of a normalized input #
+# sasind(): computes the inverse sine of a denormalized input #
+# #
+# INPUT *************************************************************** #
+# a0 = pointer to extended precision input #
+# d0 = round precision,mode #
+# #
+# OUTPUT ************************************************************** #
+# fp0 = arcsin(X) #
+# #
+# ACCURACY and MONOTONICITY ******************************************* #
+# The returned result is within 3 ulps in 64 significant bits, #
+# i.e. within 0.5001 ulp to 53 bits if the result is subsequently #
+# rounded to double precision. The result is provably monotonic #
+# in double precision. #
+# #
+# ALGORITHM *********************************************************** #
+# #
+# ASIN #
+# 1. If |X| >= 1, go to 3. #
+# #
+# 2. (|X| < 1) Calculate asin(X) by #
+# z := sqrt( [1-X][1+X] ) #
+# asin(X) = atan( x / z ). #
+# Exit. #
+# #
+# 3. If |X| > 1, go to 5. #
+# #
+# 4. (|X| = 1) sgn := sign(X), return asin(X) := sgn * Pi/2. Exit.#
+# #
+# 5. (|X| > 1) Generate an invalid operation by 0 * infinity. #
+# Exit. #
+# #
+#########################################################################
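+#--The main path, in C terms (sketch only; the actual code below
+#--builds the quotient and calls satan on it):
+#
+#	double asin_sketch(double x) {           /* |x| < 1           */
+#	    double z = sqrt((1.0 - x) * (1.0 + x));
+#	    return atan(x / z);
+#	}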
+
+ global sasin
+sasin:
+ fmov.x (%a0),%fp0 # LOAD INPUT
+
+ mov.l (%a0),%d1
+ mov.w 4(%a0),%d1
+ and.l &0x7FFFFFFF,%d1
+ cmp.l %d1,&0x3FFF8000
+ bge.b ASINBIG
+
+# This catch is added here for the '060 QSP. Originally, the call to
+# satan() would handle this case by causing the exception which would
+# not be caught until gen_except(). Now, with the exceptions being
+# detected inside of satan(), the exception would have been handled there
+# instead of inside sasin() as expected.
+ cmp.l %d1,&0x3FD78000
+ blt.w ASINTINY
+
+#--THIS IS THE USUAL CASE, |X| < 1
+#--ASIN(X) = ATAN( X / SQRT( (1-X)(1+X) ) )
+
+ASINMAIN:
+ fmov.s &0x3F800000,%fp1
+ fsub.x %fp0,%fp1 # 1-X
+ fmovm.x &0x4,-(%sp) # {fp2}
+ fmov.s &0x3F800000,%fp2
+ fadd.x %fp0,%fp2 # 1+X
+ fmul.x %fp2,%fp1 # (1+X)(1-X)
+ fmovm.x (%sp)+,&0x20 # {fp2}
+ fsqrt.x %fp1 # SQRT([1-X][1+X])
+ fdiv.x %fp1,%fp0 # X/SQRT([1-X][1+X])
+ fmovm.x &0x01,-(%sp) # save X/SQRT(...)
+ lea (%sp),%a0 # pass ptr to X/SQRT(...)
+ bsr satan
+ add.l &0xc,%sp # clear X/SQRT(...) from stack
+ bra t_inx2
+
+ASINBIG:
+ fabs.x %fp0 # |X|
+ fcmp.s %fp0,&0x3F800000
+ fbgt t_operr # cause an operr exception
+
+#--|X| = 1, ASIN(X) = +- PI/2.
+ASINONE:
+ fmov.x PIBY2(%pc),%fp0
+ mov.l (%a0),%d1
+ and.l &0x80000000,%d1 # SIGN BIT OF X
+ or.l &0x3F800000,%d1 # +-1 IN SGL FORMAT
+ mov.l %d1,-(%sp) # push SIGN(X) IN SGL-FMT
+ fmov.l %d0,%fpcr
+ fmul.s (%sp)+,%fp0
+ bra t_inx2
+
+#--|X| < 2^(-40), ASIN(X) = X
+ASINTINY:
+ fmov.l %d0,%fpcr # restore users rnd mode,prec
+ mov.b &FMOV_OP,%d1 # last inst is MOVE
+ fmov.x (%a0),%fp0 # last inst - possible exception
+ bra t_catch
+
+ global sasind
+#--ASIN(X) = X FOR DENORMALIZED X
+sasind:
+ bra t_extdnrm
+
+#########################################################################
+# sacos(): computes the inverse cosine of a normalized input #
+# sacosd(): computes the inverse cosine of a denormalized input #
+# #
+# INPUT *************************************************************** #
+# a0 = pointer to extended precision input #
+# d0 = round precision,mode #
+# #
+# OUTPUT ************************************************************** #
+# fp0 = arccos(X) #
+# #
+# ACCURACY and MONOTONICITY ******************************************* #
+# The returned result is within 3 ulps in 64 significant bits, #
+# i.e. within 0.5001 ulp to 53 bits if the result is subsequently #
+# rounded to double precision. The result is provably monotonic #
+# in double precision. #
+# #
+# ALGORITHM *********************************************************** #
+# #
+# ACOS #
+# 1. If |X| >= 1, go to 3. #
+# #
+# 2. (|X| < 1) Calculate acos(X) by #
+# z := (1-X) / (1+X) #
+# acos(X) = 2 * atan( sqrt(z) ). #
+# Exit. #
+# #
+# 3. If |X| > 1, go to 5. #
+# #
+# 4. (|X| = 1) If X > 0, return 0. Otherwise, return Pi. Exit. #
+# #
+# 5. (|X| > 1) Generate an invalid operation by 0 * infinity. #
+# Exit. #
+# #
+#########################################################################
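+#--The main path, in C terms (sketch only; the actual code below
+#--builds the square root and calls satan on it):
+#
+#	double acos_sketch(double x) {           /* |x| < 1           */
+#	    double z = (1.0 - x) / (1.0 + x);
+#	    return 2.0 * atan(sqrt(z));
+#	}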
+
+ global sacos
+sacos:
+ fmov.x (%a0),%fp0 # LOAD INPUT
+
+ mov.l (%a0),%d1 # pack exp w/ upper 16 fraction
+ mov.w 4(%a0),%d1
+ and.l &0x7FFFFFFF,%d1
+ cmp.l %d1,&0x3FFF8000
+ bge.b ACOSBIG
+
+#--THIS IS THE USUAL CASE, |X| < 1
+#--ACOS(X) = 2 * ATAN( SQRT( (1-X)/(1+X) ) )
+
+ACOSMAIN:
+ fmov.s &0x3F800000,%fp1
+ fadd.x %fp0,%fp1 # 1+X
+ fneg.x %fp0 # -X
+ fadd.s &0x3F800000,%fp0 # 1-X
+ fdiv.x %fp1,%fp0 # (1-X)/(1+X)
+ fsqrt.x %fp0 # SQRT((1-X)/(1+X))
+ mov.l %d0,-(%sp) # save original users fpcr
+ clr.l %d0
+ fmovm.x &0x01,-(%sp) # save SQRT(...) to stack
+ lea (%sp),%a0 # pass ptr to sqrt
+ bsr satan # ATAN(SQRT([1-X]/[1+X]))
+ add.l &0xc,%sp # clear SQRT(...) from stack
+
+ fmov.l (%sp)+,%fpcr # restore users round prec,mode
+ fadd.x %fp0,%fp0 # 2 * ATAN( STUFF )
+ bra t_pinx2
+
+ACOSBIG:
+ fabs.x %fp0
+ fcmp.s %fp0,&0x3F800000
+ fbgt t_operr # cause an operr exception
+
+#--|X| = 1, ACOS(X) = 0 OR PI
+ tst.b (%a0) # is X positive or negative?
+ bpl.b ACOSP1
+
+#--X = -1
+# Returns PI and inexact exception
+ACOSM1:
+ fmov.x PI(%pc),%fp0 # load PI
+ fmov.l %d0,%fpcr # load round mode,prec
+ fadd.s &0x00800000,%fp0 # add a small value
+ bra t_pinx2
+
+ACOSP1:
+ bra ld_pzero # answer is positive zero
+
+ global sacosd
+#--ACOS(X) = PI/2 FOR DENORMALIZED X
+sacosd:
+ fmov.l %d0,%fpcr # load user's rnd mode/prec
+ fmov.x PIBY2(%pc),%fp0
+ bra t_pinx2
+
+#########################################################################
+# setox(): computes the exponential for a normalized input #
+# setoxd(): computes the exponential for a denormalized input #
+# setoxm1(): computes the exponential minus 1 for a normalized input #
+# setoxm1d(): computes the exponential minus 1 for a denormalized input #
+# #
+# INPUT *************************************************************** #
+# a0 = pointer to extended precision input #
+# d0 = round precision,mode #
+# #
+# OUTPUT ************************************************************** #
+# fp0 = exp(X) or exp(X)-1 #
+# #
+# ACCURACY and MONOTONICITY ******************************************* #
+# The returned result is within 0.85 ulps in 64 significant bits, #
+# i.e. within 0.5001 ulp to 53 bits if the result is subsequently #
+# rounded to double precision. The result is provably monotonic #
+# in double precision. #
+# #
+# ALGORITHM and IMPLEMENTATION **************************************** #
+# #
+# setoxd #
+# ------ #
+# Step 1. Set ans := 1.0 #
+# #
+# Step 2. Return ans := ans + sign(X)*2^(-126). Exit. #
+# Notes: This will always generate one exception -- inexact. #
+# #
+# #
+# setox #
+# ----- #
+# #
+# Step 1. Filter out extreme cases of input argument. #
+# 1.1 If |X| >= 2^(-65), go to Step 1.3. #
+# 1.2 Go to Step 7. #
+# 1.3 If |X| < 16380 log(2), go to Step 2. #
+# 1.4 Go to Step 8. #
+# Notes: The usual case should take the branches 1.1 -> 1.3 -> 2.#
+# To avoid the use of floating-point comparisons, a #
+# compact representation of |X| is used. This format is a #
+# 32-bit integer, the upper (more significant) 16 bits #
+# are the sign and biased exponent field of |X|; the #
+# lower 16 bits are the 16 most significant fraction #
+# (including the explicit bit) bits of |X|. Consequently, #
+# the comparisons in Steps 1.1 and 1.3 can be performed #
+# by integer comparison. Note also that the constant #
+# 16380 log(2) used in Step 1.3 is also in the compact #
+# form. Thus taking the branch to Step 2 guarantees #
+# |X| < 16380 log(2). There is no harm to have a small #
+# number of cases where |X| is less than, but close to, #
+# 16380 log(2) and the branch to Step 8 is taken. #
+# #
+# Step 2. Calculate N = round-to-nearest-int( X * 64/log2 ). #
+# 2.1 Set AdjFlag := 0 (indicates the branch 1.3 -> 2 #
+# was taken) #
+# 2.2 N := round-to-nearest-integer( X * 64/log2 ). #
+# 2.3 Calculate J = N mod 64; so J = 0,1,2,..., #
+# or 63. #
+# 2.4 Calculate M = (N - J)/64; so N = 64M + J. #
+# 2.5 Calculate the address of the stored value of #
+# 2^(J/64). #
+# 2.6 Create the value Scale = 2^M. #
+# Notes: The calculation in 2.2 is really performed by #
+# Z := X * constant #
+# N := round-to-nearest-integer(Z) #
+# where #
+# constant := single-precision( 64/log 2 ). #
+# #
+# Using a single-precision constant avoids memory #
+# access. Another effect of using a single-precision #
+# "constant" is that the calculated value Z is #
+# #
+# Z = X*(64/log2)*(1+eps), |eps| <= 2^(-24). #
+# #
+# This error has to be considered later in Steps 3 and 4. #
+# #
+# Step 3. Calculate X - N*log2/64. #
+# 3.1 R := X + N*L1, #
+# where L1 := single-precision(-log2/64). #
+# 3.2 R := R + N*L2, #
+# L2 := extended-precision(-log2/64 - L1).#
+# Notes: a) The way L1 and L2 are chosen ensures L1+L2 #
+# approximate the value -log2/64 to 88 bits of accuracy. #
+# b) N*L1 is exact because N is no longer than 22 bits #
+# and L1 is no longer than 24 bits. #
+# c) The calculation X+N*L1 is also exact due to #
+# cancellation. Thus, R is practically X+N(L1+L2) to full #
+# 64 bits. #
+# d) It is important to estimate how large can |R| be #
+# after Step 3.2. #
+# #
+# N = rnd-to-int( X*64/log2 (1+eps) ), |eps|<=2^(-24) #
+# X*64/log2 (1+eps) = N + f, |f| <= 0.5 #
+# X*64/log2 - N = f - eps*X 64/log2 #
+# X - N*log2/64 = f*log2/64 - eps*X #
+# #
+# #
+# Now |X| <= 16446 log2, thus #
+# #
+# |X - N*log2/64| <= (0.5 + 16446/2^(18))*log2/64 #
+# <= 0.57 log2/64. #
+# This bound will be used in Step 4. #
+# #
+# Step 4. Approximate exp(R)-1 by a polynomial #
+# p = R + R*R*(A1 + R*(A2 + R*(A3 + R*(A4 + R*A5)))) #
+# Notes: a) In order to reduce memory access, the coefficients #
+# are made as "short" as possible: A1 (which is 1/2), A4 #
+# and A5 are single precision; A2 and A3 are double #
+# precision. #
+# b) Even with the restrictions above, #
+# |p - (exp(R)-1)| < 2^(-68.8) for all |R| <= 0.0062. #
+# Note that 0.0062 is slightly bigger than 0.57 log2/64. #
+# c) To fully utilize the pipeline, p is separated into #
+# two independent pieces of roughly equal complexities #
+# p = [ R + R*S*(A2 + S*A4) ] + #
+# [ S*(A1 + S*(A3 + S*A5)) ] #
+# where S = R*R. #
+# #
+# Step 5. Compute 2^(J/64)*exp(R) = 2^(J/64)*(1+p) by #
+# ans := T + ( T*p + t) #
+# where T and t are the stored values for 2^(J/64). #
+# Notes: 2^(J/64) is stored as T and t where T+t approximates #
+# 2^(J/64) to roughly 85 bits; T is in extended precision #
+# and t is in single precision. Note also that T is #
+# rounded to 62 bits so that the last two bits of T are #
+# zero. The reason for such a special form is that T-1, #
+# T-2, and T-8 will all be exact --- a property that will #
+# give much more accurate computation of the function #
+# EXPM1. #
+# #
+# Step 6. Reconstruction of exp(X) #
+# exp(X) = 2^M * 2^(J/64) * exp(R). #
+# 6.1 If AdjFlag = 0, go to 6.3 #
+# 6.2 ans := ans * AdjScale #
+# 6.3 Restore the user FPCR #
+# 6.4 Return ans := ans * Scale. Exit. #
+# Notes: If AdjFlag = 0, we have X = Mlog2 + Jlog2/64 + R, #
+# |M| <= 16380, and Scale = 2^M. Moreover, exp(X) will #
+# neither overflow nor underflow. If AdjFlag = 1, that #
+# means that #
+# X = (M1+M)log2 + Jlog2/64 + R, |M1+M| >= 16380. #
+# Hence, exp(X) may overflow or underflow or neither. #
+# When that is the case, AdjScale = 2^(M1) where M1 is #
+# approximately M. Thus 6.2 will never cause #
+# over/underflow. Possible exception in 6.4 is overflow #
+# or underflow. The inexact exception is not generated in #
+# 6.4. Although one can argue that the inexact flag #
+# should always be raised, simulating that exception #
+# costs more than the flag is worth in practical uses. #
+# #
+# Step 7. Return 1 + X. #
+# 7.1 ans := X #
+# 7.2 Restore user FPCR. #
+# 7.3 Return ans := 1 + ans. Exit #
+# Notes: For non-zero X, the inexact exception will always be #
+# raised by 7.3. That is the only exception raised by 7.3.#
+# Note also that we use the FMOVEM instruction to move X #
+# in Step 7.1 to avoid unnecessary trapping. (Although #
+# the FMOVEM may not seem relevant since X is normalized, #
+# the precaution will be useful in the library version of #
+# this code where the separate entry for denormalized #
+# inputs will be done away with.) #
+# #
+# Step 8. Handle exp(X) where |X| >= 16380log2. #
+# 8.1 If |X| > 16480 log2, go to Step 9. #
+# (mimic 2.2 - 2.6) #
+# 8.2 N := round-to-integer( X * 64/log2 ) #
+# 8.3 Calculate J = N mod 64, J = 0,1,...,63 #
+# 8.4 K := (N-J)/64, M1 := truncate(K/2), M = K-M1, #
+# AdjFlag := 1. #
+# 8.5 Calculate the address of the stored value #
+# 2^(J/64). #
+# 8.6 Create the values Scale = 2^M, AdjScale = 2^M1. #
+# 8.7 Go to Step 3. #
+# Notes: Refer to notes for 2.2 - 2.6. #
+# #
+# Step 9. Handle exp(X), |X| > 16480 log2. #
+# 9.1 If X < 0, go to 9.3 #
+# 9.2 ans := Huge, go to 9.4 #
+# 9.3 ans := Tiny. #
+# 9.4 Restore user FPCR. #
+# 9.5 Return ans := ans * ans. Exit. #
+# Notes: Exp(X) will surely overflow or underflow, depending on #
+# X's sign. "Huge" and "Tiny" are respectively large/tiny #
+# extended-precision numbers whose square over/underflow #
+# with an inexact result. Thus, 9.5 always raises the #
+# inexact together with either overflow or underflow. #
+# #
+# setoxm1d #
+# -------- #
+# #
+# Step 1. Set ans := 0 #
+# #
+# Step 2. Return ans := X + ans. Exit. #
+# Notes: This will return X with the appropriate rounding #
+# precision prescribed by the user FPCR. #
+# #
+# setoxm1 #
+# ------- #
+# #
+# Step 1. Check |X| #
+# 1.1 If |X| >= 1/4, go to Step 1.3. #
+# 1.2 Go to Step 7. #
+# 1.3 If |X| < 70 log(2), go to Step 2. #
+# 1.4 Go to Step 10. #
+# Notes: The usual case should take the branches 1.1 -> 1.3 -> 2.#
+# However, it is conceivable |X| can be small very often #
+# because EXPM1 is intended to evaluate exp(X)-1 #
+# accurately when |X| is small. For further details on #
+# the comparisons, see the notes on Step 1 of setox. #
+# #
+# Step 2. Calculate N = round-to-nearest-int( X * 64/log2 ). #
+# 2.1 N := round-to-nearest-integer( X * 64/log2 ). #
+# 2.2 Calculate J = N mod 64; so J = 0,1,2,..., #
+# or 63. #
+# 2.3 Calculate M = (N - J)/64; so N = 64M + J. #
+# 2.4 Calculate the address of the stored value of #
+# 2^(J/64). #
+# 2.5 Create the values Sc = 2^M and #
+# OnebySc := -2^(-M). #
+# Notes: See the notes on Step 2 of setox. #
+# #
+# Step 3. Calculate X - N*log2/64. #
+# 3.1 R := X + N*L1, #
+# where L1 := single-precision(-log2/64). #
+# 3.2 R := R + N*L2, #
+# L2 := extended-precision(-log2/64 - L1).#
+# Notes: Applying the analysis of Step 3 of setox in this case #
+# shows that |R| <= 0.0055 (note that |X| <= 70 log2 in #
+# this case). #
+# #
+# Step 4. Approximate exp(R)-1 by a polynomial #
+# p = R+R*R*(A1+R*(A2+R*(A3+R*(A4+R*(A5+R*A6))))) #
+# Notes: a) In order to reduce memory access, the coefficients #
+# are made as "short" as possible: A1 (which is 1/2), A5 #
+# and A6 are single precision; A2, A3 and A4 are double #
+# precision. #
+# b) Even with the restriction above, #
+# |p - (exp(R)-1)| < |R| * 2^(-72.7) #
+# for all |R| <= 0.0055. #
+# c) To fully utilize the pipeline, p is separated into #
+# two independent pieces of roughly equal complexity #
+# p = [ R*S*(A2 + S*(A4 + S*A6)) ] + #
+# [ R + S*(A1 + S*(A3 + S*A5)) ] #
+# where S = R*R. #
+# #
+# Step 5. Compute 2^(J/64)*p by #
+# p := T*p #
+# where T and t are the stored values for 2^(J/64). #
+# Notes: 2^(J/64) is stored as T and t where T+t approximates #
+# 2^(J/64) to roughly 85 bits; T is in extended precision #
+# and t is in single precision. Note also that T is #
+# rounded to 62 bits so that the last two bits of T are #
+# zero. The reason for such a special form is that T-1, #
+# T-2, and T-8 will all be exact --- a property that will #
+# be exploited in Step 6 below. The total relative error #
+# in p is no bigger than 2^(-67.7) compared to the final #
+# result. #
+# #
+# Step 6. Reconstruction of exp(X)-1 #
+# exp(X)-1 = 2^M * ( 2^(J/64) + p - 2^(-M) ). #
+# 6.1 If M <= 63, go to Step 6.3. #
+# 6.2 ans := T + (p + (t + OnebySc)). Go to 6.6 #
+# 6.3 If M >= -3, go to 6.5. #
+# 6.4 ans := (T + (p + t)) + OnebySc. Go to 6.6 #
+# 6.5 ans := (T + OnebySc) + (p + t). #
+# 6.6 Restore user FPCR. #
+# 6.7 Return ans := Sc * ans. Exit. #
+# Notes: The various arrangements of the expressions give #
+# accurate evaluations. #
+# #
+# Step 7. exp(X)-1 for |X| < 1/4. #
+# 7.1 If |X| >= 2^(-65), go to Step 9. #
+# 7.2 Go to Step 8. #
+# #
+# Step 8. Calculate exp(X)-1, |X| < 2^(-65). #
+# 8.1 If |X| < 2^(-16312), go to 8.3. #
+# 8.2 Restore FPCR; return ans := X - 2^(-16382). #
+# Exit. #
+# 8.3 X := X * 2^(140). #
+# 8.4 Restore FPCR; ans := ans - 2^(-16382). #
+# Return ans := ans*2^(140). Exit #
+# Notes: The idea is to return "X - tiny" under the user #
+# precision and rounding modes. To avoid unnecessary #
+# inefficiency, we stay away from denormalized numbers #
+# the best we can. For |X| >= 2^(-16312), the #
+# straightforward 8.2 generates the inexact exception as #
+# the case warrants. #
+# #
+# Step 9. Calculate exp(X)-1, |X| < 1/4, by a polynomial #
+# p = X + X*X*(B1 + X*(B2 + ... + X*B12)) #
+# Notes: a) In order to reduce memory access, the coefficients #
+# are made as "short" as possible: B1 (which is 1/2), B9 #
+# to B12 are single precision; B3 to B8 are double #
+# precision; and B2 is double extended. #
+# b) Even with the restriction above, #
+# |p - (exp(X)-1)| < |X| 2^(-70.6) #
+# for all |X| <= 0.251. #
+# Note that 0.251 is slightly bigger than 1/4. #
+# c) To fully preserve accuracy, the polynomial is #
+# computed as #
+# X + ( S*B1 + Q ) where S = X*X and #
+# Q = X*S*(B2 + X*(B3 + ... + X*B12)) #
+# d) To fully utilize the pipeline, Q is separated into #
+# two independent pieces of roughly equal complexity #
+# Q = [ X*S*(B2 + S*(B4 + ... + S*B12)) ] + #
+# [ S*S*(B3 + S*(B5 + ... + S*B11)) ] #
+# #
+# Step 10. Calculate exp(X)-1 for |X| >= 70 log 2. #
+# 10.1 If X >= 70log2, exp(X) - 1 = exp(X) for all #
+# practical purposes. Therefore, go to Step 1 of setox. #
+# 10.2 If X <= -70log2, exp(X) - 1 = -1 for all practical #
+# purposes. #
+# ans := -1 #
+# Restore user FPCR #
+# Return ans := ans + 2^(-126). Exit. #
+# Notes: 10.2 will always create an inexact and return -1 + tiny #
+# in the user rounding precision and mode. #
+# #
+#########################################################################
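+#--A C-level sketch of setox's main path, Steps 2-6 (illustrative
+#--only; L1/L2, expm1_poly(), and the table pair T[]/t[] are
+#--hypothetical stand-ins for the constants and EEXPTBL below):
+#
+#	double exp_sketch(double x) {
+#	    int    n = (int)nearbyint(x * 64.0 / M_LN2);
+#	    int    j = n & 63, m = (n - j) / 64; /* n = 64*m + j       */
+#	    double r = (x + n*L1) + n*L2;        /* L1+L2 ~ -log2/64   */
+#	    double p = expm1_poly(r);            /* exp(r)-1, Step 4   */
+#	    return scalbn(T[j] + (T[j]*p + t[j]), m);  /* Steps 5-6    */
+#	}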
+
+L2: long 0x3FDC0000,0x82E30865,0x4361C4C6,0x00000000
+
+EEXPA3: long 0x3FA55555,0x55554CC1
+EEXPA2: long 0x3FC55555,0x55554A54
+
+EM1A4: long 0x3F811111,0x11174385
+EM1A3: long 0x3FA55555,0x55554F5A
+
+EM1A2: long 0x3FC55555,0x55555555,0x00000000,0x00000000
+
+EM1B8: long 0x3EC71DE3,0xA5774682
+EM1B7: long 0x3EFA01A0,0x19D7CB68
+
+EM1B6: long 0x3F2A01A0,0x1A019DF3
+EM1B5: long 0x3F56C16C,0x16C170E2
+
+EM1B4: long 0x3F811111,0x11111111
+EM1B3: long 0x3FA55555,0x55555555
+
+EM1B2: long 0x3FFC0000,0xAAAAAAAA,0xAAAAAAAB
+ long 0x00000000
+
+TWO140: long 0x48B00000,0x00000000
+TWON140:
+ long 0x37300000,0x00000000
+
+EEXPTBL:
+ long 0x3FFF0000,0x80000000,0x00000000,0x00000000
+ long 0x3FFF0000,0x8164D1F3,0xBC030774,0x9F841A9B
+ long 0x3FFF0000,0x82CD8698,0xAC2BA1D8,0x9FC1D5B9
+ long 0x3FFF0000,0x843A28C3,0xACDE4048,0xA0728369
+ long 0x3FFF0000,0x85AAC367,0xCC487B14,0x1FC5C95C
+ long 0x3FFF0000,0x871F6196,0x9E8D1010,0x1EE85C9F
+ long 0x3FFF0000,0x88980E80,0x92DA8528,0x9FA20729
+ long 0x3FFF0000,0x8A14D575,0x496EFD9C,0xA07BF9AF
+ long 0x3FFF0000,0x8B95C1E3,0xEA8BD6E8,0xA0020DCF
+ long 0x3FFF0000,0x8D1ADF5B,0x7E5BA9E4,0x205A63DA
+ long 0x3FFF0000,0x8EA4398B,0x45CD53C0,0x1EB70051
+ long 0x3FFF0000,0x9031DC43,0x1466B1DC,0x1F6EB029
+ long 0x3FFF0000,0x91C3D373,0xAB11C338,0xA0781494
+ long 0x3FFF0000,0x935A2B2F,0x13E6E92C,0x9EB319B0
+ long 0x3FFF0000,0x94F4EFA8,0xFEF70960,0x2017457D
+ long 0x3FFF0000,0x96942D37,0x20185A00,0x1F11D537
+ long 0x3FFF0000,0x9837F051,0x8DB8A970,0x9FB952DD
+ long 0x3FFF0000,0x99E04593,0x20B7FA64,0x1FE43087
+ long 0x3FFF0000,0x9B8D39B9,0xD54E5538,0x1FA2A818
+ long 0x3FFF0000,0x9D3ED9A7,0x2CFFB750,0x1FDE494D
+ long 0x3FFF0000,0x9EF53260,0x91A111AC,0x20504890
+ long 0x3FFF0000,0xA0B0510F,0xB9714FC4,0xA073691C
+ long 0x3FFF0000,0xA2704303,0x0C496818,0x1F9B7A05
+ long 0x3FFF0000,0xA43515AE,0x09E680A0,0xA0797126
+ long 0x3FFF0000,0xA5FED6A9,0xB15138EC,0xA071A140
+ long 0x3FFF0000,0xA7CD93B4,0xE9653568,0x204F62DA
+ long 0x3FFF0000,0xA9A15AB4,0xEA7C0EF8,0x1F283C4A
+ long 0x3FFF0000,0xAB7A39B5,0xA93ED338,0x9F9A7FDC
+ long 0x3FFF0000,0xAD583EEA,0x42A14AC8,0xA05B3FAC
+ long 0x3FFF0000,0xAF3B78AD,0x690A4374,0x1FDF2610
+ long 0x3FFF0000,0xB123F581,0xD2AC2590,0x9F705F90
+ long 0x3FFF0000,0xB311C412,0xA9112488,0x201F678A
+ long 0x3FFF0000,0xB504F333,0xF9DE6484,0x1F32FB13
+ long 0x3FFF0000,0xB6FD91E3,0x28D17790,0x20038B30
+ long 0x3FFF0000,0xB8FBAF47,0x62FB9EE8,0x200DC3CC
+ long 0x3FFF0000,0xBAFF5AB2,0x133E45FC,0x9F8B2AE6
+ long 0x3FFF0000,0xBD08A39F,0x580C36C0,0xA02BBF70
+ long 0x3FFF0000,0xBF1799B6,0x7A731084,0xA00BF518
+ long 0x3FFF0000,0xC12C4CCA,0x66709458,0xA041DD41
+ long 0x3FFF0000,0xC346CCDA,0x24976408,0x9FDF137B
+ long 0x3FFF0000,0xC5672A11,0x5506DADC,0x201F1568
+ long 0x3FFF0000,0xC78D74C8,0xABB9B15C,0x1FC13A2E
+ long 0x3FFF0000,0xC9B9BD86,0x6E2F27A4,0xA03F8F03
+ long 0x3FFF0000,0xCBEC14FE,0xF2727C5C,0x1FF4907D
+ long 0x3FFF0000,0xCE248C15,0x1F8480E4,0x9E6E53E4
+ long 0x3FFF0000,0xD06333DA,0xEF2B2594,0x1FD6D45C
+ long 0x3FFF0000,0xD2A81D91,0xF12AE45C,0xA076EDB9
+ long 0x3FFF0000,0xD4F35AAB,0xCFEDFA20,0x9FA6DE21
+ long 0x3FFF0000,0xD744FCCA,0xD69D6AF4,0x1EE69A2F
+ long 0x3FFF0000,0xD99D15C2,0x78AFD7B4,0x207F439F
+ long 0x3FFF0000,0xDBFBB797,0xDAF23754,0x201EC207
+ long 0x3FFF0000,0xDE60F482,0x5E0E9124,0x9E8BE175
+ long 0x3FFF0000,0xE0CCDEEC,0x2A94E110,0x20032C4B
+ long 0x3FFF0000,0xE33F8972,0xBE8A5A50,0x2004DFF5
+ long 0x3FFF0000,0xE5B906E7,0x7C8348A8,0x1E72F47A
+ long 0x3FFF0000,0xE8396A50,0x3C4BDC68,0x1F722F22
+ long 0x3FFF0000,0xEAC0C6E7,0xDD243930,0xA017E945
+ long 0x3FFF0000,0xED4F301E,0xD9942B84,0x1F401A5B
+ long 0x3FFF0000,0xEFE4B99B,0xDCDAF5CC,0x9FB9A9E3
+ long 0x3FFF0000,0xF281773C,0x59FFB138,0x20744C05
+ long 0x3FFF0000,0xF5257D15,0x2486CC2C,0x1F773A19
+ long 0x3FFF0000,0xF7D0DF73,0x0AD13BB8,0x1FFE90D5
+ long 0x3FFF0000,0xFA83B2DB,0x722A033C,0xA041ED22
+ long 0x3FFF0000,0xFD3E0C0C,0xF486C174,0x1F853F3A
+
+ set ADJFLAG,L_SCR2
+ set SCALE,FP_SCR0
+ set ADJSCALE,FP_SCR1
+ set SC,FP_SCR0
+ set ONEBYSC,FP_SCR1
+
+ global setox
+setox:
+#--entry point for EXP(X), here X is finite, non-zero, and not NaN's
+
+#--Step 1.
+ mov.l (%a0),%d1 # load part of input X
+ and.l &0x7FFF0000,%d1 # biased expo. of X
+ cmp.l %d1,&0x3FBE0000 # 2^(-65)
+ bge.b EXPC1 # normal case
+ bra EXPSM
+
+EXPC1:
+#--The case |X| >= 2^(-65)
+ mov.w 4(%a0),%d1 # expo. and partial sig. of |X|
+ cmp.l %d1,&0x400CB167 # 16380 log2 trunc. 16 bits
+ blt.b EXPMAIN # normal case
+ bra EEXPBIG
+
+EXPMAIN:
+#--Step 2.
+#--This is the normal branch: 2^(-65) <= |X| < 16380 log2.
+ fmov.x (%a0),%fp0 # load input from (a0)
+
+ fmov.x %fp0,%fp1
+ fmul.s &0x42B8AA3B,%fp0 # 64/log2 * X
+ fmovm.x &0xc,-(%sp) # save fp2 {%fp2/%fp3}
+ mov.l &0,ADJFLAG(%a6)
+ fmov.l %fp0,%d1 # N = int( X * 64/log2 )
+ lea EEXPTBL(%pc),%a1
+ fmov.l %d1,%fp0 # convert to floating-format
+
+ mov.l %d1,L_SCR1(%a6) # save N temporarily
+ and.l &0x3F,%d1 # D1 is J = N mod 64
+ lsl.l &4,%d1
+ add.l %d1,%a1 # address of 2^(J/64)
+ mov.l L_SCR1(%a6),%d1
+ asr.l &6,%d1 # D1 is M
+ add.w &0x3FFF,%d1 # biased expo. of 2^(M)
+ mov.w L2(%pc),L_SCR1(%a6) # prefetch L2, no need in CB
+
+EXPCONT1:
+#--Step 3.
+#--fp2,fp3 saved on the stack. fp0 is N, fp1 is X,
+#--a1 points to 2^(J/64), D1 is biased expo. of 2^(M)
+ fmov.x %fp0,%fp2
+ fmul.s &0xBC317218,%fp0 # N * L1, L1 = lead(-log2/64)
+ fmul.x L2(%pc),%fp2 # N * L2, L1+L2 = -log2/64
+ fadd.x %fp1,%fp0 # X + N*L1
+ fadd.x %fp2,%fp0 # fp0 is R, reduced arg.
+
+#--Step 4.
+#--WE NOW COMPUTE EXP(R)-1 BY A POLYNOMIAL
+#-- R + R*R*(A1 + R*(A2 + R*(A3 + R*(A4 + R*A5))))
+#--TO FULLY UTILIZE THE PIPELINE, WE COMPUTE S = R*R
+#--[R+R*S*(A2+S*A4)] + [S*(A1+S*(A3+S*A5))]
+
+ fmov.x %fp0,%fp1
+ fmul.x %fp1,%fp1 # fp1 IS S = R*R
+
+ fmov.s &0x3AB60B70,%fp2 # fp2 IS A5
+
+ fmul.x %fp1,%fp2 # fp2 IS S*A5
+ fmov.x %fp1,%fp3
+ fmul.s &0x3C088895,%fp3 # fp3 IS S*A4
+
+ fadd.d EEXPA3(%pc),%fp2 # fp2 IS A3+S*A5
+ fadd.d EEXPA2(%pc),%fp3 # fp3 IS A2+S*A4
+
+ fmul.x %fp1,%fp2 # fp2 IS S*(A3+S*A5)
+ mov.w %d1,SCALE(%a6) # SCALE is 2^(M) in extended
+ mov.l &0x80000000,SCALE+4(%a6)
+ clr.l SCALE+8(%a6)
+
+ fmul.x %fp1,%fp3 # fp3 IS S*(A2+S*A4)
+
+ fadd.s &0x3F000000,%fp2 # fp2 IS A1+S*(A3+S*A5)
+ fmul.x %fp0,%fp3 # fp3 IS R*S*(A2+S*A4)
+
+ fmul.x %fp1,%fp2 # fp2 IS S*(A1+S*(A3+S*A5))
+ fadd.x %fp3,%fp0 # fp0 IS R+R*S*(A2+S*A4),
+
+ fmov.x (%a1)+,%fp1 # fp1 is lead. pt. of 2^(J/64)
+ fadd.x %fp2,%fp0 # fp0 is EXP(R) - 1
+
+#--Step 5
+#--final reconstruction process
+#--EXP(X) = 2^M * ( 2^(J/64) + 2^(J/64)*(EXP(R)-1) )
+
+ fmul.x %fp1,%fp0 # 2^(J/64)*(Exp(R)-1)
+ fmovm.x (%sp)+,&0x30 # fp2 restored {%fp2/%fp3}
+ fadd.s (%a1),%fp0 # accurate 2^(J/64)
+
+ fadd.x %fp1,%fp0 # 2^(J/64) + 2^(J/64)*...
+ mov.l ADJFLAG(%a6),%d1
+
+#--Step 6
+ tst.l %d1
+ beq.b NORMAL
+ADJUST:
+ fmul.x ADJSCALE(%a6),%fp0
+NORMAL:
+ fmov.l %d0,%fpcr # restore user FPCR
+ mov.b &FMUL_OP,%d1 # last inst is MUL
+ fmul.x SCALE(%a6),%fp0 # multiply 2^(M)
+ bra t_catch
+
+EXPSM:
+#--Step 7
+ fmovm.x (%a0),&0x80 # load X
+ fmov.l %d0,%fpcr
+ fadd.s &0x3F800000,%fp0 # 1+X in user mode
+ bra t_pinx2
+
+EEXPBIG:
+#--Step 8
+ cmp.l %d1,&0x400CB27C # 16480 log2
+ bgt.b EXP2BIG
+#--Steps 8.2 -- 8.6
+ fmov.x (%a0),%fp0 # load input from (a0)
+
+ fmov.x %fp0,%fp1
+ fmul.s &0x42B8AA3B,%fp0 # 64/log2 * X
+ fmovm.x &0xc,-(%sp) # save fp2 {%fp2/%fp3}
+ mov.l &1,ADJFLAG(%a6)
+ fmov.l %fp0,%d1 # N = int( X * 64/log2 )
+ lea EEXPTBL(%pc),%a1
+ fmov.l %d1,%fp0 # convert to floating-format
+ mov.l %d1,L_SCR1(%a6) # save N temporarily
+ and.l &0x3F,%d1 # D1 is J = N mod 64
+ lsl.l &4,%d1
+ add.l %d1,%a1 # address of 2^(J/64)
+ mov.l L_SCR1(%a6),%d1
+ asr.l &6,%d1 # D1 is K
+ mov.l %d1,L_SCR1(%a6) # save K temporarily
+ asr.l &1,%d1 # D1 is M1
+ sub.l %d1,L_SCR1(%a6) # L_SCR1 is M
+ add.w &0x3FFF,%d1 # biased expo. of 2^(M1)
+ mov.w %d1,ADJSCALE(%a6) # ADJSCALE := 2^(M1)
+ mov.l &0x80000000,ADJSCALE+4(%a6)
+ clr.l ADJSCALE+8(%a6)
+ mov.l L_SCR1(%a6),%d1 # D1 is M
+ add.w &0x3FFF,%d1 # biased expo. of 2^(M)
+ bra.w EXPCONT1 # go back to Step 3
+
+EXP2BIG:
+#--Step 9
+ tst.b (%a0) # is X positive or negative?
+ bmi t_unfl2
+ bra t_ovfl2
+
+ global setoxd
+setoxd:
+#--entry point for EXP(X), X is denormalized
+ mov.l (%a0),-(%sp)
+ andi.l &0x80000000,(%sp)
+ ori.l &0x00800000,(%sp) # sign(X)*2^(-126)
+
+ fmov.s &0x3F800000,%fp0
+
+ fmov.l %d0,%fpcr
+ fadd.s (%sp)+,%fp0
+ bra t_pinx2
+
+ global setoxm1
+setoxm1:
+#--entry point for EXPM1(X), here X is finite, non-zero, non-NaN
+
+#--Step 1.
+#--Step 1.1
+ mov.l (%a0),%d1 # load part of input X
+ and.l &0x7FFF0000,%d1 # biased expo. of X
+ cmp.l %d1,&0x3FFD0000 # 1/4
+ bge.b EM1CON1 # |X| >= 1/4
+ bra EM1SM
+
+EM1CON1:
+#--Step 1.3
+#--The case |X| >= 1/4
+ mov.w 4(%a0),%d1 # expo. and partial sig. of |X|
+ cmp.l %d1,&0x4004C215 # 70log2 rounded up to 16 bits
+ ble.b EM1MAIN # 1/4 <= |X| <= 70log2
+ bra EM1BIG
+
+EM1MAIN:
+#--Step 2.
+#--This is the case: 1/4 <= |X| <= 70 log2.
+ fmov.x (%a0),%fp0 # load input from (a0)
+
+ fmov.x %fp0,%fp1
+ fmul.s &0x42B8AA3B,%fp0 # 64/log2 * X
+ fmovm.x &0xc,-(%sp) # save fp2 {%fp2/%fp3}
+ fmov.l %fp0,%d1 # N = int( X * 64/log2 )
+ lea EEXPTBL(%pc),%a1
+ fmov.l %d1,%fp0 # convert to floating-format
+
+ mov.l %d1,L_SCR1(%a6) # save N temporarily
+ and.l &0x3F,%d1 # D1 is J = N mod 64
+ lsl.l &4,%d1
+ add.l %d1,%a1 # address of 2^(J/64)
+ mov.l L_SCR1(%a6),%d1
+ asr.l &6,%d1 # D1 is M
+ mov.l %d1,L_SCR1(%a6) # save a copy of M
+
+#--Step 3.
+#--fp2,fp3 saved on the stack. fp0 is N, fp1 is X,
+#--a1 points to 2^(J/64), D1 and L_SCR1 both contain M
+ fmov.x %fp0,%fp2
+ fmul.s &0xBC317218,%fp0 # N * L1, L1 = lead(-log2/64)
+ fmul.x L2(%pc),%fp2 # N * L2, L1+L2 = -log2/64
+ fadd.x %fp1,%fp0 # X + N*L1
+ fadd.x %fp2,%fp0 # fp0 is R, reduced arg.
+ add.w &0x3FFF,%d1 # D1 is biased expo. of 2^M
+
+#--Step 4.
+#--WE NOW COMPUTE EXP(R)-1 BY A POLYNOMIAL
+#-- R + R*R*(A1 + R*(A2 + R*(A3 + R*(A4 + R*(A5 + R*A6)))))
+#--TO FULLY UTILIZE THE PIPELINE, WE COMPUTE S = R*R
+#--[R*S*(A2+S*(A4+S*A6))] + [R+S*(A1+S*(A3+S*A5))]
+
+ fmov.x %fp0,%fp1
+ fmul.x %fp1,%fp1 # fp1 IS S = R*R
+
+ fmov.s &0x3950097B,%fp2 # fp2 IS a6
+
+ fmul.x %fp1,%fp2 # fp2 IS S*A6
+ fmov.x %fp1,%fp3
+ fmul.s &0x3AB60B6A,%fp3 # fp3 IS S*A5
+
+ fadd.d EM1A4(%pc),%fp2 # fp2 IS A4+S*A6
+ fadd.d EM1A3(%pc),%fp3 # fp3 IS A3+S*A5
+ mov.w %d1,SC(%a6) # SC is 2^(M) in extended
+ mov.l &0x80000000,SC+4(%a6)
+ clr.l SC+8(%a6)
+
+ fmul.x %fp1,%fp2 # fp2 IS S*(A4+S*A6)
+ mov.l L_SCR1(%a6),%d1 # D0 is M
+ neg.w %d1 # D0 is -M
+ fmul.x %fp1,%fp3 # fp3 IS S*(A3+S*A5)
+ add.w &0x3FFF,%d1 # biased expo. of 2^(-M)
+ fadd.d EM1A2(%pc),%fp2 # fp2 IS A2+S*(A4+S*A6)
+ fadd.s &0x3F000000,%fp3 # fp3 IS A1+S*(A3+S*A5)
+
+ fmul.x %fp1,%fp2 # fp2 IS S*(A2+S*(A4+S*A6))
+ or.w &0x8000,%d1 # signed/expo. of -2^(-M)
+ mov.w %d1,ONEBYSC(%a6) # OnebySc is -2^(-M)
+ mov.l &0x80000000,ONEBYSC+4(%a6)
+ clr.l ONEBYSC+8(%a6)
+ fmul.x %fp3,%fp1 # fp1 IS S*(A1+S*(A3+S*A5))
+
+ fmul.x %fp0,%fp2 # fp2 IS R*S*(A2+S*(A4+S*A6))
+ fadd.x %fp1,%fp0 # fp0 IS R+S*(A1+S*(A3+S*A5))
+
+ fadd.x %fp2,%fp0 # fp0 IS EXP(R)-1
+
+ fmovm.x (%sp)+,&0x30 # fp2 restored {%fp2/%fp3}
+
+#--Step 5
+#--Compute 2^(J/64)*p
+
+ fmul.x (%a1),%fp0 # 2^(J/64)*(Exp(R)-1)
+
+#--Step 6
+#--Step 6.1
+ mov.l L_SCR1(%a6),%d1 # retrieve M
+ cmp.l %d1,&63
+ ble.b MLE63
+#--Step 6.2 M >= 64
+ fmov.s 12(%a1),%fp1 # fp1 is t
+ fadd.x ONEBYSC(%a6),%fp1 # fp1 is t+OnebySc
+ fadd.x %fp1,%fp0 # p+(t+OnebySc), fp1 released
+ fadd.x (%a1),%fp0 # T+(p+(t+OnebySc))
+ bra EM1SCALE
+MLE63:
+#--Step 6.3 M <= 63
+ cmp.l %d1,&-3
+ bge.b MGEN3
+MLTN3:
+#--Step 6.4 M <= -4
+ fadd.s 12(%a1),%fp0 # p+t
+ fadd.x (%a1),%fp0 # T+(p+t)
+ fadd.x ONEBYSC(%a6),%fp0 # OnebySc + (T+(p+t))
+ bra EM1SCALE
+MGEN3:
+#--Step 6.5 -3 <= M <= 63
+ fmov.x (%a1)+,%fp1 # fp1 is T
+ fadd.s (%a1),%fp0 # fp0 is p+t
+ fadd.x ONEBYSC(%a6),%fp1 # fp1 is T+OnebySc
+ fadd.x %fp1,%fp0 # (T+OnebySc)+(p+t)
+
+EM1SCALE:
+#--Step 6.6
+ fmov.l %d0,%fpcr
+ fmul.x SC(%a6),%fp0
+ bra t_inx2
+
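+#--Steps 6.2-6.5 above all compute 2^(M)*(T + t + p + OnebySc), where
+#--T + t = 2^(J/64) from the table, p = 2^(J/64)*(EXP(R)-1) in fp0, and
+#--OnebySc = -2^(-M); only the summation order changes with M, so the
+#--smallest terms combine first. A hedged C sketch (double precision
+#--stand-in; all names illustrative):
+#
+#	#include <math.h>
+#
+#	double em1_reconstruct(double T, double t, double p, int M)
+#	{
+#	    double s = -ldexp(1.0, -M);     /* OnebySc = -2**(-M) */
+#	    double r;
+#	    if (M >= 64)                    /* Step 6.2: T dominates */
+#	        r = T + (p + (t + s));
+#	    else if (M <= -4)               /* Step 6.4: s dominates */
+#	        r = ((p + t) + T) + s;
+#	    else                            /* Step 6.5: -3 <= M <= 63 */
+#	        r = (T + s) + (p + t);
+#	    return ldexp(r, M);             /* Step 6.6: multiply by SC */
+#	}
+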
+EM1SM:
+#--Step 7 |X| < 1/4.
+ cmp.l %d1,&0x3FBE0000 # 2^(-65)
+ bge.b EM1POLY
+
+EM1TINY:
+#--Step 8 |X| < 2^(-65)
+	cmp.l	%d1,&0x00330000	# 2^(-16332)
+ blt.b EM12TINY
+#--Step 8.2
+ mov.l &0x80010000,SC(%a6) # SC is -2^(-16382)
+ mov.l &0x80000000,SC+4(%a6)
+ clr.l SC+8(%a6)
+ fmov.x (%a0),%fp0
+ fmov.l %d0,%fpcr
+ mov.b &FADD_OP,%d1 # last inst is ADD
+ fadd.x SC(%a6),%fp0
+ bra t_catch
+
+EM12TINY:
+#--Step 8.3
+ fmov.x (%a0),%fp0
+ fmul.d TWO140(%pc),%fp0
+ mov.l &0x80010000,SC(%a6)
+ mov.l &0x80000000,SC+4(%a6)
+ clr.l SC+8(%a6)
+ fadd.x SC(%a6),%fp0
+ fmov.l %d0,%fpcr
+ mov.b &FMUL_OP,%d1 # last inst is MUL
+ fmul.d TWON140(%pc),%fp0
+ bra t_catch
+
+EM1POLY:
+#--Step 9 exp(X)-1 by a simple polynomial
+ fmov.x (%a0),%fp0 # fp0 is X
+ fmul.x %fp0,%fp0 # fp0 is S := X*X
+ fmovm.x &0xc,-(%sp) # save fp2 {%fp2/%fp3}
+ fmov.s &0x2F30CAA8,%fp1 # fp1 is B12
+ fmul.x %fp0,%fp1 # fp1 is S*B12
+ fmov.s &0x310F8290,%fp2 # fp2 is B11
+ fadd.s &0x32D73220,%fp1 # fp1 is B10+S*B12
+
+ fmul.x %fp0,%fp2 # fp2 is S*B11
+ fmul.x %fp0,%fp1 # fp1 is S*(B10 + ...
+
+ fadd.s &0x3493F281,%fp2 # fp2 is B9+S*...
+ fadd.d EM1B8(%pc),%fp1 # fp1 is B8+S*...
+
+ fmul.x %fp0,%fp2 # fp2 is S*(B9+...
+ fmul.x %fp0,%fp1 # fp1 is S*(B8+...
+
+ fadd.d EM1B7(%pc),%fp2 # fp2 is B7+S*...
+ fadd.d EM1B6(%pc),%fp1 # fp1 is B6+S*...
+
+ fmul.x %fp0,%fp2 # fp2 is S*(B7+...
+ fmul.x %fp0,%fp1 # fp1 is S*(B6+...
+
+ fadd.d EM1B5(%pc),%fp2 # fp2 is B5+S*...
+ fadd.d EM1B4(%pc),%fp1 # fp1 is B4+S*...
+
+ fmul.x %fp0,%fp2 # fp2 is S*(B5+...
+ fmul.x %fp0,%fp1 # fp1 is S*(B4+...
+
+ fadd.d EM1B3(%pc),%fp2 # fp2 is B3+S*...
+ fadd.x EM1B2(%pc),%fp1 # fp1 is B2+S*...
+
+ fmul.x %fp0,%fp2 # fp2 is S*(B3+...
+ fmul.x %fp0,%fp1 # fp1 is S*(B2+...
+
+ fmul.x %fp0,%fp2 # fp2 is S*S*(B3+...)
+ fmul.x (%a0),%fp1 # fp1 is X*S*(B2...
+
+ fmul.s &0x3F000000,%fp0 # fp0 is S*B1
+ fadd.x %fp2,%fp1 # fp1 is Q
+
+ fmovm.x (%sp)+,&0x30 # fp2 restored {%fp2/%fp3}
+
+ fadd.x %fp1,%fp0 # fp0 is S*B1+Q
+
+ fmov.l %d0,%fpcr
+ fadd.x (%a0),%fp0
+ bra t_inx2
+
+EM1BIG:
+#--Step 10 |X| > 70 log2
+ mov.l (%a0),%d1
+ cmp.l %d1,&0
+ bgt.w EXPC1
+#--Step 10.2
+ fmov.s &0xBF800000,%fp0 # fp0 is -1
+ fmov.l %d0,%fpcr
+ fadd.s &0x00800000,%fp0 # -1 + 2^(-126)
+ bra t_minx2
+
+ global setoxm1d
+setoxm1d:
+#--entry point for EXPM1(X), here X is denormalized
+#--Step 0.
+ bra t_extdnrm
+
+#########################################################################
+# sgetexp(): returns the exponent portion of the input argument. #
+# The exponent bias is removed and the exponent value is #
+# returned as an extended precision number in fp0. #
+# sgetexpd(): handles denormalized numbers. #
+# #
+# sgetman(): extracts the mantissa of the input argument. The #
+# mantissa is converted to an extended precision number w/ #
+# an exponent of $3fff and is returned in fp0. The range of #
+#	the result is [1.0, 2.0).					#
+# sgetmand(): handles denormalized numbers. #
+# #
+# INPUT *************************************************************** #
+# a0 = pointer to extended precision input #
+# #
+# OUTPUT ************************************************************** #
+# fp0 = exponent(X) or mantissa(X) #
+# #
+#########################################################################
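+
+#--A hedged C sketch of both extractions on the in-memory extended
+#--format (sign/exponent word, 16 pad bits, 64-bit mantissa); the
+#--struct and names are illustrative, not the FPSP's:
+#
+#	#include <stdint.h>
+#
+#	struct xext { uint32_t se; uint32_t hi; uint32_t lo; };
+#
+#	/* sgetexp: strip the sign bit, remove the $3fff bias */
+#	int32_t xext_getexp(const struct xext *x)
+#	{
+#	    return (int32_t)((x->se >> 16) & 0x7fff) - 0x3fff;
+#	}
+#
+#	/* sgetman: keep sign and mantissa, force the exponent to $3fff,
+#	 * yielding a magnitude in [1.0, 2.0) */
+#	struct xext xext_getman(struct xext x)
+#	{
+#	    x.se = (x.se & 0x80000000) | 0x3fff0000;
+#	    return x;
+#	}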
+
+ global sgetexp
+sgetexp:
+ mov.w SRC_EX(%a0),%d0 # get the exponent
+ bclr &0xf,%d0 # clear the sign bit
+ subi.w &0x3fff,%d0 # subtract off the bias
+ fmov.w %d0,%fp0 # return exp in fp0
+ blt.b sgetexpn # it's negative
+ rts
+
+sgetexpn:
+ mov.b &neg_bmask,FPSR_CC(%a6) # set 'N' ccode bit
+ rts
+
+ global sgetexpd
+sgetexpd:
+ bsr.l norm # normalize
+ neg.w %d0 # new exp = -(shft amt)
+ subi.w &0x3fff,%d0 # subtract off the bias
+ fmov.w %d0,%fp0 # return exp in fp0
+ mov.b &neg_bmask,FPSR_CC(%a6) # set 'N' ccode bit
+ rts
+
+ global sgetman
+sgetman:
+ mov.w SRC_EX(%a0),%d0 # get the exp
+ ori.w &0x7fff,%d0 # clear old exp
+ bclr &0xe,%d0 # make it the new exp +-3fff
+
+# here, we build the result in a tmp location so as not to disturb the input
+ mov.l SRC_HI(%a0),FP_SCR0_HI(%a6) # copy to tmp loc
+ mov.l SRC_LO(%a0),FP_SCR0_LO(%a6) # copy to tmp loc
+ mov.w %d0,FP_SCR0_EX(%a6) # insert new exponent
+ fmov.x FP_SCR0(%a6),%fp0 # put new value back in fp0
+ bmi.b sgetmann # it's negative
+ rts
+
+sgetmann:
+ mov.b &neg_bmask,FPSR_CC(%a6) # set 'N' ccode bit
+ rts
+
+#
+# For denormalized numbers, shift the mantissa until the j-bit = 1,
+# then load the exponent with +/- $3fff.
+#
+ global sgetmand
+sgetmand:
+ bsr.l norm # normalize exponent
+ bra.b sgetman
+
+#########################################################################
+# scosh(): computes the hyperbolic cosine of a normalized input #
+# scoshd(): computes the hyperbolic cosine of a denormalized input #
+# #
+# INPUT *************************************************************** #
+# a0 = pointer to extended precision input #
+# d0 = round precision,mode #
+# #
+# OUTPUT ************************************************************** #
+# fp0 = cosh(X) #
+# #
+# ACCURACY and MONOTONICITY ******************************************* #
+#	The returned result is within 3 ulps in 64 significant bits,	#
+# i.e. within 0.5001 ulp to 53 bits if the result is subsequently #
+# rounded to double precision. The result is provably monotonic #
+# in double precision. #
+# #
+# ALGORITHM *********************************************************** #
+# #
+# COSH #
+# 1. If |X| > 16380 log2, go to 3. #
+# #
+# 2. (|X| <= 16380 log2) Cosh(X) is obtained by the formulae #
+# y = |X|, z = exp(Y), and #
+# cosh(X) = (1/2)*( z + 1/z ). #
+# Exit. #
+# #
+# 3. (|X| > 16380 log2). If |X| > 16480 log2, go to 5. #
+# #
+# 4. (16380 log2 < |X| <= 16480 log2) #
+#		cosh(X) = exp(|X|)/2.					#
+#		However, invoking exp(|X|) may cause premature		#
+#		overflow. Thus, we calculate cosh(X) as follows:	#
+# Y := |X| #
+# Fact := 2**(16380) #
+# Y' := Y - 16381 log2 #
+# cosh(X) := Fact * exp(Y'). #
+# Exit. #
+# #
+#	5. (|X| > 16480 log2) cosh(X) must overflow. Return		#
+# Huge*Huge to generate overflow and an infinity with #
+# the appropriate sign. Huge is the largest finite number #
+# in extended format. Exit. #
+# #
+#########################################################################
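+
+#--A hedged C sketch of the algorithm above (double precision stand-in;
+#--the quoted thresholds belong to the extended format and overflow far
+#--earlier in double):
+#
+#	#include <math.h>
+#
+#	double cosh_sketch(double x)
+#	{
+#	    double y = fabs(x);
+#	    if (y <= 16380 * M_LN2) {               /* step 2: usual case */
+#	        double z = exp(y);
+#	        return 0.5 * z + 0.25 / (0.5 * z);  /* (1/2)*(z + 1/z) */
+#	    }
+#	    if (y <= 16480 * M_LN2)                 /* step 4: defer scaling */
+#	        return ldexp(exp(y - 16381 * M_LN2), 16380);
+#	    return HUGE_VAL * HUGE_VAL;             /* step 5: force overflow */
+#	}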
+
+TWO16380:
+ long 0x7FFB0000,0x80000000,0x00000000,0x00000000
+
+ global scosh
+scosh:
+ fmov.x (%a0),%fp0 # LOAD INPUT
+
+ mov.l (%a0),%d1
+ mov.w 4(%a0),%d1
+ and.l &0x7FFFFFFF,%d1
+ cmp.l %d1,&0x400CB167
+ bgt.b COSHBIG
+
+#--THIS IS THE USUAL CASE, |X| < 16380 LOG2
+#--COSH(X) = (1/2) * ( EXP(X) + 1/EXP(X) )
+
+ fabs.x %fp0 # |X|
+
+ mov.l %d0,-(%sp)
+ clr.l %d0
+ fmovm.x &0x01,-(%sp) # save |X| to stack
+ lea (%sp),%a0 # pass ptr to |X|
+ bsr setox # FP0 IS EXP(|X|)
+ add.l &0xc,%sp # erase |X| from stack
+ fmul.s &0x3F000000,%fp0 # (1/2)EXP(|X|)
+ mov.l (%sp)+,%d0
+
+ fmov.s &0x3E800000,%fp1 # (1/4)
+ fdiv.x %fp0,%fp1 # 1/(2 EXP(|X|))
+
+ fmov.l %d0,%fpcr
+ mov.b &FADD_OP,%d1 # last inst is ADD
+ fadd.x %fp1,%fp0
+ bra t_catch
+
+COSHBIG:
+ cmp.l %d1,&0x400CB2B3
+ bgt.b COSHHUGE
+
+ fabs.x %fp0
+ fsub.d T1(%pc),%fp0 # (|X|-16381LOG2_LEAD)
+ fsub.d T2(%pc),%fp0 # |X| - 16381 LOG2, ACCURATE
+
+ mov.l %d0,-(%sp)
+ clr.l %d0
+ fmovm.x &0x01,-(%sp) # save fp0 to stack
+ lea (%sp),%a0 # pass ptr to fp0
+ bsr setox
+ add.l &0xc,%sp # clear fp0 from stack
+ mov.l (%sp)+,%d0
+
+ fmov.l %d0,%fpcr
+ mov.b &FMUL_OP,%d1 # last inst is MUL
+ fmul.x TWO16380(%pc),%fp0
+ bra t_catch
+
+COSHHUGE:
+ bra t_ovfl2
+
+ global scoshd
+#--COSH(X) = 1 FOR DENORMALIZED X
+scoshd:
+ fmov.s &0x3F800000,%fp0
+
+ fmov.l %d0,%fpcr
+ fadd.s &0x00800000,%fp0
+ bra t_pinx2
+
+#########################################################################
+# ssinh(): computes the hyperbolic sine of a normalized input #
+# ssinhd(): computes the hyperbolic sine of a denormalized input #
+# #
+# INPUT *************************************************************** #
+# a0 = pointer to extended precision input #
+# d0 = round precision,mode #
+# #
+# OUTPUT ************************************************************** #
+# fp0 = sinh(X) #
+# #
+# ACCURACY and MONOTONICITY ******************************************* #
+#	The returned result is within 3 ulps in 64 significant bits,	#
+# i.e. within 0.5001 ulp to 53 bits if the result is subsequently #
+# rounded to double precision. The result is provably monotonic #
+# in double precision. #
+# #
+# ALGORITHM *********************************************************** #
+# #
+# SINH #
+# 1. If |X| > 16380 log2, go to 3. #
+# #
+# 2. (|X| <= 16380 log2) Sinh(X) is obtained by the formula #
+# y = |X|, sgn = sign(X), and z = expm1(Y), #
+# sinh(X) = sgn*(1/2)*( z + z/(1+z) ). #
+# Exit. #
+# #
+# 3. If |X| > 16480 log2, go to 5. #
+# #
+# 4. (16380 log2 < |X| <= 16480 log2) #
+# sinh(X) = sign(X) * exp(|X|)/2. #
+# However, invoking exp(|X|) may cause premature overflow. #
+# Thus, we calculate sinh(X) as follows: #
+# Y := |X| #
+# sgn := sign(X) #
+# sgnFact := sgn * 2**(16380) #
+# Y' := Y - 16381 log2 #
+# sinh(X) := sgnFact * exp(Y'). #
+# Exit. #
+# #
+# 5. (|X| > 16480 log2) sinh(X) must overflow. Return #
+# sign(X)*Huge*Huge to generate overflow and an infinity with #
+# the appropriate sign. Huge is the largest finite number in #
+# extended format. Exit. #
+# #
+#########################################################################
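+
+#--A hedged C sketch of the algorithm above (double precision stand-in;
+#--extended-precision thresholds shown for illustration only):
+#
+#	#include <math.h>
+#
+#	double sinh_sketch(double x)
+#	{
+#	    double y = fabs(x), sgn = copysign(1.0, x);
+#	    if (y <= 16380 * M_LN2) {                /* step 2: via expm1 */
+#	        double z = expm1(y);
+#	        return sgn * 0.5 * (z + z / (1.0 + z));
+#	    }
+#	    if (y <= 16480 * M_LN2)                  /* step 4: defer scaling */
+#	        return sgn * ldexp(exp(y - 16381 * M_LN2), 16380);
+#	    return sgn * HUGE_VAL * HUGE_VAL;        /* step 5: force overflow */
+#	}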
+
+ global ssinh
+ssinh:
+ fmov.x (%a0),%fp0 # LOAD INPUT
+
+ mov.l (%a0),%d1
+ mov.w 4(%a0),%d1
+ mov.l %d1,%a1 # save (compacted) operand
+ and.l &0x7FFFFFFF,%d1
+ cmp.l %d1,&0x400CB167
+ bgt.b SINHBIG
+
+#--THIS IS THE USUAL CASE, |X| < 16380 LOG2
+#--Y = |X|, Z = EXPM1(Y), SINH(X) = SIGN(X)*(1/2)*( Z + Z/(1+Z) )
+
+ fabs.x %fp0 # Y = |X|
+
+ movm.l &0x8040,-(%sp) # {a1/d0}
+ fmovm.x &0x01,-(%sp) # save Y on stack
+ lea (%sp),%a0 # pass ptr to Y
+ clr.l %d0
+ bsr setoxm1 # FP0 IS Z = EXPM1(Y)
+ add.l &0xc,%sp # clear Y from stack
+ fmov.l &0,%fpcr
+ movm.l (%sp)+,&0x0201 # {a1/d0}
+
+ fmov.x %fp0,%fp1
+ fadd.s &0x3F800000,%fp1 # 1+Z
+ fmov.x %fp0,-(%sp)
+ fdiv.x %fp1,%fp0 # Z/(1+Z)
+ mov.l %a1,%d1
+ and.l &0x80000000,%d1
+ or.l &0x3F000000,%d1
+ fadd.x (%sp)+,%fp0
+ mov.l %d1,-(%sp)
+
+ fmov.l %d0,%fpcr
+ mov.b &FMUL_OP,%d1 # last inst is MUL
+ fmul.s (%sp)+,%fp0 # last fp inst - possible exceptions set
+ bra t_catch
+
+SINHBIG:
+ cmp.l %d1,&0x400CB2B3
+ bgt t_ovfl
+ fabs.x %fp0
+ fsub.d T1(%pc),%fp0 # (|X|-16381LOG2_LEAD)
+ mov.l &0,-(%sp)
+ mov.l &0x80000000,-(%sp)
+ mov.l %a1,%d1
+ and.l &0x80000000,%d1
+ or.l &0x7FFB0000,%d1
+ mov.l %d1,-(%sp) # EXTENDED FMT
+ fsub.d T2(%pc),%fp0 # |X| - 16381 LOG2, ACCURATE
+
+ mov.l %d0,-(%sp)
+ clr.l %d0
+ fmovm.x &0x01,-(%sp) # save fp0 on stack
+ lea (%sp),%a0 # pass ptr to fp0
+ bsr setox
+ add.l &0xc,%sp # clear fp0 from stack
+
+ mov.l (%sp)+,%d0
+ fmov.l %d0,%fpcr
+ mov.b &FMUL_OP,%d1 # last inst is MUL
+ fmul.x (%sp)+,%fp0 # possible exception
+ bra t_catch
+
+ global ssinhd
+#--SINH(X) = X FOR DENORMALIZED X
+ssinhd:
+ bra t_extdnrm
+
+#########################################################################
+# stanh(): computes the hyperbolic tangent of a normalized input #
+# stanhd(): computes the hyperbolic tangent of a denormalized input #
+# #
+# INPUT *************************************************************** #
+# a0 = pointer to extended precision input #
+# d0 = round precision,mode #
+# #
+# OUTPUT ************************************************************** #
+# fp0 = tanh(X) #
+# #
+# ACCURACY and MONOTONICITY ******************************************* #
+#	The returned result is within 3 ulps in 64 significant bits,	#
+# i.e. within 0.5001 ulp to 53 bits if the result is subsequently #
+# rounded to double precision. The result is provably monotonic #
+# in double precision. #
+# #
+# ALGORITHM *********************************************************** #
+# #
+# TANH #
+# 1. If |X| >= (5/2) log2 or |X| <= 2**(-40), go to 3. #
+# #
+# 2. (2**(-40) < |X| < (5/2) log2) Calculate tanh(X) by #
+# sgn := sign(X), y := 2|X|, z := expm1(Y), and #
+# tanh(X) = sgn*( z/(2+z) ). #
+# Exit. #
+# #
+# 3. (|X| <= 2**(-40) or |X| >= (5/2) log2). If |X| < 1, #
+# go to 7. #
+# #
+# 4. (|X| >= (5/2) log2) If |X| >= 50 log2, go to 6. #
+# #
+# 5. ((5/2) log2 <= |X| < 50 log2) Calculate tanh(X) by #
+# sgn := sign(X), y := 2|X|, z := exp(Y), #
+# tanh(X) = sgn - [ sgn*2/(1+z) ]. #
+# Exit. #
+# #
+# 6. (|X| >= 50 log2) Tanh(X) = +-1 (round to nearest). Thus, we #
+# calculate Tanh(X) by #
+# sgn := sign(X), Tiny := 2**(-126), #
+# tanh(X) := sgn - sgn*Tiny. #
+# Exit. #
+# #
+# 7. (|X| < 2**(-40)). Tanh(X) = X. Exit. #
+# #
+#########################################################################
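+
+#--A hedged C sketch of the case split above (double precision stand-in):
+#
+#	#include <math.h>
+#
+#	double tanh_sketch(double x)
+#	{
+#	    double a = fabs(x), sgn = copysign(1.0, x);
+#	    if (a <= 0x1p-40)                      /* step 7: tanh(X) = X */
+#	        return x;
+#	    if (a < 2.5 * M_LN2) {                 /* step 2: via expm1 */
+#	        double z = expm1(2.0 * a);
+#	        return sgn * (z / (2.0 + z));
+#	    }
+#	    if (a < 50.0 * M_LN2)                  /* step 5: via exp */
+#	        return sgn - sgn * 2.0 / (1.0 + exp(2.0 * a));
+#	    return sgn - sgn * 0x1p-126;           /* step 6: +-1, inexact */
+#	}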
+
+ set X,FP_SCR0
+ set XFRAC,X+4
+
+ set SGN,L_SCR3
+
+ set V,FP_SCR0
+
+ global stanh
+stanh:
+ fmov.x (%a0),%fp0 # LOAD INPUT
+
+ fmov.x %fp0,X(%a6)
+ mov.l (%a0),%d1
+ mov.w 4(%a0),%d1
+ mov.l %d1,X(%a6)
+ and.l &0x7FFFFFFF,%d1
+ cmp.l %d1, &0x3fd78000 # is |X| < 2^(-40)?
+ blt.w TANHBORS # yes
+ cmp.l %d1, &0x3fffddce # is |X| > (5/2)LOG2?
+ bgt.w TANHBORS # yes
+
+#--THIS IS THE USUAL CASE
+#--Y = 2|X|, Z = EXPM1(Y), TANH(X) = SIGN(X) * Z / (Z+2).
+
+ mov.l X(%a6),%d1
+ mov.l %d1,SGN(%a6)
+ and.l &0x7FFF0000,%d1
+ add.l &0x00010000,%d1 # EXPONENT OF 2|X|
+ mov.l %d1,X(%a6)
+ and.l &0x80000000,SGN(%a6)
+ fmov.x X(%a6),%fp0 # FP0 IS Y = 2|X|
+
+ mov.l %d0,-(%sp)
+ clr.l %d0
+ fmovm.x &0x1,-(%sp) # save Y on stack
+ lea (%sp),%a0 # pass ptr to Y
+ bsr setoxm1 # FP0 IS Z = EXPM1(Y)
+ add.l &0xc,%sp # clear Y from stack
+ mov.l (%sp)+,%d0
+
+ fmov.x %fp0,%fp1
+ fadd.s &0x40000000,%fp1 # Z+2
+ mov.l SGN(%a6),%d1
+ fmov.x %fp1,V(%a6)
+ eor.l %d1,V(%a6)
+
+ fmov.l %d0,%fpcr # restore users round prec,mode
+ fdiv.x V(%a6),%fp0
+ bra t_inx2
+
+TANHBORS:
+ cmp.l %d1,&0x3FFF8000
+ blt.w TANHSM
+
+ cmp.l %d1,&0x40048AA1
+ bgt.w TANHHUGE
+
+#-- (5/2) LOG2 < |X| < 50 LOG2,
+#--TANH(X) = 1 - (2/[EXP(2X)+1]). LET Y = 2|X|, SGN = SIGN(X),
+#--TANH(X) = SGN - SGN*2/[EXP(Y)+1].
+
+ mov.l X(%a6),%d1
+ mov.l %d1,SGN(%a6)
+ and.l &0x7FFF0000,%d1
+ add.l &0x00010000,%d1 # EXPO OF 2|X|
+ mov.l %d1,X(%a6) # Y = 2|X|
+ and.l &0x80000000,SGN(%a6)
+ mov.l SGN(%a6),%d1
+ fmov.x X(%a6),%fp0 # Y = 2|X|
+
+ mov.l %d0,-(%sp)
+ clr.l %d0
+ fmovm.x &0x01,-(%sp) # save Y on stack
+ lea (%sp),%a0 # pass ptr to Y
+ bsr setox # FP0 IS EXP(Y)
+ add.l &0xc,%sp # clear Y from stack
+ mov.l (%sp)+,%d0
+ mov.l SGN(%a6),%d1
+ fadd.s &0x3F800000,%fp0 # EXP(Y)+1
+
+ eor.l &0xC0000000,%d1 # -SIGN(X)*2
+ fmov.s %d1,%fp1 # -SIGN(X)*2 IN SGL FMT
+ fdiv.x %fp0,%fp1 # -SIGN(X)2 / [EXP(Y)+1 ]
+
+ mov.l SGN(%a6),%d1
+ or.l &0x3F800000,%d1 # SGN
+ fmov.s %d1,%fp0 # SGN IN SGL FMT
+
+ fmov.l %d0,%fpcr # restore users round prec,mode
+ mov.b &FADD_OP,%d1 # last inst is ADD
+ fadd.x %fp1,%fp0
+ bra t_inx2
+
+TANHSM:
+ fmov.l %d0,%fpcr # restore users round prec,mode
+ mov.b &FMOV_OP,%d1 # last inst is MOVE
+ fmov.x X(%a6),%fp0 # last inst - possible exception set
+ bra t_catch
+
+#--RETURN SGN(X) - SGN(X)*EPS
+TANHHUGE:
+ mov.l X(%a6),%d1
+ and.l &0x80000000,%d1
+ or.l &0x3F800000,%d1
+ fmov.s %d1,%fp0
+ and.l &0x80000000,%d1
+ eor.l &0x80800000,%d1 # -SIGN(X)*EPS
+
+ fmov.l %d0,%fpcr # restore users round prec,mode
+ fadd.s %d1,%fp0
+ bra t_inx2
+
+ global stanhd
+#--TANH(X) = X FOR DENORMALIZED X
+stanhd:
+ bra t_extdnrm
+
+#########################################################################
+# slogn(): computes the natural logarithm of a normalized input #
+# slognd(): computes the natural logarithm of a denormalized input #
+# slognp1(): computes the log(1+X) of a normalized input #
+# slognp1d(): computes the log(1+X) of a denormalized input #
+# #
+# INPUT *************************************************************** #
+# a0 = pointer to extended precision input #
+# d0 = round precision,mode #
+# #
+# OUTPUT ************************************************************** #
+# fp0 = log(X) or log(1+X) #
+# #
+# ACCURACY and MONOTONICITY ******************************************* #
+#	The returned result is within 2 ulps in 64 significant bits,	#
+# i.e. within 0.5001 ulp to 53 bits if the result is subsequently #
+# rounded to double precision. The result is provably monotonic #
+# in double precision. #
+# #
+# ALGORITHM *********************************************************** #
+# LOGN: #
+# Step 1. If |X-1| < 1/16, approximate log(X) by an odd #
+# polynomial in u, where u = 2(X-1)/(X+1). Otherwise, #
+# move on to Step 2. #
+# #
+# Step 2. X = 2**k * Y where 1 <= Y < 2. Define F to be the first #
+# seven significant bits of Y plus 2**(-7), i.e. #
+# F = 1.xxxxxx1 in base 2 where the six "x" match those #
+# of Y. Note that |Y-F| <= 2**(-7). #
+# #
+# Step 3. Define u = (Y-F)/F. Approximate log(1+u) by a #
+# polynomial in u, log(1+u) = poly. #
+# #
+# Step 4. Reconstruct #
+# log(X) = log( 2**k * Y ) = k*log(2) + log(F) + log(1+u) #
+# by k*log(2) + (log(F) + poly). The values of log(F) are #
+# calculated beforehand and stored in the program. #
+# #
+# lognp1: #
+# Step 1: If |X| < 1/16, approximate log(1+X) by an odd #
+# polynomial in u where u = 2X/(2+X). Otherwise, move on #
+# to Step 2. #
+# #
+# Step 2: Let 1+X = 2**k * Y, where 1 <= Y < 2. Define F as done #
+# in Step 2 of the algorithm for LOGN and compute #
+# log(1+X) as k*log(2) + log(F) + poly where poly #
+# approximates log(1+u), u = (Y-F)/F. #
+# #
+# Implementation Notes: #
+# Note 1. There are 64 different possible values for F, thus 64 #
+# log(F)'s need to be tabulated. Moreover, the values of #
+# 1/F are also tabulated so that the division in (Y-F)/F #
+# can be performed by a multiplication. #
+# #
+#	Note 2. In Step 2 of lognp1, in order to preserve accuracy,	#
+# the value Y-F has to be calculated carefully when #
+# 1/2 <= X < 3/2. #
+# #
+# Note 3. To fully exploit the pipeline, polynomials are usually #
+# separated into two parts evaluated independently before #
+# being added up. #
+# #
+#########################################################################
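+
+#--A hedged C sketch of the table-driven scheme above (double precision
+#--stand-in; the FPSP reads 1/F and log(F) from LOGTBL and evaluates a
+#--degree-six polynomial, truncated here for brevity):
+#
+#	#include <math.h>
+#	#include <stdint.h>
+#	#include <string.h>
+#
+#	double log_sketch(double x)              /* x > 0, not close to 1 */
+#	{
+#	    int k;
+#	    double y = 2.0 * frexp(x, &k);       /* x = 2^(k-1)*y, y in [1,2) */
+#	    uint64_t b; memcpy(&b, &y, 8);
+#	    int j = (int)((b >> 46) & 0x3f);     /* top six fraction bits of y */
+#	    double f = 1.0 + (2 * j + 1) / 128.0;/* F = 1.xxxxxx1, |y-F| <= 2^-7 */
+#	    double u = (y - f) * (1.0 / f);      /* 1/F is tabulated in LOGTBL */
+#	    double p = u - u*u/2 + u*u*u/3;      /* truncated log(1+u) series */
+#	    return (k - 1) * M_LN2 + log(f) + p; /* log(F) also from LOGTBL */
+#	}
+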
+LOGOF2:
+ long 0x3FFE0000,0xB17217F7,0xD1CF79AC,0x00000000
+
+one:
+ long 0x3F800000
+zero:
+ long 0x00000000
+infty:
+ long 0x7F800000
+negone:
+ long 0xBF800000
+
+LOGA6:
+ long 0x3FC2499A,0xB5E4040B
+LOGA5:
+ long 0xBFC555B5,0x848CB7DB
+
+LOGA4:
+ long 0x3FC99999,0x987D8730
+LOGA3:
+ long 0xBFCFFFFF,0xFF6F7E97
+
+LOGA2:
+ long 0x3FD55555,0x555555A4
+LOGA1:
+ long 0xBFE00000,0x00000008
+
+LOGB5:
+ long 0x3F175496,0xADD7DAD6
+LOGB4:
+ long 0x3F3C71C2,0xFE80C7E0
+
+LOGB3:
+ long 0x3F624924,0x928BCCFF
+LOGB2:
+ long 0x3F899999,0x999995EC
+
+LOGB1:
+ long 0x3FB55555,0x55555555
+TWO:
+ long 0x40000000,0x00000000
+
+LTHOLD:
+ long 0x3f990000,0x80000000,0x00000000,0x00000000
+
+LOGTBL:
+ long 0x3FFE0000,0xFE03F80F,0xE03F80FE,0x00000000
+ long 0x3FF70000,0xFF015358,0x833C47E2,0x00000000
+ long 0x3FFE0000,0xFA232CF2,0x52138AC0,0x00000000
+ long 0x3FF90000,0xBDC8D83E,0xAD88D549,0x00000000
+ long 0x3FFE0000,0xF6603D98,0x0F6603DA,0x00000000
+ long 0x3FFA0000,0x9CF43DCF,0xF5EAFD48,0x00000000
+ long 0x3FFE0000,0xF2B9D648,0x0F2B9D65,0x00000000
+ long 0x3FFA0000,0xDA16EB88,0xCB8DF614,0x00000000
+ long 0x3FFE0000,0xEF2EB71F,0xC4345238,0x00000000
+ long 0x3FFB0000,0x8B29B775,0x1BD70743,0x00000000
+ long 0x3FFE0000,0xEBBDB2A5,0xC1619C8C,0x00000000
+ long 0x3FFB0000,0xA8D839F8,0x30C1FB49,0x00000000
+ long 0x3FFE0000,0xE865AC7B,0x7603A197,0x00000000
+ long 0x3FFB0000,0xC61A2EB1,0x8CD907AD,0x00000000
+ long 0x3FFE0000,0xE525982A,0xF70C880E,0x00000000
+ long 0x3FFB0000,0xE2F2A47A,0xDE3A18AF,0x00000000
+ long 0x3FFE0000,0xE1FC780E,0x1FC780E2,0x00000000
+ long 0x3FFB0000,0xFF64898E,0xDF55D551,0x00000000
+ long 0x3FFE0000,0xDEE95C4C,0xA037BA57,0x00000000
+ long 0x3FFC0000,0x8DB956A9,0x7B3D0148,0x00000000
+ long 0x3FFE0000,0xDBEB61EE,0xD19C5958,0x00000000
+ long 0x3FFC0000,0x9B8FE100,0xF47BA1DE,0x00000000
+ long 0x3FFE0000,0xD901B203,0x6406C80E,0x00000000
+ long 0x3FFC0000,0xA9372F1D,0x0DA1BD17,0x00000000
+ long 0x3FFE0000,0xD62B80D6,0x2B80D62C,0x00000000
+ long 0x3FFC0000,0xB6B07F38,0xCE90E46B,0x00000000
+ long 0x3FFE0000,0xD3680D36,0x80D3680D,0x00000000
+ long 0x3FFC0000,0xC3FD0329,0x06488481,0x00000000
+ long 0x3FFE0000,0xD0B69FCB,0xD2580D0B,0x00000000
+ long 0x3FFC0000,0xD11DE0FF,0x15AB18CA,0x00000000
+ long 0x3FFE0000,0xCE168A77,0x25080CE1,0x00000000
+ long 0x3FFC0000,0xDE1433A1,0x6C66B150,0x00000000
+ long 0x3FFE0000,0xCB8727C0,0x65C393E0,0x00000000
+ long 0x3FFC0000,0xEAE10B5A,0x7DDC8ADD,0x00000000
+ long 0x3FFE0000,0xC907DA4E,0x871146AD,0x00000000
+ long 0x3FFC0000,0xF7856E5E,0xE2C9B291,0x00000000
+ long 0x3FFE0000,0xC6980C69,0x80C6980C,0x00000000
+ long 0x3FFD0000,0x82012CA5,0xA68206D7,0x00000000
+ long 0x3FFE0000,0xC4372F85,0x5D824CA6,0x00000000
+ long 0x3FFD0000,0x882C5FCD,0x7256A8C5,0x00000000
+ long 0x3FFE0000,0xC1E4BBD5,0x95F6E947,0x00000000
+ long 0x3FFD0000,0x8E44C60B,0x4CCFD7DE,0x00000000
+ long 0x3FFE0000,0xBFA02FE8,0x0BFA02FF,0x00000000
+ long 0x3FFD0000,0x944AD09E,0xF4351AF6,0x00000000
+ long 0x3FFE0000,0xBD691047,0x07661AA3,0x00000000
+ long 0x3FFD0000,0x9A3EECD4,0xC3EAA6B2,0x00000000
+ long 0x3FFE0000,0xBB3EE721,0xA54D880C,0x00000000
+ long 0x3FFD0000,0xA0218434,0x353F1DE8,0x00000000
+ long 0x3FFE0000,0xB92143FA,0x36F5E02E,0x00000000
+ long 0x3FFD0000,0xA5F2FCAB,0xBBC506DA,0x00000000
+ long 0x3FFE0000,0xB70FBB5A,0x19BE3659,0x00000000
+ long 0x3FFD0000,0xABB3B8BA,0x2AD362A5,0x00000000
+ long 0x3FFE0000,0xB509E68A,0x9B94821F,0x00000000
+ long 0x3FFD0000,0xB1641795,0xCE3CA97B,0x00000000
+ long 0x3FFE0000,0xB30F6352,0x8917C80B,0x00000000
+ long 0x3FFD0000,0xB7047551,0x5D0F1C61,0x00000000
+ long 0x3FFE0000,0xB11FD3B8,0x0B11FD3C,0x00000000
+ long 0x3FFD0000,0xBC952AFE,0xEA3D13E1,0x00000000
+ long 0x3FFE0000,0xAF3ADDC6,0x80AF3ADE,0x00000000
+ long 0x3FFD0000,0xC2168ED0,0xF458BA4A,0x00000000
+ long 0x3FFE0000,0xAD602B58,0x0AD602B6,0x00000000
+ long 0x3FFD0000,0xC788F439,0xB3163BF1,0x00000000
+ long 0x3FFE0000,0xAB8F69E2,0x8359CD11,0x00000000
+ long 0x3FFD0000,0xCCECAC08,0xBF04565D,0x00000000
+ long 0x3FFE0000,0xA9C84A47,0xA07F5638,0x00000000
+ long 0x3FFD0000,0xD2420487,0x2DD85160,0x00000000
+ long 0x3FFE0000,0xA80A80A8,0x0A80A80B,0x00000000
+ long 0x3FFD0000,0xD7894992,0x3BC3588A,0x00000000
+ long 0x3FFE0000,0xA655C439,0x2D7B73A8,0x00000000
+ long 0x3FFD0000,0xDCC2C4B4,0x9887DACC,0x00000000
+ long 0x3FFE0000,0xA4A9CF1D,0x96833751,0x00000000
+ long 0x3FFD0000,0xE1EEBD3E,0x6D6A6B9E,0x00000000
+ long 0x3FFE0000,0xA3065E3F,0xAE7CD0E0,0x00000000
+ long 0x3FFD0000,0xE70D785C,0x2F9F5BDC,0x00000000
+ long 0x3FFE0000,0xA16B312E,0xA8FC377D,0x00000000
+ long 0x3FFD0000,0xEC1F392C,0x5179F283,0x00000000
+ long 0x3FFE0000,0x9FD809FD,0x809FD80A,0x00000000
+ long 0x3FFD0000,0xF12440D3,0xE36130E6,0x00000000
+ long 0x3FFE0000,0x9E4CAD23,0xDD5F3A20,0x00000000
+ long 0x3FFD0000,0xF61CCE92,0x346600BB,0x00000000
+ long 0x3FFE0000,0x9CC8E160,0xC3FB19B9,0x00000000
+ long 0x3FFD0000,0xFB091FD3,0x8145630A,0x00000000
+ long 0x3FFE0000,0x9B4C6F9E,0xF03A3CAA,0x00000000
+ long 0x3FFD0000,0xFFE97042,0xBFA4C2AD,0x00000000
+ long 0x3FFE0000,0x99D722DA,0xBDE58F06,0x00000000
+ long 0x3FFE0000,0x825EFCED,0x49369330,0x00000000
+ long 0x3FFE0000,0x9868C809,0x868C8098,0x00000000
+ long 0x3FFE0000,0x84C37A7A,0xB9A905C9,0x00000000
+ long 0x3FFE0000,0x97012E02,0x5C04B809,0x00000000
+ long 0x3FFE0000,0x87224C2E,0x8E645FB7,0x00000000
+ long 0x3FFE0000,0x95A02568,0x095A0257,0x00000000
+ long 0x3FFE0000,0x897B8CAC,0x9F7DE298,0x00000000
+ long 0x3FFE0000,0x94458094,0x45809446,0x00000000
+ long 0x3FFE0000,0x8BCF55DE,0xC4CD05FE,0x00000000
+ long 0x3FFE0000,0x92F11384,0x0497889C,0x00000000
+ long 0x3FFE0000,0x8E1DC0FB,0x89E125E5,0x00000000
+ long 0x3FFE0000,0x91A2B3C4,0xD5E6F809,0x00000000
+ long 0x3FFE0000,0x9066E68C,0x955B6C9B,0x00000000
+ long 0x3FFE0000,0x905A3863,0x3E06C43B,0x00000000
+ long 0x3FFE0000,0x92AADE74,0xC7BE59E0,0x00000000
+ long 0x3FFE0000,0x8F1779D9,0xFDC3A219,0x00000000
+ long 0x3FFE0000,0x94E9BFF6,0x15845643,0x00000000
+ long 0x3FFE0000,0x8DDA5202,0x37694809,0x00000000
+ long 0x3FFE0000,0x9723A1B7,0x20134203,0x00000000
+ long 0x3FFE0000,0x8CA29C04,0x6514E023,0x00000000
+ long 0x3FFE0000,0x995899C8,0x90EB8990,0x00000000
+ long 0x3FFE0000,0x8B70344A,0x139BC75A,0x00000000
+ long 0x3FFE0000,0x9B88BDAA,0x3A3DAE2F,0x00000000
+ long 0x3FFE0000,0x8A42F870,0x5669DB46,0x00000000
+ long 0x3FFE0000,0x9DB4224F,0xFFE1157C,0x00000000
+ long 0x3FFE0000,0x891AC73A,0xE9819B50,0x00000000
+ long 0x3FFE0000,0x9FDADC26,0x8B7A12DA,0x00000000
+ long 0x3FFE0000,0x87F78087,0xF78087F8,0x00000000
+ long 0x3FFE0000,0xA1FCFF17,0xCE733BD4,0x00000000
+ long 0x3FFE0000,0x86D90544,0x7A34ACC6,0x00000000
+ long 0x3FFE0000,0xA41A9E8F,0x5446FB9F,0x00000000
+ long 0x3FFE0000,0x85BF3761,0x2CEE3C9B,0x00000000
+ long 0x3FFE0000,0xA633CD7E,0x6771CD8B,0x00000000
+ long 0x3FFE0000,0x84A9F9C8,0x084A9F9D,0x00000000
+ long 0x3FFE0000,0xA8489E60,0x0B435A5E,0x00000000
+ long 0x3FFE0000,0x83993052,0x3FBE3368,0x00000000
+ long 0x3FFE0000,0xAA59233C,0xCCA4BD49,0x00000000
+ long 0x3FFE0000,0x828CBFBE,0xB9A020A3,0x00000000
+ long 0x3FFE0000,0xAC656DAE,0x6BCC4985,0x00000000
+ long 0x3FFE0000,0x81848DA8,0xFAF0D277,0x00000000
+ long 0x3FFE0000,0xAE6D8EE3,0x60BB2468,0x00000000
+ long 0x3FFE0000,0x80808080,0x80808081,0x00000000
+ long 0x3FFE0000,0xB07197A2,0x3C46C654,0x00000000
+
+ set ADJK,L_SCR1
+
+ set X,FP_SCR0
+ set XDCARE,X+2
+ set XFRAC,X+4
+
+ set F,FP_SCR1
+ set FFRAC,F+4
+
+ set KLOG2,FP_SCR0
+
+ set SAVEU,FP_SCR0
+
+ global slogn
+#--ENTRY POINT FOR LOG(X) FOR X FINITE, NON-ZERO, NOT NAN'S
+slogn:
+ fmov.x (%a0),%fp0 # LOAD INPUT
+ mov.l &0x00000000,ADJK(%a6)
+
+LOGBGN:
+#--FPCR SAVED AND CLEARED, INPUT IS 2^(ADJK)*FP0, FP0 CONTAINS
+#--A FINITE, NON-ZERO, NORMALIZED NUMBER.
+
+ mov.l (%a0),%d1
+ mov.w 4(%a0),%d1
+
+ mov.l (%a0),X(%a6)
+ mov.l 4(%a0),X+4(%a6)
+ mov.l 8(%a0),X+8(%a6)
+
+ cmp.l %d1,&0 # CHECK IF X IS NEGATIVE
+ blt.w LOGNEG # LOG OF NEGATIVE ARGUMENT IS INVALID
+# X IS POSITIVE, CHECK IF X IS NEAR 1
+ cmp.l %d1,&0x3ffef07d # IS X < 15/16?
+ blt.b LOGMAIN # YES
+ cmp.l %d1,&0x3fff8841 # IS X > 17/16?
+ ble.w LOGNEAR1 # NO
+
+LOGMAIN:
+#--THIS SHOULD BE THE USUAL CASE, X NOT VERY CLOSE TO 1
+
+#--X = 2^(K) * Y, 1 <= Y < 2. THUS, Y = 1.XXXXXXXX....XX IN BINARY.
+#--WE DEFINE F = 1.XXXXXX1, I.E. FIRST 7 BITS OF Y AND ATTACH A 1.
+#--THE IDEA IS THAT LOG(X) = K*LOG2 + LOG(Y)
+#-- = K*LOG2 + LOG(F) + LOG(1 + (Y-F)/F).
+#--NOTE THAT U = (Y-F)/F IS VERY SMALL AND THUS APPROXIMATING
+#--LOG(1+U) CAN BE VERY EFFICIENT.
+#--ALSO NOTE THAT THE VALUE 1/F IS STORED IN A TABLE SO THAT NO
+#--DIVISION IS NEEDED TO CALCULATE (Y-F)/F.
+
+#--GET K, Y, F, AND ADDRESS OF 1/F.
+ asr.l &8,%d1
+ asr.l &8,%d1 # SHIFTED 16 BITS, BIASED EXPO. OF X
+ sub.l &0x3FFF,%d1 # THIS IS K
+ add.l ADJK(%a6),%d1 # ADJUST K, ORIGINAL INPUT MAY BE DENORM.
+ lea LOGTBL(%pc),%a0 # BASE ADDRESS OF 1/F AND LOG(F)
+ fmov.l %d1,%fp1 # CONVERT K TO FLOATING-POINT FORMAT
+
+#--WHILE THE CONVERSION IS GOING ON, WE GET F AND ADDRESS OF 1/F
+ mov.l &0x3FFF0000,X(%a6) # X IS NOW Y, I.E. 2^(-K)*X
+ mov.l XFRAC(%a6),FFRAC(%a6)
+ and.l &0xFE000000,FFRAC(%a6) # FIRST 7 BITS OF Y
+ or.l &0x01000000,FFRAC(%a6) # GET F: ATTACH A 1 AT THE EIGHTH BIT
+ mov.l FFRAC(%a6),%d1 # READY TO GET ADDRESS OF 1/F
+ and.l &0x7E000000,%d1
+ asr.l &8,%d1
+ asr.l &8,%d1
+ asr.l &4,%d1 # SHIFTED 20, D0 IS THE DISPLACEMENT
+ add.l %d1,%a0 # A0 IS THE ADDRESS FOR 1/F
+
+ fmov.x X(%a6),%fp0
+ mov.l &0x3fff0000,F(%a6)
+ clr.l F+8(%a6)
+ fsub.x F(%a6),%fp0 # Y-F
+ fmovm.x &0xc,-(%sp) # SAVE FP2-3 WHILE FP0 IS NOT READY
+#--SUMMARY: FP0 IS Y-F, A0 IS ADDRESS OF 1/F, FP1 IS K
+#--REGISTERS SAVED: FPCR, FP1, FP2
+
+LP1CONT1:
+#--A RE-ENTRY POINT FOR LOGNP1
+ fmul.x (%a0),%fp0 # FP0 IS U = (Y-F)/F
+ fmul.x LOGOF2(%pc),%fp1 # GET K*LOG2 WHILE FP0 IS NOT READY
+ fmov.x %fp0,%fp2
+ fmul.x %fp2,%fp2 # FP2 IS V=U*U
+	fmov.x	%fp1,KLOG2(%a6)	# PUT K*LOG2 IN MEMORY, FREE FP1
+
+#--LOG(1+U) IS APPROXIMATED BY
+#--U + V*(A1+U*(A2+U*(A3+U*(A4+U*(A5+U*A6))))) WHICH IS
+#--[U + V*(A1+V*(A3+V*A5))] + [U*V*(A2+V*(A4+V*A6))]
+
+ fmov.x %fp2,%fp3
+ fmov.x %fp2,%fp1
+
+ fmul.d LOGA6(%pc),%fp1 # V*A6
+ fmul.d LOGA5(%pc),%fp2 # V*A5
+
+ fadd.d LOGA4(%pc),%fp1 # A4+V*A6
+ fadd.d LOGA3(%pc),%fp2 # A3+V*A5
+
+ fmul.x %fp3,%fp1 # V*(A4+V*A6)
+ fmul.x %fp3,%fp2 # V*(A3+V*A5)
+
+ fadd.d LOGA2(%pc),%fp1 # A2+V*(A4+V*A6)
+ fadd.d LOGA1(%pc),%fp2 # A1+V*(A3+V*A5)
+
+ fmul.x %fp3,%fp1 # V*(A2+V*(A4+V*A6))
+ add.l &16,%a0 # ADDRESS OF LOG(F)
+ fmul.x %fp3,%fp2 # V*(A1+V*(A3+V*A5))
+
+ fmul.x %fp0,%fp1 # U*V*(A2+V*(A4+V*A6))
+ fadd.x %fp2,%fp0 # U+V*(A1+V*(A3+V*A5))
+
+ fadd.x (%a0),%fp1 # LOG(F)+U*V*(A2+V*(A4+V*A6))
+ fmovm.x (%sp)+,&0x30 # RESTORE FP2-3
+ fadd.x %fp1,%fp0 # FP0 IS LOG(F) + LOG(1+U)
+
+ fmov.l %d0,%fpcr
+ fadd.x KLOG2(%a6),%fp0 # FINAL ADD
+ bra t_inx2
+
+
+LOGNEAR1:
+
+# if the input is exactly equal to one, then exit through ld_pzero.
+# if these 2 lines weren't here, the correct answer would be returned
+# but the INEX2 bit would be set.
+ fcmp.b %fp0,&0x1 # is it equal to one?
+ fbeq.l ld_pzero # yes
+
+#--REGISTERS SAVED: FPCR, FP1. FP0 CONTAINS THE INPUT.
+ fmov.x %fp0,%fp1
+ fsub.s one(%pc),%fp1 # FP1 IS X-1
+ fadd.s one(%pc),%fp0 # FP0 IS X+1
+ fadd.x %fp1,%fp1 # FP1 IS 2(X-1)
+#--LOG(X) = LOG(1+U/2)-LOG(1-U/2) WHICH IS AN ODD POLYNOMIAL
+#--IN U, U = 2(X-1)/(X+1) = FP1/FP0
+
+LP1CONT2:
+#--THIS IS A RE-ENTRY POINT FOR LOGNP1
+ fdiv.x %fp0,%fp1 # FP1 IS U
+ fmovm.x &0xc,-(%sp) # SAVE FP2-3
+#--REGISTERS SAVED ARE NOW FPCR,FP1,FP2,FP3
+#--LET V=U*U, W=V*V, CALCULATE
+#--U + U*V*(B1 + V*(B2 + V*(B3 + V*(B4 + V*B5)))) BY
+#--U + U*V*( [B1 + W*(B3 + W*B5)] + [V*(B2 + W*B4)] )
+ fmov.x %fp1,%fp0
+ fmul.x %fp0,%fp0 # FP0 IS V
+ fmov.x %fp1,SAVEU(%a6) # STORE U IN MEMORY, FREE FP1
+ fmov.x %fp0,%fp1
+ fmul.x %fp1,%fp1 # FP1 IS W
+
+ fmov.d LOGB5(%pc),%fp3
+ fmov.d LOGB4(%pc),%fp2
+
+ fmul.x %fp1,%fp3 # W*B5
+ fmul.x %fp1,%fp2 # W*B4
+
+ fadd.d LOGB3(%pc),%fp3 # B3+W*B5
+ fadd.d LOGB2(%pc),%fp2 # B2+W*B4
+
+ fmul.x %fp3,%fp1 # W*(B3+W*B5), FP3 RELEASED
+
+ fmul.x %fp0,%fp2 # V*(B2+W*B4)
+
+ fadd.d LOGB1(%pc),%fp1 # B1+W*(B3+W*B5)
+ fmul.x SAVEU(%a6),%fp0 # FP0 IS U*V
+
+ fadd.x %fp2,%fp1 # B1+W*(B3+W*B5) + V*(B2+W*B4), FP2 RELEASED
+ fmovm.x (%sp)+,&0x30 # FP2-3 RESTORED
+
+ fmul.x %fp1,%fp0 # U*V*( [B1+W*(B3+W*B5)] + [V*(B2+W*B4)] )
+
+ fmov.l %d0,%fpcr
+ fadd.x SAVEU(%a6),%fp0
+ bra t_inx2
+
+#--REGISTERS SAVED FPCR. LOG(-VE) IS INVALID
+LOGNEG:
+ bra t_operr
+
+ global slognd
+slognd:
+#--ENTRY POINT FOR LOG(X) FOR DENORMALIZED INPUT
+
+ mov.l &-100,ADJK(%a6) # INPUT = 2^(ADJK) * FP0
+
+#----normalize the input value by left shifting k bits (k to be determined
+#----below), adjusting exponent and storing -k to ADJK
+#----the value TWOTO100 is no longer needed.
+#----Note that this code assumes the denormalized input is NON-ZERO.
+
+ movm.l &0x3f00,-(%sp) # save some registers {d2-d7}
+ mov.l (%a0),%d3 # D3 is exponent of smallest norm. #
+ mov.l 4(%a0),%d4
+ mov.l 8(%a0),%d5 # (D4,D5) is (Hi_X,Lo_X)
+ clr.l %d2 # D2 used for holding K
+
+ tst.l %d4
+ bne.b Hi_not0
+
+Hi_0:
+ mov.l %d5,%d4
+ clr.l %d5
+ mov.l &32,%d2
+ clr.l %d6
+ bfffo %d4{&0:&32},%d6
+ lsl.l %d6,%d4
+ add.l %d6,%d2 # (D3,D4,D5) is normalized
+
+ mov.l %d3,X(%a6)
+ mov.l %d4,XFRAC(%a6)
+ mov.l %d5,XFRAC+4(%a6)
+ neg.l %d2
+ mov.l %d2,ADJK(%a6)
+ fmov.x X(%a6),%fp0
+ movm.l (%sp)+,&0xfc # restore registers {d2-d7}
+ lea X(%a6),%a0
+ bra.w LOGBGN # begin regular log(X)
+
+Hi_not0:
+ clr.l %d6
+ bfffo %d4{&0:&32},%d6 # find first 1
+ mov.l %d6,%d2 # get k
+ lsl.l %d6,%d4
+ mov.l %d5,%d7 # a copy of D5
+ lsl.l %d6,%d5
+ neg.l %d6
+ add.l &32,%d6
+ lsr.l %d6,%d7
+ or.l %d7,%d4 # (D3,D4,D5) normalized
+
+ mov.l %d3,X(%a6)
+ mov.l %d4,XFRAC(%a6)
+ mov.l %d5,XFRAC+4(%a6)
+ neg.l %d2
+ mov.l %d2,ADJK(%a6)
+ fmov.x X(%a6),%fp0
+ movm.l (%sp)+,&0xfc # restore registers {d2-d7}
+ lea X(%a6),%a0
+ bra.w LOGBGN # begin regular log(X)
+
+ global slognp1
+#--ENTRY POINT FOR LOG(1+X) FOR X FINITE, NON-ZERO, NOT NAN'S
+slognp1:
+ fmov.x (%a0),%fp0 # LOAD INPUT
+ fabs.x %fp0 # test magnitude
+ fcmp.x %fp0,LTHOLD(%pc) # compare with min threshold
+ fbgt.w LP1REAL # if greater, continue
+ fmov.l %d0,%fpcr
+ mov.b &FMOV_OP,%d1 # last inst is MOVE
+ fmov.x (%a0),%fp0 # return signed argument
+ bra t_catch
+
+LP1REAL:
+ fmov.x (%a0),%fp0 # LOAD INPUT
+ mov.l &0x00000000,ADJK(%a6)
+ fmov.x %fp0,%fp1 # FP1 IS INPUT Z
+ fadd.s one(%pc),%fp0 # X := ROUND(1+Z)
+ fmov.x %fp0,X(%a6)
+ mov.w XFRAC(%a6),XDCARE(%a6)
+ mov.l X(%a6),%d1
+ cmp.l %d1,&0
+ ble.w LP1NEG0 # LOG OF ZERO OR -VE
+	cmp.l	%d1,&0x3ffe8000		# IS X < 1/2?
+	blt.w	LOGMAIN			# YES; 1+Z NOT IN [1/2,3/2]
+	cmp.l	%d1,&0x3fffc000		# IS X > 3/2?
+	bgt.w	LOGMAIN			# YES; 1+Z NOT IN [1/2,3/2]
+#--IF 1+Z > 3/2 OR 1+Z < 1/2, THEN X, THE ROUNDED VALUE OF 1+Z,
+#--CONTAINS AT LEAST 63 BITS OF INFORMATION OF Z. IN THAT CASE,
+#--SIMPLY INVOKE LOG(X) FOR LOG(1+Z).
+
+LP1NEAR1:
+#--NEXT SEE IF EXP(-1/16) < X < EXP(1/16)
+ cmp.l %d1,&0x3ffef07d
+ blt.w LP1CARE
+ cmp.l %d1,&0x3fff8841
+ bgt.w LP1CARE
+
+LP1ONE16:
+#--EXP(-1/16) < X < EXP(1/16). LOG(1+Z) = LOG(1+U/2) - LOG(1-U/2)
+#--WHERE U = 2Z/(2+Z) = 2Z/(1+X).
+ fadd.x %fp1,%fp1 # FP1 IS 2Z
+ fadd.s one(%pc),%fp0 # FP0 IS 1+X
+#--U = FP1/FP0
+ bra.w LP1CONT2
+
+LP1CARE:
+#--HERE WE USE THE USUAL TABLE DRIVEN APPROACH. CARE HAS TO BE
+#--TAKEN BECAUSE 1+Z CAN HAVE 67 BITS OF INFORMATION AND WE MUST
+#--PRESERVE ALL THE INFORMATION. BECAUSE 1+Z IS IN [1/2,3/2],
+#--THERE ARE ONLY TWO CASES.
+#--CASE 1: 1+Z < 1, THEN K = -1 AND Y-F = (2-F) + 2Z
+#--CASE 2: 1+Z > 1, THEN K = 0 AND Y-F = (1-F) + Z
+#--ON RETURNING TO LP1CONT1, WE MUST HAVE K IN FP1, ADDRESS OF
+#--(1/F) IN A0, Y-F IN FP0, AND FP2 SAVED.
+
+ mov.l XFRAC(%a6),FFRAC(%a6)
+ and.l &0xFE000000,FFRAC(%a6)
+ or.l &0x01000000,FFRAC(%a6) # F OBTAINED
+ cmp.l %d1,&0x3FFF8000 # SEE IF 1+Z > 1
+ bge.b KISZERO
+
+KISNEG1:
+ fmov.s TWO(%pc),%fp0
+ mov.l &0x3fff0000,F(%a6)
+ clr.l F+8(%a6)
+ fsub.x F(%a6),%fp0 # 2-F
+ mov.l FFRAC(%a6),%d1
+ and.l &0x7E000000,%d1
+ asr.l &8,%d1
+ asr.l &8,%d1
+ asr.l &4,%d1 # D0 CONTAINS DISPLACEMENT FOR 1/F
+ fadd.x %fp1,%fp1 # GET 2Z
+ fmovm.x &0xc,-(%sp) # SAVE FP2 {%fp2/%fp3}
+ fadd.x %fp1,%fp0 # FP0 IS Y-F = (2-F)+2Z
+ lea LOGTBL(%pc),%a0 # A0 IS ADDRESS OF 1/F
+ add.l %d1,%a0
+ fmov.s negone(%pc),%fp1 # FP1 IS K = -1
+ bra.w LP1CONT1
+
+KISZERO:
+ fmov.s one(%pc),%fp0
+ mov.l &0x3fff0000,F(%a6)
+ clr.l F+8(%a6)
+ fsub.x F(%a6),%fp0 # 1-F
+ mov.l FFRAC(%a6),%d1
+ and.l &0x7E000000,%d1
+ asr.l &8,%d1
+ asr.l &8,%d1
+ asr.l &4,%d1
+ fadd.x %fp1,%fp0 # FP0 IS Y-F
+ fmovm.x &0xc,-(%sp) # FP2 SAVED {%fp2/%fp3}
+ lea LOGTBL(%pc),%a0
+ add.l %d1,%a0 # A0 IS ADDRESS OF 1/F
+ fmov.s zero(%pc),%fp1 # FP1 IS K = 0
+ bra.w LP1CONT1
+
+LP1NEG0:
+#--FPCR SAVED. D0 IS X IN COMPACT FORM.
+ cmp.l %d1,&0
+ blt.b LP1NEG
+LP1ZERO:
+ fmov.s negone(%pc),%fp0
+
+ fmov.l %d0,%fpcr
+ bra t_dz
+
+LP1NEG:
+ fmov.s zero(%pc),%fp0
+
+ fmov.l %d0,%fpcr
+ bra t_operr
+
+ global slognp1d
+#--ENTRY POINT FOR LOG(1+Z) FOR DENORMALIZED INPUT
+# Simply return the denorm
+slognp1d:
+ bra t_extdnrm
+
+#########################################################################
+# satanh(): computes the inverse hyperbolic tangent of a norm input #
+# satanhd(): computes the inverse hyperbolic tangent of a denorm input #
+# #
+# INPUT *************************************************************** #
+# a0 = pointer to extended precision input #
+# d0 = round precision,mode #
+# #
+# OUTPUT ************************************************************** #
+# fp0 = arctanh(X) #
+# #
+# ACCURACY and MONOTONICITY ******************************************* #
+#	The returned result is within 3 ulps in 64 significant bits,	#
+# i.e. within 0.5001 ulp to 53 bits if the result is subsequently #
+# rounded to double precision. The result is provably monotonic #
+# in double precision. #
+# #
+# ALGORITHM *********************************************************** #
+# #
+# ATANH #
+# 1. If |X| >= 1, go to 3. #
+# #
+# 2. (|X| < 1) Calculate atanh(X) by #
+# sgn := sign(X) #
+# y := |X| #
+# z := 2y/(1-y) #
+# atanh(X) := sgn * (1/2) * logp1(z) #
+# Exit. #
+# #
+# 3. If |X| > 1, go to 5. #
+# #
+# 4. (|X| = 1) Generate infinity with an appropriate sign and #
+# divide-by-zero by #
+# sgn := sign(X) #
+#	 	atanh(X) := sgn / (+0).					#
+# Exit. #
+# #
+# 5. (|X| > 1) Generate an invalid operation by 0 * infinity. #
+# Exit. #
+# #
+#########################################################################
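+
+#--A hedged C sketch of the case split above (double precision stand-in;
+#--the division by zero and 0*infinity are deliberate, matching steps 4
+#--and 5):
+#
+#	#include <math.h>
+#
+#	double atanh_sketch(double x)
+#	{
+#	    double y = fabs(x), sgn = copysign(1.0, x);
+#	    if (y < 1.0)                  /* step 2: via log1p */
+#	        return sgn * 0.5 * log1p(2.0 * y / (1.0 - y));
+#	    if (y == 1.0)                 /* step 4: signed inf, dz */
+#	        return sgn / 0.0;
+#	    return 0.0 * HUGE_VAL;        /* step 5: invalid op, NaN */
+#	}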
+
+ global satanh
+satanh:
+ mov.l (%a0),%d1
+ mov.w 4(%a0),%d1
+ and.l &0x7FFFFFFF,%d1
+ cmp.l %d1,&0x3FFF8000
+ bge.b ATANHBIG
+
+#--THIS IS THE USUAL CASE, |X| < 1
+#--Y = |X|, Z = 2Y/(1-Y), ATANH(X) = SIGN(X) * (1/2) * LOG1P(Z).
+
+ fabs.x (%a0),%fp0 # Y = |X|
+ fmov.x %fp0,%fp1
+ fneg.x %fp1 # -Y
+ fadd.x %fp0,%fp0 # 2Y
+ fadd.s &0x3F800000,%fp1 # 1-Y
+ fdiv.x %fp1,%fp0 # 2Y/(1-Y)
+ mov.l (%a0),%d1
+ and.l &0x80000000,%d1
+ or.l &0x3F000000,%d1 # SIGN(X)*HALF
+ mov.l %d1,-(%sp)
+
+ mov.l %d0,-(%sp) # save rnd prec,mode
+ clr.l %d0 # pass ext prec,RN
+ fmovm.x &0x01,-(%sp) # save Z on stack
+ lea (%sp),%a0 # pass ptr to Z
+ bsr slognp1 # LOG1P(Z)
+ add.l &0xc,%sp # clear Z from stack
+
+ mov.l (%sp)+,%d0 # fetch old prec,mode
+ fmov.l %d0,%fpcr # load it
+ mov.b &FMUL_OP,%d1 # last inst is MUL
+ fmul.s (%sp)+,%fp0
+ bra t_catch
+
+ATANHBIG:
+ fabs.x (%a0),%fp0 # |X|
+ fcmp.s %fp0,&0x3F800000
+ fbgt t_operr
+ bra t_dz
+
+ global satanhd
+#--ATANH(X) = X FOR DENORMALIZED X
+satanhd:
+ bra t_extdnrm
+
+#########################################################################
+# slog10(): computes the base-10 logarithm of a normalized input #
+# slog10d(): computes the base-10 logarithm of a denormalized input #
+# slog2(): computes the base-2 logarithm of a normalized input #
+# slog2d(): computes the base-2 logarithm of a denormalized input #
+# #
+# INPUT *************************************************************** #
+# a0 = pointer to extended precision input #
+# d0 = round precision,mode #
+# #
+# OUTPUT ************************************************************** #
+# fp0 = log_10(X) or log_2(X) #
+# #
+# ACCURACY and MONOTONICITY ******************************************* #
+#	The returned result is within 1.7 ulps in 64 significant bits,	#
+# i.e. within 0.5003 ulp to 53 bits if the result is subsequently #
+# rounded to double precision. The result is provably monotonic #
+# in double precision. #
+# #
+# ALGORITHM *********************************************************** #
+# #
+# slog10d: #
+# #
+# Step 0. If X < 0, create a NaN and raise the invalid operation #
+#		  flag. Otherwise, save FPCR in D1; set FPCR to default. #
+# Notes: Default means round-to-nearest mode, no floating-point #
+# traps, and precision control = double extended. #
+# #
+# Step 1. Call slognd to obtain Y = log(X), the natural log of X. #
+# Notes: Even if X is denormalized, log(X) is always normalized. #
+# #
+# Step 2. Compute log_10(X) = log(X) * (1/log(10)). #
+# 2.1 Restore the user FPCR #
+# 2.2 Return ans := Y * INV_L10. #
+# #
+# slog10: #
+# #
+#	Step 0.	  If X < 0, create a NaN and raise the invalid operation #
+#		  flag. Otherwise, save FPCR in D1; set FPCR to default. #
+#	Notes:	  Default means round-to-nearest mode, no floating-point #
+#		  traps, and precision control = double extended.	#
+#									#
+#	Step 1.	  Call slogn to obtain Y = log(X), the natural log of X. #
+# #
+# Step 2. Compute log_10(X) = log(X) * (1/log(10)). #
+# 2.1 Restore the user FPCR #
+# 2.2 Return ans := Y * INV_L10. #
+# #
+#	slog2d:								#
+#									#
+#	Step 0.	  If X < 0, create a NaN and raise the invalid operation #
+#		  flag. Otherwise, save FPCR in D1; set FPCR to default. #
+#	Notes:	  Default means round-to-nearest mode, no floating-point #
+#		  traps, and precision control = double extended.	#
+#									#
+#	Step 1.	  Call slognd to obtain Y = log(X), the natural log of X. #
+#	Notes:	  Even if X is denormalized, log(X) is always normalized. #
+#									#
+#	Step 2.	  Compute log_2(X) = log(X) * (1/log(2)).		#
+#		  2.1 Restore the user FPCR				#
+#		  2.2 Return ans := Y * INV_L2.				#
+# #
+#	slog2:								#
+#									#
+#	Step 0.	  If X < 0, create a NaN and raise the invalid operation #
+#		  flag. Otherwise, save FPCR in D1; set FPCR to default. #
+# Notes: Default means round-to-nearest mode, no floating-point #
+# traps, and precision control = double extended. #
+# #
+# Step 1. If X is not an integer power of two, i.e., X != 2^k, #
+# go to Step 3. #
+# #
+# Step 2. Return k. #
+# 2.1 Get integer k, X = 2^k. #
+# 2.2 Restore the user FPCR. #
+# 2.3 Return ans := convert-to-double-extended(k). #
+# #
+#	Step 3.	  Call slogn to obtain Y = log(X), the natural log of X. #
+# #
+# Step 4. Compute log_2(X) = log(X) * (1/log(2)). #
+# 4.1 Restore the user FPCR #
+# 4.2 Return ans := Y * INV_L2. #
+# #
+#########################################################################
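+
+#--A hedged C sketch of slog2's exact power-of-two shortcut and the
+#--general rescaling path (double precision stand-in):
+#
+#	#include <math.h>
+#
+#	double log2_sketch(double x)         /* x > 0 */
+#	{
+#	    int k;
+#	    double m = frexp(x, &k);         /* x = m * 2^k, m in [0.5,1) */
+#	    if (m == 0.5)                    /* X == 2^(k-1) exactly */
+#	        return (double)(k - 1);      /* step 2: exact integer result */
+#	    return log(x) * (1.0 / M_LN2);   /* steps 3-4: Y * INV_L2 */
+#	}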
+
+INV_L10:
+ long 0x3FFD0000,0xDE5BD8A9,0x37287195,0x00000000
+
+INV_L2:
+ long 0x3FFF0000,0xB8AA3B29,0x5C17F0BC,0x00000000
+
+ global slog10
+#--entry point for Log10(X), X is normalized
+slog10:
+ fmov.b &0x1,%fp0
+ fcmp.x %fp0,(%a0) # if operand == 1,
+ fbeq.l ld_pzero # return an EXACT zero
+
+ mov.l (%a0),%d1
+ blt.w invalid
+ mov.l %d0,-(%sp)
+ clr.l %d0
+ bsr slogn # log(X), X normal.
+ fmov.l (%sp)+,%fpcr
+ fmul.x INV_L10(%pc),%fp0
+ bra t_inx2
+
+ global slog10d
+#--entry point for Log10(X), X is denormalized
+slog10d:
+ mov.l (%a0),%d1
+ blt.w invalid
+ mov.l %d0,-(%sp)
+ clr.l %d0
+ bsr slognd # log(X), X denorm.
+ fmov.l (%sp)+,%fpcr
+ fmul.x INV_L10(%pc),%fp0
+ bra t_minx2
+
+ global slog2
+#--entry point for Log2(X), X is normalized
+slog2:
+ mov.l (%a0),%d1
+ blt.w invalid
+
+ mov.l 8(%a0),%d1
+ bne.b continue # X is not 2^k
+
+ mov.l 4(%a0),%d1
+ and.l &0x7FFFFFFF,%d1
+ bne.b continue
+
+#--X = 2^k.
+ mov.w (%a0),%d1
+ and.l &0x00007FFF,%d1
+ sub.l &0x3FFF,%d1
+ beq.l ld_pzero
+ fmov.l %d0,%fpcr
+ fmov.l %d1,%fp0
+ bra t_inx2
+
+continue:
+ mov.l %d0,-(%sp)
+ clr.l %d0
+ bsr slogn # log(X), X normal.
+ fmov.l (%sp)+,%fpcr
+ fmul.x INV_L2(%pc),%fp0
+ bra t_inx2
+
+invalid:
+ bra t_operr
+
+ global slog2d
+#--entry point for Log2(X), X is denormalized
+slog2d:
+ mov.l (%a0),%d1
+ blt.w invalid
+ mov.l %d0,-(%sp)
+ clr.l %d0
+ bsr slognd # log(X), X denorm.
+ fmov.l (%sp)+,%fpcr
+ fmul.x INV_L2(%pc),%fp0
+ bra t_minx2
+
+#########################################################################
+# stwotox(): computes 2**X for a normalized input #
+# stwotoxd(): computes 2**X for a denormalized input #
+# stentox(): computes 10**X for a normalized input #
+# stentoxd(): computes 10**X for a denormalized input #
+# #
+# INPUT *************************************************************** #
+# a0 = pointer to extended precision input #
+# d0 = round precision,mode #
+# #
+# OUTPUT ************************************************************** #
+# fp0 = 2**X or 10**X #
+# #
+# ACCURACY and MONOTONICITY ******************************************* #
+#	The returned result is within 2 ulps in 64 significant bits,	#
+# i.e. within 0.5001 ulp to 53 bits if the result is subsequently #
+# rounded to double precision. The result is provably monotonic #
+# in double precision. #
+# #
+# ALGORITHM *********************************************************** #
+# #
+# twotox #
+# 1. If |X| > 16480, go to ExpBig. #
+# #
+# 2. If |X| < 2**(-70), go to ExpSm. #
+# #
+# 3. Decompose X as X = N/64 + r where |r| <= 1/128. Furthermore #
+# decompose N as #
+# N = 64(M + M') + j, j = 0,1,2,...,63. #
+# #
+# 4. Overwrite r := r * log2. Then #
+# 2**X = 2**(M') * 2**(M) * 2**(j/64) * exp(r). #
+# Go to expr to compute that expression. #
+# #
+# tentox #
+# 1. If |X| > 16480*log_10(2) (base 10 log of 2), go to ExpBig. #
+# #
+# 2. If |X| < 2**(-70), go to ExpSm. #
+# #
+# 3. Set y := X*log_2(10)*64 (base 2 log of 10). Set #
+# N := round-to-int(y). Decompose N as #
+# N = 64(M + M') + j, j = 0,1,2,...,63. #
+# #
+# 4. Define r as #
+# r := ((X - N*L1)-N*L2) * L10 #
+# where L1, L2 are the leading and trailing parts of #
+# log_10(2)/64 and L10 is the natural log of 10. Then #
+# 10**X = 2**(M') * 2**(M) * 2**(j/64) * exp(r). #
+# Go to expr to compute that expression. #
+# #
+# expr #
+# 1. Fetch 2**(j/64) from table as Fact1 and Fact2. #
+# #
+# 2. Overwrite Fact1 and Fact2 by #
+# Fact1 := 2**(M) * Fact1 #
+# Fact2 := 2**(M) * Fact2 #
+# Thus Fact1 + Fact2 = 2**(M) * 2**(j/64). #
+# #
+# 3. Calculate P where 1 + P approximates exp(r): #
+# P = r + r*r*(A1+r*(A2+...+r*A5)). #
+# #
+# 4. Let AdjFact := 2**(M'). Return #
+# AdjFact * ( Fact1 + ((Fact1*P) + Fact2) ). #
+# Exit. #
+# #
+# ExpBig #
+# 1. Generate overflow by Huge * Huge if X > 0; otherwise, #
+# generate underflow by Tiny * Tiny. #
+# #
+# ExpSm #
+# 1. Return 1 + X. #
+# #
+#########################################################################
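+
+#--A hedged C sketch of the twotox reduction above (double precision
+#--stand-in; assumes an arithmetic right shift, as the asr instructions
+#--below do, and folds the tabulated Fact1+Fact2 into exp2(j/64.0)):
+#
+#	#include <math.h>
+#
+#	double twotox_sketch(double x)      /* 2^(-70) <= |x| <= 16480 */
+#	{
+#	    int n = (int)rint(64.0 * x);    /* N = round-to-int(64*X) */
+#	    int j = n & 0x3f;               /* J, so N = 64*M + J */
+#	    int m = n >> 6;                 /* M (M + M' in the code below) */
+#	    double r = (x - n / 64.0) * M_LN2;  /* r := r * log2 */
+#	    return ldexp(exp2(j / 64.0) * exp(r), m);
+#	}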
+
+L2TEN64:
+ long 0x406A934F,0x0979A371 # 64LOG10/LOG2
+L10TWO1:
+ long 0x3F734413,0x509F8000 # LOG2/64LOG10
+
+L10TWO2:
+ long 0xBFCD0000,0xC0219DC1,0xDA994FD2,0x00000000
+
+LOG10: long 0x40000000,0x935D8DDD,0xAAA8AC17,0x00000000
+
+LOG2: long 0x3FFE0000,0xB17217F7,0xD1CF79AC,0x00000000
+
+EXPA5: long 0x3F56C16D,0x6F7BD0B2
+EXPA4: long 0x3F811112,0x302C712C
+EXPA3: long 0x3FA55555,0x55554CC1
+EXPA2: long 0x3FC55555,0x55554A54
+EXPA1: long 0x3FE00000,0x00000000,0x00000000,0x00000000
+
+TEXPTBL:
+ long 0x3FFF0000,0x80000000,0x00000000,0x3F738000
+ long 0x3FFF0000,0x8164D1F3,0xBC030773,0x3FBEF7CA
+ long 0x3FFF0000,0x82CD8698,0xAC2BA1D7,0x3FBDF8A9
+ long 0x3FFF0000,0x843A28C3,0xACDE4046,0x3FBCD7C9
+ long 0x3FFF0000,0x85AAC367,0xCC487B15,0xBFBDE8DA
+ long 0x3FFF0000,0x871F6196,0x9E8D1010,0x3FBDE85C
+ long 0x3FFF0000,0x88980E80,0x92DA8527,0x3FBEBBF1
+ long 0x3FFF0000,0x8A14D575,0x496EFD9A,0x3FBB80CA
+ long 0x3FFF0000,0x8B95C1E3,0xEA8BD6E7,0xBFBA8373
+ long 0x3FFF0000,0x8D1ADF5B,0x7E5BA9E6,0xBFBE9670
+ long 0x3FFF0000,0x8EA4398B,0x45CD53C0,0x3FBDB700
+ long 0x3FFF0000,0x9031DC43,0x1466B1DC,0x3FBEEEB0
+ long 0x3FFF0000,0x91C3D373,0xAB11C336,0x3FBBFD6D
+ long 0x3FFF0000,0x935A2B2F,0x13E6E92C,0xBFBDB319
+ long 0x3FFF0000,0x94F4EFA8,0xFEF70961,0x3FBDBA2B
+ long 0x3FFF0000,0x96942D37,0x20185A00,0x3FBE91D5
+ long 0x3FFF0000,0x9837F051,0x8DB8A96F,0x3FBE8D5A
+ long 0x3FFF0000,0x99E04593,0x20B7FA65,0xBFBCDE7B
+ long 0x3FFF0000,0x9B8D39B9,0xD54E5539,0xBFBEBAAF
+ long 0x3FFF0000,0x9D3ED9A7,0x2CFFB751,0xBFBD86DA
+ long 0x3FFF0000,0x9EF53260,0x91A111AE,0xBFBEBEDD
+ long 0x3FFF0000,0xA0B0510F,0xB9714FC2,0x3FBCC96E
+ long 0x3FFF0000,0xA2704303,0x0C496819,0xBFBEC90B
+ long 0x3FFF0000,0xA43515AE,0x09E6809E,0x3FBBD1DB
+ long 0x3FFF0000,0xA5FED6A9,0xB15138EA,0x3FBCE5EB
+ long 0x3FFF0000,0xA7CD93B4,0xE965356A,0xBFBEC274
+ long 0x3FFF0000,0xA9A15AB4,0xEA7C0EF8,0x3FBEA83C
+ long 0x3FFF0000,0xAB7A39B5,0xA93ED337,0x3FBECB00
+ long 0x3FFF0000,0xAD583EEA,0x42A14AC6,0x3FBE9301
+ long 0x3FFF0000,0xAF3B78AD,0x690A4375,0xBFBD8367
+ long 0x3FFF0000,0xB123F581,0xD2AC2590,0xBFBEF05F
+ long 0x3FFF0000,0xB311C412,0xA9112489,0x3FBDFB3C
+ long 0x3FFF0000,0xB504F333,0xF9DE6484,0x3FBEB2FB
+ long 0x3FFF0000,0xB6FD91E3,0x28D17791,0x3FBAE2CB
+ long 0x3FFF0000,0xB8FBAF47,0x62FB9EE9,0x3FBCDC3C
+ long 0x3FFF0000,0xBAFF5AB2,0x133E45FB,0x3FBEE9AA
+ long 0x3FFF0000,0xBD08A39F,0x580C36BF,0xBFBEAEFD
+ long 0x3FFF0000,0xBF1799B6,0x7A731083,0xBFBCBF51
+ long 0x3FFF0000,0xC12C4CCA,0x66709456,0x3FBEF88A
+ long 0x3FFF0000,0xC346CCDA,0x24976407,0x3FBD83B2
+ long 0x3FFF0000,0xC5672A11,0x5506DADD,0x3FBDF8AB
+ long 0x3FFF0000,0xC78D74C8,0xABB9B15D,0xBFBDFB17
+ long 0x3FFF0000,0xC9B9BD86,0x6E2F27A3,0xBFBEFE3C
+ long 0x3FFF0000,0xCBEC14FE,0xF2727C5D,0xBFBBB6F8
+ long 0x3FFF0000,0xCE248C15,0x1F8480E4,0xBFBCEE53
+ long 0x3FFF0000,0xD06333DA,0xEF2B2595,0xBFBDA4AE
+ long 0x3FFF0000,0xD2A81D91,0xF12AE45A,0x3FBC9124
+ long 0x3FFF0000,0xD4F35AAB,0xCFEDFA1F,0x3FBEB243
+ long 0x3FFF0000,0xD744FCCA,0xD69D6AF4,0x3FBDE69A
+ long 0x3FFF0000,0xD99D15C2,0x78AFD7B6,0xBFB8BC61
+ long 0x3FFF0000,0xDBFBB797,0xDAF23755,0x3FBDF610
+ long 0x3FFF0000,0xDE60F482,0x5E0E9124,0xBFBD8BE1
+ long 0x3FFF0000,0xE0CCDEEC,0x2A94E111,0x3FBACB12
+ long 0x3FFF0000,0xE33F8972,0xBE8A5A51,0x3FBB9BFE
+ long 0x3FFF0000,0xE5B906E7,0x7C8348A8,0x3FBCF2F4
+ long 0x3FFF0000,0xE8396A50,0x3C4BDC68,0x3FBEF22F
+ long 0x3FFF0000,0xEAC0C6E7,0xDD24392F,0xBFBDBF4A
+ long 0x3FFF0000,0xED4F301E,0xD9942B84,0x3FBEC01A
+ long 0x3FFF0000,0xEFE4B99B,0xDCDAF5CB,0x3FBE8CAC
+ long 0x3FFF0000,0xF281773C,0x59FFB13A,0xBFBCBB3F
+ long 0x3FFF0000,0xF5257D15,0x2486CC2C,0x3FBEF73A
+ long 0x3FFF0000,0xF7D0DF73,0x0AD13BB9,0xBFB8B795
+ long 0x3FFF0000,0xFA83B2DB,0x722A033A,0x3FBEF84B
+ long 0x3FFF0000,0xFD3E0C0C,0xF486C175,0xBFBEF581
+
+ set INT,L_SCR1
+
+ set X,FP_SCR0
+ set XDCARE,X+2
+ set XFRAC,X+4
+
+ set ADJFACT,FP_SCR0
+
+ set FACT1,FP_SCR0
+ set FACT1HI,FACT1+4
+ set FACT1LOW,FACT1+8
+
+ set FACT2,FP_SCR1
+ set FACT2HI,FACT2+4
+ set FACT2LOW,FACT2+8
+
+ global stwotox
+#--ENTRY POINT FOR 2**(X), HERE X IS FINITE, NON-ZERO, AND NOT NAN'S
+stwotox:
+ fmovm.x (%a0),&0x80 # LOAD INPUT
+
+ mov.l (%a0),%d1
+ mov.w 4(%a0),%d1
+ fmov.x %fp0,X(%a6)
+ and.l &0x7FFFFFFF,%d1
+
+ cmp.l %d1,&0x3FB98000 # |X| >= 2**(-70)?
+ bge.b TWOOK1
+ bra.w EXPBORS
+
+TWOOK1:
+ cmp.l %d1,&0x400D80C0 # |X| > 16480?
+ ble.b TWOMAIN
+ bra.w EXPBORS
+
+TWOMAIN:
+#--USUAL CASE, 2^(-70) <= |X| <= 16480
+
+ fmov.x %fp0,%fp1
+ fmul.s &0x42800000,%fp1 # 64 * X
+ fmov.l %fp1,INT(%a6) # N = ROUND-TO-INT(64 X)
+ mov.l %d2,-(%sp)
+ lea TEXPTBL(%pc),%a1 # LOAD ADDRESS OF TABLE OF 2^(J/64)
+ fmov.l INT(%a6),%fp1 # N --> FLOATING FMT
+ mov.l INT(%a6),%d1
+ mov.l %d1,%d2
+ and.l &0x3F,%d1 # D0 IS J
+ asl.l &4,%d1 # DISPLACEMENT FOR 2^(J/64)
+ add.l %d1,%a1 # ADDRESS FOR 2^(J/64)
+ asr.l &6,%d2 # d2 IS L, N = 64L + J
+ mov.l %d2,%d1
+ asr.l &1,%d1 # D0 IS M
+ sub.l %d1,%d2 # d2 IS M', N = 64(M+M') + J
+ add.l &0x3FFF,%d2
+
+#--SUMMARY: a1 IS ADDRESS FOR THE LEADING PORTION OF 2^(J/64),
+#--D0 IS M WHERE N = 64(M+M') + J. NOTE THAT |M| <= 16140 BY DESIGN.
+#--ADJFACT = 2^(M').
+#--REGISTERS SAVED SO FAR ARE (IN ORDER) FPCR, D0, FP1, a1, AND FP2.
+
+ fmovm.x &0x0c,-(%sp) # save fp2/fp3
+
+ fmul.s &0x3C800000,%fp1 # (1/64)*N
+ mov.l (%a1)+,FACT1(%a6)
+ mov.l (%a1)+,FACT1HI(%a6)
+ mov.l (%a1)+,FACT1LOW(%a6)
+ mov.w (%a1)+,FACT2(%a6)
+
+ fsub.x %fp1,%fp0 # X - (1/64)*INT(64 X)
+
+ mov.w (%a1)+,FACT2HI(%a6)
+ clr.w FACT2HI+2(%a6)
+ clr.l FACT2LOW(%a6)
+ add.w %d1,FACT1(%a6)
+ fmul.x LOG2(%pc),%fp0 # FP0 IS R
+ add.w %d1,FACT2(%a6)
+
+ bra.w expr
+
+EXPBORS:
+#--FPCR, D0 SAVED
+ cmp.l %d1,&0x3FFF8000
+ bgt.b TEXPBIG
+
+#--|X| IS SMALL, RETURN 1 + X
+
+ fmov.l %d0,%fpcr # restore users round prec,mode
+ fadd.s &0x3F800000,%fp0 # RETURN 1 + X
+ bra t_pinx2
+
+TEXPBIG:
+#--|X| IS LARGE, GENERATE OVERFLOW IF X > 0; ELSE GENERATE UNDERFLOW
+#--REGISTERS SAVED SO FAR ARE FPCR AND D0
+ mov.l X(%a6),%d1
+ cmp.l %d1,&0
+ blt.b EXPNEG
+
+ bra t_ovfl2 # t_ovfl expects positive value
+
+EXPNEG:
+ bra t_unfl2 # t_unfl expects positive value
+
+ global stwotoxd
+stwotoxd:
+#--ENTRY POINT FOR 2**(X) FOR DENORMALIZED ARGUMENT
+
+ fmov.l %d0,%fpcr # set user's rounding mode/precision
+ fmov.s &0x3F800000,%fp0 # RETURN 1 + X
+ mov.l (%a0),%d1
+ or.l &0x00800001,%d1
+ fadd.s %d1,%fp0
+ bra t_pinx2
+
+ global stentox
+#--ENTRY POINT FOR 10**(X), HERE X IS FINITE, NON-ZERO, AND NOT NAN'S
+stentox:
+ fmovm.x (%a0),&0x80 # LOAD INPUT
+
+ mov.l (%a0),%d1
+ mov.w 4(%a0),%d1
+ fmov.x %fp0,X(%a6)
+ and.l &0x7FFFFFFF,%d1
+
+ cmp.l %d1,&0x3FB98000 # |X| >= 2**(-70)?
+ bge.b TENOK1
+ bra.w EXPBORS
+
+TENOK1:
+ cmp.l %d1,&0x400B9B07 # |X| <= 16480*log2/log10 ?
+ ble.b TENMAIN
+ bra.w EXPBORS
+
+TENMAIN:
+#--USUAL CASE, 2^(-70) <= |X| <= 16480 LOG 2 / LOG 10
+
+ fmov.x %fp0,%fp1
+ fmul.d L2TEN64(%pc),%fp1 # X*64*LOG10/LOG2
+ fmov.l %fp1,INT(%a6) # N=INT(X*64*LOG10/LOG2)
+ mov.l %d2,-(%sp)
+ lea TEXPTBL(%pc),%a1 # LOAD ADDRESS OF TABLE OF 2^(J/64)
+ fmov.l INT(%a6),%fp1 # N --> FLOATING FMT
+ mov.l INT(%a6),%d1
+ mov.l %d1,%d2
+ and.l &0x3F,%d1 # D0 IS J
+ asl.l &4,%d1 # DISPLACEMENT FOR 2^(J/64)
+ add.l %d1,%a1 # ADDRESS FOR 2^(J/64)
+ asr.l &6,%d2 # d2 IS L, N = 64L + J
+ mov.l %d2,%d1
+ asr.l &1,%d1 # D0 IS M
+ sub.l %d1,%d2 # d2 IS M', N = 64(M+M') + J
+ add.l &0x3FFF,%d2
+
+#--SUMMARY: a1 IS ADDRESS FOR THE LEADING PORTION OF 2^(J/64),
+#--D0 IS M WHERE N = 64(M+M') + J. NOTE THAT |M| <= 16140 BY DESIGN.
+#--ADJFACT = 2^(M').
+#--REGISTERS SAVED SO FAR ARE (IN ORDER) FPCR, D0, FP1, a1, AND FP2.
+ fmovm.x &0x0c,-(%sp) # save fp2/fp3
+
+ fmov.x %fp1,%fp2
+
+ fmul.d L10TWO1(%pc),%fp1 # N*(LOG2/64LOG10)_LEAD
+ mov.l (%a1)+,FACT1(%a6)
+
+ fmul.x L10TWO2(%pc),%fp2 # N*(LOG2/64LOG10)_TRAIL
+
+ mov.l (%a1)+,FACT1HI(%a6)
+ mov.l (%a1)+,FACT1LOW(%a6)
+ fsub.x %fp1,%fp0 # X - N L_LEAD
+ mov.w (%a1)+,FACT2(%a6)
+
+ fsub.x %fp2,%fp0 # X - N L_TRAIL
+
+ mov.w (%a1)+,FACT2HI(%a6)
+ clr.w FACT2HI+2(%a6)
+ clr.l FACT2LOW(%a6)
+
+ fmul.x LOG10(%pc),%fp0 # FP0 IS R
+ add.w %d1,FACT1(%a6)
+ add.w %d1,FACT2(%a6)
+
+expr:
+#--FPCR, FP2, FP3 ARE SAVED IN ORDER AS SHOWN.
+#--ADJFACT CONTAINS 2**(M'), FACT1 + FACT2 = 2**(M) * 2**(J/64).
+#--FP0 IS R. THE FOLLOWING CODE COMPUTES
+#-- 2**(M'+M) * 2**(J/64) * EXP(R)
+
+ fmov.x %fp0,%fp1
+ fmul.x %fp1,%fp1 # FP1 IS S = R*R
+
+ fmov.d EXPA5(%pc),%fp2 # FP2 IS A5
+ fmov.d EXPA4(%pc),%fp3 # FP3 IS A4
+
+ fmul.x %fp1,%fp2 # FP2 IS S*A5
+ fmul.x %fp1,%fp3 # FP3 IS S*A4
+
+ fadd.d EXPA3(%pc),%fp2 # FP2 IS A3+S*A5
+ fadd.d EXPA2(%pc),%fp3 # FP3 IS A2+S*A4
+
+ fmul.x %fp1,%fp2 # FP2 IS S*(A3+S*A5)
+ fmul.x %fp1,%fp3 # FP3 IS S*(A2+S*A4)
+
+ fadd.d EXPA1(%pc),%fp2 # FP2 IS A1+S*(A3+S*A5)
+ fmul.x %fp0,%fp3 # FP3 IS R*S*(A2+S*A4)
+
+ fmul.x %fp1,%fp2 # FP2 IS S*(A1+S*(A3+S*A5))
+ fadd.x %fp3,%fp0 # FP0 IS R+R*S*(A2+S*A4)
+ fadd.x %fp2,%fp0 # FP0 IS EXP(R) - 1
+
+ fmovm.x (%sp)+,&0x30 # restore fp2/fp3
+
+#--FINAL RECONSTRUCTION PROCESS
+#--2**X (OR 10**X) = 2**(M') * ( FACT1 + ((FACT1*P) + FACT2) ),
+#--WHERE FACT1 + FACT2 = 2**(M) * 2**(J/64) AND P = EXP(R) - 1
+
+ fmul.x FACT1(%a6),%fp0
+ fadd.x FACT2(%a6),%fp0
+ fadd.x FACT1(%a6),%fp0
+
+ fmov.l %d0,%fpcr # restore users round prec,mode
+ mov.w %d2,ADJFACT(%a6) # INSERT EXPONENT
+ mov.l (%sp)+,%d2
+ mov.l &0x80000000,ADJFACT+4(%a6)
+ clr.l ADJFACT+8(%a6)
+ mov.b &FMUL_OP,%d1 # last inst is MUL
+ fmul.x ADJFACT(%a6),%fp0 # FINAL ADJUSTMENT
+ bra t_catch
+
+ global stentoxd
+stentoxd:
+#--ENTRY POINT FOR 10**(X) FOR DENORMALIZED ARGUMENT
+
+ fmov.l %d0,%fpcr # set user's rounding mode/precision
+ fmov.s &0x3F800000,%fp0 # RETURN 1 + X
+ mov.l (%a0),%d1
+ or.l &0x00800001,%d1
+ fadd.s %d1,%fp0
+ bra t_pinx2
+
+#########################################################################
+# smovcr(): returns the ROM constant at the offset specified in d1 #
+# rounded to the mode and precision specified in d0. #
+# #
+# INPUT *************************************************************** #
+# d0 = rnd prec,mode #
+# d1 = ROM offset #
+# #
+# OUTPUT ************************************************************** #
+# fp0 = the ROM constant rounded to the user's rounding mode,prec #
+# #
+#########################################################################
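+
+#--A hedged C sketch of the offset range checks implemented below (enum
+#--and names are illustrative; table contents and rounding-mode handling
+#--omitted):
+#
+#	enum cr_class { CR_PI, CR_SMALL, CR_BIG, CR_ZERO };
+#
+#	enum cr_class classify_cr(int off)
+#	{
+#	    if (off == 0x00)                return CR_PI;    /* pi tables */
+#	    if (off >= 0x0b && off <= 0x0e) return CR_SMALL; /* $0b-$0e   */
+#	    if (off >= 0x30 && off <= 0x3f) return CR_BIG;   /* $30-$3f   */
+#	    return CR_ZERO;                 /* everything else returns +0 */
+#	}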
+
+ global smovcr
+smovcr:
+ mov.l %d1,-(%sp) # save rom offset for a sec
+
+ lsr.b &0x4,%d0 # shift ctrl bits to lo
+ mov.l %d0,%d1 # make a copy
+ andi.w &0x3,%d1 # extract rnd mode
+ andi.w &0xc,%d0 # extract rnd prec
+ swap %d0 # put rnd prec in hi
+ mov.w %d1,%d0 # put rnd mode in lo
+
+ mov.l (%sp)+,%d1 # get rom offset
+
+#
+# check range of offset
+#
+ tst.b %d1 # if zero, offset is to pi
+ beq.b pi_tbl # it is pi
+ cmpi.b %d1,&0x0a # check range $01 - $0a
+ ble.b z_val # if in this range, return zero
+ cmpi.b %d1,&0x0e # check range $0b - $0e
+ ble.b sm_tbl # valid constants in this range
+ cmpi.b %d1,&0x2f # check range $10 - $2f
+ ble.b z_val # if in this range, return zero
+ cmpi.b %d1,&0x3f # check range $30 - $3f
+ ble.b bg_tbl # valid constants in this range
+
+z_val:
+ bra.l ld_pzero # return a zero
+
+#
+# the answer is PI rounded to the proper precision.
+#
+# fetch a pointer to the answer table relating to the proper rounding
+# precision.
+#
+pi_tbl:
+ tst.b %d0 # is rmode RN?
+ bne.b pi_not_rn # no
+pi_rn:
+ lea.l PIRN(%pc),%a0 # yes; load PI RN table addr
+ bra.w set_finx
+pi_not_rn:
+ cmpi.b %d0,&rp_mode # is rmode RP?
+ beq.b pi_rp # yes
+pi_rzrm:
+ lea.l PIRZRM(%pc),%a0 # no; load PI RZ,RM table addr
+ bra.b set_finx
+pi_rp:
+ lea.l PIRP(%pc),%a0 # load PI RP table addr
+ bra.b set_finx
+
+#
+# the answer is one of:
+# $0B log10(2) (inexact)
+# $0C e (inexact)
+# $0D log2(e) (inexact)
+# $0E log10(e) (exact)
+#
+# fetch a pointer to the answer table relating to the proper rounding
+# precision.
+#
+sm_tbl:
+ subi.b &0xb,%d1 # make offset in 0-4 range
+ tst.b %d0 # is rmode RN?
+ bne.b sm_not_rn # no
+sm_rn:
+ lea.l SMALRN(%pc),%a0 # yes; load RN table addr
+sm_tbl_cont:
+ cmpi.b %d1,&0x2 # is result log10(e)?
+ ble.b set_finx # no; answer is inexact
+ bra.b no_finx # yes; answer is exact
+sm_not_rn:
+ cmpi.b %d0,&rp_mode # is rmode RP?
+ beq.b sm_rp # yes
+sm_rzrm:
+ lea.l SMALRZRM(%pc),%a0 # no; load RZ,RM table addr
+ bra.b sm_tbl_cont
+sm_rp:
+ lea.l SMALRP(%pc),%a0 # load RP table addr
+ bra.b sm_tbl_cont
+
+#
+# the answer is one of:
+# $30 ln(2) (inexact)
+# $31 ln(10) (inexact)
+# $32 10^0 (exact)
+# $33 10^1 (exact)
+# $34 10^2 (exact)
+# $35 10^4 (exact)
+# $36 10^8 (exact)
+# $37 10^16 (exact)
+# $38 10^32 (inexact)
+# $39 10^64 (inexact)
+# $3A 10^128 (inexact)
+# $3B 10^256 (inexact)
+# $3C 10^512 (inexact)
+# $3D 10^1024 (inexact)
+# $3E 10^2048 (inexact)
+# $3F 10^4096 (inexact)
+#
+# fetch a pointer to the answer table relating to the proper rounding
+# precision.
+#
+bg_tbl:
+ subi.b &0x30,%d1 # make offset in 0-f range
+ tst.b %d0 # is rmode RN?
+ bne.b bg_not_rn # no
+bg_rn:
+ lea.l BIGRN(%pc),%a0 # yes; load RN table addr
+bg_tbl_cont:
+ cmpi.b %d1,&0x1 # is offset <= $31?
+ ble.b set_finx # yes; answer is inexact
+ cmpi.b %d1,&0x7 # is $32 <= offset <= $37?
+ ble.b no_finx # yes; answer is exact
+ bra.b set_finx # no; answer is inexact
+bg_not_rn:
+ cmpi.b %d0,&rp_mode # is rmode RP?
+ beq.b bg_rp # yes
+bg_rzrm:
+ lea.l BIGRZRM(%pc),%a0 # no; load RZ,RM table addr
+ bra.b bg_tbl_cont
+bg_rp:
+ lea.l BIGRP(%pc),%a0 # load RP table addr
+ bra.b bg_tbl_cont
+
+# answer is inexact, so set INEX2 and AINEX in the user's FPSR.
+set_finx:
+ ori.l &inx2a_mask,USER_FPSR(%a6) # set INEX2/AINEX
+no_finx:
+ mulu.w &0xc,%d1 # offset points into tables
+ swap %d0 # put rnd prec in lo word
+ tst.b %d0 # is precision extended?
+
+ bne.b not_ext # if xprec, do not call round
+
+# Precision is extended
+ fmovm.x (%a0,%d1.w),&0x80 # return result in fp0
+ rts
+
+# Precision is single or double
+not_ext:
+ swap %d0 # rnd prec in upper word
+
+# call round() to round the answer to the proper precision.
+# exponents out of range for single or double DO NOT cause underflow
+# or overflow.
+ mov.w 0x0(%a0,%d1.w),FP_SCR1_EX(%a6) # load first word
+ mov.l 0x4(%a0,%d1.w),FP_SCR1_HI(%a6) # load second word
+ mov.l 0x8(%a0,%d1.w),FP_SCR1_LO(%a6) # load third word
+ mov.l %d0,%d1
+ clr.l %d0 # clear g,r,s
+ lea FP_SCR1(%a6),%a0 # pass ptr to answer
+ clr.w LOCAL_SGN(%a0) # sign always positive
+ bsr.l _round # round the mantissa
+
+ fmovm.x (%a0),&0x80 # return rounded result in fp0
+ rts
+
+ align 0x4
+
+PIRN: long 0x40000000,0xc90fdaa2,0x2168c235 # pi
+PIRZRM: long 0x40000000,0xc90fdaa2,0x2168c234 # pi
+PIRP: long 0x40000000,0xc90fdaa2,0x2168c235 # pi
+
+SMALRN: long 0x3ffd0000,0x9a209a84,0xfbcff798 # log10(2)
+ long 0x40000000,0xadf85458,0xa2bb4a9a # e
+ long 0x3fff0000,0xb8aa3b29,0x5c17f0bc # log2(e)
+ long 0x3ffd0000,0xde5bd8a9,0x37287195 # log10(e)
+ long 0x00000000,0x00000000,0x00000000 # 0.0
+
+SMALRZRM:
+ long 0x3ffd0000,0x9a209a84,0xfbcff798 # log10(2)
+ long 0x40000000,0xadf85458,0xa2bb4a9a # e
+ long 0x3fff0000,0xb8aa3b29,0x5c17f0bb # log2(e)
+ long 0x3ffd0000,0xde5bd8a9,0x37287195 # log10(e)
+ long 0x00000000,0x00000000,0x00000000 # 0.0
+
+SMALRP: long 0x3ffd0000,0x9a209a84,0xfbcff799 # log10(2)
+ long 0x40000000,0xadf85458,0xa2bb4a9b # e
+ long 0x3fff0000,0xb8aa3b29,0x5c17f0bc # log2(e)
+ long 0x3ffd0000,0xde5bd8a9,0x37287195 # log10(e)
+ long 0x00000000,0x00000000,0x00000000 # 0.0
+
+BIGRN: long 0x3ffe0000,0xb17217f7,0xd1cf79ac # ln(2)
+ long 0x40000000,0x935d8ddd,0xaaa8ac17 # ln(10)
+
+ long 0x3fff0000,0x80000000,0x00000000 # 10 ^ 0
+ long 0x40020000,0xA0000000,0x00000000 # 10 ^ 1
+ long 0x40050000,0xC8000000,0x00000000 # 10 ^ 2
+ long 0x400C0000,0x9C400000,0x00000000 # 10 ^ 4
+ long 0x40190000,0xBEBC2000,0x00000000 # 10 ^ 8
+ long 0x40340000,0x8E1BC9BF,0x04000000 # 10 ^ 16
+ long 0x40690000,0x9DC5ADA8,0x2B70B59E # 10 ^ 32
+ long 0x40D30000,0xC2781F49,0xFFCFA6D5 # 10 ^ 64
+ long 0x41A80000,0x93BA47C9,0x80E98CE0 # 10 ^ 128
+ long 0x43510000,0xAA7EEBFB,0x9DF9DE8E # 10 ^ 256
+ long 0x46A30000,0xE319A0AE,0xA60E91C7 # 10 ^ 512
+ long 0x4D480000,0xC9767586,0x81750C17 # 10 ^ 1024
+ long 0x5A920000,0x9E8B3B5D,0xC53D5DE5 # 10 ^ 2048
+ long 0x75250000,0xC4605202,0x8A20979B # 10 ^ 4096
+
+BIGRZRM:
+ long 0x3ffe0000,0xb17217f7,0xd1cf79ab # ln(2)
+ long 0x40000000,0x935d8ddd,0xaaa8ac16 # ln(10)
+
+ long 0x3fff0000,0x80000000,0x00000000 # 10 ^ 0
+ long 0x40020000,0xA0000000,0x00000000 # 10 ^ 1
+ long 0x40050000,0xC8000000,0x00000000 # 10 ^ 2
+ long 0x400C0000,0x9C400000,0x00000000 # 10 ^ 4
+ long 0x40190000,0xBEBC2000,0x00000000 # 10 ^ 8
+ long 0x40340000,0x8E1BC9BF,0x04000000 # 10 ^ 16
+ long 0x40690000,0x9DC5ADA8,0x2B70B59D # 10 ^ 32
+ long 0x40D30000,0xC2781F49,0xFFCFA6D5 # 10 ^ 64
+ long 0x41A80000,0x93BA47C9,0x80E98CDF # 10 ^ 128
+ long 0x43510000,0xAA7EEBFB,0x9DF9DE8D # 10 ^ 256
+ long 0x46A30000,0xE319A0AE,0xA60E91C6 # 10 ^ 512
+ long 0x4D480000,0xC9767586,0x81750C17 # 10 ^ 1024
+ long 0x5A920000,0x9E8B3B5D,0xC53D5DE4 # 10 ^ 2048
+ long 0x75250000,0xC4605202,0x8A20979A # 10 ^ 4096
+
+BIGRP:
+ long 0x3ffe0000,0xb17217f7,0xd1cf79ac # ln(2)
+ long 0x40000000,0x935d8ddd,0xaaa8ac17 # ln(10)
+
+ long 0x3fff0000,0x80000000,0x00000000 # 10 ^ 0
+ long 0x40020000,0xA0000000,0x00000000 # 10 ^ 1
+ long 0x40050000,0xC8000000,0x00000000 # 10 ^ 2
+ long 0x400C0000,0x9C400000,0x00000000 # 10 ^ 4
+ long 0x40190000,0xBEBC2000,0x00000000 # 10 ^ 8
+ long 0x40340000,0x8E1BC9BF,0x04000000 # 10 ^ 16
+ long 0x40690000,0x9DC5ADA8,0x2B70B59E # 10 ^ 32
+ long 0x40D30000,0xC2781F49,0xFFCFA6D6 # 10 ^ 64
+ long 0x41A80000,0x93BA47C9,0x80E98CE0 # 10 ^ 128
+ long 0x43510000,0xAA7EEBFB,0x9DF9DE8E # 10 ^ 256
+ long 0x46A30000,0xE319A0AE,0xA60E91C7 # 10 ^ 512
+ long 0x4D480000,0xC9767586,0x81750C18 # 10 ^ 1024
+ long 0x5A920000,0x9E8B3B5D,0xC53D5DE5 # 10 ^ 2048
+ long 0x75250000,0xC4605202,0x8A20979B # 10 ^ 4096
+
+#########################################################################
+# sscale(): computes the destination operand scaled by the source #
+#	operand. If the absolute value of the source operand is	#
+# >= 2^14, an overflow or underflow is returned. #
+# #
+# INPUT *************************************************************** #
+# a0 = pointer to double-extended source operand X #
+# a1 = pointer to double-extended destination operand Y #
+# #
+# OUTPUT ************************************************************** #
+# fp0 = scale(X,Y) #
+# #
+#########################################################################
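+
+# A worked case of the routine below (operand values are only for
+# illustration): with src X = +3.0 and dst Y = 1.5, int(X) = 3, so the
+# code builds the extended value 2^3 and performs one fmul, returning
+# 1.5 * 2^3 = 12.0. A src whose integer part is out of range instead
+# takes the src_out overflow/underflow exit.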
+
+set SIGN, L_SCR1
+
+ global sscale
+sscale:
+ mov.l %d0,-(%sp) # store off ctrl bits for now
+
+ mov.w DST_EX(%a1),%d1 # get dst exponent
+ smi.b SIGN(%a6) # use SIGN to hold dst sign
+ andi.l &0x00007fff,%d1 # strip sign from dst exp
+
+ mov.w SRC_EX(%a0),%d0 # check src bounds
+ andi.w &0x7fff,%d0 # clr src sign bit
+	cmpi.w		%d0,&0x3fff		# is |src| < 1 (int(src) = 0)?
+ blt.w src_small # yes
+ cmpi.w %d0,&0x400c # no; is src too big?
+ bgt.w src_out # yes
+
+#
+# Source is within 2^14 range.
+#
+src_ok:
+ fintrz.x SRC(%a0),%fp0 # calc int of src
+ fmov.l %fp0,%d0 # int src to d0
+# don't want any accrued bits from the fintrz showing up later since
+# we may need to read the fpsr for the last fp op in t_catch2().
+ fmov.l &0x0,%fpsr
+
+ tst.b DST_HI(%a1) # is dst denormalized?
+ bmi.b sok_norm
+
+# the dst is a DENORM. normalize the DENORM and add the adjustment to
+# the src value. then, jump to the norm part of the routine.
+sok_dnrm:
+ mov.l %d0,-(%sp) # save src for now
+
+ mov.w DST_EX(%a1),FP_SCR0_EX(%a6) # make a copy
+ mov.l DST_HI(%a1),FP_SCR0_HI(%a6)
+ mov.l DST_LO(%a1),FP_SCR0_LO(%a6)
+
+ lea FP_SCR0(%a6),%a0 # pass ptr to DENORM
+ bsr.l norm # normalize the DENORM
+ neg.l %d0
+ add.l (%sp)+,%d0 # add adjustment to src
+
+ fmovm.x FP_SCR0(%a6),&0x80 # load normalized DENORM
+
+	cmpi.w		%d0,&-0x3fff		# is the shift amt really low?
+ bge.b sok_norm2 # thank goodness no
+
+# the multiply factor that we're trying to create should be a denorm
+# for the multiply to work. therefore, we're going to actually do a
+# multiply with a denorm which will cause an unimplemented data type
+# exception to be put into the machine which will be caught and corrected
+# later. we don't do this with the DENORMs above because this method
+# is slower. but, don't fret, I don't see it being used much either.
+ fmov.l (%sp)+,%fpcr # restore user fpcr
+ mov.l &0x80000000,%d1 # load normalized mantissa
+ subi.l &-0x3fff,%d0 # how many should we shift?
+ neg.l %d0 # make it positive
+ cmpi.b %d0,&0x20 # is it > 32?
+ bge.b sok_dnrm_32 # yes
+ lsr.l %d0,%d1 # no; bit stays in upper lw
+ clr.l -(%sp) # insert zero low mantissa
+ mov.l %d1,-(%sp) # insert new high mantissa
+ clr.l -(%sp) # make zero exponent
+ bra.b sok_norm_cont
+sok_dnrm_32:
+ subi.b &0x20,%d0 # get shift count
+ lsr.l %d0,%d1 # make low mantissa longword
+ mov.l %d1,-(%sp) # insert new low mantissa
+ clr.l -(%sp) # insert zero high mantissa
+ clr.l -(%sp) # make zero exponent
+ bra.b sok_norm_cont
+
+# the src will force the dst to a DENORM value or worse. so, let's
+# create an fp multiply that will create the result.
+sok_norm:
+ fmovm.x DST(%a1),&0x80 # load fp0 with normalized src
+sok_norm2:
+ fmov.l (%sp)+,%fpcr # restore user fpcr
+
+ addi.w &0x3fff,%d0 # turn src amt into exp value
+ swap %d0 # put exponent in high word
+ clr.l -(%sp) # insert new exponent
+ mov.l &0x80000000,-(%sp) # insert new high mantissa
+ mov.l %d0,-(%sp) # insert new lo mantissa
+
+sok_norm_cont:
+ fmov.l %fpcr,%d0 # d0 needs fpcr for t_catch2
+ mov.b &FMUL_OP,%d1 # last inst is MUL
+ fmul.x (%sp)+,%fp0 # do the multiply
+ bra t_catch2 # catch any exceptions
+
+#
+# Source is outside of 2^14 range. Test the sign and branch
+# to the appropriate exception handler.
+#
+src_out:
+ mov.l (%sp)+,%d0 # restore ctrl bits
+ exg %a0,%a1 # swap src,dst ptrs
+ tst.b SRC_EX(%a1) # is src negative?
+ bmi t_unfl # yes; underflow
+ bra t_ovfl_sc # no; overflow
+
+#
+# The source operand is less than 1 in magnitude, so the integer part
+# of the scale factor is zero: simply return the dst. If the dst is a
+# denorm, route through t_resdnrm so that unfl gets set.
+#
+src_small:
+ tst.b DST_HI(%a1) # is dst denormalized?
+ bpl.b ssmall_done # yes
+
+ mov.l (%sp)+,%d0
+ fmov.l %d0,%fpcr # no; load control bits
+ mov.b &FMOV_OP,%d1 # last inst is MOVE
+ fmov.x DST(%a1),%fp0 # simply return dest
+ bra t_catch2
+ssmall_done:
+ mov.l (%sp)+,%d0 # load control bits into d1
+ mov.l %a1,%a0 # pass ptr to dst
+ bra t_resdnrm
+
+#########################################################################
+# smod(): computes the fp MOD of the input values X,Y. #
+# srem(): computes the fp (IEEE) REM of the input values X,Y. #
+# #
+# INPUT *************************************************************** #
+#	a0 = pointer to extended precision divisor Y (source)		#
+#	a1 = pointer to extended precision dividend X (destination)	#
+# d0 = round precision,mode #
+# #
+# The input operands X and Y can be either normalized or #
+# denormalized. #
+# #
+# OUTPUT ************************************************************** #
+# fp0 = FREM(X,Y) or FMOD(X,Y) #
+# #
+# ALGORITHM *********************************************************** #
+# #
+# Step 1. Save and strip signs of X and Y: signX := sign(X), #
+# signY := sign(Y), X := |X|, Y := |Y|, #
+# signQ := signX EOR signY. Record whether MOD or REM #
+# is requested. #
+# #
+# Step 2. Set L := expo(X)-expo(Y), k := 0, Q := 0. #
+# If (L < 0) then #
+# R := X, go to Step 4. #
+# else #
+# R := 2^(-L)X, j := L. #
+# endif #
+# #
+# Step 3. Perform MOD(X,Y) #
+# 3.1 If R = Y, go to Step 9. #
+# 3.2 If R > Y, then { R := R - Y, Q := Q + 1} #
+# 3.3 If j = 0, go to Step 4. #
+# 3.4 k := k + 1, j := j - 1, Q := 2Q, R := 2R. Go to #
+# Step 3.1. #
+# #
+# Step 4. At this point, R = X - QY = MOD(X,Y). Set #
+# Last_Subtract := false (used in Step 7 below). If #
+# MOD is requested, go to Step 6. #
+# #
+# Step 5. R = MOD(X,Y), but REM(X,Y) is requested. #
+# 5.1 If R < Y/2, then R = MOD(X,Y) = REM(X,Y). Go to #
+# Step 6. #
+# 5.2 If R > Y/2, then { set Last_Subtract := true, #
+# Q := Q + 1, Y := signY*Y }. Go to Step 6. #
+# 5.3 This is the tricky case of R = Y/2. If Q is odd, #
+# then { Q := Q + 1, signX := -signX }. #
+# #
+# Step 6. R := signX*R. #
+# #
+# Step 7. If Last_Subtract = true, R := R - Y. #
+# #
+# Step 8. Return signQ, last 7 bits of Q, and R as required. #
+# #
+# Step 9. At this point, R = 2^(-j)*X - Q Y = Y. Thus, #
+# X = 2^(j)*(Q+1)Y. set Q := 2^(j)*(Q+1), #
+# R := 0. Return signQ, last 7 bits of Q, and R. #
+# #
+#########################################################################
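+
+# A worked case of Steps 3-5 (operand values are only for
+# illustration): X = 7.0, Y = 2.0. MOD truncates: Q = 3 and
+# R = 7 - 3*2 = +1. REM rounds to nearest: R = +1 equals Y/2, which is
+# the Step 5.3 tie case; Q = 3 is odd, so Q becomes 4 and the sign of
+# R flips, giving R = -1. Either way, signQ and the last 7 bits of Q
+# end up in the FPSR quotient byte.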
+
+ set Mod_Flag,L_SCR3
+ set Sc_Flag,L_SCR3+1
+
+ set SignY,L_SCR2
+ set SignX,L_SCR2+2
+ set SignQ,L_SCR3+2
+
+ set Y,FP_SCR0
+ set Y_Hi,Y+4
+ set Y_Lo,Y+8
+
+ set R,FP_SCR1
+ set R_Hi,R+4
+ set R_Lo,R+8
+
+Scale:
+ long 0x00010000,0x80000000,0x00000000,0x00000000
+
+ global smod
+smod:
+ clr.b FPSR_QBYTE(%a6)
+ mov.l %d0,-(%sp) # save ctrl bits
+ clr.b Mod_Flag(%a6)
+ bra.b Mod_Rem
+
+ global srem
+srem:
+ clr.b FPSR_QBYTE(%a6)
+ mov.l %d0,-(%sp) # save ctrl bits
+ mov.b &0x1,Mod_Flag(%a6)
+
+Mod_Rem:
+#..Save sign of X and Y
+ movm.l &0x3f00,-(%sp) # save data registers
+ mov.w SRC_EX(%a0),%d3
+ mov.w %d3,SignY(%a6)
+ and.l &0x00007FFF,%d3 # Y := |Y|
+
+#
+ mov.l SRC_HI(%a0),%d4
+ mov.l SRC_LO(%a0),%d5 # (D3,D4,D5) is |Y|
+
+ tst.l %d3
+ bne.b Y_Normal
+
+ mov.l &0x00003FFE,%d3 # $3FFD + 1
+ tst.l %d4
+ bne.b HiY_not0
+
+HiY_0:
+ mov.l %d5,%d4
+ clr.l %d5
+ sub.l &32,%d3
+ clr.l %d6
+ bfffo %d4{&0:&32},%d6
+ lsl.l %d6,%d4
+ sub.l %d6,%d3 # (D3,D4,D5) is normalized
+# ...with bias $7FFD
+ bra.b Chk_X
+
+HiY_not0:
+ clr.l %d6
+ bfffo %d4{&0:&32},%d6
+ sub.l %d6,%d3
+ lsl.l %d6,%d4
+ mov.l %d5,%d7 # a copy of D5
+ lsl.l %d6,%d5
+ neg.l %d6
+ add.l &32,%d6
+ lsr.l %d6,%d7
+ or.l %d7,%d4 # (D3,D4,D5) normalized
+# ...with bias $7FFD
+ bra.b Chk_X
+
+Y_Normal:
+ add.l &0x00003FFE,%d3 # (D3,D4,D5) normalized
+# ...with bias $7FFD
+
+Chk_X:
+ mov.w DST_EX(%a1),%d0
+ mov.w %d0,SignX(%a6)
+ mov.w SignY(%a6),%d1
+ eor.l %d0,%d1
+ and.l &0x00008000,%d1
+ mov.w %d1,SignQ(%a6) # sign(Q) obtained
+ and.l &0x00007FFF,%d0
+ mov.l DST_HI(%a1),%d1
+ mov.l DST_LO(%a1),%d2 # (D0,D1,D2) is |X|
+ tst.l %d0
+ bne.b X_Normal
+ mov.l &0x00003FFE,%d0
+ tst.l %d1
+ bne.b HiX_not0
+
+HiX_0:
+ mov.l %d2,%d1
+ clr.l %d2
+ sub.l &32,%d0
+ clr.l %d6
+ bfffo %d1{&0:&32},%d6
+ lsl.l %d6,%d1
+ sub.l %d6,%d0 # (D0,D1,D2) is normalized
+# ...with bias $7FFD
+ bra.b Init
+
+HiX_not0:
+ clr.l %d6
+ bfffo %d1{&0:&32},%d6
+ sub.l %d6,%d0
+ lsl.l %d6,%d1
+ mov.l %d2,%d7 # a copy of D2
+ lsl.l %d6,%d2
+ neg.l %d6
+ add.l &32,%d6
+ lsr.l %d6,%d7
+ or.l %d7,%d1 # (D0,D1,D2) normalized
+# ...with bias $7FFD
+ bra.b Init
+
+X_Normal:
+ add.l &0x00003FFE,%d0 # (D0,D1,D2) normalized
+# ...with bias $7FFD
+
+Init:
+#
+ mov.l %d3,L_SCR1(%a6) # save biased exp(Y)
+ mov.l %d0,-(%sp) # save biased exp(X)
+ sub.l %d3,%d0 # L := expo(X)-expo(Y)
+
+ clr.l %d6 # D6 := carry <- 0
+ clr.l %d3 # D3 is Q
+ mov.l &0,%a1 # A1 is k; j+k=L, Q=0
+
+#..(Carry,D1,D2) is R
+ tst.l %d0
+ bge.b Mod_Loop_pre
+
+#..expo(X) < expo(Y). Thus X = mod(X,Y)
+#
+ mov.l (%sp)+,%d0 # restore d0
+ bra.w Get_Mod
+
+Mod_Loop_pre:
+ addq.l &0x4,%sp # erase exp(X)
+#..At this point R = 2^(-L)X; Q = 0; k = 0; and k+j = L
+Mod_Loop:
+ tst.l %d6 # test carry bit
+ bgt.b R_GT_Y
+
+#..At this point carry = 0, R = (D1,D2), Y = (D4,D5)
+ cmp.l %d1,%d4 # compare hi(R) and hi(Y)
+ bne.b R_NE_Y
+ cmp.l %d2,%d5 # compare lo(R) and lo(Y)
+ bne.b R_NE_Y
+
+#..At this point, R = Y
+ bra.w Rem_is_0
+
+R_NE_Y:
+#..use the borrow of the previous compare
+ bcs.b R_LT_Y # borrow is set iff R < Y
+
+R_GT_Y:
+#..If Carry is set, then Y < (Carry,D1,D2) < 2Y. Otherwise, Carry = 0
+#..and Y < (D1,D2) < 2Y. Either way, perform R - Y
+ sub.l %d5,%d2 # lo(R) - lo(Y)
+ subx.l %d4,%d1 # hi(R) - hi(Y)
+ clr.l %d6 # clear carry
+ addq.l &1,%d3 # Q := Q + 1
+
+R_LT_Y:
+#..At this point, Carry=0, R < Y. R = 2^(k-L)X - QY; k+j = L; j >= 0.
+ tst.l %d0 # see if j = 0.
+ beq.b PostLoop
+
+ add.l %d3,%d3 # Q := 2Q
+ add.l %d2,%d2 # lo(R) = 2lo(R)
+ roxl.l &1,%d1 # hi(R) = 2hi(R) + carry
+ scs %d6 # set Carry if 2(R) overflows
+ addq.l &1,%a1 # k := k+1
+ subq.l &1,%d0 # j := j - 1
+#..At this point, R=(Carry,D1,D2) = 2^(k-L)X - QY, j+k=L, j >= 0, R < 2Y.
+
+ bra.b Mod_Loop
+
+PostLoop:
+#..k = L, j = 0, Carry = 0, R = (D1,D2) = X - QY, R < Y.
+
+#..normalize R.
+ mov.l L_SCR1(%a6),%d0 # new biased expo of R
+ tst.l %d1
+ bne.b HiR_not0
+
+HiR_0:
+ mov.l %d2,%d1
+ clr.l %d2
+ sub.l &32,%d0
+ clr.l %d6
+ bfffo %d1{&0:&32},%d6
+ lsl.l %d6,%d1
+ sub.l %d6,%d0 # (D0,D1,D2) is normalized
+# ...with bias $7FFD
+ bra.b Get_Mod
+
+HiR_not0:
+ clr.l %d6
+ bfffo %d1{&0:&32},%d6
+ bmi.b Get_Mod # already normalized
+ sub.l %d6,%d0
+ lsl.l %d6,%d1
+ mov.l %d2,%d7 # a copy of D2
+ lsl.l %d6,%d2
+ neg.l %d6
+ add.l &32,%d6
+ lsr.l %d6,%d7
+ or.l %d7,%d1 # (D0,D1,D2) normalized
+
+#
+Get_Mod:
+ cmp.l %d0,&0x000041FE
+ bge.b No_Scale
+Do_Scale:
+ mov.w %d0,R(%a6)
+ mov.l %d1,R_Hi(%a6)
+ mov.l %d2,R_Lo(%a6)
+ mov.l L_SCR1(%a6),%d6
+ mov.w %d6,Y(%a6)
+ mov.l %d4,Y_Hi(%a6)
+ mov.l %d5,Y_Lo(%a6)
+ fmov.x R(%a6),%fp0 # no exception
+ mov.b &1,Sc_Flag(%a6)
+ bra.b ModOrRem
+No_Scale:
+ mov.l %d1,R_Hi(%a6)
+ mov.l %d2,R_Lo(%a6)
+ sub.l &0x3FFE,%d0
+ mov.w %d0,R(%a6)
+ mov.l L_SCR1(%a6),%d6
+ sub.l &0x3FFE,%d6
+ mov.l %d6,L_SCR1(%a6)
+ fmov.x R(%a6),%fp0
+ mov.w %d6,Y(%a6)
+ mov.l %d4,Y_Hi(%a6)
+ mov.l %d5,Y_Lo(%a6)
+ clr.b Sc_Flag(%a6)
+
+#
+ModOrRem:
+ tst.b Mod_Flag(%a6)
+ beq.b Fix_Sign
+
+ mov.l L_SCR1(%a6),%d6 # new biased expo(Y)
+ subq.l &1,%d6 # biased expo(Y/2)
+ cmp.l %d0,%d6
+ blt.b Fix_Sign
+ bgt.b Last_Sub
+
+ cmp.l %d1,%d4
+ bne.b Not_EQ
+ cmp.l %d2,%d5
+ bne.b Not_EQ
+ bra.w Tie_Case
+
+Not_EQ:
+ bcs.b Fix_Sign
+
+Last_Sub:
+#
+ fsub.x Y(%a6),%fp0 # no exceptions
+ addq.l &1,%d3 # Q := Q + 1
+
+#
+Fix_Sign:
+#..Get sign of X
+ mov.w SignX(%a6),%d6
+ bge.b Get_Q
+ fneg.x %fp0
+
+#..Get Q
+#
+Get_Q:
+ clr.l %d6
+ mov.w SignQ(%a6),%d6 # D6 is sign(Q)
+ mov.l &8,%d7
+ lsr.l %d7,%d6
+ and.l &0x0000007F,%d3 # 7 bits of Q
+ or.l %d6,%d3 # sign and bits of Q
+# swap %d3
+# fmov.l %fpsr,%d6
+# and.l &0xFF00FFFF,%d6
+# or.l %d3,%d6
+# fmov.l %d6,%fpsr # put Q in fpsr
+ mov.b %d3,FPSR_QBYTE(%a6) # put Q in fpsr
+
+#
+Restore:
+ movm.l (%sp)+,&0xfc # {%d2-%d7}
+ mov.l (%sp)+,%d0
+ fmov.l %d0,%fpcr
+ tst.b Sc_Flag(%a6)
+ beq.b Finish
+ mov.b &FMUL_OP,%d1 # last inst is MUL
+ fmul.x Scale(%pc),%fp0 # may cause underflow
+ bra t_catch2
+# the '040 package did this apparently to see if the dst operand for the
+# preceding fmul was a denorm. but, it better not have been since the
+# algorithm just got done playing with fp0 and expected no exceptions
+# as a result. trust me...
+#	bra		t_avoid_unsupp		# check for denorm as a
+#						# result of the scaling
+
+Finish:
+ mov.b &FMOV_OP,%d1 # last inst is MOVE
+ fmov.x %fp0,%fp0 # capture exceptions & round
+ bra t_catch2
+
+Rem_is_0:
+#..R = 2^(-j)X - Q Y = Y, thus R = 0 and quotient = 2^j (Q+1)
+ addq.l &1,%d3
+ cmp.l %d0,&8 # D0 is j
+ bge.b Q_Big
+
+ lsl.l %d0,%d3
+ bra.b Set_R_0
+
+Q_Big:
+ clr.l %d3
+
+Set_R_0:
+ fmov.s &0x00000000,%fp0
+ clr.b Sc_Flag(%a6)
+ bra.w Fix_Sign
+
+Tie_Case:
+#..Check parity of Q
+ mov.l %d3,%d6
+ and.l &0x00000001,%d6
+ tst.l %d6
+ beq.w Fix_Sign # Q is even
+
+#..Q is odd, Q := Q + 1, signX := -signX
+ addq.l &1,%d3
+ mov.w SignX(%a6),%d6
+ eor.l &0x00008000,%d6
+ mov.w %d6,SignX(%a6)
+ bra.w Fix_Sign
+
+qnan: long 0x7fff0000, 0xffffffff, 0xffffffff
+
+#########################################################################
+# XDEF **************************************************************** #
+# t_dz(): Handle DZ exception during transcendental emulation. #
+# Sets N bit according to sign of source operand. #
+# t_dz2(): Handle DZ exception during transcendental emulation. #
+# Sets N bit always. #
+# #
+# XREF **************************************************************** #
+# None #
+# #
+# INPUT *************************************************************** #
+# a0 = pointer to source operand #
+# #
+# OUTPUT ************************************************************** #
+# fp0 = default result #
+# #
+# ALGORITHM *********************************************************** #
+# - Store properly signed INF into fp0. #
+# - Set FPSR exception status dz bit, ccode inf bit, and #
+# accrued dz bit. #
+# #
+#########################################################################
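+
+# For example (see the flogn dispatch later in this file), flogn of a
+# zero operand branches to t_dz2: fp0 receives -INF and the
+# N/I/DZ/ADZ bits are set in the user's FPSR.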
+
+ global t_dz
+t_dz:
+	tst.b		SRC_EX(%a0)		# is src negative?
+ bmi.b t_dz2 # yes
+
+dz_pinf:
+ fmov.s &0x7f800000,%fp0 # return +INF in fp0
+ ori.l &dzinf_mask,USER_FPSR(%a6) # set I/DZ/ADZ
+ rts
+
+ global t_dz2
+t_dz2:
+ fmov.s &0xff800000,%fp0 # return -INF in fp0
+ ori.l &dzinf_mask+neg_mask,USER_FPSR(%a6) # set N/I/DZ/ADZ
+ rts
+
+#################################################################
+# OPERR exception: #
+# - set FPSR exception status operr bit, condition code #
+# nan bit; Store default NAN into fp0 #
+#################################################################
+ global t_operr
+t_operr:
+ ori.l &opnan_mask,USER_FPSR(%a6) # set NaN/OPERR/AIOP
+ fmovm.x qnan(%pc),&0x80 # return default NAN in fp0
+ rts
+
+#################################################################
+# Extended DENORM: #
+#	- For all functions for which f(x) = x when the input	#
+#	  x is denormalized, this is the entry point.		#
+# - we only return the EXOP here if either underflow or #
+# inexact is enabled. #
+#################################################################
+
+# Entry point for scale w/ extended denorm. The function does
+# NOT set INEX2/AUNFL/AINEX.
+ global t_resdnrm
+t_resdnrm:
+ ori.l &unfl_mask,USER_FPSR(%a6) # set UNFL
+ bra.b xdnrm_con
+
+ global t_extdnrm
+t_extdnrm:
+ ori.l &unfinx_mask,USER_FPSR(%a6) # set UNFL/INEX2/AUNFL/AINEX
+
+xdnrm_con:
+ mov.l %a0,%a1 # make copy of src ptr
+ mov.l %d0,%d1 # make copy of rnd prec,mode
+ andi.b &0xc0,%d1 # extended precision?
+ bne.b xdnrm_sd # no
+
+# result precision is extended.
+ tst.b LOCAL_EX(%a0) # is denorm negative?
+ bpl.b xdnrm_exit # no
+
+ bset &neg_bit,FPSR_CC(%a6) # yes; set 'N' ccode bit
+ bra.b xdnrm_exit
+
+# result precision is single or double
+xdnrm_sd:
+ mov.l %a1,-(%sp)
+ tst.b LOCAL_EX(%a0) # is denorm pos or neg?
+	smi.b		%d1			# set d1 accordingly
+ bsr.l unf_sub
+ mov.l (%sp)+,%a1
+xdnrm_exit:
+ fmovm.x (%a0),&0x80 # return default result in fp0
+
+ mov.b FPCR_ENABLE(%a6),%d0
+ andi.b &0x0a,%d0 # is UNFL or INEX enabled?
+ bne.b xdnrm_ena # yes
+ rts
+
+################
+# unfl enabled #
+################
+# we have a DENORM that needs to be converted into an EXOP.
+# so, normalize the mantissa, add 0x6000 to the new exponent,
+# and return the result in fp1.
+xdnrm_ena:
+ mov.w LOCAL_EX(%a1),FP_SCR0_EX(%a6)
+ mov.l LOCAL_HI(%a1),FP_SCR0_HI(%a6)
+ mov.l LOCAL_LO(%a1),FP_SCR0_LO(%a6)
+
+ lea FP_SCR0(%a6),%a0
+ bsr.l norm # normalize mantissa
+ addi.l &0x6000,%d0 # add extra bias
+ andi.w &0x8000,FP_SCR0_EX(%a6) # keep old sign
+ or.w %d0,FP_SCR0_EX(%a6) # insert new exponent
+
+ fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
+ rts
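+
+# On the 0x6000 bias above: the EXOP convention used throughout this
+# package stores an underflowed result with its exponent raised by
+# 0x6000, so a trap handler can recover the true value by scaling the
+# EXOP down by 2^0x6000.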
+
+#################################################################
+# UNFL exception: #
+# - This routine is for cases where even an EXOP isn't #
+# large enough to hold the range of this result. #
+# In such a case, the EXOP equals zero. #
+# - Return the default result to the proper precision #
+# with the sign of this result being the same as that #
+# of the src operand. #
+# - t_unfl2() is provided to force the result sign to #
+# positive which is the desired result for fetox(). #
+#################################################################
+ global t_unfl
+t_unfl:
+ ori.l &unfinx_mask,USER_FPSR(%a6) # set UNFL/INEX2/AUNFL/AINEX
+
+ tst.b (%a0) # is result pos or neg?
+ smi.b %d1 # set d1 accordingly
+ bsr.l unf_sub # calc default unfl result
+ fmovm.x (%a0),&0x80 # return default result in fp0
+
+ fmov.s &0x00000000,%fp1 # return EXOP in fp1
+ rts
+
+# t_unfl2 ALWAYS tells unf_sub to create a positive result
+ global t_unfl2
+t_unfl2:
+ ori.l &unfinx_mask,USER_FPSR(%a6) # set UNFL/INEX2/AUNFL/AINEX
+
+	sf.b		%d1			# set d1 to represent positive
+ bsr.l unf_sub # calc default unfl result
+ fmovm.x (%a0),&0x80 # return default result in fp0
+
+	fmov.s		&0x00000000,%fp1	# return EXOP in fp1
+ rts
+
+#################################################################
+# OVFL exception: #
+# - This routine is for cases where even an EXOP isn't #
+# large enough to hold the range of this result. #
+# - Return the default result to the proper precision #
+# with the sign of this result being the same as that #
+# of the src operand. #
+# - t_ovfl2() is provided to force the result sign to #
+# positive which is the desired result for fcosh(). #
+# - t_ovfl_sc() is provided for scale() which only sets #
+# the inexact bits if the number is inexact for the #
+# precision indicated. #
+#################################################################
+
+ global t_ovfl_sc
+t_ovfl_sc:
+ ori.l &ovfl_inx_mask,USER_FPSR(%a6) # set OVFL/AOVFL/AINEX
+
+ mov.b %d0,%d1 # fetch rnd mode/prec
+ andi.b &0xc0,%d1 # extract rnd prec
+ beq.b ovfl_work # prec is extended
+
+ tst.b LOCAL_HI(%a0) # is dst a DENORM?
+ bmi.b ovfl_sc_norm # no
+
+# dst op is a DENORM. we have to normalize the mantissa to see if the
+# result would be inexact for the given precision. make a copy of the
+# dst so we don't screw up the version passed to us.
+ mov.w LOCAL_EX(%a0),FP_SCR0_EX(%a6)
+ mov.l LOCAL_HI(%a0),FP_SCR0_HI(%a6)
+ mov.l LOCAL_LO(%a0),FP_SCR0_LO(%a6)
+ lea FP_SCR0(%a6),%a0 # pass ptr to FP_SCR0
+ movm.l &0xc080,-(%sp) # save d0-d1/a0
+ bsr.l norm # normalize mantissa
+ movm.l (%sp)+,&0x0103 # restore d0-d1/a0
+
+ovfl_sc_norm:
+ cmpi.b %d1,&0x40 # is prec dbl?
+ bne.b ovfl_sc_dbl # no; sgl
+ovfl_sc_sgl:
+ tst.l LOCAL_LO(%a0) # is lo lw of sgl set?
+ bne.b ovfl_sc_inx # yes
+ tst.b 3+LOCAL_HI(%a0) # is lo byte of hi lw set?
+ bne.b ovfl_sc_inx # yes
+ bra.b ovfl_work # don't set INEX2
+ovfl_sc_dbl:
+ mov.l LOCAL_LO(%a0),%d1 # are any of lo 11 bits of
+ andi.l &0x7ff,%d1 # dbl mantissa set?
+ beq.b ovfl_work # no; don't set INEX2
+ovfl_sc_inx:
+ ori.l &inex2_mask,USER_FPSR(%a6) # set INEX2
+ bra.b ovfl_work # continue
+
+ global t_ovfl
+t_ovfl:
+ ori.l &ovfinx_mask,USER_FPSR(%a6) # set OVFL/INEX2/AOVFL/AINEX
+
+ovfl_work:
+ tst.b LOCAL_EX(%a0) # what is the sign?
+ smi.b %d1 # set d1 accordingly
+ bsr.l ovf_res # calc default ovfl result
+ mov.b %d0,FPSR_CC(%a6) # insert new ccodes
+ fmovm.x (%a0),&0x80 # return default result in fp0
+
+ fmov.s &0x00000000,%fp1 # return EXOP in fp1
+ rts
+
+# t_ovfl2 ALWAYS tells ovf_res to create a positive result
+ global t_ovfl2
+t_ovfl2:
+ ori.l &ovfinx_mask,USER_FPSR(%a6) # set OVFL/INEX2/AOVFL/AINEX
+
+ sf.b %d1 # clear sign flag for positive
+ bsr.l ovf_res # calc default ovfl result
+ mov.b %d0,FPSR_CC(%a6) # insert new ccodes
+ fmovm.x (%a0),&0x80 # return default result in fp0
+
+ fmov.s &0x00000000,%fp1 # return EXOP in fp1
+ rts
+
+#################################################################
+# t_catch(): #
+# - the last operation of a transcendental emulation #
+# routine may have caused an underflow or overflow. #
+# we find out if this occurred by doing an fsave and #
+# checking the exception bit. if one did occur, then we #
+# jump to fgen_except() which creates the default #
+# result and EXOP for us. #
+#################################################################
+ global t_catch
+t_catch:
+
+ fsave -(%sp)
+ tst.b 0x2(%sp)
+ bmi.b catch
+ add.l &0xc,%sp
+
+#################################################################
+# INEX2 exception: #
+# - The inex2 and ainex bits are set. #
+#################################################################
+ global t_inx2
+t_inx2:
+ fblt.w t_minx2
+ fbeq.w inx2_zero
+
+ global t_pinx2
+t_pinx2:
+ ori.w &inx2a_mask,2+USER_FPSR(%a6) # set INEX2/AINEX
+ rts
+
+ global t_minx2
+t_minx2:
+ ori.l &inx2a_mask+neg_mask,USER_FPSR(%a6) # set N/INEX2/AINEX
+ rts
+
+inx2_zero:
+ mov.b &z_bmask,FPSR_CC(%a6)
+ ori.w &inx2a_mask,2+USER_FPSR(%a6) # set INEX2/AINEX
+ rts
+
+# an underflow or overflow exception occurred.
+# we must set INEX/AINEX since the fmul/fdiv/fmov emulation may not!
+catch:
+ ori.w &inx2a_mask,FPSR_EXCEPT(%a6)
+catch2:
+ bsr.l fgen_except
+ add.l &0xc,%sp
+ rts
+
+ global t_catch2
+t_catch2:
+
+ fsave -(%sp)
+
+ tst.b 0x2(%sp)
+ bmi.b catch2
+ add.l &0xc,%sp
+
+ fmov.l %fpsr,%d0
+ or.l %d0,USER_FPSR(%a6)
+
+ rts
+
+#########################################################################
+
+#########################################################################
+#	unf_sub(): underflow default result calculation for transcendentals	#
+#									#
+# INPUT:								#
+#	d0   : rnd mode,precision					#
+#	d1.b : sign bit of result (0xff = (-) ; 0x00 = (+))		#
+# OUTPUT:								#
+#	a0   : points to result (in instruction memory)			#
+#########################################################################
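+#
+# One path through the tables below, for illustration: a negative
+# result (d1.b = 0xff) at extended precision with round-to-minus-
+# infinity builds index 0x12 = {sgn=1, prec=00, mode=10}. Entry 0x12
+# of tbl_unf_cc is 0x8 (N only) and entry 0x12 of tbl_unf_result is
+# 0x80000000,00000000,00000001: RM forces the smallest negative
+# extended denorm rather than -0.
+#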
+unf_sub:
+ ori.l &unfinx_mask,USER_FPSR(%a6)
+
+ andi.w &0x10,%d1 # keep sign bit in 4th spot
+
+ lsr.b &0x4,%d0 # shift rnd prec,mode to lo bits
+ andi.b &0xf,%d0 # strip hi rnd mode bit
+ or.b %d1,%d0 # concat {sgn,mode,prec}
+
+ mov.l %d0,%d1 # make a copy
+ lsl.b &0x1,%d1 # mult index 2 by 2
+
+ mov.b (tbl_unf_cc.b,%pc,%d0.w*1),FPSR_CC(%a6) # insert ccode bits
+ lea (tbl_unf_result.b,%pc,%d1.w*8),%a0 # grab result ptr
+ rts
+
+tbl_unf_cc:
+ byte 0x4, 0x4, 0x4, 0x0
+ byte 0x4, 0x4, 0x4, 0x0
+ byte 0x4, 0x4, 0x4, 0x0
+ byte 0x0, 0x0, 0x0, 0x0
+ byte 0x8+0x4, 0x8+0x4, 0x8, 0x8+0x4
+ byte 0x8+0x4, 0x8+0x4, 0x8, 0x8+0x4
+ byte 0x8+0x4, 0x8+0x4, 0x8, 0x8+0x4
+
+tbl_unf_result:
+ long 0x00000000, 0x00000000, 0x00000000, 0x0 # ZERO;ext
+ long 0x00000000, 0x00000000, 0x00000000, 0x0 # ZERO;ext
+ long 0x00000000, 0x00000000, 0x00000000, 0x0 # ZERO;ext
+ long 0x00000000, 0x00000000, 0x00000001, 0x0 # MIN; ext
+
+ long 0x3f810000, 0x00000000, 0x00000000, 0x0 # ZERO;sgl
+ long 0x3f810000, 0x00000000, 0x00000000, 0x0 # ZERO;sgl
+ long 0x3f810000, 0x00000000, 0x00000000, 0x0 # ZERO;sgl
+ long 0x3f810000, 0x00000100, 0x00000000, 0x0 # MIN; sgl
+
+ long 0x3c010000, 0x00000000, 0x00000000, 0x0 # ZERO;dbl
+	long		0x3c010000, 0x00000000, 0x00000000, 0x0 # ZERO;dbl
+ long 0x3c010000, 0x00000000, 0x00000000, 0x0 # ZERO;dbl
+ long 0x3c010000, 0x00000000, 0x00000800, 0x0 # MIN; dbl
+
+ long 0x0,0x0,0x0,0x0
+ long 0x0,0x0,0x0,0x0
+ long 0x0,0x0,0x0,0x0
+ long 0x0,0x0,0x0,0x0
+
+ long 0x80000000, 0x00000000, 0x00000000, 0x0 # ZERO;ext
+ long 0x80000000, 0x00000000, 0x00000000, 0x0 # ZERO;ext
+ long 0x80000000, 0x00000000, 0x00000001, 0x0 # MIN; ext
+ long 0x80000000, 0x00000000, 0x00000000, 0x0 # ZERO;ext
+
+ long 0xbf810000, 0x00000000, 0x00000000, 0x0 # ZERO;sgl
+ long 0xbf810000, 0x00000000, 0x00000000, 0x0 # ZERO;sgl
+ long 0xbf810000, 0x00000100, 0x00000000, 0x0 # MIN; sgl
+ long 0xbf810000, 0x00000000, 0x00000000, 0x0 # ZERO;sgl
+
+ long 0xbc010000, 0x00000000, 0x00000000, 0x0 # ZERO;dbl
+ long 0xbc010000, 0x00000000, 0x00000000, 0x0 # ZERO;dbl
+ long 0xbc010000, 0x00000000, 0x00000800, 0x0 # MIN; dbl
+ long 0xbc010000, 0x00000000, 0x00000000, 0x0 # ZERO;dbl
+
+############################################################
+
+#########################################################################
+# src_zero(): Return signed zero according to sign of src operand. #
+#########################################################################
+ global src_zero
+src_zero:
+ tst.b SRC_EX(%a0) # get sign of src operand
+ bmi.b ld_mzero # if neg, load neg zero
+
+#
+# ld_pzero(): return a positive zero.
+#
+ global ld_pzero
+ld_pzero:
+ fmov.s &0x00000000,%fp0 # load +0
+ mov.b &z_bmask,FPSR_CC(%a6) # set 'Z' ccode bit
+ rts
+
+# ld_mzero(): return a negative zero.
+ global ld_mzero
+ld_mzero:
+ fmov.s &0x80000000,%fp0 # load -0
+ mov.b &neg_bmask+z_bmask,FPSR_CC(%a6) # set 'N','Z' ccode bits
+ rts
+
+#########################################################################
+# dst_zero(): Return signed zero according to sign of dst operand. #
+#########################################################################
+ global dst_zero
+dst_zero:
+ tst.b DST_EX(%a1) # get sign of dst operand
+ bmi.b ld_mzero # if neg, load neg zero
+ bra.b ld_pzero # load positive zero
+
+#########################################################################
+# src_inf(): Return signed inf according to sign of src operand. #
+#########################################################################
+ global src_inf
+src_inf:
+ tst.b SRC_EX(%a0) # get sign of src operand
+ bmi.b ld_minf # if negative branch
+
+#
+# ld_pinf(): return a positive infinity.
+#
+ global ld_pinf
+ld_pinf:
+ fmov.s &0x7f800000,%fp0 # load +INF
+ mov.b &inf_bmask,FPSR_CC(%a6) # set 'INF' ccode bit
+ rts
+
+#
+# ld_minf():return a negative infinity.
+#
+ global ld_minf
+ld_minf:
+ fmov.s &0xff800000,%fp0 # load -INF
+ mov.b &neg_bmask+inf_bmask,FPSR_CC(%a6) # set 'N','I' ccode bits
+ rts
+
+#########################################################################
+# dst_inf(): Return signed inf according to sign of dst operand. #
+#########################################################################
+ global dst_inf
+dst_inf:
+ tst.b DST_EX(%a1) # get sign of dst operand
+ bmi.b ld_minf # if negative branch
+ bra.b ld_pinf
+
+ global szr_inf
+#################################################################
+# szr_inf(): Return +ZERO for a negative src operand or #
+# +INF for a positive src operand. #
+# Routine used for fetox, ftwotox, and ftentox. #
+#################################################################
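+# E.g., from the fetox dispatch later in this file: fetox(-INF) lands
+# here and returns +0.0, while fetox(+INF) returns +INF.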
+szr_inf:
+ tst.b SRC_EX(%a0) # check sign of source
+ bmi.b ld_pzero
+ bra.b ld_pinf
+
+#########################################################################
+# sopr_inf(): Return +INF for a positive src operand or #
+# jump to operand error routine for a negative src operand. #
+# Routine used for flogn, flognp1, flog10, and flog2. #
+#########################################################################
+ global sopr_inf
+sopr_inf:
+ tst.b SRC_EX(%a0) # check sign of source
+ bmi.w t_operr
+ bra.b ld_pinf
+
+#################################################################
+# setoxm1i(): Return minus one for a negative src operand or #
+# positive infinity for a positive src operand. #
+# Routine used for fetoxm1. #
+#################################################################
+ global setoxm1i
+setoxm1i:
+ tst.b SRC_EX(%a0) # check sign of source
+ bmi.b ld_mone
+ bra.b ld_pinf
+
+#########################################################################
+# src_one(): Return signed one according to sign of src operand. #
+#########################################################################
+ global src_one
+src_one:
+ tst.b SRC_EX(%a0) # check sign of source
+ bmi.b ld_mone
+
+#
+# ld_pone(): return positive one.
+#
+ global ld_pone
+ld_pone:
+ fmov.s &0x3f800000,%fp0 # load +1
+ clr.b FPSR_CC(%a6)
+ rts
+
+#
+# ld_mone(): return negative one.
+#
+ global ld_mone
+ld_mone:
+ fmov.s &0xbf800000,%fp0 # load -1
+ mov.b &neg_bmask,FPSR_CC(%a6) # set 'N' ccode bit
+ rts
+
+ppiby2: long 0x3fff0000, 0xc90fdaa2, 0x2168c235
+mpiby2: long 0xbfff0000, 0xc90fdaa2, 0x2168c235
+
+#################################################################
+# spi_2(): Return signed PI/2 according to sign of src operand. #
+#################################################################
+ global spi_2
+spi_2:
+ tst.b SRC_EX(%a0) # check sign of source
+ bmi.b ld_mpi2
+
+#
+# ld_ppi2(): return positive PI/2.
+#
+ global ld_ppi2
+ld_ppi2:
+ fmov.l %d0,%fpcr
+ fmov.x ppiby2(%pc),%fp0 # load +pi/2
+ bra.w t_pinx2 # set INEX2
+
+#
+# ld_mpi2(): return negative PI/2.
+#
+ global ld_mpi2
+ld_mpi2:
+ fmov.l %d0,%fpcr
+ fmov.x mpiby2(%pc),%fp0 # load -pi/2
+ bra.w t_minx2 # set INEX2
+
+####################################################
+# The following routines give support for fsincos. #
+####################################################
+
+#
+# ssincosz(): When the src operand is ZERO, store a one in the
+# cosine register and return a ZERO in fp0 w/ the same sign
+# as the src operand.
+#
+ global ssincosz
+ssincosz:
+ fmov.s &0x3f800000,%fp1
+ tst.b SRC_EX(%a0) # test sign
+ bpl.b sincoszp
+ fmov.s &0x80000000,%fp0 # return sin result in fp0
+ mov.b &z_bmask+neg_bmask,FPSR_CC(%a6)
+ bra.b sto_cos # store cosine result
+sincoszp:
+ fmov.s &0x00000000,%fp0 # return sin result in fp0
+ mov.b &z_bmask,FPSR_CC(%a6)
+ bra.b sto_cos # store cosine result
+
+#
+# ssincosi(): When the src operand is INF, store a QNAN in the cosine
+# register and jump to the operand error routine for negative
+# src operands.
+#
+ global ssincosi
+ssincosi:
+ fmov.x qnan(%pc),%fp1 # load NAN
+ bsr.l sto_cos # store cosine result
+ bra.w t_operr
+
+#
+# ssincosqnan(): When the src operand is a QNAN, store the QNAN in the cosine
+# register and branch to the src QNAN routine.
+#
+ global ssincosqnan
+ssincosqnan:
+ fmov.x LOCAL_EX(%a0),%fp1
+ bsr.l sto_cos
+ bra.w src_qnan
+
+#
+# ssincossnan(): When the src operand is an SNAN, store the SNAN w/ the SNAN bit set
+# in the cosine register and branch to the src SNAN routine.
+#
+ global ssincossnan
+ssincossnan:
+ fmov.x LOCAL_EX(%a0),%fp1
+ bsr.l sto_cos
+ bra.w src_snan
+
+########################################################################
+
+#########################################################################
+# sto_cos(): store fp1 to the fpreg designated by the CMDREG dst field. #
+# fp1 holds the result of the cosine portion of ssincos(). #
+# the value in fp1 will not take any exceptions when moved. #
+# INPUT: #
+# fp1 : fp value to store #
+# MODIFIED: #
+# d0 #
+#########################################################################
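+# For illustration: a CMDREG dst field of 3 sends the jump table
+# below to sto_cos_3, which copies fp1 straight into fp3. Fields 0
+# and 1 instead write the EXC_FP0/EXC_FP1 save slots in the frame
+# rather than the live registers.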
+ global sto_cos
+sto_cos:
+ mov.b 1+EXC_CMDREG(%a6),%d0
+ andi.w &0x7,%d0
+ mov.w (tbl_sto_cos.b,%pc,%d0.w*2),%d0
+ jmp (tbl_sto_cos.b,%pc,%d0.w*1)
+
+tbl_sto_cos:
+ short sto_cos_0 - tbl_sto_cos
+ short sto_cos_1 - tbl_sto_cos
+ short sto_cos_2 - tbl_sto_cos
+ short sto_cos_3 - tbl_sto_cos
+ short sto_cos_4 - tbl_sto_cos
+ short sto_cos_5 - tbl_sto_cos
+ short sto_cos_6 - tbl_sto_cos
+ short sto_cos_7 - tbl_sto_cos
+
+sto_cos_0:
+ fmovm.x &0x40,EXC_FP0(%a6)
+ rts
+sto_cos_1:
+ fmovm.x &0x40,EXC_FP1(%a6)
+ rts
+sto_cos_2:
+ fmov.x %fp1,%fp2
+ rts
+sto_cos_3:
+ fmov.x %fp1,%fp3
+ rts
+sto_cos_4:
+ fmov.x %fp1,%fp4
+ rts
+sto_cos_5:
+ fmov.x %fp1,%fp5
+ rts
+sto_cos_6:
+ fmov.x %fp1,%fp6
+ rts
+sto_cos_7:
+ fmov.x %fp1,%fp7
+ rts
+
+##################################################################
+ global smod_sdnrm
+ global smod_snorm
+smod_sdnrm:
+smod_snorm:
+ mov.b DTAG(%a6),%d1
+ beq.l smod
+ cmpi.b %d1,&ZERO
+ beq.w smod_zro
+ cmpi.b %d1,&INF
+ beq.l t_operr
+ cmpi.b %d1,&DENORM
+ beq.l smod
+ cmpi.b %d1,&SNAN
+ beq.l dst_snan
+ bra.l dst_qnan
+
+ global smod_szero
+smod_szero:
+ mov.b DTAG(%a6),%d1
+ beq.l t_operr
+ cmpi.b %d1,&ZERO
+ beq.l t_operr
+ cmpi.b %d1,&INF
+ beq.l t_operr
+ cmpi.b %d1,&DENORM
+ beq.l t_operr
+ cmpi.b %d1,&QNAN
+ beq.l dst_qnan
+ bra.l dst_snan
+
+ global smod_sinf
+smod_sinf:
+ mov.b DTAG(%a6),%d1
+ beq.l smod_fpn
+ cmpi.b %d1,&ZERO
+ beq.l smod_zro
+ cmpi.b %d1,&INF
+ beq.l t_operr
+ cmpi.b %d1,&DENORM
+ beq.l smod_fpn
+ cmpi.b %d1,&QNAN
+ beq.l dst_qnan
+ bra.l dst_snan
+
+smod_zro:
+srem_zro:
+ mov.b SRC_EX(%a0),%d1 # get src sign
+ mov.b DST_EX(%a1),%d0 # get dst sign
+ eor.b %d0,%d1 # get qbyte sign
+ andi.b &0x80,%d1
+ mov.b %d1,FPSR_QBYTE(%a6)
+ tst.b %d0
+ bpl.w ld_pzero
+ bra.w ld_mzero
+
+smod_fpn:
+srem_fpn:
+ clr.b FPSR_QBYTE(%a6)
+ mov.l %d0,-(%sp)
+ mov.b SRC_EX(%a0),%d1 # get src sign
+ mov.b DST_EX(%a1),%d0 # get dst sign
+ eor.b %d0,%d1 # get qbyte sign
+ andi.b &0x80,%d1
+ mov.b %d1,FPSR_QBYTE(%a6)
+ cmpi.b DTAG(%a6),&DENORM
+ bne.b smod_nrm
+ lea DST(%a1),%a0
+ mov.l (%sp)+,%d0
+ bra t_resdnrm
+smod_nrm:
+ fmov.l (%sp)+,%fpcr
+ fmov.x DST(%a1),%fp0
+ tst.b DST_EX(%a1)
+ bmi.b smod_nrm_neg
+ rts
+
+smod_nrm_neg:
+ mov.b &neg_bmask,FPSR_CC(%a6) # set 'N' ccode
+ rts
+
+#########################################################################
+ global srem_snorm
+ global srem_sdnrm
+srem_sdnrm:
+srem_snorm:
+ mov.b DTAG(%a6),%d1
+ beq.l srem
+ cmpi.b %d1,&ZERO
+ beq.w srem_zro
+ cmpi.b %d1,&INF
+ beq.l t_operr
+ cmpi.b %d1,&DENORM
+ beq.l srem
+ cmpi.b %d1,&QNAN
+ beq.l dst_qnan
+ bra.l dst_snan
+
+ global srem_szero
+srem_szero:
+ mov.b DTAG(%a6),%d1
+ beq.l t_operr
+ cmpi.b %d1,&ZERO
+ beq.l t_operr
+ cmpi.b %d1,&INF
+ beq.l t_operr
+ cmpi.b %d1,&DENORM
+ beq.l t_operr
+ cmpi.b %d1,&QNAN
+ beq.l dst_qnan
+ bra.l dst_snan
+
+ global srem_sinf
+srem_sinf:
+ mov.b DTAG(%a6),%d1
+ beq.w srem_fpn
+ cmpi.b %d1,&ZERO
+ beq.w srem_zro
+ cmpi.b %d1,&INF
+ beq.l t_operr
+ cmpi.b %d1,&DENORM
+ beq.l srem_fpn
+ cmpi.b %d1,&QNAN
+ beq.l dst_qnan
+ bra.l dst_snan
+
+#########################################################################
+ global sscale_snorm
+ global sscale_sdnrm
+sscale_snorm:
+sscale_sdnrm:
+ mov.b DTAG(%a6),%d1
+ beq.l sscale
+ cmpi.b %d1,&ZERO
+ beq.l dst_zero
+ cmpi.b %d1,&INF
+ beq.l dst_inf
+ cmpi.b %d1,&DENORM
+ beq.l sscale
+ cmpi.b %d1,&QNAN
+ beq.l dst_qnan
+ bra.l dst_snan
+
+ global sscale_szero
+sscale_szero:
+ mov.b DTAG(%a6),%d1
+ beq.l sscale
+ cmpi.b %d1,&ZERO
+ beq.l dst_zero
+ cmpi.b %d1,&INF
+ beq.l dst_inf
+ cmpi.b %d1,&DENORM
+ beq.l sscale
+ cmpi.b %d1,&QNAN
+ beq.l dst_qnan
+ bra.l dst_snan
+
+ global sscale_sinf
+sscale_sinf:
+ mov.b DTAG(%a6),%d1
+ beq.l t_operr
+ cmpi.b %d1,&QNAN
+ beq.l dst_qnan
+ cmpi.b %d1,&SNAN
+ beq.l dst_snan
+ bra.l t_operr
+
+########################################################################
+
+#
+# sop_sqnan(): The src op for frem/fmod/fscale was a QNAN.
+#
+ global sop_sqnan
+sop_sqnan:
+ mov.b DTAG(%a6),%d1
+ cmpi.b %d1,&QNAN
+ beq.b dst_qnan
+ cmpi.b %d1,&SNAN
+ beq.b dst_snan
+ bra.b src_qnan
+
+#
+# sop_ssnan(): The src op for frem/fmod/fscale was an SNAN.
+#
+ global sop_ssnan
+sop_ssnan:
+ mov.b DTAG(%a6),%d1
+ cmpi.b %d1,&QNAN
+ beq.b dst_qnan_src_snan
+ cmpi.b %d1,&SNAN
+ beq.b dst_snan
+ bra.b src_snan
+
+dst_qnan_src_snan:
+ ori.l &snaniop_mask,USER_FPSR(%a6) # set NAN/SNAN/AIOP
+ bra.b dst_qnan
+
+#
+# dst_snan(): Return the dst SNAN w/ the SNAN bit set.
+#
+ global dst_snan
+dst_snan:
+ fmov.x DST(%a1),%fp0 # the fmove sets the SNAN bit
+ fmov.l %fpsr,%d0 # catch resulting status
+ or.l %d0,USER_FPSR(%a6) # store status
+ rts
+
+#
+# dst_qnan(): Return the dst QNAN.
+#
+ global dst_qnan
+dst_qnan:
+ fmov.x DST(%a1),%fp0 # return the non-signalling nan
+ tst.b DST_EX(%a1) # set ccodes according to QNAN sign
+ bmi.b dst_qnan_m
+dst_qnan_p:
+ mov.b &nan_bmask,FPSR_CC(%a6)
+ rts
+dst_qnan_m:
+ mov.b &neg_bmask+nan_bmask,FPSR_CC(%a6)
+ rts
+
+#
+# src_snan(): Return the src SNAN w/ the SNAN bit set.
+#
+ global src_snan
+src_snan:
+ fmov.x SRC(%a0),%fp0 # the fmove sets the SNAN bit
+ fmov.l %fpsr,%d0 # catch resulting status
+ or.l %d0,USER_FPSR(%a6) # store status
+ rts
+
+#
+# src_qnan(): Return the src QNAN.
+#
+ global src_qnan
+src_qnan:
+ fmov.x SRC(%a0),%fp0 # return the non-signalling nan
+ tst.b SRC_EX(%a0) # set ccodes according to QNAN sign
+	bmi.b		src_qnan_m
+src_qnan_p:
+ mov.b &nan_bmask,FPSR_CC(%a6)
+ rts
+src_qnan_m:
+ mov.b &neg_bmask+nan_bmask,FPSR_CC(%a6)
+ rts
+
+#
+# fkern2.s:
+#	These entry points are used by the exception handler
+#	routines where an instruction is selected by an index into
+#	a large jump table corresponding to a given instruction that
+#	has been decoded. Flow continues here, where we now decode
+#	further according to the source operand type.
+#
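+
+# For example, fsinh just below: a NORM source (STAG = 0) falls
+# through to the ssinh emulation core; a ZERO source short-circuits
+# to src_zero since sinh(+-0) = +-0; and INF, DENORM, QNAN, and SNAN
+# sources each take their own canned path.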
+
+ global fsinh
+fsinh:
+ mov.b STAG(%a6),%d1
+ beq.l ssinh
+ cmpi.b %d1,&ZERO
+ beq.l src_zero
+ cmpi.b %d1,&INF
+ beq.l src_inf
+ cmpi.b %d1,&DENORM
+ beq.l ssinhd
+ cmpi.b %d1,&QNAN
+ beq.l src_qnan
+ bra.l src_snan
+
+ global flognp1
+flognp1:
+ mov.b STAG(%a6),%d1
+ beq.l slognp1
+ cmpi.b %d1,&ZERO
+ beq.l src_zero
+ cmpi.b %d1,&INF
+ beq.l sopr_inf
+ cmpi.b %d1,&DENORM
+ beq.l slognp1d
+ cmpi.b %d1,&QNAN
+ beq.l src_qnan
+ bra.l src_snan
+
+ global fetoxm1
+fetoxm1:
+ mov.b STAG(%a6),%d1
+ beq.l setoxm1
+ cmpi.b %d1,&ZERO
+ beq.l src_zero
+ cmpi.b %d1,&INF
+ beq.l setoxm1i
+ cmpi.b %d1,&DENORM
+ beq.l setoxm1d
+ cmpi.b %d1,&QNAN
+ beq.l src_qnan
+ bra.l src_snan
+
+ global ftanh
+ftanh:
+ mov.b STAG(%a6),%d1
+ beq.l stanh
+ cmpi.b %d1,&ZERO
+ beq.l src_zero
+ cmpi.b %d1,&INF
+ beq.l src_one
+ cmpi.b %d1,&DENORM
+ beq.l stanhd
+ cmpi.b %d1,&QNAN
+ beq.l src_qnan
+ bra.l src_snan
+
+ global fatan
+fatan:
+ mov.b STAG(%a6),%d1
+ beq.l satan
+ cmpi.b %d1,&ZERO
+ beq.l src_zero
+ cmpi.b %d1,&INF
+ beq.l spi_2
+ cmpi.b %d1,&DENORM
+ beq.l satand
+ cmpi.b %d1,&QNAN
+ beq.l src_qnan
+ bra.l src_snan
+
+ global fasin
+fasin:
+ mov.b STAG(%a6),%d1
+ beq.l sasin
+ cmpi.b %d1,&ZERO
+ beq.l src_zero
+ cmpi.b %d1,&INF
+ beq.l t_operr
+ cmpi.b %d1,&DENORM
+ beq.l sasind
+ cmpi.b %d1,&QNAN
+ beq.l src_qnan
+ bra.l src_snan
+
+ global fatanh
+fatanh:
+ mov.b STAG(%a6),%d1
+ beq.l satanh
+ cmpi.b %d1,&ZERO
+ beq.l src_zero
+ cmpi.b %d1,&INF
+ beq.l t_operr
+ cmpi.b %d1,&DENORM
+ beq.l satanhd
+ cmpi.b %d1,&QNAN
+ beq.l src_qnan
+ bra.l src_snan
+
+ global fsine
+fsine:
+ mov.b STAG(%a6),%d1
+ beq.l ssin
+ cmpi.b %d1,&ZERO
+ beq.l src_zero
+ cmpi.b %d1,&INF
+ beq.l t_operr
+ cmpi.b %d1,&DENORM
+ beq.l ssind
+ cmpi.b %d1,&QNAN
+ beq.l src_qnan
+ bra.l src_snan
+
+ global ftan
+ftan:
+ mov.b STAG(%a6),%d1
+ beq.l stan
+ cmpi.b %d1,&ZERO
+ beq.l src_zero
+ cmpi.b %d1,&INF
+ beq.l t_operr
+ cmpi.b %d1,&DENORM
+ beq.l stand
+ cmpi.b %d1,&QNAN
+ beq.l src_qnan
+ bra.l src_snan
+
+ global fetox
+fetox:
+ mov.b STAG(%a6),%d1
+ beq.l setox
+ cmpi.b %d1,&ZERO
+ beq.l ld_pone
+ cmpi.b %d1,&INF
+ beq.l szr_inf
+ cmpi.b %d1,&DENORM
+ beq.l setoxd
+ cmpi.b %d1,&QNAN
+ beq.l src_qnan
+ bra.l src_snan
+
+ global ftwotox
+ftwotox:
+ mov.b STAG(%a6),%d1
+ beq.l stwotox
+ cmpi.b %d1,&ZERO
+ beq.l ld_pone
+ cmpi.b %d1,&INF
+ beq.l szr_inf
+ cmpi.b %d1,&DENORM
+ beq.l stwotoxd
+ cmpi.b %d1,&QNAN
+ beq.l src_qnan
+ bra.l src_snan
+
+ global ftentox
+ftentox:
+ mov.b STAG(%a6),%d1
+ beq.l stentox
+ cmpi.b %d1,&ZERO
+ beq.l ld_pone
+ cmpi.b %d1,&INF
+ beq.l szr_inf
+ cmpi.b %d1,&DENORM
+ beq.l stentoxd
+ cmpi.b %d1,&QNAN
+ beq.l src_qnan
+ bra.l src_snan
+
+ global flogn
+flogn:
+ mov.b STAG(%a6),%d1
+ beq.l slogn
+ cmpi.b %d1,&ZERO
+ beq.l t_dz2
+ cmpi.b %d1,&INF
+ beq.l sopr_inf
+ cmpi.b %d1,&DENORM
+ beq.l slognd
+ cmpi.b %d1,&QNAN
+ beq.l src_qnan
+ bra.l src_snan
+
+ global flog10
+flog10:
+ mov.b STAG(%a6),%d1
+ beq.l slog10
+ cmpi.b %d1,&ZERO
+ beq.l t_dz2
+ cmpi.b %d1,&INF
+ beq.l sopr_inf
+ cmpi.b %d1,&DENORM
+ beq.l slog10d
+ cmpi.b %d1,&QNAN
+ beq.l src_qnan
+ bra.l src_snan
+
+ global flog2
+flog2:
+ mov.b STAG(%a6),%d1
+ beq.l slog2
+ cmpi.b %d1,&ZERO
+ beq.l t_dz2
+ cmpi.b %d1,&INF
+ beq.l sopr_inf
+ cmpi.b %d1,&DENORM
+ beq.l slog2d
+ cmpi.b %d1,&QNAN
+ beq.l src_qnan
+ bra.l src_snan
+
+ global fcosh
+fcosh:
+ mov.b STAG(%a6),%d1
+ beq.l scosh
+ cmpi.b %d1,&ZERO
+ beq.l ld_pone
+ cmpi.b %d1,&INF
+ beq.l ld_pinf
+ cmpi.b %d1,&DENORM
+ beq.l scoshd
+ cmpi.b %d1,&QNAN
+ beq.l src_qnan
+ bra.l src_snan
+
+ global facos
+facos:
+ mov.b STAG(%a6),%d1
+ beq.l sacos
+ cmpi.b %d1,&ZERO
+ beq.l ld_ppi2
+ cmpi.b %d1,&INF
+ beq.l t_operr
+ cmpi.b %d1,&DENORM
+ beq.l sacosd
+ cmpi.b %d1,&QNAN
+ beq.l src_qnan
+ bra.l src_snan
+
+ global fcos
+fcos:
+ mov.b STAG(%a6),%d1
+ beq.l scos
+ cmpi.b %d1,&ZERO
+ beq.l ld_pone
+ cmpi.b %d1,&INF
+ beq.l t_operr
+ cmpi.b %d1,&DENORM
+ beq.l scosd
+ cmpi.b %d1,&QNAN
+ beq.l src_qnan
+ bra.l src_snan
+
+ global fgetexp
+fgetexp:
+ mov.b STAG(%a6),%d1
+ beq.l sgetexp
+ cmpi.b %d1,&ZERO
+ beq.l src_zero
+ cmpi.b %d1,&INF
+ beq.l t_operr
+ cmpi.b %d1,&DENORM
+ beq.l sgetexpd
+ cmpi.b %d1,&QNAN
+ beq.l src_qnan
+ bra.l src_snan
+
+ global fgetman
+fgetman:
+ mov.b STAG(%a6),%d1
+ beq.l sgetman
+ cmpi.b %d1,&ZERO
+ beq.l src_zero
+ cmpi.b %d1,&INF
+ beq.l t_operr
+ cmpi.b %d1,&DENORM
+ beq.l sgetmand
+ cmpi.b %d1,&QNAN
+ beq.l src_qnan
+ bra.l src_snan
+
+ global fsincos
+fsincos:
+ mov.b STAG(%a6),%d1
+ beq.l ssincos
+ cmpi.b %d1,&ZERO
+ beq.l ssincosz
+ cmpi.b %d1,&INF
+ beq.l ssincosi
+ cmpi.b %d1,&DENORM
+ beq.l ssincosd
+ cmpi.b %d1,&QNAN
+ beq.l ssincosqnan
+ bra.l ssincossnan
+
+ global fmod
+fmod:
+ mov.b STAG(%a6),%d1
+ beq.l smod_snorm
+ cmpi.b %d1,&ZERO
+ beq.l smod_szero
+ cmpi.b %d1,&INF
+ beq.l smod_sinf
+ cmpi.b %d1,&DENORM
+ beq.l smod_sdnrm
+ cmpi.b %d1,&QNAN
+ beq.l sop_sqnan
+ bra.l sop_ssnan
+
+ global frem
+frem:
+ mov.b STAG(%a6),%d1
+ beq.l srem_snorm
+ cmpi.b %d1,&ZERO
+ beq.l srem_szero
+ cmpi.b %d1,&INF
+ beq.l srem_sinf
+ cmpi.b %d1,&DENORM
+ beq.l srem_sdnrm
+ cmpi.b %d1,&QNAN
+ beq.l sop_sqnan
+ bra.l sop_ssnan
+
+ global fscale
+fscale:
+ mov.b STAG(%a6),%d1
+ beq.l sscale_snorm
+ cmpi.b %d1,&ZERO
+ beq.l sscale_szero
+ cmpi.b %d1,&INF
+ beq.l sscale_sinf
+ cmpi.b %d1,&DENORM
+ beq.l sscale_sdnrm
+ cmpi.b %d1,&QNAN
+ beq.l sop_sqnan
+ bra.l sop_ssnan
+
+#########################################################################
+# XDEF **************************************************************** #
+# fgen_except(): catch an exception during transcendental #
+# emulation #
+# #
+# XREF **************************************************************** #
+# fmul() - emulate a multiply instruction #
+# fadd() - emulate an add instruction #
+# fin() - emulate an fmove instruction #
+# #
+# INPUT *************************************************************** #
+# fp0 = destination operand #
+#	d1 = type of instruction that took exception			#
+# fsave frame = source operand #
+# #
+# OUTPUT ************************************************************** #
+# fp0 = result #
+# fp1 = EXOP #
+# #
+# ALGORITHM *********************************************************** #
+# An exception occurred on the last instruction of the #
+# transcendental emulation. hopefully, this won't be happening much #
+# because it will be VERY slow. #
+# The only exceptions capable of passing through here are #
+# Overflow, Underflow, and Unsupported Data Type. #
+# #
+#########################################################################
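+
+# For example (see the stentox tail earlier in this file): if the
+# final fmul of a 10^x emulation underflows, t_catch spots the
+# exception in the fsave frame and arrives here with d1 = FMUL_OP,
+# so fge_fmul re-runs the operation through the full fmul()
+# emulation to build the default result and the EXOP.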
+
+ global fgen_except
+fgen_except:
+ cmpi.b 0x3(%sp),&0x7 # is exception UNSUPP?
+ beq.b fge_unsupp # yes
+
+ mov.b &NORM,STAG(%a6)
+
+fge_cont:
+ mov.b &NORM,DTAG(%a6)
+
+# ok, I have a problem with putting the dst op at FP_DST. the emulation
+# routines aren't supposed to alter the operands but we've just squashed
+# FP_DST here...
+
+# 8/17/93 - this turns out to be more of a "cleanliness" issue
+# than a potential bug. to begin with, only the dyadic functions
+# frem, fmod, and fscale would get the dst trashed here. But, for
+# the 060SP, the FP_DST is never used again anyway.
+ fmovm.x &0x80,FP_DST(%a6) # dst op is in fp0
+
+ lea 0x4(%sp),%a0 # pass: ptr to src op
+ lea FP_DST(%a6),%a1 # pass: ptr to dst op
+
+ cmpi.b %d1,&FMOV_OP
+ beq.b fge_fin # it was an "fmov"
+ cmpi.b %d1,&FADD_OP
+ beq.b fge_fadd # it was an "fadd"
+fge_fmul:
+ bsr.l fmul
+ rts
+fge_fadd:
+ bsr.l fadd
+ rts
+fge_fin:
+ bsr.l fin
+ rts
+
+fge_unsupp:
+ mov.b &DENORM,STAG(%a6)
+ bra.b fge_cont
+
+#
+# This table holds the offsets of the emulation routines for each individual
+# math operation relative to the address of this table. Included are
+# routines like fadd/fmul/fabs as well as the transcendentals.
+# The location within the table is determined by the extension bits of the
+# operation longword.
+#
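+
+# E.g., extension bits 0x02 select the fsinh entry and 0x21 selects
+# fmod; encodings with no instruction hold a zero offset (they are
+# never dispatched).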
+
+ swbeg &109
+tbl_unsupp:
+ long fin - tbl_unsupp # 00: fmove
+ long fint - tbl_unsupp # 01: fint
+ long fsinh - tbl_unsupp # 02: fsinh
+ long fintrz - tbl_unsupp # 03: fintrz
+ long fsqrt - tbl_unsupp # 04: fsqrt
+ long tbl_unsupp - tbl_unsupp
+ long flognp1 - tbl_unsupp # 06: flognp1
+ long tbl_unsupp - tbl_unsupp
+ long fetoxm1 - tbl_unsupp # 08: fetoxm1
+ long ftanh - tbl_unsupp # 09: ftanh
+ long fatan - tbl_unsupp # 0a: fatan
+ long tbl_unsupp - tbl_unsupp
+ long fasin - tbl_unsupp # 0c: fasin
+ long fatanh - tbl_unsupp # 0d: fatanh
+ long fsine - tbl_unsupp # 0e: fsin
+ long ftan - tbl_unsupp # 0f: ftan
+ long fetox - tbl_unsupp # 10: fetox
+ long ftwotox - tbl_unsupp # 11: ftwotox
+ long ftentox - tbl_unsupp # 12: ftentox
+ long tbl_unsupp - tbl_unsupp
+ long flogn - tbl_unsupp # 14: flogn
+ long flog10 - tbl_unsupp # 15: flog10
+ long flog2 - tbl_unsupp # 16: flog2
+ long tbl_unsupp - tbl_unsupp
+ long fabs - tbl_unsupp # 18: fabs
+ long fcosh - tbl_unsupp # 19: fcosh
+ long fneg - tbl_unsupp # 1a: fneg
+ long tbl_unsupp - tbl_unsupp
+ long facos - tbl_unsupp # 1c: facos
+ long fcos - tbl_unsupp # 1d: fcos
+ long fgetexp - tbl_unsupp # 1e: fgetexp
+ long fgetman - tbl_unsupp # 1f: fgetman
+ long fdiv - tbl_unsupp # 20: fdiv
+ long fmod - tbl_unsupp # 21: fmod
+ long fadd - tbl_unsupp # 22: fadd
+ long fmul - tbl_unsupp # 23: fmul
+ long fsgldiv - tbl_unsupp # 24: fsgldiv
+ long frem - tbl_unsupp # 25: frem
+ long fscale - tbl_unsupp # 26: fscale
+ long fsglmul - tbl_unsupp # 27: fsglmul
+ long fsub - tbl_unsupp # 28: fsub
+ long tbl_unsupp - tbl_unsupp
+ long tbl_unsupp - tbl_unsupp
+ long tbl_unsupp - tbl_unsupp
+ long tbl_unsupp - tbl_unsupp
+ long tbl_unsupp - tbl_unsupp
+ long tbl_unsupp - tbl_unsupp
+ long tbl_unsupp - tbl_unsupp
+ long fsincos - tbl_unsupp # 30: fsincos
+ long fsincos - tbl_unsupp # 31: fsincos
+ long fsincos - tbl_unsupp # 32: fsincos
+ long fsincos - tbl_unsupp # 33: fsincos
+ long fsincos - tbl_unsupp # 34: fsincos
+ long fsincos - tbl_unsupp # 35: fsincos
+ long fsincos - tbl_unsupp # 36: fsincos
+ long fsincos - tbl_unsupp # 37: fsincos
+ long fcmp - tbl_unsupp # 38: fcmp
+ long tbl_unsupp - tbl_unsupp
+ long ftst - tbl_unsupp # 3a: ftst
+ long tbl_unsupp - tbl_unsupp
+ long tbl_unsupp - tbl_unsupp
+ long tbl_unsupp - tbl_unsupp
+ long tbl_unsupp - tbl_unsupp
+ long tbl_unsupp - tbl_unsupp
+ long fsin - tbl_unsupp # 40: fsmove
+ long fssqrt - tbl_unsupp # 41: fssqrt
+ long tbl_unsupp - tbl_unsupp
+ long tbl_unsupp - tbl_unsupp
+ long fdin - tbl_unsupp # 44: fdmove
+ long fdsqrt - tbl_unsupp # 45: fdsqrt
+ long tbl_unsupp - tbl_unsupp
+ long tbl_unsupp - tbl_unsupp
+ long tbl_unsupp - tbl_unsupp
+ long tbl_unsupp - tbl_unsupp
+ long tbl_unsupp - tbl_unsupp
+ long tbl_unsupp - tbl_unsupp
+ long tbl_unsupp - tbl_unsupp
+ long tbl_unsupp - tbl_unsupp
+ long tbl_unsupp - tbl_unsupp
+ long tbl_unsupp - tbl_unsupp
+ long tbl_unsupp - tbl_unsupp
+ long tbl_unsupp - tbl_unsupp
+ long tbl_unsupp - tbl_unsupp
+ long tbl_unsupp - tbl_unsupp
+ long tbl_unsupp - tbl_unsupp
+ long tbl_unsupp - tbl_unsupp
+ long tbl_unsupp - tbl_unsupp
+ long tbl_unsupp - tbl_unsupp
+ long fsabs - tbl_unsupp # 58: fsabs
+ long tbl_unsupp - tbl_unsupp
+ long fsneg - tbl_unsupp # 5a: fsneg
+ long tbl_unsupp - tbl_unsupp
+ long fdabs - tbl_unsupp # 5c: fdabs
+ long tbl_unsupp - tbl_unsupp
+ long fdneg - tbl_unsupp # 5e: fdneg
+ long tbl_unsupp - tbl_unsupp
+ long fsdiv - tbl_unsupp # 60: fsdiv
+ long tbl_unsupp - tbl_unsupp
+ long fsadd - tbl_unsupp # 62: fsadd
+ long fsmul - tbl_unsupp # 63: fsmul
+ long fddiv - tbl_unsupp # 64: fddiv
+ long tbl_unsupp - tbl_unsupp
+ long fdadd - tbl_unsupp # 66: fdadd
+ long fdmul - tbl_unsupp # 67: fdmul
+ long fssub - tbl_unsupp # 68: fssub
+ long tbl_unsupp - tbl_unsupp
+ long tbl_unsupp - tbl_unsupp
+ long tbl_unsupp - tbl_unsupp
+ long fdsub - tbl_unsupp # 6c: fdsub
+
+#########################################################################
+# XDEF **************************************************************** #
+# fmul(): emulates the fmul instruction #
+# fsmul(): emulates the fsmul instruction #
+# fdmul(): emulates the fdmul instruction #
+# #
+# XREF **************************************************************** #
+# scale_to_zero_src() - scale src exponent to zero #
+# scale_to_zero_dst() - scale dst exponent to zero #
+# unf_res() - return default underflow result #
+# ovf_res() - return default overflow result #
+# res_qnan() - return QNAN result #
+# res_snan() - return SNAN result #
+# #
+# INPUT *************************************************************** #
+# a0 = pointer to extended precision source operand #
+# a1 = pointer to extended precision destination operand #
+# d0 = rnd prec,mode #
+# #
+# OUTPUT ************************************************************** #
+# fp0 = result #
+# fp1 = EXOP (if exception occurred) #
+# #
+# ALGORITHM *********************************************************** #
+# Handle NANs, infinities, and zeroes as special cases. Divide #
+# norms/denorms into ext/sgl/dbl precision. #
+# For norms/denorms, scale the exponents such that a multiply #
+# instruction won't cause an exception. Use the regular fmul to #
+# compute a result. Check if the regular operands would have taken #
+# an exception. If so, return the default overflow/underflow result #
+# and return the EXOP if exceptions are enabled. Else, scale the #
+# result operand to the proper exponent. #
+# #
+#########################################################################
+
+ align 0x10
+tbl_fmul_ovfl:
+ long 0x3fff - 0x7ffe # ext_max
+ long 0x3fff - 0x407e # sgl_max
+ long 0x3fff - 0x43fe # dbl_max
+tbl_fmul_unfl:
+ long 0x3fff + 0x0001 # ext_unfl
+ long 0x3fff - 0x3f80 # sgl_unfl
+ long 0x3fff - 0x3c00 # dbl_unfl
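+# How these thresholds work: scale_to_zero_src/dst() each return a
+# scale factor of (0x3fff - operand exponent), so the true biased
+# exponent of the product is the scaled result exponent (0x3fff or
+# 0x4000, since both scaled mantissas lie in [1.0,2.0)) minus the
+# combined scale factor SF. The result overflows its precision when
+# the true exponent exceeds the per-precision maximum (0x7ffe ext,
+# 0x407e sgl, 0x43fe dbl); hence ovfl threshold = 0x3fff - max exp.
+# Worked sgl example: operands with biased exponents 0x4040 each give
+# SF = 2*(0x3fff-0x4040) = -0x82 < (0x3fff-0x407e) = -0x7f, so the
+# compare below takes the guaranteed-overflow branch.
+#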
+
+ global fsmul
+fsmul:
+ andi.b &0x30,%d0 # clear rnd prec
+ ori.b &s_mode*0x10,%d0 # insert sgl prec
+ bra.b fmul
+
+ global fdmul
+fdmul:
+ andi.b &0x30,%d0
+ ori.b &d_mode*0x10,%d0 # insert dbl prec
+
+ global fmul
+fmul:
+ mov.l %d0,L_SCR3(%a6) # store rnd info
+
+ clr.w %d1
+ mov.b DTAG(%a6),%d1
+ lsl.b &0x3,%d1
+ or.b STAG(%a6),%d1 # combine src tags
+ bne.w fmul_not_norm # optimize on non-norm input
+
+fmul_norm:
+ mov.w DST_EX(%a1),FP_SCR1_EX(%a6)
+ mov.l DST_HI(%a1),FP_SCR1_HI(%a6)
+ mov.l DST_LO(%a1),FP_SCR1_LO(%a6)
+
+ mov.w SRC_EX(%a0),FP_SCR0_EX(%a6)
+ mov.l SRC_HI(%a0),FP_SCR0_HI(%a6)
+ mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
+
+ bsr.l scale_to_zero_src # scale src exponent
+ mov.l %d0,-(%sp) # save scale factor 1
+
+ bsr.l scale_to_zero_dst # scale dst exponent
+
+ add.l %d0,(%sp) # SCALE_FACTOR = scale1 + scale2
+
+ mov.w 2+L_SCR3(%a6),%d1 # fetch precision
+ lsr.b &0x6,%d1 # shift to lo bits
+ mov.l (%sp)+,%d0 # load S.F.
+ cmp.l %d0,(tbl_fmul_ovfl.w,%pc,%d1.w*4) # would result ovfl?
+ beq.w fmul_may_ovfl # result may rnd to overflow
+ blt.w fmul_ovfl # result will overflow
+
+ cmp.l %d0,(tbl_fmul_unfl.w,%pc,%d1.w*4) # would result unfl?
+ beq.w fmul_may_unfl # result may underflow
+ bgt.w fmul_unfl # result will underflow
+
+#
+# NORMAL:
+# - the result of the multiply operation will neither overflow nor underflow.
+# - do the multiply to the proper precision and rounding mode.
+# - scale the result exponent using the scale factor. if both operands were
+# normalized then we really don't need to go through this scaling. but for now,
+# this will do.
+#
+fmul_normal:
+ fmovm.x FP_SCR1(%a6),&0x80 # load dst operand
+
+ fmov.l L_SCR3(%a6),%fpcr # set FPCR
+ fmov.l &0x0,%fpsr # clear FPSR
+
+ fmul.x FP_SCR0(%a6),%fp0 # execute multiply
+
+ fmov.l %fpsr,%d1 # save status
+ fmov.l &0x0,%fpcr # clear FPCR
+
+ or.l %d1,USER_FPSR(%a6) # save INEX2,N
+
+fmul_normal_exit:
+ fmovm.x &0x80,FP_SCR0(%a6) # store out result
+ mov.l %d2,-(%sp) # save d2
+ mov.w FP_SCR0_EX(%a6),%d1 # load {sgn,exp}
+ mov.l %d1,%d2 # make a copy
+ andi.l &0x7fff,%d1 # strip sign
+ andi.w &0x8000,%d2 # keep old sign
+ sub.l %d0,%d1 # subtract scale factor
+ or.w %d2,%d1 # concat old sign,new exp
+ mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent
+ mov.l (%sp)+,%d2 # restore d2
+ fmovm.x FP_SCR0(%a6),&0x80 # return default result in fp0
+ rts
+
+#
+# OVERFLOW:
+# - the result of the multiply operation is an overflow.
+# - do the multiply to the proper precision and rounding mode in order to
+# set the inexact bits.
+# - calculate the default result and return it in fp0.
+# - if overflow or inexact is enabled, we need a multiply result rounded to
+# extended precision. if the original operation was extended, then we have this
+# result. if the original operation was single or double, we have to do another
+# multiply using extended precision and the correct rounding mode. the result
+# of this operation then has its exponent scaled by -0x6000 to create the
+# exceptional operand.
+#
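+# Concretely, the EXOP exponent computed below is
+#	exop exp = (result exp - scale factor - 0x6000) & 0x7fff
+# with the original sign re-inserted afterwards.
+#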
+fmul_ovfl:
+ fmovm.x FP_SCR1(%a6),&0x80 # load dst operand
+
+ fmov.l L_SCR3(%a6),%fpcr # set FPCR
+ fmov.l &0x0,%fpsr # clear FPSR
+
+ fmul.x FP_SCR0(%a6),%fp0 # execute multiply
+
+ fmov.l %fpsr,%d1 # save status
+ fmov.l &0x0,%fpcr # clear FPCR
+
+ or.l %d1,USER_FPSR(%a6) # save INEX2,N
+
+# save setting this until now because this is where fmul_may_ovfl may jump in
+fmul_ovfl_tst:
+ or.l &ovfl_inx_mask,USER_FPSR(%a6) # set ovfl/aovfl/ainex
+
+ mov.b FPCR_ENABLE(%a6),%d1
+ andi.b &0x13,%d1 # is OVFL or INEX enabled?
+ bne.b fmul_ovfl_ena # yes
+
+# calculate the default result
+fmul_ovfl_dis:
+ btst &neg_bit,FPSR_CC(%a6) # is result negative?
+ sne %d1 # set sign param accordingly
+ mov.l L_SCR3(%a6),%d0 # pass rnd prec,mode
+ bsr.l ovf_res # calculate default result
+ or.b %d0,FPSR_CC(%a6) # set INF,N if applicable
+ fmovm.x (%a0),&0x80 # return default result in fp0
+ rts
+
+#
+# OVFL is enabled; Create EXOP:
+# - if precision is extended, then we have the EXOP. simply bias the exponent
+# with an extra -0x6000. if the precision is single or double, we need to
+# calculate a result rounded to extended precision.
+#
+fmul_ovfl_ena:
+ mov.l L_SCR3(%a6),%d1
+ andi.b &0xc0,%d1 # test the rnd prec
+ bne.b fmul_ovfl_ena_sd # it's sgl or dbl
+
+fmul_ovfl_ena_cont:
+ fmovm.x &0x80,FP_SCR0(%a6) # move result to stack
+
+ mov.l %d2,-(%sp) # save d2
+ mov.w FP_SCR0_EX(%a6),%d1 # fetch {sgn,exp}
+ mov.w %d1,%d2 # make a copy
+ andi.l &0x7fff,%d1 # strip sign
+ sub.l %d0,%d1 # subtract scale factor
+ subi.l &0x6000,%d1 # subtract bias
+ andi.w &0x7fff,%d1 # clear sign bit
+ andi.w &0x8000,%d2 # keep old sign
+ or.w %d2,%d1 # concat old sign,new exp
+ mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent
+ mov.l (%sp)+,%d2 # restore d2
+ fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
+ bra.b fmul_ovfl_dis
+
+fmul_ovfl_ena_sd:
+ fmovm.x FP_SCR1(%a6),&0x80 # load dst operand
+
+ mov.l L_SCR3(%a6),%d1
+ andi.b &0x30,%d1 # keep rnd mode only
+ fmov.l %d1,%fpcr # set FPCR
+
+ fmul.x FP_SCR0(%a6),%fp0 # execute multiply
+
+ fmov.l &0x0,%fpcr # clear FPCR
+ bra.b fmul_ovfl_ena_cont
+
+#
+# may OVERFLOW:
+# - the result of the multiply operation MAY overflow.
+# - do the multiply to the proper precision and rounding mode in order to
+# set the inexact bits.
+# - calculate the default result and return it in fp0.
+#
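+# The fcmp against 2.0 below works because both operands were scaled
+# so their mantissas lie in [1.0,2.0): the product is in [1.0,4.0),
+# and |scaled result| >= 2.0 means the scaled exponent carried up to
+# 0x4000, pushing the true exponent one past the precision's maximum
+# when the scale factor sits exactly on the overflow threshold.
+#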
+fmul_may_ovfl:
+ fmovm.x FP_SCR1(%a6),&0x80 # load dst op
+
+ fmov.l L_SCR3(%a6),%fpcr # set FPCR
+ fmov.l &0x0,%fpsr # clear FPSR
+
+ fmul.x FP_SCR0(%a6),%fp0 # execute multiply
+
+ fmov.l %fpsr,%d1 # save status
+ fmov.l &0x0,%fpcr # clear FPCR
+
+ or.l %d1,USER_FPSR(%a6) # save INEX2,N
+
+ fabs.x %fp0,%fp1 # make a copy of result
+ fcmp.b %fp1,&0x2 # is |result| >= 2.b?
+ fbge.w fmul_ovfl_tst # yes; overflow has occurred
+
+# no, it didn't overflow; we have correct result
+ bra.w fmul_normal_exit
+
+#
+# UNDERFLOW:
+# - the result of the multiply operation is an underflow.
+# - do the multiply to the proper precision and rounding mode in order to
+# set the inexact bits.
+# - calculate the default result and return it in fp0.
+# - if overflow or inexact is enabled, we need a multiply result rounded to
+# extended precision. if the original operation was extended, then we have this
+# result. if the original operation was single or double, we have to do another
+# multiply using extended precision and the correct rounding mode. the result
+# of this operation then has its exponent scaled by -0x6000 to create the
+# exceptional operand.
+#
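+# (For underflow, the EXOP bias computed below has the opposite sign:
+#	exop exp = (result exp - scale factor + 0x6000) & 0x7fff.)
+#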
+fmul_unfl:
+ bset &unfl_bit,FPSR_EXCEPT(%a6) # set unfl exc bit
+
+# for fun, let's use only extended precision, round to zero. then, let
+# the unf_res() routine figure out all the rest.
+# this way we will still get the correct answer.
+ fmovm.x FP_SCR1(%a6),&0x80 # load dst operand
+
+ fmov.l &rz_mode*0x10,%fpcr # set FPCR
+ fmov.l &0x0,%fpsr # clear FPSR
+
+ fmul.x FP_SCR0(%a6),%fp0 # execute multiply
+
+ fmov.l %fpsr,%d1 # save status
+ fmov.l &0x0,%fpcr # clear FPCR
+
+ or.l %d1,USER_FPSR(%a6) # save INEX2,N
+
+ mov.b FPCR_ENABLE(%a6),%d1
+ andi.b &0x0b,%d1 # is UNFL or INEX enabled?
+ bne.b fmul_unfl_ena # yes
+
+fmul_unfl_dis:
+ fmovm.x &0x80,FP_SCR0(%a6) # store out result
+
+ lea FP_SCR0(%a6),%a0 # pass: result addr
+ mov.l L_SCR3(%a6),%d1 # pass: rnd prec,mode
+ bsr.l unf_res # calculate default result
+ or.b %d0,FPSR_CC(%a6) # unf_res2 may have set 'Z'
+ fmovm.x FP_SCR0(%a6),&0x80 # return default result in fp0
+ rts
+
+#
+# UNFL is enabled.
+#
+fmul_unfl_ena:
+ fmovm.x FP_SCR1(%a6),&0x40 # load dst op
+
+ mov.l L_SCR3(%a6),%d1
+ andi.b &0xc0,%d1 # is precision extended?
+ bne.b fmul_unfl_ena_sd # no, sgl or dbl
+
+# if the rnd mode is anything but RZ, then we have to re-do the above
+# multiplication because we used RZ for all.
+ fmov.l L_SCR3(%a6),%fpcr # set FPCR
+
+fmul_unfl_ena_cont:
+ fmov.l &0x0,%fpsr # clear FPSR
+
+ fmul.x FP_SCR0(%a6),%fp1 # execute multiply
+
+ fmov.l &0x0,%fpcr # clear FPCR
+
+ fmovm.x &0x40,FP_SCR0(%a6) # save result to stack
+ mov.l %d2,-(%sp) # save d2
+ mov.w FP_SCR0_EX(%a6),%d1 # fetch {sgn,exp}
+ mov.l %d1,%d2 # make a copy
+ andi.l &0x7fff,%d1 # strip sign
+ andi.w &0x8000,%d2 # keep old sign
+ sub.l %d0,%d1 # subtract scale factor
+ addi.l &0x6000,%d1 # add bias
+ andi.w &0x7fff,%d1
+ or.w %d2,%d1 # concat old sign,new exp
+ mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent
+ mov.l (%sp)+,%d2 # restore d2
+ fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
+ bra.w fmul_unfl_dis
+
+fmul_unfl_ena_sd:
+ mov.l L_SCR3(%a6),%d1
+ andi.b &0x30,%d1 # use only rnd mode
+ fmov.l %d1,%fpcr # set FPCR
+
+ bra.b fmul_unfl_ena_cont
+
+# MAY UNDERFLOW:
+# -use the correct rounding mode and precision. this code favors operations
+# that do not underflow.
+fmul_may_unfl:
+ fmovm.x FP_SCR1(%a6),&0x80 # load dst operand
+
+ fmov.l L_SCR3(%a6),%fpcr # set FPCR
+ fmov.l &0x0,%fpsr # clear FPSR
+
+ fmul.x FP_SCR0(%a6),%fp0 # execute multiply
+
+ fmov.l %fpsr,%d1 # save status
+ fmov.l &0x0,%fpcr # clear FPCR
+
+ or.l %d1,USER_FPSR(%a6) # save INEX2,N
+
+ fabs.x %fp0,%fp1 # make a copy of result
+ fcmp.b %fp1,&0x2 # is |result| > 2.b?
+ fbgt.w fmul_normal_exit # no; no underflow occurred
+ fblt.w fmul_unfl # yes; underflow occurred
+
+#
+# we still don't know if underflow occurred. result is ~ equal to 2. but,
+# we don't know if the result was an underflow that rounded up to a 2 or
+# a normalized number that rounded down to a 2. so, redo the entire operation
+# using RZ as the rounding mode to see what the pre-rounded result is.
+# this case should be relatively rare.
+#
+ fmovm.x FP_SCR1(%a6),&0x40 # load dst operand
+
+ mov.l L_SCR3(%a6),%d1
+ andi.b &0xc0,%d1 # keep rnd prec
+ ori.b &rz_mode*0x10,%d1 # insert RZ
+
+ fmov.l %d1,%fpcr # set FPCR
+ fmov.l &0x0,%fpsr # clear FPSR
+
+ fmul.x FP_SCR0(%a6),%fp1 # execute multiply
+
+ fmov.l &0x0,%fpcr # clear FPCR
+ fabs.x %fp1 # make absolute value
+ fcmp.b %fp1,&0x2 # is |result| < 2.b?
+ fbge.w fmul_normal_exit # no; no underflow occurred
+ bra.w fmul_unfl # yes, underflow occurred
+
+################################################################################
+
+#
+# Multiply: inputs are not both normalized; what are they?
+#
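+# The index built at the fmul entry point is (DTAG << 3) | STAG, so
+# each group of eight entries below shares one destination type; e.g.
+# dst = INF (2), src = ZERO (1) selects entry 0x11, "INF x ZERO",
+# which routes to the operand error handler.
+#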
+fmul_not_norm:
+ mov.w (tbl_fmul_op.b,%pc,%d1.w*2),%d1
+ jmp (tbl_fmul_op.b,%pc,%d1.w)
+
+ swbeg &48
+tbl_fmul_op:
+ short fmul_norm - tbl_fmul_op # NORM x NORM
+ short fmul_zero - tbl_fmul_op # NORM x ZERO
+ short fmul_inf_src - tbl_fmul_op # NORM x INF
+ short fmul_res_qnan - tbl_fmul_op # NORM x QNAN
+ short fmul_norm - tbl_fmul_op # NORM x DENORM
+ short fmul_res_snan - tbl_fmul_op # NORM x SNAN
+ short tbl_fmul_op - tbl_fmul_op #
+ short tbl_fmul_op - tbl_fmul_op #
+
+ short fmul_zero - tbl_fmul_op # ZERO x NORM
+ short fmul_zero - tbl_fmul_op # ZERO x ZERO
+ short fmul_res_operr - tbl_fmul_op # ZERO x INF
+ short fmul_res_qnan - tbl_fmul_op # ZERO x QNAN
+ short fmul_zero - tbl_fmul_op # ZERO x DENORM
+ short fmul_res_snan - tbl_fmul_op # ZERO x SNAN
+ short tbl_fmul_op - tbl_fmul_op #
+ short tbl_fmul_op - tbl_fmul_op #
+
+ short fmul_inf_dst - tbl_fmul_op # INF x NORM
+ short fmul_res_operr - tbl_fmul_op # INF x ZERO
+ short fmul_inf_dst - tbl_fmul_op # INF x INF
+ short fmul_res_qnan - tbl_fmul_op # INF x QNAN
+ short fmul_inf_dst - tbl_fmul_op # INF x DENORM
+ short fmul_res_snan - tbl_fmul_op # INF x SNAN
+ short tbl_fmul_op - tbl_fmul_op #
+ short tbl_fmul_op - tbl_fmul_op #
+
+ short fmul_res_qnan - tbl_fmul_op # QNAN x NORM
+ short fmul_res_qnan - tbl_fmul_op # QNAN x ZERO
+ short fmul_res_qnan - tbl_fmul_op # QNAN x INF
+ short fmul_res_qnan - tbl_fmul_op # QNAN x QNAN
+ short fmul_res_qnan - tbl_fmul_op # QNAN x DENORM
+ short fmul_res_snan - tbl_fmul_op # QNAN x SNAN
+ short tbl_fmul_op - tbl_fmul_op #
+ short tbl_fmul_op - tbl_fmul_op #
+
+ short fmul_norm - tbl_fmul_op # DENORM x NORM
+ short fmul_zero - tbl_fmul_op # DENORM x ZERO
+ short fmul_inf_src - tbl_fmul_op # DENORM x INF
+ short fmul_res_qnan - tbl_fmul_op # DENORM x QNAN
+ short fmul_norm - tbl_fmul_op # DENORM x DENORM
+ short fmul_res_snan - tbl_fmul_op # DENORM x SNAN
+ short tbl_fmul_op - tbl_fmul_op #
+ short tbl_fmul_op - tbl_fmul_op #
+
+ short fmul_res_snan - tbl_fmul_op # SNAN x NORM
+ short fmul_res_snan - tbl_fmul_op # SNAN x ZERO
+ short fmul_res_snan - tbl_fmul_op # SNAN x INF
+ short fmul_res_snan - tbl_fmul_op # SNAN x QNAN
+ short fmul_res_snan - tbl_fmul_op # SNAN x DENORM
+ short fmul_res_snan - tbl_fmul_op # SNAN x SNAN
+ short tbl_fmul_op - tbl_fmul_op #
+ short tbl_fmul_op - tbl_fmul_op #
+
+fmul_res_operr:
+ bra.l res_operr
+fmul_res_snan:
+ bra.l res_snan
+fmul_res_qnan:
+ bra.l res_qnan
+
+#
+# Multiply: (Zero x Zero) || (Zero x norm) || (Zero x denorm)
+#
+ global fmul_zero # global for fsglmul
+fmul_zero:
+ mov.b SRC_EX(%a0),%d0 # exclusive or the signs
+ mov.b DST_EX(%a1),%d1
+ eor.b %d0,%d1
+ bpl.b fmul_zero_p # result ZERO is pos.
+fmul_zero_n:
+ fmov.s &0x80000000,%fp0 # load -ZERO
+ mov.b &z_bmask+neg_bmask,FPSR_CC(%a6) # set Z/N
+ rts
+fmul_zero_p:
+ fmov.s &0x00000000,%fp0 # load +ZERO
+ mov.b &z_bmask,FPSR_CC(%a6) # set Z
+ rts
+
+#
+# Multiply: (inf x inf) || (inf x norm) || (inf x denorm)
+#
+# Note: The j-bit for an infinity is a don't-care. However, to be
+# strictly compatible w/ the 68881/882, we make sure to return an
+# INF w/ the j-bit set if the input INF j-bit was set. Destination
+# INFs take priority.
+#
+ global fmul_inf_dst # global for fsglmul
+fmul_inf_dst:
+ fmovm.x DST(%a1),&0x80 # return INF result in fp0
+ mov.b SRC_EX(%a0),%d0 # exclusive or the signs
+ mov.b DST_EX(%a1),%d1
+ eor.b %d0,%d1
+ bpl.b fmul_inf_dst_p # result INF is pos.
+fmul_inf_dst_n:
+ fabs.x %fp0 # clear result sign
+ fneg.x %fp0 # set result sign
+ mov.b &inf_bmask+neg_bmask,FPSR_CC(%a6) # set INF/N
+ rts
+fmul_inf_dst_p:
+ fabs.x %fp0 # clear result sign
+ mov.b &inf_bmask,FPSR_CC(%a6) # set INF
+ rts
+
+ global fmul_inf_src # global for fsglmul
+fmul_inf_src:
+ fmovm.x SRC(%a0),&0x80 # return INF result in fp0
+ mov.b SRC_EX(%a0),%d0 # exclusive or the signs
+ mov.b DST_EX(%a1),%d1
+ eor.b %d0,%d1
+ bpl.b fmul_inf_dst_p # result INF is pos.
+ bra.b fmul_inf_dst_n
+
+#########################################################################
+# XDEF **************************************************************** #
+# fin(): emulates the fmove instruction #
+# fsin(): emulates the fsmove instruction #
+# fdin(): emulates the fdmove instruction #
+# #
+# XREF **************************************************************** #
+# norm() - normalize mantissa for EXOP on denorm #
+# scale_to_zero_src() - scale src exponent to zero #
+# ovf_res() - return default overflow result #
+# unf_res() - return default underflow result #
+# res_qnan_1op() - return QNAN result #
+# res_snan_1op() - return SNAN result #
+# #
+# INPUT *************************************************************** #
+# a0 = pointer to extended precision source operand #
+# d0 = round prec/mode #
+# #
+# OUTPUT ************************************************************** #
+# fp0 = result #
+# fp1 = EXOP (if exception occurred) #
+# #
+# ALGORITHM *********************************************************** #
+# Handle NANs, infinities, and zeroes as special cases. Divide #
+# norms into extended, single, and double precision. #
+# Norms can be emulated w/ a regular fmove instruction. For #
+# sgl/dbl, must scale exponent and perform an "fmove". Check to see #
+# if the result would have overflowed/underflowed. If so, use unf_res() #
+# or ovf_res() to return the default result. Also return EXOP if #
+# exception is enabled. If no exception, return the default result. #
+# Unnorms don't pass through here. #
+# #
+#########################################################################
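+# Note: the fin_sgl/fin_dbl tests below compare the scale factor SF
+# returned by scale_to_zero_src(); the true result exponent is
+# 0x3fff - SF (0x4000 - SF if rounding carries out of the mantissa).
+# Expressed with the extended bias, normalized singles span biased
+# exponents [0x3f81,0x407e] (unbiased -126..+127) and doubles span
+# [0x3c01,0x43fe] (unbiased -1022..+1023), which is where the
+# constants 0x3f80/0x407e and 0x3c00/0x43fe come from.
+#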
+
+ global fsin
+fsin:
+ andi.b &0x30,%d0 # clear rnd prec
+ ori.b &s_mode*0x10,%d0 # insert sgl precision
+ bra.b fin
+
+ global fdin
+fdin:
+ andi.b &0x30,%d0 # clear rnd prec
+ ori.b &d_mode*0x10,%d0 # insert dbl precision
+
+ global fin
+fin:
+ mov.l %d0,L_SCR3(%a6) # store rnd info
+
+ mov.b STAG(%a6),%d1 # fetch src optype tag
+ bne.w fin_not_norm # optimize on non-norm input
+
+#
+# FP MOVE IN: NORMs and DENORMs ONLY!
+#
+fin_norm:
+ andi.b &0xc0,%d0 # is precision extended?
+ bne.w fin_not_ext # no, so go handle dbl or sgl
+
+#
+# precision selected is extended. so...we cannot get an underflow
+# or overflow because of rounding to the correct precision. so...
+# skip the scaling and unscaling...
+#
+ tst.b SRC_EX(%a0) # is the operand negative?
+ bpl.b fin_norm_done # no
+ bset &neg_bit,FPSR_CC(%a6) # yes, so set 'N' ccode bit
+fin_norm_done:
+ fmovm.x SRC(%a0),&0x80 # return result in fp0
+ rts
+
+#
+# for an extended precision DENORM, the UNFL exception bit is set;
+# the accrued bit is NOT set in this instance (no inexactness!)
+#
+fin_denorm:
+ andi.b &0xc0,%d0 # is precision extended?
+ bne.w fin_not_ext # no, so go handle dbl or sgl
+
+ bset &unfl_bit,FPSR_EXCEPT(%a6) # set unfl exc bit
+ tst.b SRC_EX(%a0) # is the operand negative?
+ bpl.b fin_denorm_done # no
+ bset &neg_bit,FPSR_CC(%a6) # yes, so set 'N' ccode bit
+fin_denorm_done:
+ fmovm.x SRC(%a0),&0x80 # return result in fp0
+ btst &unfl_bit,FPCR_ENABLE(%a6) # is UNFL enabled?
+ bne.b fin_denorm_unfl_ena # yes
+ rts
+
+#
+# the input is an extended DENORM and underflow is enabled in the FPCR.
+# normalize the mantissa and add the bias of 0x6000 to the resulting negative
+# exponent and insert back into the operand.
+#
+fin_denorm_unfl_ena:
+ mov.w SRC_EX(%a0),FP_SCR0_EX(%a6)
+ mov.l SRC_HI(%a0),FP_SCR0_HI(%a6)
+ mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
+ lea FP_SCR0(%a6),%a0 # pass: ptr to operand
+ bsr.l norm # normalize result
+ neg.w %d0 # new exponent = -(shft val)
+ addi.w &0x6000,%d0 # add new bias to exponent
+ mov.w FP_SCR0_EX(%a6),%d1 # fetch old sign,exp
+ andi.w &0x8000,%d1 # keep old sign
+ andi.w &0x7fff,%d0 # clear sign position
+ or.w %d1,%d0 # concat new exp,old sign
+ mov.w %d0,FP_SCR0_EX(%a6) # insert new exponent
+ fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
+ rts
+
+#
+# operand is to be rounded to single or double precision
+#
+fin_not_ext:
+ cmpi.b %d0,&s_mode*0x10 # separate sgl/dbl prec
+ bne.b fin_dbl
+
+#
+# operand is to be rounded to single precision
+#
+fin_sgl:
+ mov.w SRC_EX(%a0),FP_SCR0_EX(%a6)
+ mov.l SRC_HI(%a0),FP_SCR0_HI(%a6)
+ mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
+ bsr.l scale_to_zero_src # calculate scale factor
+
+ cmpi.l %d0,&0x3fff-0x3f80 # will move in underflow?
+ bge.w fin_sd_unfl # yes; go handle underflow
+ cmpi.l %d0,&0x3fff-0x407e # will move in overflow?
+ beq.w fin_sd_may_ovfl # maybe; go check
+ blt.w fin_sd_ovfl # yes; go handle overflow
+
+#
+# operand will NOT overflow or underflow when moved into the fp reg file
+#
+fin_sd_normal:
+ fmov.l &0x0,%fpsr # clear FPSR
+ fmov.l L_SCR3(%a6),%fpcr # set FPCR
+
+ fmov.x FP_SCR0(%a6),%fp0 # perform move
+
+ fmov.l %fpsr,%d1 # save FPSR
+ fmov.l &0x0,%fpcr # clear FPCR
+
+ or.l %d1,USER_FPSR(%a6) # save INEX2,N
+
+fin_sd_normal_exit:
+ mov.l %d2,-(%sp) # save d2
+ fmovm.x &0x80,FP_SCR0(%a6) # store out result
+ mov.w FP_SCR0_EX(%a6),%d1 # load {sgn,exp}
+ mov.w %d1,%d2 # make a copy
+ andi.l &0x7fff,%d1 # strip sign
+ sub.l %d0,%d1 # subtract scale factor
+ andi.w &0x8000,%d2 # keep old sign
+ or.w %d1,%d2 # concat old sign,new exponent
+ mov.w %d2,FP_SCR0_EX(%a6) # insert new exponent
+ mov.l (%sp)+,%d2 # restore d2
+ fmovm.x FP_SCR0(%a6),&0x80 # return result in fp0
+ rts
+
+#
+# operand is to be rounded to double precision
+#
+fin_dbl:
+ mov.w SRC_EX(%a0),FP_SCR0_EX(%a6)
+ mov.l SRC_HI(%a0),FP_SCR0_HI(%a6)
+ mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
+ bsr.l scale_to_zero_src # calculate scale factor
+
+ cmpi.l %d0,&0x3fff-0x3c00 # will move in underflow?
+ bge.w fin_sd_unfl # yes; go handle underflow
+ cmpi.l %d0,&0x3fff-0x43fe # will move in overflow?
+ beq.w fin_sd_may_ovfl # maybe; go check
+ blt.w fin_sd_ovfl # yes; go handle overflow
+ bra.w fin_sd_normal # no; go handle normalized op
+
+#
+# operand WILL underflow when moved into the fp register file
+#
+fin_sd_unfl:
+ bset &unfl_bit,FPSR_EXCEPT(%a6) # set unfl exc bit
+
+ tst.b FP_SCR0_EX(%a6) # is operand negative?
+ bpl.b fin_sd_unfl_tst
+ bset &neg_bit,FPSR_CC(%a6) # set 'N' ccode bit
+
+# if underflow or inexact is enabled, then go calculate the EXOP first.
+fin_sd_unfl_tst:
+ mov.b FPCR_ENABLE(%a6),%d1
+ andi.b &0x0b,%d1 # is UNFL or INEX enabled?
+ bne.b fin_sd_unfl_ena # yes
+
+fin_sd_unfl_dis:
+ lea FP_SCR0(%a6),%a0 # pass: result addr
+ mov.l L_SCR3(%a6),%d1 # pass: rnd prec,mode
+ bsr.l unf_res # calculate default result
+ or.b %d0,FPSR_CC(%a6) # unf_res may have set 'Z'
+ fmovm.x FP_SCR0(%a6),&0x80 # return default result in fp0
+ rts
+
+#
+# operand will underflow AND underflow or inexact is enabled.
+# therefore, we must return the result rounded to extended precision.
+#
+fin_sd_unfl_ena:
+ mov.l FP_SCR0_HI(%a6),FP_SCR1_HI(%a6)
+ mov.l FP_SCR0_LO(%a6),FP_SCR1_LO(%a6)
+ mov.w FP_SCR0_EX(%a6),%d1 # load current exponent
+
+ mov.l %d2,-(%sp) # save d2
+ mov.w %d1,%d2 # make a copy
+ andi.l &0x7fff,%d1 # strip sign
+ sub.l %d0,%d1 # subtract scale factor
+ andi.w &0x8000,%d2 # extract old sign
+ addi.l &0x6000,%d1 # add new bias
+ andi.w &0x7fff,%d1
+ or.w %d1,%d2 # concat old sign,new exp
+ mov.w %d2,FP_SCR1_EX(%a6) # insert new exponent
+ fmovm.x FP_SCR1(%a6),&0x40 # return EXOP in fp1
+ mov.l (%sp)+,%d2 # restore d2
+ bra.b fin_sd_unfl_dis
+
+#
+# operand WILL overflow.
+#
+fin_sd_ovfl:
+ fmov.l &0x0,%fpsr # clear FPSR
+ fmov.l L_SCR3(%a6),%fpcr # set FPCR
+
+ fmov.x FP_SCR0(%a6),%fp0 # perform move
+
+ fmov.l &0x0,%fpcr # clear FPCR
+ fmov.l %fpsr,%d1 # save FPSR
+
+ or.l %d1,USER_FPSR(%a6) # save INEX2,N
+
+fin_sd_ovfl_tst:
+ or.l &ovfl_inx_mask,USER_FPSR(%a6) # set ovfl/aovfl/ainex
+
+ mov.b FPCR_ENABLE(%a6),%d1
+ andi.b &0x13,%d1 # is OVFL or INEX enabled?
+ bne.b fin_sd_ovfl_ena # yes
+
+#
+# OVFL is not enabled; therefore, we must create the default result by
+# calling ovf_res().
+#
+fin_sd_ovfl_dis:
+ btst &neg_bit,FPSR_CC(%a6) # is result negative?
+ sne %d1 # set sign param accordingly
+ mov.l L_SCR3(%a6),%d0 # pass: prec,mode
+ bsr.l ovf_res # calculate default result
+ or.b %d0,FPSR_CC(%a6) # set INF,N if applicable
+ fmovm.x (%a0),&0x80 # return default result in fp0
+ rts
+
+#
+# OVFL is enabled.
+# the INEX2 bit has already been updated by the round to the correct precision.
+# now, round to extended(and don't alter the FPSR).
+#
+fin_sd_ovfl_ena:
+ mov.l %d2,-(%sp) # save d2
+ mov.w FP_SCR0_EX(%a6),%d1 # fetch {sgn,exp}
+ mov.l %d1,%d2 # make a copy
+ andi.l &0x7fff,%d1 # strip sign
+ andi.w &0x8000,%d2 # keep old sign
+ sub.l %d0,%d1 # subtract scale factor
+ sub.l &0x6000,%d1 # subtract bias
+ andi.w &0x7fff,%d1
+ or.w %d2,%d1
+ mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent
+ mov.l (%sp)+,%d2 # restore d2
+ fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
+ bra.b fin_sd_ovfl_dis
+
+#
+# the move in MAY overflow. so...
+#
+fin_sd_may_ovfl:
+ fmov.l &0x0,%fpsr # clear FPSR
+ fmov.l L_SCR3(%a6),%fpcr # set FPCR
+
+ fmov.x FP_SCR0(%a6),%fp0 # perform the move
+
+ fmov.l %fpsr,%d1 # save status
+ fmov.l &0x0,%fpcr # clear FPCR
+
+ or.l %d1,USER_FPSR(%a6) # save INEX2,N
+
+ fabs.x %fp0,%fp1 # make a copy of result
+ fcmp.b %fp1,&0x2 # is |result| >= 2.b?
+ fbge.w fin_sd_ovfl_tst # yes; overflow has occurred
+
+# no, it didn't overflow; we have correct result
+ bra.w fin_sd_normal_exit
+
+##########################################################################
+
+#
+# operand is not a NORM: check its optype and branch accordingly
+#
+fin_not_norm:
+ cmpi.b %d1,&DENORM # weed out DENORM
+ beq.w fin_denorm
+ cmpi.b %d1,&SNAN # weed out SNANs
+ beq.l res_snan_1op
+ cmpi.b %d1,&QNAN # weed out QNANs
+ beq.l res_qnan_1op
+
+#
+# do the fmove in; at this point, only possible ops are ZERO and INF.
+# use fmov to determine ccodes.
+# prec:mode should be zero at this point but it won't affect answer anyways.
+#
+ fmov.x SRC(%a0),%fp0 # do fmove in
+ fmov.l %fpsr,%d0 # no exceptions possible
+ rol.l &0x8,%d0 # put ccodes in lo byte
+ mov.b %d0,FPSR_CC(%a6) # insert correct ccodes
+ rts
+
+#########################################################################
+# XDEF **************************************************************** #
+# fdiv(): emulates the fdiv instruction #
+# fsdiv(): emulates the fsdiv instruction #
+# fddiv(): emulates the fddiv instruction #
+# #
+# XREF **************************************************************** #
+# scale_to_zero_src() - scale src exponent to zero #
+# scale_to_zero_dst() - scale dst exponent to zero #
+# unf_res() - return default underflow result #
+# ovf_res() - return default overflow result #
+# res_qnan() - return QNAN result #
+# res_snan() - return SNAN result #
+# #
+# INPUT *************************************************************** #
+# a0 = pointer to extended precision source operand #
+# a1 = pointer to extended precision destination operand #
+# d0 = rnd prec,mode #
+# #
+# OUTPUT ************************************************************** #
+# fp0 = result #
+# fp1 = EXOP (if exception occurred) #
+# #
+# ALGORITHM *********************************************************** #
+# Handle NANs, infinities, and zeroes as special cases. Divide #
+# norms/denorms into ext/sgl/dbl precision. #
+# For norms/denorms, scale the exponents such that a divide #
+# instruction won't cause an exception. Use the regular fdiv to #
+# compute a result. Check if the regular operands would have taken #
+# an exception. If so, return the default overflow/underflow result #
+# and return the EXOP if exceptions are enabled. Else, scale the #
+# result operand to the proper exponent. #
+# #
+#########################################################################
+
+ align 0x10
+tbl_fdiv_unfl:
+ long 0x3fff - 0x0000 # ext_unfl
+ long 0x3fff - 0x3f81 # sgl_unfl
+ long 0x3fff - 0x3c01 # dbl_unfl
+
+tbl_fdiv_ovfl:
+ long 0x3fff - 0x7ffe # ext overflow exponent
+ long 0x3fff - 0x407e # sgl overflow exponent
+ long 0x3fff - 0x43fe # dbl overflow exponent
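+# For divide, the two scale factors combine with opposite signs (see
+# the neg.l below): equal scaling of dividend and divisor cancels in
+# the quotient, so the true quotient exponent is the scaled quotient
+# exponent minus (dst scale - src scale). The scaled quotient exponent
+# is 0x3ffe or 0x3fff because the mantissa quotient of two values in
+# [1.0,2.0) lies in (0.5,2.0).
+#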
+
+ global fsdiv
+fsdiv:
+ andi.b &0x30,%d0 # clear rnd prec
+ ori.b &s_mode*0x10,%d0 # insert sgl prec
+ bra.b fdiv
+
+ global fddiv
+fddiv:
+ andi.b &0x30,%d0 # clear rnd prec
+ ori.b &d_mode*0x10,%d0 # insert dbl prec
+
+ global fdiv
+fdiv:
+ mov.l %d0,L_SCR3(%a6) # store rnd info
+
+ clr.w %d1
+ mov.b DTAG(%a6),%d1
+ lsl.b &0x3,%d1
+ or.b STAG(%a6),%d1 # combine src tags
+
+ bne.w fdiv_not_norm # optimize on non-norm input
+
+#
+# DIVIDE: NORMs and DENORMs ONLY!
+#
+fdiv_norm:
+ mov.w DST_EX(%a1),FP_SCR1_EX(%a6)
+ mov.l DST_HI(%a1),FP_SCR1_HI(%a6)
+ mov.l DST_LO(%a1),FP_SCR1_LO(%a6)
+
+ mov.w SRC_EX(%a0),FP_SCR0_EX(%a6)
+ mov.l SRC_HI(%a0),FP_SCR0_HI(%a6)
+ mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
+
+ bsr.l scale_to_zero_src # scale src exponent
+ mov.l %d0,-(%sp) # save scale factor 1
+
+ bsr.l scale_to_zero_dst # scale dst exponent
+
+ neg.l (%sp) # SCALE FACTOR = scale2 - scale1 (dst - src)
+ add.l %d0,(%sp)
+
+ mov.w 2+L_SCR3(%a6),%d1 # fetch precision
+ lsr.b &0x6,%d1 # shift to lo bits
+ mov.l (%sp)+,%d0 # load S.F.
+ cmp.l %d0,(tbl_fdiv_ovfl.b,%pc,%d1.w*4) # will result overflow?
+ ble.w fdiv_may_ovfl # result may overflow
+
+ cmp.l %d0,(tbl_fdiv_unfl.w,%pc,%d1.w*4) # will result underflow?
+ beq.w fdiv_may_unfl # maybe
+ bgt.w fdiv_unfl # yes; go handle underflow
+
+fdiv_normal:
+ fmovm.x FP_SCR1(%a6),&0x80 # load dst op
+
+ fmov.l L_SCR3(%a6),%fpcr # set FPCR
+ fmov.l &0x0,%fpsr # clear FPSR
+
+ fdiv.x FP_SCR0(%a6),%fp0 # perform divide
+
+ fmov.l %fpsr,%d1 # save FPSR
+ fmov.l &0x0,%fpcr # clear FPCR
+
+ or.l %d1,USER_FPSR(%a6) # save INEX2,N
+
+fdiv_normal_exit:
+ fmovm.x &0x80,FP_SCR0(%a6) # store result on stack
+ mov.l %d2,-(%sp) # store d2
+ mov.w FP_SCR0_EX(%a6),%d1 # load {sgn,exp}
+ mov.l %d1,%d2 # make a copy
+ andi.l &0x7fff,%d1 # strip sign
+ andi.w &0x8000,%d2 # keep old sign
+ sub.l %d0,%d1 # subtract scale factor
+ or.w %d2,%d1 # concat old sign,new exp
+ mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent
+ mov.l (%sp)+,%d2 # restore d2
+ fmovm.x FP_SCR0(%a6),&0x80 # return result in fp0
+ rts
+
+tbl_fdiv_ovfl2:
+ long 0x7fff
+ long 0x407f
+ long 0x43ff
+
+fdiv_no_ovfl:
+ mov.l (%sp)+,%d0 # restore scale factor
+ bra.b fdiv_normal_exit
+
+fdiv_may_ovfl:
+ mov.l %d0,-(%sp) # save scale factor
+
+ fmovm.x FP_SCR1(%a6),&0x80 # load dst op
+
+ fmov.l L_SCR3(%a6),%fpcr # set FPCR
+ fmov.l &0x0,%fpsr # clear FPSR
+
+ fdiv.x FP_SCR0(%a6),%fp0 # execute divide
+
+ fmov.l %fpsr,%d0
+ fmov.l &0x0,%fpcr
+
+ or.l %d0,USER_FPSR(%a6) # save INEX,N
+
+ fmovm.x &0x01,-(%sp) # save result to stack
+ mov.w (%sp),%d0 # fetch new exponent
+ add.l &0xc,%sp # clear result from stack
+ andi.l &0x7fff,%d0 # strip sign
+ sub.l (%sp),%d0 # subtract scale factor
+ cmp.l %d0,(tbl_fdiv_ovfl2.b,%pc,%d1.w*4)
+ blt.b fdiv_no_ovfl
+ mov.l (%sp)+,%d0
+
+fdiv_ovfl_tst:
+ or.l &ovfl_inx_mask,USER_FPSR(%a6) # set ovfl/aovfl/ainex
+
+ mov.b FPCR_ENABLE(%a6),%d1
+ andi.b &0x13,%d1 # is OVFL or INEX enabled?
+ bne.b fdiv_ovfl_ena # yes
+
+fdiv_ovfl_dis:
+ btst &neg_bit,FPSR_CC(%a6) # is result negative?
+ sne %d1 # set sign param accordingly
+ mov.l L_SCR3(%a6),%d0 # pass prec:rnd
+ bsr.l ovf_res # calculate default result
+ or.b %d0,FPSR_CC(%a6) # set INF if applicable
+ fmovm.x (%a0),&0x80 # return default result in fp0
+ rts
+
+fdiv_ovfl_ena:
+ mov.l L_SCR3(%a6),%d1
+ andi.b &0xc0,%d1 # is precision extended?
+ bne.b fdiv_ovfl_ena_sd # no, do sgl or dbl
+
+fdiv_ovfl_ena_cont:
+ fmovm.x &0x80,FP_SCR0(%a6) # move result to stack
+
+ mov.l %d2,-(%sp) # save d2
+ mov.w FP_SCR0_EX(%a6),%d1 # fetch {sgn,exp}
+ mov.w %d1,%d2 # make a copy
+ andi.l &0x7fff,%d1 # strip sign
+ sub.l %d0,%d1 # subtract scale factor
+ subi.l &0x6000,%d1 # subtract bias
+ andi.w &0x7fff,%d1 # clear sign bit
+ andi.w &0x8000,%d2 # keep old sign
+ or.w %d2,%d1 # concat old sign,new exp
+ mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent
+ mov.l (%sp)+,%d2 # restore d2
+ fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
+ bra.b fdiv_ovfl_dis
+
+fdiv_ovfl_ena_sd:
+ fmovm.x FP_SCR1(%a6),&0x80 # load dst operand
+
+ mov.l L_SCR3(%a6),%d1
+ andi.b &0x30,%d1 # keep rnd mode
+ fmov.l %d1,%fpcr # set FPCR
+
+ fdiv.x FP_SCR0(%a6),%fp0 # execute divide
+
+ fmov.l &0x0,%fpcr # clear FPCR
+ bra.b fdiv_ovfl_ena_cont
+
+fdiv_unfl:
+ bset &unfl_bit,FPSR_EXCEPT(%a6) # set unfl exc bit
+
+ fmovm.x FP_SCR1(%a6),&0x80 # load dst op
+
+ fmov.l &rz_mode*0x10,%fpcr # set FPCR
+ fmov.l &0x0,%fpsr # clear FPSR
+
+ fdiv.x FP_SCR0(%a6),%fp0 # execute divide
+
+ fmov.l %fpsr,%d1 # save status
+ fmov.l &0x0,%fpcr # clear FPCR
+
+ or.l %d1,USER_FPSR(%a6) # save INEX2,N
+
+ mov.b FPCR_ENABLE(%a6),%d1
+ andi.b &0x0b,%d1 # is UNFL or INEX enabled?
+ bne.b fdiv_unfl_ena # yes
+
+fdiv_unfl_dis:
+ fmovm.x &0x80,FP_SCR0(%a6) # store out result
+
+ lea FP_SCR0(%a6),%a0 # pass: result addr
+ mov.l L_SCR3(%a6),%d1 # pass: rnd prec,mode
+ bsr.l unf_res # calculate default result
+ or.b %d0,FPSR_CC(%a6) # 'Z' may have been set
+ fmovm.x FP_SCR0(%a6),&0x80 # return default result in fp0
+ rts
+
+#
+# UNFL is enabled.
+#
+fdiv_unfl_ena:
+ fmovm.x FP_SCR1(%a6),&0x40 # load dst op
+
+ mov.l L_SCR3(%a6),%d1
+ andi.b &0xc0,%d1 # is precision extended?
+ bne.b fdiv_unfl_ena_sd # no, sgl or dbl
+
+ fmov.l L_SCR3(%a6),%fpcr # set FPCR
+
+fdiv_unfl_ena_cont:
+ fmov.l &0x0,%fpsr # clear FPSR
+
+ fdiv.x FP_SCR0(%a6),%fp1 # execute divide
+
+ fmov.l &0x0,%fpcr # clear FPCR
+
+ fmovm.x &0x40,FP_SCR0(%a6) # save result to stack
+ mov.l %d2,-(%sp) # save d2
+ mov.w FP_SCR0_EX(%a6),%d1 # fetch {sgn,exp}
+ mov.l %d1,%d2 # make a copy
+ andi.l &0x7fff,%d1 # strip sign
+ andi.w &0x8000,%d2 # keep old sign
+ sub.l %d0,%d1 # subtract scale factor
+ addi.l &0x6000,%d1 # add bias
+ andi.w &0x7fff,%d1
+ or.w %d2,%d1 # concat old sign,new exp
+ mov.w %d1,FP_SCR0_EX(%a6) # insert new exp
+ mov.l (%sp)+,%d2 # restore d2
+ fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
+ bra.w fdiv_unfl_dis
+
+fdiv_unfl_ena_sd:
+ mov.l L_SCR3(%a6),%d1
+ andi.b &0x30,%d1 # use only rnd mode
+ fmov.l %d1,%fpcr # set FPCR
+
+ bra.b fdiv_unfl_ena_cont
+
+#
+# the divide operation MAY underflow:
+#
+fdiv_may_unfl:
+ fmovm.x FP_SCR1(%a6),&0x80 # load dst op
+
+ fmov.l L_SCR3(%a6),%fpcr # set FPCR
+ fmov.l &0x0,%fpsr # clear FPSR
+
+ fdiv.x FP_SCR0(%a6),%fp0 # execute divide
+
+ fmov.l %fpsr,%d1 # save status
+ fmov.l &0x0,%fpcr # clear FPCR
+
+ or.l %d1,USER_FPSR(%a6) # save INEX2,N
+
+ fabs.x %fp0,%fp1 # make a copy of result
+ fcmp.b %fp1,&0x1 # is |result| > 1.b?
+ fbgt.w fdiv_normal_exit # no; no underflow occurred
+ fblt.w fdiv_unfl # yes; underflow occurred
+
+#
+# we still don't know if underflow occurred. result is ~ equal to 1. but,
+# we don't know if the result was an underflow that rounded up to a 1
+# or a normalized number that rounded down to a 1. so, redo the entire
+# operation using RZ as the rounding mode to see what the pre-rounded
+# result is. this case should be relatively rare.
+#
+ fmovm.x FP_SCR1(%a6),&0x40 # load dst op into fp1
+
+ mov.l L_SCR3(%a6),%d1
+ andi.b &0xc0,%d1 # keep rnd prec
+ ori.b &rz_mode*0x10,%d1 # insert RZ
+
+ fmov.l %d1,%fpcr # set FPCR
+ fmov.l &0x0,%fpsr # clear FPSR
+
+ fdiv.x FP_SCR0(%a6),%fp1 # execute divide
+
+ fmov.l &0x0,%fpcr # clear FPCR
+ fabs.x %fp1 # make absolute value
+ fcmp.b %fp1,&0x1 # is |result| < 1.b?
+ fbge.w fdiv_normal_exit # no; no underflow occurred
+ bra.w fdiv_unfl # yes; underflow occurred
+
+############################################################################
+
+#
+# Divide: inputs are not both normalized; what are they?
+#
+fdiv_not_norm:
+ mov.w (tbl_fdiv_op.b,%pc,%d1.w*2),%d1
+ jmp (tbl_fdiv_op.b,%pc,%d1.w*1)
+
+ swbeg &48
+tbl_fdiv_op:
+ short fdiv_norm - tbl_fdiv_op # NORM / NORM
+ short fdiv_inf_load - tbl_fdiv_op # NORM / ZERO
+ short fdiv_zero_load - tbl_fdiv_op # NORM / INF
+ short fdiv_res_qnan - tbl_fdiv_op # NORM / QNAN
+ short fdiv_norm - tbl_fdiv_op # NORM / DENORM
+ short fdiv_res_snan - tbl_fdiv_op # NORM / SNAN
+ short tbl_fdiv_op - tbl_fdiv_op #
+ short tbl_fdiv_op - tbl_fdiv_op #
+
+ short fdiv_zero_load - tbl_fdiv_op # ZERO / NORM
+ short fdiv_res_operr - tbl_fdiv_op # ZERO / ZERO
+ short fdiv_zero_load - tbl_fdiv_op # ZERO / INF
+ short fdiv_res_qnan - tbl_fdiv_op # ZERO / QNAN
+ short fdiv_zero_load - tbl_fdiv_op # ZERO / DENORM
+ short fdiv_res_snan - tbl_fdiv_op # ZERO / SNAN
+ short tbl_fdiv_op - tbl_fdiv_op #
+ short tbl_fdiv_op - tbl_fdiv_op #
+
+ short fdiv_inf_dst - tbl_fdiv_op # INF / NORM
+ short fdiv_inf_dst - tbl_fdiv_op # INF / ZERO
+ short fdiv_res_operr - tbl_fdiv_op # INF / INF
+ short fdiv_res_qnan - tbl_fdiv_op # INF / QNAN
+ short fdiv_inf_dst - tbl_fdiv_op # INF / DENORM
+ short fdiv_res_snan - tbl_fdiv_op # INF / SNAN
+ short tbl_fdiv_op - tbl_fdiv_op #
+ short tbl_fdiv_op - tbl_fdiv_op #
+
+ short fdiv_res_qnan - tbl_fdiv_op # QNAN / NORM
+ short fdiv_res_qnan - tbl_fdiv_op # QNAN / ZERO
+ short fdiv_res_qnan - tbl_fdiv_op # QNAN / INF
+ short fdiv_res_qnan - tbl_fdiv_op # QNAN / QNAN
+ short fdiv_res_qnan - tbl_fdiv_op # QNAN / DENORM
+ short fdiv_res_snan - tbl_fdiv_op # QNAN / SNAN
+ short tbl_fdiv_op - tbl_fdiv_op #
+ short tbl_fdiv_op - tbl_fdiv_op #
+
+ short fdiv_norm - tbl_fdiv_op # DENORM / NORM
+ short fdiv_inf_load - tbl_fdiv_op # DENORM / ZERO
+ short fdiv_zero_load - tbl_fdiv_op # DENORM / INF
+ short fdiv_res_qnan - tbl_fdiv_op # DENORM / QNAN
+ short fdiv_norm - tbl_fdiv_op # DENORM / DENORM
+ short fdiv_res_snan - tbl_fdiv_op # DENORM / SNAN
+ short tbl_fdiv_op - tbl_fdiv_op #
+ short tbl_fdiv_op - tbl_fdiv_op #
+
+ short fdiv_res_snan - tbl_fdiv_op # SNAN / NORM
+ short fdiv_res_snan - tbl_fdiv_op # SNAN / ZERO
+ short fdiv_res_snan - tbl_fdiv_op # SNAN / INF
+ short fdiv_res_snan - tbl_fdiv_op # SNAN / QNAN
+ short fdiv_res_snan - tbl_fdiv_op # SNAN / DENORM
+ short fdiv_res_snan - tbl_fdiv_op # SNAN / SNAN
+ short tbl_fdiv_op - tbl_fdiv_op #
+ short tbl_fdiv_op - tbl_fdiv_op #
+
+fdiv_res_qnan:
+ bra.l res_qnan
+fdiv_res_snan:
+ bra.l res_snan
+fdiv_res_operr:
+ bra.l res_operr
+
+ global fdiv_zero_load # global for fsgldiv
+fdiv_zero_load:
+ mov.b SRC_EX(%a0),%d0 # result sign is exclusive
+ mov.b DST_EX(%a1),%d1 # or of input signs.
+ eor.b %d0,%d1
+ bpl.b fdiv_zero_load_p # result is positive
+ fmov.s &0x80000000,%fp0 # load a -ZERO
+ mov.b &z_bmask+neg_bmask,FPSR_CC(%a6) # set Z/N
+ rts
+fdiv_zero_load_p:
+ fmov.s &0x00000000,%fp0 # load a +ZERO
+ mov.b &z_bmask,FPSR_CC(%a6) # set Z
+ rts
+
+#
+# The destination was In Range and the source was a ZERO. The result,
+# therefore, is an INF w/ the proper sign.
+# So, determine the sign and return a new INF (w/ the j-bit cleared).
+#
+ global fdiv_inf_load # global for fsgldiv
+fdiv_inf_load:
+ ori.w &dz_mask+adz_mask,2+USER_FPSR(%a6) # set DZ/ADZ
+ mov.b SRC_EX(%a0),%d0 # load both signs
+ mov.b DST_EX(%a1),%d1
+ eor.b %d0,%d1
+ bpl.b fdiv_inf_load_p # result is positive
+ fmov.s &0xff800000,%fp0 # make result -INF
+ mov.b &inf_bmask+neg_bmask,FPSR_CC(%a6) # set INF/N
+ rts
+fdiv_inf_load_p:
+ fmov.s &0x7f800000,%fp0 # make result +INF
+ mov.b &inf_bmask,FPSR_CC(%a6) # set INF
+ rts
+
+#
+# The destination was an INF w/ an In Range or ZERO source; the result
+# is an INF w/ the proper sign.
+# The 68881/882 returns the destination INF w/ the new sign (if the j-bit of the
+# dst INF is set, then the j-bit of the result INF is also set).
+#
+ global fdiv_inf_dst # global for fsgldiv
+fdiv_inf_dst:
+ mov.b DST_EX(%a1),%d0 # load both signs
+ mov.b SRC_EX(%a0),%d1
+ eor.b %d0,%d1
+ bpl.b fdiv_inf_dst_p # result is positive
+
+ fmovm.x DST(%a1),&0x80 # return result in fp0
+ fabs.x %fp0 # clear sign bit
+ fneg.x %fp0 # set sign bit
+ mov.b &inf_bmask+neg_bmask,FPSR_CC(%a6) # set INF/NEG
+ rts
+
+fdiv_inf_dst_p:
+ fmovm.x DST(%a1),&0x80 # return result in fp0
+ fabs.x %fp0 # return positive INF
+ mov.b &inf_bmask,FPSR_CC(%a6) # set INF
+ rts
+
+#########################################################################
+# XDEF **************************************************************** #
+# fneg(): emulates the fneg instruction #
+# fsneg(): emulates the fsneg instruction #
+# fdneg(): emulates the fdneg instruction #
+# #
+# XREF **************************************************************** #
+# norm() - normalize a denorm to provide EXOP #
+# scale_to_zero_src() - scale sgl/dbl source exponent #
+# ovf_res() - return default overflow result #
+# unf_res() - return default underflow result #
+# res_qnan_1op() - return QNAN result #
+# res_snan_1op() - return SNAN result #
+# #
+# INPUT *************************************************************** #
+# a0 = pointer to extended precision source operand #
+# d0 = rnd prec,mode #
+# #
+# OUTPUT ************************************************************** #
+# fp0 = result #
+# fp1 = EXOP (if exception occurred) #
+# #
+# ALGORITHM *********************************************************** #
+# Handle NANs, zeroes, and infinities as special cases. Separate #
+# norms/denorms into ext/sgl/dbl precisions. Extended precision can be #
+# emulated by simply flipping the sign bit. Sgl/dbl operands must be scaled #
+# and an actual fneg performed to see if overflow/underflow would have #
+# occurred. If so, return default underflow/overflow result. Else, #
+# scale the result exponent and return result. FPSR gets set based on #
+# the result value. #
+# #
+#########################################################################
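+# Example of why a sign change can still overflow: the extended
+# source operand may lie outside the selected rounding precision, so
+# an operand with biased exponent 0x4100 negated at single precision
+# exceeds the single maximum 0x407e and must produce the default
+# overflow result (plus an EXOP if the exception is enabled).
+#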
+
+ global fsneg
+fsneg:
+ andi.b &0x30,%d0 # clear rnd prec
+ ori.b &s_mode*0x10,%d0 # insert sgl precision
+ bra.b fneg
+
+ global fdneg
+fdneg:
+ andi.b &0x30,%d0 # clear rnd prec
+ ori.b &d_mode*0x10,%d0 # insert dbl prec
+
+ global fneg
+fneg:
+ mov.l %d0,L_SCR3(%a6) # store rnd info
+ mov.b STAG(%a6),%d1
+ bne.w fneg_not_norm # optimize on non-norm input
+
+#
+# NEGATE SIGN : norms and denorms ONLY!
+#
+fneg_norm:
+ andi.b &0xc0,%d0 # is precision extended?
+ bne.w fneg_not_ext # no; go handle sgl or dbl
+
+#
+# precision selected is extended. so...we cannot get an underflow
+# or overflow because of rounding to the correct precision. so...
+# skip the scaling and unscaling...
+#
+ mov.l SRC_HI(%a0),FP_SCR0_HI(%a6)
+ mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
+ mov.w SRC_EX(%a0),%d0
+ eori.w &0x8000,%d0 # negate sign
+ bpl.b fneg_norm_load # sign is positive
+ mov.b &neg_bmask,FPSR_CC(%a6) # set 'N' ccode bit
+fneg_norm_load:
+ mov.w %d0,FP_SCR0_EX(%a6)
+ fmovm.x FP_SCR0(%a6),&0x80 # return result in fp0
+ rts
+
+#
+# for an extended precision DENORM, the UNFL exception bit is set;
+# the accrued bit is NOT set in this instance (no inexactness!)
+#
+fneg_denorm:
+ andi.b &0xc0,%d0 # is precision extended?
+ bne.b fneg_not_ext # no; go handle sgl or dbl
+
+ bset &unfl_bit,FPSR_EXCEPT(%a6) # set unfl exc bit
+
+ mov.l SRC_HI(%a0),FP_SCR0_HI(%a6)
+ mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
+ mov.w SRC_EX(%a0),%d0
+ eori.w &0x8000,%d0 # negate sign
+ bpl.b fneg_denorm_done # no
+ mov.b &neg_bmask,FPSR_CC(%a6) # yes, set 'N' ccode bit
+fneg_denorm_done:
+ mov.w %d0,FP_SCR0_EX(%a6)
+ fmovm.x FP_SCR0(%a6),&0x80 # return default result in fp0
+
+ btst &unfl_bit,FPCR_ENABLE(%a6) # is UNFL enabled?
+ bne.b fneg_ext_unfl_ena # yes
+ rts
+
+#
+# the input is an extended DENORM and underflow is enabled in the FPCR.
+# normalize the mantissa and add the bias of 0x6000 to the resulting negative
+# exponent and insert back into the operand.
+#
+fneg_ext_unfl_ena:
+ lea FP_SCR0(%a6),%a0 # pass: ptr to operand
+ bsr.l norm # normalize result
+ neg.w %d0 # new exponent = -(shft val)
+ addi.w &0x6000,%d0 # add new bias to exponent
+ mov.w FP_SCR0_EX(%a6),%d1 # fetch old sign,exp
+ andi.w &0x8000,%d1 # keep old sign
+ andi.w &0x7fff,%d0 # clear sign position
+ or.w %d1,%d0 # concat old sign, new exponent
+ mov.w %d0,FP_SCR0_EX(%a6) # insert new exponent
+ fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
+ rts
+
+#
+# operand is either single or double
+#
+fneg_not_ext:
+ cmpi.b %d0,&s_mode*0x10 # separate sgl/dbl prec
+ bne.b fneg_dbl
+
+#
+# operand is to be rounded to single precision
+#
+fneg_sgl:
+ mov.w SRC_EX(%a0),FP_SCR0_EX(%a6)
+ mov.l SRC_HI(%a0),FP_SCR0_HI(%a6)
+ mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
+ bsr.l scale_to_zero_src # calculate scale factor
+
+ cmpi.l %d0,&0x3fff-0x3f80 # will move in underflow?
+ bge.w fneg_sd_unfl # yes; go handle underflow
+ cmpi.l %d0,&0x3fff-0x407e # will move in overflow?
+ beq.w fneg_sd_may_ovfl # maybe; go check
+ blt.w fneg_sd_ovfl # yes; go handle overflow
+
+#
+# operand will NOT overflow or underflow when moved into the fp reg file
+#
+fneg_sd_normal:
+ fmov.l &0x0,%fpsr # clear FPSR
+ fmov.l L_SCR3(%a6),%fpcr # set FPCR
+
+ fneg.x FP_SCR0(%a6),%fp0 # perform negation
+
+ fmov.l %fpsr,%d1 # save FPSR
+ fmov.l &0x0,%fpcr # clear FPCR
+
+ or.l %d1,USER_FPSR(%a6) # save INEX2,N
+
+fneg_sd_normal_exit:
+ mov.l %d2,-(%sp) # save d2
+ fmovm.x &0x80,FP_SCR0(%a6) # store out result
+ mov.w FP_SCR0_EX(%a6),%d1 # load sgn,exp
+ mov.w %d1,%d2 # make a copy
+ andi.l &0x7fff,%d1 # strip sign
+ sub.l %d0,%d1 # subtract scale factor
+ andi.w &0x8000,%d2 # keep old sign
+ or.w %d1,%d2 # concat old sign,new exp
+ mov.w %d2,FP_SCR0_EX(%a6) # insert new exponent
+ mov.l (%sp)+,%d2 # restore d2
+ fmovm.x FP_SCR0(%a6),&0x80 # return result in fp0
+ rts
+
+#
+# operand is to be rounded to double precision
+#
+fneg_dbl:
+ mov.w SRC_EX(%a0),FP_SCR0_EX(%a6)
+ mov.l SRC_HI(%a0),FP_SCR0_HI(%a6)
+ mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
+ bsr.l scale_to_zero_src # calculate scale factor
+
+ cmpi.l %d0,&0x3fff-0x3c00 # will move in underflow?
+ bge.b fneg_sd_unfl # yes; go handle underflow
+ cmpi.l %d0,&0x3fff-0x43fe # will move in overflow?
+ beq.w fneg_sd_may_ovfl # maybe; go check
+ blt.w fneg_sd_ovfl # yes; go handle overflow
+ bra.w fneg_sd_normal # no; go handle normalized op
+
+#
+# operand WILL underflow when moved into the fp register file
+#
+fneg_sd_unfl:
+ bset &unfl_bit,FPSR_EXCEPT(%a6) # set unfl exc bit
+
+ eori.b &0x80,FP_SCR0_EX(%a6) # negate sign
+ bpl.b fneg_sd_unfl_tst
+ bset &neg_bit,FPSR_CC(%a6) # set 'N' ccode bit
+
+# if underflow or inexact is enabled, go calculate EXOP first.
+fneg_sd_unfl_tst:
+ mov.b FPCR_ENABLE(%a6),%d1
+ andi.b &0x0b,%d1 # is UNFL or INEX enabled?
+ bne.b fneg_sd_unfl_ena # yes
+
+fneg_sd_unfl_dis:
+ lea FP_SCR0(%a6),%a0 # pass: result addr
+ mov.l L_SCR3(%a6),%d1 # pass: rnd prec,mode
+ bsr.l unf_res # calculate default result
+ or.b %d0,FPSR_CC(%a6) # unf_res may have set 'Z'
+ fmovm.x FP_SCR0(%a6),&0x80 # return default result in fp0
+ rts
+
+#
+# operand will underflow AND underflow or inexact is enabled.
+# therefore, we must return the result rounded to extended precision.
+#
+fneg_sd_unfl_ena:
+ mov.l FP_SCR0_HI(%a6),FP_SCR1_HI(%a6)
+ mov.l FP_SCR0_LO(%a6),FP_SCR1_LO(%a6)
+ mov.w FP_SCR0_EX(%a6),%d1 # load current exponent
+
+ mov.l %d2,-(%sp) # save d2
+ mov.l %d1,%d2 # make a copy
+ andi.l &0x7fff,%d1 # strip sign
+ andi.w &0x8000,%d2 # keep old sign
+ sub.l %d0,%d1 # subtract scale factor
+ addi.l &0x6000,%d1 # add new bias
+ andi.w &0x7fff,%d1
+ or.w %d2,%d1 # concat old sign,new exp
+ mov.w %d1,FP_SCR1_EX(%a6) # insert new exp
+ fmovm.x FP_SCR1(%a6),&0x40 # return EXOP in fp1
+ mov.l (%sp)+,%d2 # restore d2
+ bra.b fneg_sd_unfl_dis
+
+#
+# operand WILL overflow.
+#
+fneg_sd_ovfl:
+ fmov.l &0x0,%fpsr # clear FPSR
+ fmov.l L_SCR3(%a6),%fpcr # set FPCR
+
+ fneg.x FP_SCR0(%a6),%fp0 # perform negation
+
+ fmov.l &0x0,%fpcr # clear FPCR
+ fmov.l %fpsr,%d1 # save FPSR
+
+ or.l %d1,USER_FPSR(%a6) # save INEX2,N
+
+fneg_sd_ovfl_tst:
+ or.l &ovfl_inx_mask,USER_FPSR(%a6) # set ovfl/aovfl/ainex
+
+ mov.b FPCR_ENABLE(%a6),%d1
+ andi.b &0x13,%d1 # is OVFL or INEX enabled?
+ bne.b fneg_sd_ovfl_ena # yes
+
+#
+# OVFL is not enabled; therefore, we must create the default result by
+# calling ovf_res().
+#
+fneg_sd_ovfl_dis:
+ btst &neg_bit,FPSR_CC(%a6) # is result negative?
+ sne %d1 # set sign param accordingly
+ mov.l L_SCR3(%a6),%d0 # pass: prec,mode
+ bsr.l ovf_res # calculate default result
+ or.b %d0,FPSR_CC(%a6) # set INF,N if applicable
+ fmovm.x (%a0),&0x80 # return default result in fp0
+ rts
+
+#
+# OVFL is enabled.
+# the INEX2 bit has already been updated by the round to the correct precision.
+# now, round to extended(and don't alter the FPSR).
+#
+fneg_sd_ovfl_ena:
+ mov.l %d2,-(%sp) # save d2
+ mov.w FP_SCR0_EX(%a6),%d1 # fetch {sgn,exp}
+ mov.l %d1,%d2 # make a copy
+ andi.l &0x7fff,%d1 # strip sign
+ andi.w &0x8000,%d2 # keep old sign
+ sub.l %d0,%d1 # subtract scale factor
+ subi.l &0x6000,%d1 # subtract bias
+ andi.w &0x7fff,%d1
+ or.w %d2,%d1 # concat sign,exp
+ mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent
+ fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
+ mov.l (%sp)+,%d2 # restore d2
+ bra.b fneg_sd_ovfl_dis
+
+#
+# the move in MAY overflow. so...
+#
+fneg_sd_may_ovfl:
+ fmov.l &0x0,%fpsr # clear FPSR
+ fmov.l L_SCR3(%a6),%fpcr # set FPCR
+
+ fneg.x FP_SCR0(%a6),%fp0 # perform negation
+
+ fmov.l %fpsr,%d1 # save status
+ fmov.l &0x0,%fpcr # clear FPCR
+
+ or.l %d1,USER_FPSR(%a6) # save INEX2,N
+
+ fabs.x %fp0,%fp1 # make a copy of result
+ fcmp.b %fp1,&0x2 # is |result| >= 2.b?
+ fbge.w fneg_sd_ovfl_tst # yes; overflow has occurred
+
+# no, it didn't overflow; we have correct result
+ bra.w fneg_sd_normal_exit
+
+##########################################################################
+
+#
+# input is not normalized; what is it?
+#
+fneg_not_norm:
+ cmpi.b %d1,&DENORM # weed out DENORM
+ beq.w fneg_denorm
+ cmpi.b %d1,&SNAN # weed out SNAN
+ beq.l res_snan_1op
+ cmpi.b %d1,&QNAN # weed out QNAN
+ beq.l res_qnan_1op
+
+#
+# do the fneg; at this point, only possible ops are ZERO and INF.
+# use fneg to determine ccodes.
+# prec:mode should be zero at this point but it won't affect answer anyways.
+#
+ fneg.x SRC_EX(%a0),%fp0 # do fneg
+ fmov.l %fpsr,%d0
+ rol.l &0x8,%d0 # put ccodes in lo byte
+ mov.b %d0,FPSR_CC(%a6) # insert correct ccodes
+ rts
+
+#########################################################################
+# XDEF **************************************************************** #
+# ftst(): emulates the ftst instruction #
+# #
+# XREF **************************************************************** #
+# res{s,q}nan_1op() - set NAN result for monadic instruction #
+# #
+# INPUT *************************************************************** #
+# a0 = pointer to extended precision source operand #
+# #
+# OUTPUT ************************************************************** #
+# none #
+# #
+# ALGORITHM *********************************************************** #
+# Check the source operand tag (STAG) and set the FPSR condition #
+# codes according to the operand type and sign. #
+# #
+#########################################################################
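+# For example, ftst of -0.0 sets both 'Z' and 'N' (ftst_zero_m below),
+# while ftst of +INF sets only 'I'.
+#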
+
+ global ftst
+ftst:
+ mov.b STAG(%a6),%d1
+ bne.b ftst_not_norm # optimize on non-norm input
+
+#
+# Norm:
+#
+ftst_norm:
+ tst.b SRC_EX(%a0) # is operand negative?
+ bmi.b ftst_norm_m # yes
+ rts
+ftst_norm_m:
+ mov.b &neg_bmask,FPSR_CC(%a6) # set 'N' ccode bit
+ rts
+
+#
+# input is not normalized; what is it?
+#
+ftst_not_norm:
+ cmpi.b %d1,&ZERO # weed out ZERO
+ beq.b ftst_zero
+ cmpi.b %d1,&INF # weed out INF
+ beq.b ftst_inf
+ cmpi.b %d1,&SNAN # weed out SNAN
+ beq.l res_snan_1op
+ cmpi.b %d1,&QNAN # weed out QNAN
+ beq.l res_qnan_1op
+
+#
+# Denorm:
+#
+ftst_denorm:
+ tst.b SRC_EX(%a0) # is operand negative?
+ bmi.b ftst_denorm_m # yes
+ rts
+ftst_denorm_m:
+ mov.b &neg_bmask,FPSR_CC(%a6) # set 'N' ccode bit
+ rts
+
+#
+# Infinity:
+#
+ftst_inf:
+ tst.b SRC_EX(%a0) # is operand negative?
+ bmi.b ftst_inf_m # yes
+ftst_inf_p:
+ mov.b &inf_bmask,FPSR_CC(%a6) # set 'I' ccode bit
+ rts
+ftst_inf_m:
+ mov.b &inf_bmask+neg_bmask,FPSR_CC(%a6) # set 'I','N' ccode bits
+ rts
+
+#
+# Zero:
+#
+ftst_zero:
+ tst.b SRC_EX(%a0) # is operand negative?
+ bmi.b ftst_zero_m # yes
+ftst_zero_p:
+ mov.b &z_bmask,FPSR_CC(%a6) # set 'Z' ccode bit
+ rts
+ftst_zero_m:
+ mov.b &z_bmask+neg_bmask,FPSR_CC(%a6) # set 'Z','N' ccode bits
+ rts
+
+#########################################################################
+# XDEF **************************************************************** #
+# fint(): emulates the fint instruction #
+# #
+# XREF **************************************************************** #
+# res_{s,q}nan_1op() - set NAN result for monadic operation #
+# #
+# INPUT *************************************************************** #
+# a0 = pointer to extended precision source operand #
+# d0 = round precision/mode #
+# #
+# OUTPUT ************************************************************** #
+# fp0 = result #
+# #
+# ALGORITHM *********************************************************** #
+# Separate according to operand type. Unnorms don't pass through #
+# here. For norms, load the rounding mode/prec, execute a "fint", then #
+# store the resulting FPSR bits. #
+# For denorms, force the j-bit to a one and do the same as for #
+# norms. Denorms are so low that the answer will either be a zero or a #
+# one. #
+# For zeroes/infs/NANs, return the same while setting the FPSR #
+# as appropriate. #
+# #
+#########################################################################
+
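+# Example: under round-to-nearest, fint(2.5) returns 2.0 (ties to
+# even); under round-to-zero, fint(-1.7) returns -1.0, matching what
+# fintrz always produces.
+#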
+ global fint
+fint:
+ mov.b STAG(%a6),%d1
+ bne.b fint_not_norm # optimize on non-norm input
+
+#
+# Norm:
+#
+fint_norm:
+ andi.b &0x30,%d0 # set prec = ext
+
+ fmov.l %d0,%fpcr # set FPCR
+ fmov.l &0x0,%fpsr # clear FPSR
+
+ fint.x SRC(%a0),%fp0 # execute fint
+
+ fmov.l &0x0,%fpcr # clear FPCR
+ fmov.l %fpsr,%d0 # save FPSR
+ or.l %d0,USER_FPSR(%a6) # set exception bits
+
+ rts
+
+#
+# input is not normalized; what is it?
+#
+fint_not_norm:
+ cmpi.b %d1,&ZERO # weed out ZERO
+ beq.b fint_zero
+ cmpi.b %d1,&INF # weed out INF
+ beq.b fint_inf
+ cmpi.b %d1,&DENORM # weed out DENORM
+ beq.b fint_denorm
+ cmpi.b %d1,&SNAN # weed out SNAN
+ beq.l res_snan_1op
+ bra.l res_qnan_1op # weed out QNAN
+
+#
+# Denorm:
+#
+# for DENORMs, the result will be either (+/-)ZERO or (+/-)1.
+# also, the INEX2 and AINEX exception bits will be set.
+# so, we could either set these manually or force the DENORM
+# to a very small NORM and ship it to the NORM routine.
+# I do the latter.
+#
+fint_denorm:
+ mov.w SRC_EX(%a0),FP_SCR0_EX(%a6) # copy sign, zero exp
+ mov.b &0x80,FP_SCR0_HI(%a6) # force DENORM ==> small NORM
+ lea FP_SCR0(%a6),%a0
+ bra.b fint_norm
+
+#
+# Zero:
+#
+fint_zero:
+ tst.b SRC_EX(%a0) # is ZERO negative?
+ bmi.b fint_zero_m # yes
+fint_zero_p:
+ fmov.s &0x00000000,%fp0 # return +ZERO in fp0
+ mov.b &z_bmask,FPSR_CC(%a6) # set 'Z' ccode bit
+ rts
+fint_zero_m:
+ fmov.s &0x80000000,%fp0 # return -ZERO in fp0
+ mov.b &z_bmask+neg_bmask,FPSR_CC(%a6) # set 'Z','N' ccode bits
+ rts
+
+#
+# Infinity:
+#
+fint_inf:
+ fmovm.x SRC(%a0),&0x80 # return result in fp0
+ tst.b SRC_EX(%a0) # is INF negative?
+ bmi.b fint_inf_m # yes
+fint_inf_p:
+ mov.b &inf_bmask,FPSR_CC(%a6) # set 'I' ccode bit
+ rts
+fint_inf_m:
+ mov.b &inf_bmask+neg_bmask,FPSR_CC(%a6) # set 'N','I' ccode bits
+ rts
+
+#########################################################################
+# XDEF **************************************************************** #
+# fintrz(): emulates the fintrz instruction #
+# #
+# XREF **************************************************************** #
+# res_{s,q}nan_1op() - set NAN result for monadic operation #
+# #
+# INPUT *************************************************************** #
+# a0 = pointer to extended precision source operand #
+# d0 = round precision/mode #
+# #
+# OUTPUT ************************************************************** #
+# fp0 = result #
+# #
+# ALGORITHM *********************************************************** #
+# Separate according to operand type. Unnorms don't pass through #
+# here. For norms, load the rounding mode/prec, execute a "fintrz", #
+# then store the resulting FPSR bits. #
+# For denorms, force the j-bit to a one and do the same as for #
+# norms. Denorms are so low that the answer will either be a zero or a #
+# one. #
+# For zeroes/infs/NANs, return the same while setting the FPSR #
+# as appropriate. #
+# #
+#########################################################################
+
+ global fintrz
+fintrz:
+ mov.b STAG(%a6),%d1
+ bne.b fintrz_not_norm # optimize on non-norm input
+
+#
+# Norm:
+#
+fintrz_norm:
+ fmov.l &0x0,%fpsr # clear FPSR
+
+ fintrz.x SRC(%a0),%fp0 # execute fintrz
+
+ fmov.l %fpsr,%d0 # save FPSR
+ or.l %d0,USER_FPSR(%a6) # set exception bits
+
+ rts
+
+#
+# input is not normalized; what is it?
+#
+fintrz_not_norm:
+ cmpi.b %d1,&ZERO # weed out ZERO
+ beq.b fintrz_zero
+ cmpi.b %d1,&INF # weed out INF
+ beq.b fintrz_inf
+ cmpi.b %d1,&DENORM # weed out DENORM
+ beq.b fintrz_denorm
+ cmpi.b %d1,&SNAN # weed out SNAN
+ beq.l res_snan_1op
+ bra.l res_qnan_1op # weed out QNAN
+
+#
+# Denorm:
+#
+# for DENORMs, the result will be (+/-)ZERO.
+# also, the INEX2 and AINEX exception bits will be set.
+# so, we could either set these manually or force the DENORM
+# to a very small NORM and ship it to the NORM routine.
+# I do the latter.
+#
+fintrz_denorm:
+ mov.w SRC_EX(%a0),FP_SCR0_EX(%a6) # copy sign, zero exp
+ mov.b &0x80,FP_SCR0_HI(%a6) # force DENORM ==> small NORM
+ lea FP_SCR0(%a6),%a0
+ bra.b fintrz_norm
+
+#
+# Zero:
+#
+fintrz_zero:
+ tst.b SRC_EX(%a0) # is ZERO negative?
+ bmi.b fintrz_zero_m # yes
+fintrz_zero_p:
+ fmov.s &0x00000000,%fp0 # return +ZERO in fp0
+ mov.b &z_bmask,FPSR_CC(%a6) # set 'Z' ccode bit
+ rts
+fintrz_zero_m:
+ fmov.s &0x80000000,%fp0 # return -ZERO in fp0
+ mov.b &z_bmask+neg_bmask,FPSR_CC(%a6) # set 'Z','N' ccode bits
+ rts
+
+#
+# Infinity:
+#
+fintrz_inf:
+ fmovm.x SRC(%a0),&0x80 # return result in fp0
+ tst.b SRC_EX(%a0) # is INF negative?
+ bmi.b fintrz_inf_m # yes
+fintrz_inf_p:
+ mov.b &inf_bmask,FPSR_CC(%a6) # set 'I' ccode bit
+ rts
+fintrz_inf_m:
+ mov.b &inf_bmask+neg_bmask,FPSR_CC(%a6) # set 'N','I' ccode bits
+ rts
+
+#########################################################################
+# XDEF **************************************************************** #
+# fabs(): emulates the fabs instruction #
+# fsabs(): emulates the fsabs instruction #
+# fdabs(): emulates the fdabs instruction #
+# #
+# XREF **************************************************************** #
+# norm() - normalize denorm mantissa to provide EXOP #
+# scale_to_zero_src() - make exponent = 0; get scale factor #
+# unf_res() - calculate underflow result #
+# ovf_res() - calculate overflow result #
+# res_{s,q}nan_1op() - set NAN result for monadic operation #
+# #
+# INPUT *************************************************************** #
+# a0 = pointer to extended precision source operand #
+# d0 = rnd precision/mode #
+# #
+# OUTPUT ************************************************************** #
+# fp0 = result #
+# fp1 = EXOP (if exception occurred) #
+# #
+# ALGORITHM *********************************************************** #
+# Handle NANs, infinities, and zeroes as special cases. Divide #
+# norms into extended, single, and double precision. #
+# Simply clear sign for extended precision norm. Ext prec denorm #
+# gets an EXOP created for it since it's an underflow. #
+# Double and single precision can overflow and underflow. First, #
+# scale the operand such that the exponent is zero. Perform an "fabs" #
+# using the correct rnd mode/prec. Check to see if the original #
+# exponent would take an exception. If so, use unf_res() or ovf_res() #
+# to calculate the default result. Also, create the EXOP for the #
+# exceptional case. If no exception should occur, insert the correct #
+# result exponent and return. #
+# Unnorms don't pass through here. #
+# #
+#########################################################################
+
+ global fsabs
+fsabs:
+ andi.b &0x30,%d0 # clear rnd prec
+ ori.b &s_mode*0x10,%d0 # insert sgl precision
+ bra.b fabs
+
+ global fdabs
+fdabs:
+ andi.b &0x30,%d0 # clear rnd prec
+ ori.b &d_mode*0x10,%d0 # insert dbl precision
+
+ global fabs
+fabs:
+ mov.l %d0,L_SCR3(%a6) # store rnd info
+ mov.b STAG(%a6),%d1
+ bne.w fabs_not_norm # optimize on non-norm input
+
+#
+# ABSOLUTE VALUE: norms and denorms ONLY!
+#
+fabs_norm:
+ andi.b &0xc0,%d0 # is precision extended?
+ bne.b fabs_not_ext # no; go handle sgl or dbl
+
+#
+# precision selected is extended. so...we can not get an underflow
+# or overflow because of rounding to the correct precision. so...
+# skip the scaling and unscaling...
+#
+ mov.l SRC_HI(%a0),FP_SCR0_HI(%a6)
+ mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
+ mov.w SRC_EX(%a0),%d1
+ bclr &15,%d1 # force absolute value
+ mov.w %d1,FP_SCR0_EX(%a6) # insert exponent
+ fmovm.x FP_SCR0(%a6),&0x80 # return result in fp0
+ rts
+
+#
+# for an extended precision DENORM, the UNFL exception bit is set
+# the accrued bit is NOT set in this instance (no inexactness!)
+#
+fabs_denorm:
+ andi.b &0xc0,%d0 # is precision extended?
+ bne.b fabs_not_ext # no
+
+ bset &unfl_bit,FPSR_EXCEPT(%a6) # set unfl exc bit
+
+ mov.l SRC_HI(%a0),FP_SCR0_HI(%a6)
+ mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
+ mov.w SRC_EX(%a0),%d0
+ bclr &15,%d0 # clear sign
+ mov.w %d0,FP_SCR0_EX(%a6) # insert exponent
+
+ fmovm.x FP_SCR0(%a6),&0x80 # return default result in fp0
+
+ btst &unfl_bit,FPCR_ENABLE(%a6) # is UNFL enabled?
+ bne.b fabs_ext_unfl_ena
+ rts
+
+#
+# the input is an extended DENORM and underflow is enabled in the FPCR.
+# normalize the mantissa and add the bias of 0x6000 to the resulting negative
+# exponent and insert back into the operand.
+#
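+# (worked example, illustrative: if norm() returns a shift count of 0x10,
+#  the new exponent becomes -(0x10) + 0x6000 = 0x5ff0; 0x6000 is the FPSP
+#  exponent bias adjustment used when packaging an underflow EXOP.)
+#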
+fabs_ext_unfl_ena:
+ lea FP_SCR0(%a6),%a0 # pass: ptr to operand
+ bsr.l norm # normalize result
+ neg.w %d0 # new exponent = -(shft val)
+ addi.w &0x6000,%d0 # add new bias to exponent
+ mov.w FP_SCR0_EX(%a6),%d1 # fetch old sign,exp
+ andi.w &0x8000,%d1 # keep old sign
+ andi.w &0x7fff,%d0 # clear sign position
+ or.w %d1,%d0 # concat old sign, new exponent
+ mov.w %d0,FP_SCR0_EX(%a6) # insert new exponent
+ fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
+ rts
+
+#
+# operand is either single or double
+#
+fabs_not_ext:
+ cmpi.b %d0,&s_mode*0x10 # separate sgl/dbl prec
+ bne.b fabs_dbl
+
+#
+# operand is to be rounded to single precision
+#
+fabs_sgl:
+ mov.w SRC_EX(%a0),FP_SCR0_EX(%a6)
+ mov.l SRC_HI(%a0),FP_SCR0_HI(%a6)
+ mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
+ bsr.l scale_to_zero_src # calculate scale factor
+
+ cmpi.l %d0,&0x3fff-0x3f80 # will move in underflow?
+ bge.w fabs_sd_unfl # yes; go handle underflow
+ cmpi.l %d0,&0x3fff-0x407e # will move in overflow?
+ beq.w fabs_sd_may_ovfl # maybe; go check
+ blt.w fabs_sd_ovfl # yes; go handle overflow
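+
+# a worked sketch of the sgl thresholds above (illustrative):
+# scale_to_zero_src() leaves d0 = 0x3fff - src_exp. sgl norms span biased
+# exponents 0x3f81...0x407e, so d0 >= 0x3fff-0x3f80 means src_exp <= 0x3f80
+# (underflow), d0 < 0x3fff-0x407e means src_exp > 0x407e (overflow), and
+# equality leaves src_exp = 0x407e, which may still round up to overflow.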
+
+#
+# operand will NOT overflow or underflow when moved in to the fp reg file
+#
+fabs_sd_normal:
+ fmov.l &0x0,%fpsr # clear FPSR
+ fmov.l L_SCR3(%a6),%fpcr # set FPCR
+
+ fabs.x FP_SCR0(%a6),%fp0 # perform absolute
+
+ fmov.l %fpsr,%d1 # save FPSR
+ fmov.l &0x0,%fpcr # clear FPCR
+
+ or.l %d1,USER_FPSR(%a6) # save INEX2,N
+
+fabs_sd_normal_exit:
+ mov.l %d2,-(%sp) # save d2
+ fmovm.x &0x80,FP_SCR0(%a6) # store out result
+ mov.w FP_SCR0_EX(%a6),%d1 # load sgn,exp
+ mov.l %d1,%d2 # make a copy
+ andi.l &0x7fff,%d1 # strip sign
+ sub.l %d0,%d1 # add scale factor
+ andi.w &0x8000,%d2 # keep old sign
+ or.w %d1,%d2 # concat old sign,new exp
+ mov.w %d2,FP_SCR0_EX(%a6) # insert new exponent
+ mov.l (%sp)+,%d2 # restore d2
+ fmovm.x FP_SCR0(%a6),&0x80 # return result in fp0
+ rts
+
+#
+# operand is to be rounded to double precision
+#
+fabs_dbl:
+ mov.w SRC_EX(%a0),FP_SCR0_EX(%a6)
+ mov.l SRC_HI(%a0),FP_SCR0_HI(%a6)
+ mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
+ bsr.l scale_to_zero_src # calculate scale factor
+
+ cmpi.l %d0,&0x3fff-0x3c00 # will move in underflow?
+ bge.b fabs_sd_unfl # yes; go handle underflow
+ cmpi.l %d0,&0x3fff-0x43fe # will move in overflow?
+ beq.w fabs_sd_may_ovfl # maybe; go check
+ blt.w fabs_sd_ovfl # yes; go handle overflow
+ bra.w fabs_sd_normal # no; go handle normalized op
+
+#
+# operand WILL underflow when moved in to the fp register file
+#
+fabs_sd_unfl:
+ bset &unfl_bit,FPSR_EXCEPT(%a6) # set unfl exc bit
+
+ bclr &0x7,FP_SCR0_EX(%a6) # force absolute value
+
+# if underflow or inexact is enabled, go calculate EXOP first.
+ mov.b FPCR_ENABLE(%a6),%d1
+ andi.b &0x0b,%d1 # is UNFL or INEX enabled?
+ bne.b fabs_sd_unfl_ena # yes
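+# (mask decode, per the FPCR ENABLE byte: bit 4 = OVFL, bit 3 = UNFL,
+#  bit 1 = INEX2, bit 0 = INEX1; so 0x0b tests UNFL|INEX2|INEX1, and the
+#  0x13 used on the overflow paths tests OVFL|INEX2|INEX1.)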
+
+fabs_sd_unfl_dis:
+ lea FP_SCR0(%a6),%a0 # pass: result addr
+ mov.l L_SCR3(%a6),%d1 # pass: rnd prec,mode
+ bsr.l unf_res # calculate default result
+ or.b %d0,FPSR_CC(%a6) # set possible 'Z' ccode
+ fmovm.x FP_SCR0(%a6),&0x80 # return default result in fp0
+ rts
+
+#
+# operand will underflow AND underflow is enabled.
+# therefore, we must return the result rounded to extended precision.
+#
+fabs_sd_unfl_ena:
+ mov.l FP_SCR0_HI(%a6),FP_SCR1_HI(%a6)
+ mov.l FP_SCR0_LO(%a6),FP_SCR1_LO(%a6)
+ mov.w FP_SCR0_EX(%a6),%d1 # load current exponent
+
+ mov.l %d2,-(%sp) # save d2
+ mov.l %d1,%d2 # make a copy
+ andi.l &0x7fff,%d1 # strip sign
+ andi.w &0x8000,%d2 # keep old sign
+ sub.l %d0,%d1 # subtract scale factor
+ addi.l &0x6000,%d1 # add new bias
+ andi.w &0x7fff,%d1
+ or.w %d2,%d1 # concat new sign,new exp
+ mov.w %d1,FP_SCR1_EX(%a6) # insert new exp
+ fmovm.x FP_SCR1(%a6),&0x40 # return EXOP in fp1
+ mov.l (%sp)+,%d2 # restore d2
+ bra.b fabs_sd_unfl_dis
+
+#
+# operand WILL overflow.
+#
+fabs_sd_ovfl:
+ fmov.l &0x0,%fpsr # clear FPSR
+ fmov.l L_SCR3(%a6),%fpcr # set FPCR
+
+ fabs.x FP_SCR0(%a6),%fp0 # perform absolute
+
+ fmov.l &0x0,%fpcr # clear FPCR
+ fmov.l %fpsr,%d1 # save FPSR
+
+ or.l %d1,USER_FPSR(%a6) # save INEX2,N
+
+fabs_sd_ovfl_tst:
+ or.l &ovfl_inx_mask,USER_FPSR(%a6) # set ovfl/aovfl/ainex
+
+ mov.b FPCR_ENABLE(%a6),%d1
+ andi.b &0x13,%d1 # is OVFL or INEX enabled?
+ bne.b fabs_sd_ovfl_ena # yes
+
+#
+# OVFL is not enabled; therefore, we must create the default result by
+# calling ovf_res().
+#
+fabs_sd_ovfl_dis:
+ btst &neg_bit,FPSR_CC(%a6) # is result negative?
+ sne %d1 # set sign param accordingly
+ mov.l L_SCR3(%a6),%d0 # pass: prec,mode
+ bsr.l ovf_res # calculate default result
+ or.b %d0,FPSR_CC(%a6) # set INF,N if applicable
+ fmovm.x (%a0),&0x80 # return default result in fp0
+ rts
+
+#
+# OVFL is enabled.
+# the INEX2 bit has already been updated by the round to the correct precision.
+# now, round to extended(and don't alter the FPSR).
+#
+fabs_sd_ovfl_ena:
+ mov.l %d2,-(%sp) # save d2
+ mov.w FP_SCR0_EX(%a6),%d1 # fetch {sgn,exp}
+ mov.l %d1,%d2 # make a copy
+ andi.l &0x7fff,%d1 # strip sign
+ andi.w &0x8000,%d2 # keep old sign
+ sub.l %d0,%d1 # add scale factor
+ subi.l &0x6000,%d1 # subtract bias
+ andi.w &0x7fff,%d1
+ or.w %d2,%d1 # concat sign,exp
+ mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent
+ fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
+ mov.l (%sp)+,%d2 # restore d2
+ bra.b fabs_sd_ovfl_dis
+
+#
+# the move in MAY overflow. so...
+#
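+# (a sketch of the test below: the operand was scaled so its magnitude
+#  lies in [1.0, 2.0); after the rounded fabs, |result| >= 2.0 can only
+#  mean rounding bumped the exponent, i.e. an overflow at original scale.)
+#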
+fabs_sd_may_ovfl:
+ fmov.l &0x0,%fpsr # clear FPSR
+ fmov.l L_SCR3(%a6),%fpcr # set FPCR
+
+ fabs.x FP_SCR0(%a6),%fp0 # perform absolute
+
+ fmov.l %fpsr,%d1 # save status
+ fmov.l &0x0,%fpcr # clear FPCR
+
+ or.l %d1,USER_FPSR(%a6) # save INEX2,N
+
+ fabs.x %fp0,%fp1 # make a copy of result
+ fcmp.b %fp1,&0x2 # is |result| >= 2.b?
+ fbge.w fabs_sd_ovfl_tst # yes; overflow has occurred
+
+# no, it didn't overflow; we have correct result
+ bra.w fabs_sd_normal_exit
+
+##########################################################################
+
+#
+# input is not normalized; what is it?
+#
+fabs_not_norm:
+ cmpi.b %d1,&DENORM # weed out DENORM
+ beq.w fabs_denorm
+ cmpi.b %d1,&SNAN # weed out SNAN
+ beq.l res_snan_1op
+ cmpi.b %d1,&QNAN # weed out QNAN
+ beq.l res_qnan_1op
+
+ fabs.x SRC(%a0),%fp0 # force absolute value
+
+ cmpi.b %d1,&INF # weed out INF
+ beq.b fabs_inf
+fabs_zero:
+ mov.b &z_bmask,FPSR_CC(%a6) # set 'Z' ccode bit
+ rts
+fabs_inf:
+ mov.b &inf_bmask,FPSR_CC(%a6) # set 'I' ccode bit
+ rts
+
+#########################################################################
+# XDEF **************************************************************** #
+# fcmp(): fp compare op routine #
+# #
+# XREF **************************************************************** #
+# res_qnan() - return QNAN result #
+# res_snan() - return SNAN result #
+# #
+# INPUT *************************************************************** #
+# a0 = pointer to extended precision source operand #
+# a1 = pointer to extended precision destination operand #
+# d0 = round prec/mode #
+# #
+# OUTPUT ************************************************************** #
+# None #
+# #
+# ALGORITHM *********************************************************** #
+# Handle NANs and denorms as special cases. For everything else, #
+# just use the actual fcmp instruction to produce the correct condition #
+# codes. #
+# #
+#########################################################################
+
+ global fcmp
+fcmp:
+ clr.w %d1
+ mov.b DTAG(%a6),%d1
+ lsl.b &0x3,%d1
+ or.b STAG(%a6),%d1
+ bne.b fcmp_not_norm # optimize on non-norm input
+
+#
+# COMPARE FP OPs : NORMs, ZEROs, INFs, and "corrected" DENORMs
+#
+fcmp_norm:
+ fmovm.x DST(%a1),&0x80 # load dst op
+
+ fcmp.x %fp0,SRC(%a0) # do compare
+
+ fmov.l %fpsr,%d0 # save FPSR
+ rol.l &0x8,%d0 # extract ccode bits
+ mov.b %d0,FPSR_CC(%a6) # set ccode bits (no exc bits are set)
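+# (FPSR note: the ccodes occupy the top byte -- N=bit 27, Z=26, I=25,
+#  NAN=24 -- so the rol.l above rotates bits 31-24 down into bits 7-0,
+#  ready for the byte store into FPSR_CC.)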
+
+ rts
+
+#
+# fcmp: inputs are not both normalized; what are they?
+#
+fcmp_not_norm:
+ mov.w (tbl_fcmp_op.b,%pc,%d1.w*2),%d1
+ jmp (tbl_fcmp_op.b,%pc,%d1.w*1)
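+
+# (dispatch sketch: d1 = (DTAG << 3) | STAG selects one of 48 word entries;
+#  the mov.w fetches "handler - tbl_fcmp_op" and the jmp re-adds the table
+#  base, giving a position-independent two-level jump.)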
+
+ swbeg &48
+tbl_fcmp_op:
+ short fcmp_norm - tbl_fcmp_op # NORM - NORM
+ short fcmp_norm - tbl_fcmp_op # NORM - ZERO
+ short fcmp_norm - tbl_fcmp_op # NORM - INF
+ short fcmp_res_qnan - tbl_fcmp_op # NORM - QNAN
+ short fcmp_nrm_dnrm - tbl_fcmp_op # NORM - DENORM
+ short fcmp_res_snan - tbl_fcmp_op # NORM - SNAN
+ short tbl_fcmp_op - tbl_fcmp_op #
+ short tbl_fcmp_op - tbl_fcmp_op #
+
+ short fcmp_norm - tbl_fcmp_op # ZERO - NORM
+ short fcmp_norm - tbl_fcmp_op # ZERO - ZERO
+ short fcmp_norm - tbl_fcmp_op # ZERO - INF
+ short fcmp_res_qnan - tbl_fcmp_op # ZERO - QNAN
+ short fcmp_dnrm_s - tbl_fcmp_op # ZERO - DENORM
+ short fcmp_res_snan - tbl_fcmp_op # ZERO - SNAN
+ short tbl_fcmp_op - tbl_fcmp_op #
+ short tbl_fcmp_op - tbl_fcmp_op #
+
+ short fcmp_norm - tbl_fcmp_op # INF - NORM
+ short fcmp_norm - tbl_fcmp_op # INF - ZERO
+ short fcmp_norm - tbl_fcmp_op # INF - INF
+ short fcmp_res_qnan - tbl_fcmp_op # INF - QNAN
+ short fcmp_dnrm_s - tbl_fcmp_op # INF - DENORM
+ short fcmp_res_snan - tbl_fcmp_op # INF - SNAN
+ short tbl_fcmp_op - tbl_fcmp_op #
+ short tbl_fcmp_op - tbl_fcmp_op #
+
+ short fcmp_res_qnan - tbl_fcmp_op # QNAN - NORM
+ short fcmp_res_qnan - tbl_fcmp_op # QNAN - ZERO
+ short fcmp_res_qnan - tbl_fcmp_op # QNAN - INF
+ short fcmp_res_qnan - tbl_fcmp_op # QNAN - QNAN
+ short fcmp_res_qnan - tbl_fcmp_op # QNAN - DENORM
+ short fcmp_res_snan - tbl_fcmp_op # QNAN - SNAN
+ short tbl_fcmp_op - tbl_fcmp_op #
+ short tbl_fcmp_op - tbl_fcmp_op #
+
+ short fcmp_dnrm_nrm - tbl_fcmp_op # DENORM - NORM
+ short fcmp_dnrm_d - tbl_fcmp_op # DENORM - ZERO
+ short fcmp_dnrm_d - tbl_fcmp_op # DENORM - INF
+ short fcmp_res_qnan - tbl_fcmp_op # DENORM - QNAN
+ short fcmp_dnrm_sd - tbl_fcmp_op # DENORM - DENORM
+ short fcmp_res_snan - tbl_fcmp_op # DENORM - SNAN
+ short tbl_fcmp_op - tbl_fcmp_op #
+ short tbl_fcmp_op - tbl_fcmp_op #
+
+ short fcmp_res_snan - tbl_fcmp_op # SNAN - NORM
+ short fcmp_res_snan - tbl_fcmp_op # SNAN - ZERO
+ short fcmp_res_snan - tbl_fcmp_op # SNAN - INF
+ short fcmp_res_snan - tbl_fcmp_op # SNAN - QNAN
+ short fcmp_res_snan - tbl_fcmp_op # SNAN - DENORM
+ short fcmp_res_snan - tbl_fcmp_op # SNAN - SNAN
+ short tbl_fcmp_op - tbl_fcmp_op #
+ short tbl_fcmp_op - tbl_fcmp_op #
+
+# unlike all other functions for QNAN and SNAN, fcmp does NOT set the
+# 'N' bit for a negative QNAN or SNAN input so we must squelch it here.
+fcmp_res_qnan:
+ bsr.l res_qnan
+ andi.b &0xf7,FPSR_CC(%a6)
+ rts
+fcmp_res_snan:
+ bsr.l res_snan
+ andi.b &0xf7,FPSR_CC(%a6)
+ rts
+
+#
+# DENORMs are a little more difficult.
+# If you have two DENORMs, then you can just force each j-bit to a one
+# and use the fcmp_norm routine.
+# If you have a DENORM and an INF or ZERO, just force the DENORM's j-bit to a one
+# and use the fcmp_norm routine.
+# If you have a DENORM and a NORM with opposite signs, then use fcmp_norm, also.
+# But with a DENORM and a NORM of the same sign, the neg bit is set if the
+# (1) signs are (+) and the DENORM is the dst or
+# (2) signs are (-) and the DENORM is the src
+#
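+# (example, illustrative: dst = -NORM, src = -DENORM with like signs; the
+#  dst is the more negative value, so the compare result is negative and
+#  fcmp_nrm_dnrm below must set the 'N' bit by hand.)
+#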
+
+fcmp_dnrm_s:
+ mov.w SRC_EX(%a0),FP_SCR0_EX(%a6)
+ mov.l SRC_HI(%a0),%d0
+ bset &31,%d0 # DENORM src; make into small norm
+ mov.l %d0,FP_SCR0_HI(%a6)
+ mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
+ lea FP_SCR0(%a6),%a0
+ bra.w fcmp_norm
+
+fcmp_dnrm_d:
+ mov.l DST_EX(%a1),FP_SCR0_EX(%a6)
+ mov.l DST_HI(%a1),%d0
+ bset &31,%d0 # DENORM dst; make into small norm
+ mov.l %d0,FP_SCR0_HI(%a6)
+ mov.l DST_LO(%a1),FP_SCR0_LO(%a6)
+ lea FP_SCR0(%a6),%a1
+ bra.w fcmp_norm
+
+fcmp_dnrm_sd:
+ mov.w DST_EX(%a1),FP_SCR1_EX(%a6)
+ mov.w SRC_EX(%a0),FP_SCR0_EX(%a6)
+ mov.l DST_HI(%a1),%d0
+ bset &31,%d0 # DENORM dst; make into small norm
+ mov.l %d0,FP_SCR1_HI(%a6)
+ mov.l SRC_HI(%a0),%d0
+ bset &31,%d0 # DENORM src; make into small norm
+ mov.l %d0,FP_SCR0_HI(%a6)
+ mov.l DST_LO(%a1),FP_SCR1_LO(%a6)
+ mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
+ lea FP_SCR1(%a6),%a1
+ lea FP_SCR0(%a6),%a0
+ bra.w fcmp_norm
+
+fcmp_nrm_dnrm:
+ mov.b SRC_EX(%a0),%d0 # determine if like signs
+ mov.b DST_EX(%a1),%d1
+ eor.b %d0,%d1
+ bmi.w fcmp_dnrm_s
+
+# signs are the same, so must determine the answer ourselves.
+ tst.b %d0 # is src op negative?
+ bmi.b fcmp_nrm_dnrm_m # yes
+ rts
+fcmp_nrm_dnrm_m:
+ mov.b &neg_bmask,FPSR_CC(%a6) # set 'N' ccode bit
+ rts
+
+fcmp_dnrm_nrm:
+ mov.b SRC_EX(%a0),%d0 # determine if like signs
+ mov.b DST_EX(%a1),%d1
+ eor.b %d0,%d1
+ bmi.w fcmp_dnrm_d
+
+# signs are the same, so must determine the answer ourselves.
+ tst.b %d0 # is src op negative?
+ bpl.b fcmp_dnrm_nrm_m # no
+ rts
+fcmp_dnrm_nrm_m:
+ mov.b &neg_bmask,FPSR_CC(%a6) # set 'N' ccode bit
+ rts
+
+#########################################################################
+# XDEF **************************************************************** #
+# fsglmul(): emulates the fsglmul instruction #
+# #
+# XREF **************************************************************** #
+# scale_to_zero_src() - scale src exponent to zero #
+# scale_to_zero_dst() - scale dst exponent to zero #
+# unf_res4() - return default underflow result for sglop #
+# ovf_res() - return default overflow result #
+# res_qnan() - return QNAN result #
+# res_snan() - return SNAN result #
+# #
+# INPUT *************************************************************** #
+# a0 = pointer to extended precision source operand #
+# a1 = pointer to extended precision destination operand #
+# d0 = rnd prec,mode #
+# #
+# OUTPUT ************************************************************** #
+# fp0 = result #
+# fp1 = EXOP (if exception occurred) #
+# #
+# ALGORITHM *********************************************************** #
+# Handle NANs, infinities, and zeroes as special cases. Divide #
+# norms/denorms into ext/sgl/dbl precision. #
+# For norms/denorms, scale the exponents such that a multiply #
+# instruction won't cause an exception. Use the regular fsglmul to #
+# compute a result. Check if the regular operands would have taken #
+# an exception. If so, return the default overflow/underflow result #
+# and return the EXOP if exceptions are enabled. Else, scale the #
+# result operand to the proper exponent. #
+# #
+#########################################################################
+
+ global fsglmul
+fsglmul:
+ mov.l %d0,L_SCR3(%a6) # store rnd info
+
+ clr.w %d1
+ mov.b DTAG(%a6),%d1
+ lsl.b &0x3,%d1
+ or.b STAG(%a6),%d1
+
+ bne.w fsglmul_not_norm # optimize on non-norm input
+
+fsglmul_norm:
+ mov.w DST_EX(%a1),FP_SCR1_EX(%a6)
+ mov.l DST_HI(%a1),FP_SCR1_HI(%a6)
+ mov.l DST_LO(%a1),FP_SCR1_LO(%a6)
+
+ mov.w SRC_EX(%a0),FP_SCR0_EX(%a6)
+ mov.l SRC_HI(%a0),FP_SCR0_HI(%a6)
+ mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
+
+ bsr.l scale_to_zero_src # scale exponent
+ mov.l %d0,-(%sp) # save scale factor 1
+
+ bsr.l scale_to_zero_dst # scale dst exponent
+
+ add.l (%sp)+,%d0 # SCALE_FACTOR = scale1 + scale2
+
+ cmpi.l %d0,&0x3fff-0x7ffe # would result ovfl?
+ beq.w fsglmul_may_ovfl # result may rnd to overflow
+ blt.w fsglmul_ovfl # result will overflow
+
+ cmpi.l %d0,&0x3fff+0x0001 # would result unfl?
+ beq.w fsglmul_may_unfl # result may rnd to no unfl
+ bgt.w fsglmul_unfl # result will underflow
+
+fsglmul_normal:
+ fmovm.x FP_SCR1(%a6),&0x80 # load dst op
+
+ fmov.l L_SCR3(%a6),%fpcr # set FPCR
+ fmov.l &0x0,%fpsr # clear FPSR
+
+ fsglmul.x FP_SCR0(%a6),%fp0 # execute sgl multiply
+
+ fmov.l %fpsr,%d1 # save status
+ fmov.l &0x0,%fpcr # clear FPCR
+
+ or.l %d1,USER_FPSR(%a6) # save INEX2,N
+
+fsglmul_normal_exit:
+ fmovm.x &0x80,FP_SCR0(%a6) # store out result
+ mov.l %d2,-(%sp) # save d2
+ mov.w FP_SCR0_EX(%a6),%d1 # load {sgn,exp}
+ mov.l %d1,%d2 # make a copy
+ andi.l &0x7fff,%d1 # strip sign
+ andi.w &0x8000,%d2 # keep old sign
+ sub.l %d0,%d1 # add scale factor
+ or.w %d2,%d1 # concat old sign,new exp
+ mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent
+ mov.l (%sp)+,%d2 # restore d2
+ fmovm.x FP_SCR0(%a6),&0x80 # return result in fp0
+ rts
+
+fsglmul_ovfl:
+ fmovm.x FP_SCR1(%a6),&0x80 # load dst op
+
+ fmov.l L_SCR3(%a6),%fpcr # set FPCR
+ fmov.l &0x0,%fpsr # clear FPSR
+
+ fsglmul.x FP_SCR0(%a6),%fp0 # execute sgl multiply
+
+ fmov.l %fpsr,%d1 # save status
+ fmov.l &0x0,%fpcr # clear FPCR
+
+ or.l %d1,USER_FPSR(%a6) # save INEX2,N
+
+fsglmul_ovfl_tst:
+
+# save setting this until now because this is where fsglmul_may_ovfl may jump in
+ or.l &ovfl_inx_mask,USER_FPSR(%a6) # set ovfl/aovfl/ainex
+
+ mov.b FPCR_ENABLE(%a6),%d1
+ andi.b &0x13,%d1 # is OVFL or INEX enabled?
+ bne.b fsglmul_ovfl_ena # yes
+
+fsglmul_ovfl_dis:
+ btst &neg_bit,FPSR_CC(%a6) # is result negative?
+ sne %d1 # set sign param accordingly
+ mov.l L_SCR3(%a6),%d0 # pass prec:rnd
+ andi.b &0x30,%d0 # force prec = ext
+ bsr.l ovf_res # calculate default result
+ or.b %d0,FPSR_CC(%a6) # set INF,N if applicable
+ fmovm.x (%a0),&0x80 # return default result in fp0
+ rts
+
+fsglmul_ovfl_ena:
+ fmovm.x &0x80,FP_SCR0(%a6) # move result to stack
+
+ mov.l %d2,-(%sp) # save d2
+ mov.w FP_SCR0_EX(%a6),%d1 # fetch {sgn,exp}
+ mov.l %d1,%d2 # make a copy
+ andi.l &0x7fff,%d1 # strip sign
+ sub.l %d0,%d1 # add scale factor
+ subi.l &0x6000,%d1 # subtract bias
+ andi.w &0x7fff,%d1
+ andi.w &0x8000,%d2 # keep old sign
+ or.w %d2,%d1 # concat old sign,new exp
+ mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent
+ mov.l (%sp)+,%d2 # restore d2
+ fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
+ bra.b fsglmul_ovfl_dis
+
+fsglmul_may_ovfl:
+ fmovm.x FP_SCR1(%a6),&0x80 # load dst op
+
+ fmov.l L_SCR3(%a6),%fpcr # set FPCR
+ fmov.l &0x0,%fpsr # clear FPSR
+
+ fsglmul.x FP_SCR0(%a6),%fp0 # execute sgl multiply
+
+ fmov.l %fpsr,%d1 # save status
+ fmov.l &0x0,%fpcr # clear FPCR
+
+ or.l %d1,USER_FPSR(%a6) # save INEX2,N
+
+ fabs.x %fp0,%fp1 # make a copy of result
+ fcmp.b %fp1,&0x2 # is |result| >= 2.b?
+ fbge.w fsglmul_ovfl_tst # yes; overflow has occurred
+
+# no, it didn't overflow; we have correct result
+ bra.w fsglmul_normal_exit
+
+fsglmul_unfl:
+ bset &unfl_bit,FPSR_EXCEPT(%a6) # set unfl exc bit
+
+ fmovm.x FP_SCR1(%a6),&0x80 # load dst op
+
+ fmov.l &rz_mode*0x10,%fpcr # set FPCR
+ fmov.l &0x0,%fpsr # clear FPSR
+
+ fsglmul.x FP_SCR0(%a6),%fp0 # execute sgl multiply
+
+ fmov.l %fpsr,%d1 # save status
+ fmov.l &0x0,%fpcr # clear FPCR
+
+ or.l %d1,USER_FPSR(%a6) # save INEX2,N
+
+ mov.b FPCR_ENABLE(%a6),%d1
+ andi.b &0x0b,%d1 # is UNFL or INEX enabled?
+ bne.b fsglmul_unfl_ena # yes
+
+fsglmul_unfl_dis:
+ fmovm.x &0x80,FP_SCR0(%a6) # store out result
+
+ lea FP_SCR0(%a6),%a0 # pass: result addr
+ mov.l L_SCR3(%a6),%d1 # pass: rnd prec,mode
+ bsr.l unf_res4 # calculate default result
+ or.b %d0,FPSR_CC(%a6) # 'Z' bit may have been set
+ fmovm.x FP_SCR0(%a6),&0x80 # return default result in fp0
+ rts
+
+#
+# UNFL is enabled.
+#
+fsglmul_unfl_ena:
+ fmovm.x FP_SCR1(%a6),&0x40 # load dst op
+
+ fmov.l L_SCR3(%a6),%fpcr # set FPCR
+ fmov.l &0x0,%fpsr # clear FPSR
+
+ fsglmul.x FP_SCR0(%a6),%fp1 # execute sgl multiply
+
+ fmov.l &0x0,%fpcr # clear FPCR
+
+ fmovm.x &0x40,FP_SCR0(%a6) # save result to stack
+ mov.l %d2,-(%sp) # save d2
+ mov.w FP_SCR0_EX(%a6),%d1 # fetch {sgn,exp}
+ mov.l %d1,%d2 # make a copy
+ andi.l &0x7fff,%d1 # strip sign
+ andi.w &0x8000,%d2 # keep old sign
+ sub.l %d0,%d1 # add scale factor
+ addi.l &0x6000,%d1 # add bias
+ andi.w &0x7fff,%d1
+ or.w %d2,%d1 # concat old sign,new exp
+ mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent
+ mov.l (%sp)+,%d2 # restore d2
+ fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
+ bra.w fsglmul_unfl_dis
+
+fsglmul_may_unfl:
+ fmovm.x FP_SCR1(%a6),&0x80 # load dst op
+
+ fmov.l L_SCR3(%a6),%fpcr # set FPCR
+ fmov.l &0x0,%fpsr # clear FPSR
+
+ fsglmul.x FP_SCR0(%a6),%fp0 # execute sgl multiply
+
+ fmov.l %fpsr,%d1 # save status
+ fmov.l &0x0,%fpcr # clear FPCR
+
+ or.l %d1,USER_FPSR(%a6) # save INEX2,N
+
+ fabs.x %fp0,%fp1 # make a copy of result
+ fcmp.b %fp1,&0x2 # is |result| > 2.b?
+ fbgt.w fsglmul_normal_exit # no; no underflow occurred
+ fblt.w fsglmul_unfl # yes; underflow occurred
+
+#
+# we still don't know if underflow occurred. result is ~ equal to 2. but,
+# we don't know if the result was an underflow that rounded up to a 2 or
+# a normalized number that rounded down to a 2. so, redo the entire operation
+# using RZ as the rounding mode to see what the pre-rounded result is.
+# this case should be relatively rare.
+#
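+# (why RZ settles it, a sketch: round-to-zero never increases magnitude,
+#  so an RZ product that still reaches 2.0 proves the pre-rounded value
+#  was >= 2.0 (no underflow); an RZ product below 2.0 exposes the underflow.)
+#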
+ fmovm.x FP_SCR1(%a6),&0x40 # load dst op into fp1
+
+ mov.l L_SCR3(%a6),%d1
+ andi.b &0xc0,%d1 # keep rnd prec
+ ori.b &rz_mode*0x10,%d1 # insert RZ
+
+ fmov.l %d1,%fpcr # set FPCR
+ fmov.l &0x0,%fpsr # clear FPSR
+
+ fsglmul.x FP_SCR0(%a6),%fp1 # execute sgl multiply
+
+ fmov.l &0x0,%fpcr # clear FPCR
+ fabs.x %fp1 # make absolute value
+ fcmp.b %fp1,&0x2 # is |result| < 2.b?
+ fbge.w fsglmul_normal_exit # no; no underflow occurred
+ bra.w fsglmul_unfl # yes, underflow occurred
+
+##############################################################################
+
+#
+# Single Precision Multiply: inputs are not both normalized; what are they?
+#
+fsglmul_not_norm:
+ mov.w (tbl_fsglmul_op.b,%pc,%d1.w*2),%d1
+ jmp (tbl_fsglmul_op.b,%pc,%d1.w*1)
+
+ swbeg &48
+tbl_fsglmul_op:
+ short fsglmul_norm - tbl_fsglmul_op # NORM x NORM
+ short fsglmul_zero - tbl_fsglmul_op # NORM x ZERO
+ short fsglmul_inf_src - tbl_fsglmul_op # NORM x INF
+ short fsglmul_res_qnan - tbl_fsglmul_op # NORM x QNAN
+ short fsglmul_norm - tbl_fsglmul_op # NORM x DENORM
+ short fsglmul_res_snan - tbl_fsglmul_op # NORM x SNAN
+ short tbl_fsglmul_op - tbl_fsglmul_op #
+ short tbl_fsglmul_op - tbl_fsglmul_op #
+
+ short fsglmul_zero - tbl_fsglmul_op # ZERO x NORM
+ short fsglmul_zero - tbl_fsglmul_op # ZERO x ZERO
+ short fsglmul_res_operr - tbl_fsglmul_op # ZERO x INF
+ short fsglmul_res_qnan - tbl_fsglmul_op # ZERO x QNAN
+ short fsglmul_zero - tbl_fsglmul_op # ZERO x DENORM
+ short fsglmul_res_snan - tbl_fsglmul_op # ZERO x SNAN
+ short tbl_fsglmul_op - tbl_fsglmul_op #
+ short tbl_fsglmul_op - tbl_fsglmul_op #
+
+ short fsglmul_inf_dst - tbl_fsglmul_op # INF x NORM
+ short fsglmul_res_operr - tbl_fsglmul_op # INF x ZERO
+ short fsglmul_inf_dst - tbl_fsglmul_op # INF x INF
+ short fsglmul_res_qnan - tbl_fsglmul_op # INF x QNAN
+ short fsglmul_inf_dst - tbl_fsglmul_op # INF x DENORM
+ short fsglmul_res_snan - tbl_fsglmul_op # INF x SNAN
+ short tbl_fsglmul_op - tbl_fsglmul_op #
+ short tbl_fsglmul_op - tbl_fsglmul_op #
+
+ short fsglmul_res_qnan - tbl_fsglmul_op # QNAN x NORM
+ short fsglmul_res_qnan - tbl_fsglmul_op # QNAN x ZERO
+ short fsglmul_res_qnan - tbl_fsglmul_op # QNAN x INF
+ short fsglmul_res_qnan - tbl_fsglmul_op # QNAN x QNAN
+ short fsglmul_res_qnan - tbl_fsglmul_op # QNAN x DENORM
+ short fsglmul_res_snan - tbl_fsglmul_op # QNAN x SNAN
+ short tbl_fsglmul_op - tbl_fsglmul_op #
+ short tbl_fsglmul_op - tbl_fsglmul_op #
+
+ short fsglmul_norm - tbl_fsglmul_op # DENORM x NORM
+ short fsglmul_zero - tbl_fsglmul_op # DENORM x ZERO
+ short fsglmul_inf_src - tbl_fsglmul_op # DENORM x INF
+ short fsglmul_res_qnan - tbl_fsglmul_op # DENORM x QNAN
+ short fsglmul_norm - tbl_fsglmul_op # DENORM x DENORM
+ short fsglmul_res_snan - tbl_fsglmul_op # DENORM x SNAN
+ short tbl_fsglmul_op - tbl_fsglmul_op #
+ short tbl_fsglmul_op - tbl_fsglmul_op #
+
+ short fsglmul_res_snan - tbl_fsglmul_op # SNAN x NORM
+ short fsglmul_res_snan - tbl_fsglmul_op # SNAN x ZERO
+ short fsglmul_res_snan - tbl_fsglmul_op # SNAN x INF
+ short fsglmul_res_snan - tbl_fsglmul_op # SNAN x QNAN
+ short fsglmul_res_snan - tbl_fsglmul_op # SNAN x DENORM
+ short fsglmul_res_snan - tbl_fsglmul_op # SNAN x SNAN
+ short tbl_fsglmul_op - tbl_fsglmul_op #
+ short tbl_fsglmul_op - tbl_fsglmul_op #
+
+fsglmul_res_operr:
+ bra.l res_operr
+fsglmul_res_snan:
+ bra.l res_snan
+fsglmul_res_qnan:
+ bra.l res_qnan
+fsglmul_zero:
+ bra.l fmul_zero
+fsglmul_inf_src:
+ bra.l fmul_inf_src
+fsglmul_inf_dst:
+ bra.l fmul_inf_dst
+
+#########################################################################
+# XDEF **************************************************************** #
+# fsgldiv(): emulates the fsgldiv instruction #
+# #
+# XREF **************************************************************** #
+# scale_to_zero_src() - scale src exponent to zero #
+# scale_to_zero_dst() - scale dst exponent to zero #
+# unf_res4() - return default underflow result for sglop #
+# ovf_res() - return default overflow result #
+# res_qnan() - return QNAN result #
+# res_snan() - return SNAN result #
+# #
+# INPUT *************************************************************** #
+# a0 = pointer to extended precision source operand #
+# a1 = pointer to extended precision destination operand #
+# d0 = rnd prec,mode #
+# #
+# OUTPUT ************************************************************** #
+# fp0 = result #
+# fp1 = EXOP (if exception occurred) #
+# #
+# ALGORITHM *********************************************************** #
+# Handle NANs, infinities, and zeroes as special cases. Divide #
+# norms/denorms into ext/sgl/dbl precision. #
+# For norms/denorms, scale the exponents such that a divide #
+# instruction won't cause an exception. Use the regular fsgldiv to #
+# compute a result. Check if the regular operands would have taken #
+# an exception. If so, return the default overflow/underflow result #
+# and return the EXOP if exceptions are enabled. Else, scale the #
+# result operand to the proper exponent. #
+# #
+#########################################################################
+
+ global fsgldiv
+fsgldiv:
+ mov.l %d0,L_SCR3(%a6) # store rnd info
+
+ clr.w %d1
+ mov.b DTAG(%a6),%d1
+ lsl.b &0x3,%d1
+ or.b STAG(%a6),%d1 # combine src tags
+
+ bne.w fsgldiv_not_norm # optimize on non-norm input
+
+#
+# DIVIDE: NORMs and DENORMs ONLY!
+#
+fsgldiv_norm:
+ mov.w DST_EX(%a1),FP_SCR1_EX(%a6)
+ mov.l DST_HI(%a1),FP_SCR1_HI(%a6)
+ mov.l DST_LO(%a1),FP_SCR1_LO(%a6)
+
+ mov.w SRC_EX(%a0),FP_SCR0_EX(%a6)
+ mov.l SRC_HI(%a0),FP_SCR0_HI(%a6)
+ mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
+
+ bsr.l scale_to_zero_src # calculate scale factor 1
+ mov.l %d0,-(%sp) # save scale factor 1
+
+ bsr.l scale_to_zero_dst # calculate scale factor 2
+
+ neg.l (%sp) # S.F. = scale2 - scale1
+ add.l %d0,(%sp)
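+
+# (sketch: scale1 = 0x3fff - src_exp and scale2 = 0x3fff - dst_exp, so
+#  S.F. = scale2 - scale1 = src_exp - dst_exp; subtracting it from the
+#  scaled quotient's exponent restores the true dst/src exponent.)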
+
+ mov.w 2+L_SCR3(%a6),%d1 # fetch precision,mode
+ lsr.b &0x6,%d1
+ mov.l (%sp)+,%d0
+ cmpi.l %d0,&0x3fff-0x7ffe
+ ble.w fsgldiv_may_ovfl
+
+ cmpi.l %d0,&0x3fff-0x0000 # will result underflow?
+ beq.w fsgldiv_may_unfl # maybe
+ bgt.w fsgldiv_unfl # yes; go handle underflow
+
+fsgldiv_normal:
+ fmovm.x FP_SCR1(%a6),&0x80 # load dst op
+
+ fmov.l L_SCR3(%a6),%fpcr # set FPCR
+ fmov.l &0x0,%fpsr # clear FPSR
+
+ fsgldiv.x FP_SCR0(%a6),%fp0 # perform sgl divide
+
+ fmov.l %fpsr,%d1 # save FPSR
+ fmov.l &0x0,%fpcr # clear FPCR
+
+ or.l %d1,USER_FPSR(%a6) # save INEX2,N
+
+fsgldiv_normal_exit:
+ fmovm.x &0x80,FP_SCR0(%a6) # store result on stack
+ mov.l %d2,-(%sp) # save d2
+ mov.w FP_SCR0_EX(%a6),%d1 # load {sgn,exp}
+ mov.l %d1,%d2 # make a copy
+ andi.l &0x7fff,%d1 # strip sign
+ andi.w &0x8000,%d2 # keep old sign
+ sub.l %d0,%d1 # add scale factor
+ or.w %d2,%d1 # concat old sign,new exp
+ mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent
+ mov.l (%sp)+,%d2 # restore d2
+ fmovm.x FP_SCR0(%a6),&0x80 # return result in fp0
+ rts
+
+fsgldiv_may_ovfl:
+ fmovm.x FP_SCR1(%a6),&0x80 # load dst op
+
+ fmov.l L_SCR3(%a6),%fpcr # set FPCR
+ fmov.l &0x0,%fpsr # clear FPSR
+
+ fsgldiv.x FP_SCR0(%a6),%fp0 # execute divide
+
+ fmov.l %fpsr,%d1
+ fmov.l &0x0,%fpcr
+
+ or.l %d1,USER_FPSR(%a6) # save INEX,N
+
+ fmovm.x &0x01,-(%sp) # save result to stack
+ mov.w (%sp),%d1 # fetch new exponent
+ add.l &0xc,%sp # discard result from stack
+ andi.l &0x7fff,%d1 # strip sign
+ sub.l %d0,%d1 # add scale factor
+ cmp.l %d1,&0x7fff # did divide overflow?
+ blt.b fsgldiv_normal_exit
+
+fsgldiv_ovfl_tst:
+ or.w &ovfl_inx_mask,2+USER_FPSR(%a6) # set ovfl/aovfl/ainex
+
+ mov.b FPCR_ENABLE(%a6),%d1
+ andi.b &0x13,%d1 # is OVFL or INEX enabled?
+ bne.b fsgldiv_ovfl_ena # yes
+
+fsgldiv_ovfl_dis:
+ btst &neg_bit,FPSR_CC(%a6) # is result negative
+ sne %d1 # set sign param accordingly
+ mov.l L_SCR3(%a6),%d0 # pass prec:rnd
+ andi.b &0x30,%d0 # force prec = ext
+ bsr.l ovf_res # calculate default result
+ or.b %d0,FPSR_CC(%a6) # set INF if applicable
+ fmovm.x (%a0),&0x80 # return default result in fp0
+ rts
+
+fsgldiv_ovfl_ena:
+ fmovm.x &0x80,FP_SCR0(%a6) # move result to stack
+
+ mov.l %d2,-(%sp) # save d2
+ mov.w FP_SCR0_EX(%a6),%d1 # fetch {sgn,exp}
+ mov.l %d1,%d2 # make a copy
+ andi.l &0x7fff,%d1 # strip sign
+ andi.w &0x8000,%d2 # keep old sign
+ sub.l %d0,%d1 # add scale factor
+ subi.l &0x6000,%d1 # subtract new bias
+ andi.w &0x7fff,%d1 # clear ms bit
+ or.w %d2,%d1 # concat old sign,new exp
+ mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent
+ mov.l (%sp)+,%d2 # restore d2
+ fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
+ bra.b fsgldiv_ovfl_dis
+
+fsgldiv_unfl:
+ bset &unfl_bit,FPSR_EXCEPT(%a6) # set unfl exc bit
+
+ fmovm.x FP_SCR1(%a6),&0x80 # load dst op
+
+ fmov.l &rz_mode*0x10,%fpcr # set FPCR
+ fmov.l &0x0,%fpsr # clear FPSR
+
+ fsgldiv.x FP_SCR0(%a6),%fp0 # execute sgl divide
+
+ fmov.l %fpsr,%d1 # save status
+ fmov.l &0x0,%fpcr # clear FPCR
+
+ or.l %d1,USER_FPSR(%a6) # save INEX2,N
+
+ mov.b FPCR_ENABLE(%a6),%d1
+ andi.b &0x0b,%d1 # is UNFL or INEX enabled?
+ bne.b fsgldiv_unfl_ena # yes
+
+fsgldiv_unfl_dis:
+ fmovm.x &0x80,FP_SCR0(%a6) # store out result
+
+ lea FP_SCR0(%a6),%a0 # pass: result addr
+ mov.l L_SCR3(%a6),%d1 # pass: rnd prec,mode
+ bsr.l unf_res4 # calculate default result
+ or.b %d0,FPSR_CC(%a6) # 'Z' bit may have been set
+ fmovm.x FP_SCR0(%a6),&0x80 # return default result in fp0
+ rts
+
+#
+# UNFL is enabled.
+#
+fsgldiv_unfl_ena:
+ fmovm.x FP_SCR1(%a6),&0x40 # load dst op
+
+ fmov.l L_SCR3(%a6),%fpcr # set FPCR
+ fmov.l &0x0,%fpsr # clear FPSR
+
+ fsgldiv.x FP_SCR0(%a6),%fp1 # execute sgl divide
+
+ fmov.l &0x0,%fpcr # clear FPCR
+
+ fmovm.x &0x40,FP_SCR0(%a6) # save result to stack
+ mov.l %d2,-(%sp) # save d2
+ mov.w FP_SCR0_EX(%a6),%d1 # fetch {sgn,exp}
+ mov.l %d1,%d2 # make a copy
+ andi.l &0x7fff,%d1 # strip sign
+ andi.w &0x8000,%d2 # keep old sign
+ sub.l %d0,%d1 # add scale factor
+ addi.l &0x6000,%d1 # add bias
+ andi.w &0x7fff,%d1 # clear top bit
+ or.w %d2,%d1 # concat old sign, new exp
+ mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent
+ mov.l (%sp)+,%d2 # restore d2
+ fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
+ bra.b fsgldiv_unfl_dis
+
+#
+# the divide operation MAY underflow:
+#
+fsgldiv_may_unfl:
+ fmovm.x FP_SCR1(%a6),&0x80 # load dst op
+
+ fmov.l L_SCR3(%a6),%fpcr # set FPCR
+ fmov.l &0x0,%fpsr # clear FPSR
+
+ fsgldiv.x FP_SCR0(%a6),%fp0 # execute sgl divide
+
+ fmov.l %fpsr,%d1 # save status
+ fmov.l &0x0,%fpcr # clear FPCR
+
+ or.l %d1,USER_FPSR(%a6) # save INEX2,N
+
+ fabs.x %fp0,%fp1 # make a copy of result
+ fcmp.b %fp1,&0x1 # is |result| > 1.b?
+ fbgt.w fsgldiv_normal_exit # no; no underflow occurred
+ fblt.w fsgldiv_unfl # yes; underflow occurred
+
+#
+# we still don't know if underflow occurred. result is ~ equal to 1. but,
+# we don't know if the result was an underflow that rounded up to a 1
+# or a normalized number that rounded down to a 1. so, redo the entire
+# operation using RZ as the rounding mode to see what the pre-rounded
+# result is. this case should be relatively rare.
+#
+ fmovm.x FP_SCR1(%a6),&0x40 # load dst op into %fp1
+
+ clr.l %d1 # clear scratch register
+ ori.b &rz_mode*0x10,%d1 # force RZ rnd mode
+
+ fmov.l %d1,%fpcr # set FPCR
+ fmov.l &0x0,%fpsr # clear FPSR
+
+ fsgldiv.x FP_SCR0(%a6),%fp1 # execute sgl divide
+
+ fmov.l &0x0,%fpcr # clear FPCR
+ fabs.x %fp1 # make absolute value
+ fcmp.b %fp1,&0x1 # is |result| < 1.b?
+ fbge.w fsgldiv_normal_exit # no; no underflow occurred
+ bra.w fsgldiv_unfl # yes; underflow occurred
+
+############################################################################
+
+#
+# Divide: inputs are not both normalized; what are they?
+#
+fsgldiv_not_norm:
+ mov.w (tbl_fsgldiv_op.b,%pc,%d1.w*2),%d1
+ jmp (tbl_fsgldiv_op.b,%pc,%d1.w*1)
+
+ swbeg &48
+tbl_fsgldiv_op:
+ short fsgldiv_norm - tbl_fsgldiv_op # NORM / NORM
+ short fsgldiv_inf_load - tbl_fsgldiv_op # NORM / ZERO
+ short fsgldiv_zero_load - tbl_fsgldiv_op # NORM / INF
+ short fsgldiv_res_qnan - tbl_fsgldiv_op # NORM / QNAN
+ short fsgldiv_norm - tbl_fsgldiv_op # NORM / DENORM
+ short fsgldiv_res_snan - tbl_fsgldiv_op # NORM / SNAN
+ short tbl_fsgldiv_op - tbl_fsgldiv_op #
+ short tbl_fsgldiv_op - tbl_fsgldiv_op #
+
+ short fsgldiv_zero_load - tbl_fsgldiv_op # ZERO / NORM
+ short fsgldiv_res_operr - tbl_fsgldiv_op # ZERO / ZERO
+ short fsgldiv_zero_load - tbl_fsgldiv_op # ZERO / INF
+ short fsgldiv_res_qnan - tbl_fsgldiv_op # ZERO / QNAN
+ short fsgldiv_zero_load - tbl_fsgldiv_op # ZERO / DENORM
+ short fsgldiv_res_snan - tbl_fsgldiv_op # ZERO / SNAN
+ short tbl_fsgldiv_op - tbl_fsgldiv_op #
+ short tbl_fsgldiv_op - tbl_fsgldiv_op #
+
+ short fsgldiv_inf_dst - tbl_fsgldiv_op # INF / NORM
+ short fsgldiv_inf_dst - tbl_fsgldiv_op # INF / ZERO
+ short fsgldiv_res_operr - tbl_fsgldiv_op # INF / INF
+ short fsgldiv_res_qnan - tbl_fsgldiv_op # INF / QNAN
+ short fsgldiv_inf_dst - tbl_fsgldiv_op # INF / DENORM
+ short fsgldiv_res_snan - tbl_fsgldiv_op # INF / SNAN
+ short tbl_fsgldiv_op - tbl_fsgldiv_op #
+ short tbl_fsgldiv_op - tbl_fsgldiv_op #
+
+ short fsgldiv_res_qnan - tbl_fsgldiv_op # QNAN / NORM
+ short fsgldiv_res_qnan - tbl_fsgldiv_op # QNAN / ZERO
+ short fsgldiv_res_qnan - tbl_fsgldiv_op # QNAN / INF
+ short fsgldiv_res_qnan - tbl_fsgldiv_op # QNAN / QNAN
+ short fsgldiv_res_qnan - tbl_fsgldiv_op # QNAN / DENORM
+ short fsgldiv_res_snan - tbl_fsgldiv_op # QNAN / SNAN
+ short tbl_fsgldiv_op - tbl_fsgldiv_op #
+ short tbl_fsgldiv_op - tbl_fsgldiv_op #
+
+ short fsgldiv_norm - tbl_fsgldiv_op # DENORM / NORM
+ short fsgldiv_inf_load - tbl_fsgldiv_op # DENORM / ZERO
+ short fsgldiv_zero_load - tbl_fsgldiv_op # DENORM / INF
+ short fsgldiv_res_qnan - tbl_fsgldiv_op # DENORM / QNAN
+ short fsgldiv_norm - tbl_fsgldiv_op # DENORM / DENORM
+ short fsgldiv_res_snan - tbl_fsgldiv_op # DENORM / SNAN
+ short tbl_fsgldiv_op - tbl_fsgldiv_op #
+ short tbl_fsgldiv_op - tbl_fsgldiv_op #
+
+ short fsgldiv_res_snan - tbl_fsgldiv_op # SNAN / NORM
+ short fsgldiv_res_snan - tbl_fsgldiv_op # SNAN / ZERO
+ short fsgldiv_res_snan - tbl_fsgldiv_op # SNAN / INF
+ short fsgldiv_res_snan - tbl_fsgldiv_op # SNAN / QNAN
+ short fsgldiv_res_snan - tbl_fsgldiv_op # SNAN / DENORM
+ short fsgldiv_res_snan - tbl_fsgldiv_op # SNAN / SNAN
+ short tbl_fsgldiv_op - tbl_fsgldiv_op #
+ short tbl_fsgldiv_op - tbl_fsgldiv_op #
+
+fsgldiv_res_qnan:
+ bra.l res_qnan
+fsgldiv_res_snan:
+ bra.l res_snan
+fsgldiv_res_operr:
+ bra.l res_operr
+fsgldiv_inf_load:
+ bra.l fdiv_inf_load
+fsgldiv_zero_load:
+ bra.l fdiv_zero_load
+fsgldiv_inf_dst:
+ bra.l fdiv_inf_dst
+
+#########################################################################
+# XDEF **************************************************************** #
+# fadd(): emulates the fadd instruction #
+# fsadd(): emulates the fsadd instruction #
+# fdadd(): emulates the fdadd instruction #
+# #
+# XREF **************************************************************** #
+# addsub_scaler2() - scale the operands so they won't take exc #
+# ovf_res() - return default overflow result #
+# unf_res() - return default underflow result #
+# res_qnan() - set QNAN result #
+# res_snan() - set SNAN result #
+# res_operr() - set OPERR result #
+# scale_to_zero_src() - set src operand exponent equal to zero #
+# scale_to_zero_dst() - set dst operand exponent equal to zero #
+# #
+# INPUT *************************************************************** #
+# a0 = pointer to extended precision source operand #
+# a1 = pointer to extended precision destination operand #
+# #
+# OUTPUT ************************************************************** #
+# fp0 = result #
+# fp1 = EXOP (if exception occurred) #
+# #
+# ALGORITHM *********************************************************** #
+# Handle NANs, infinities, and zeroes as special cases. Divide #
+# norms into extended, single, and double precision. #
+# Do addition after scaling exponents such that exception won't #
+# occur. Then, check result exponent to see if exception would have #
+# occurred. If so, return default result and maybe EXOP. Else, insert #
+# the correct result exponent and return. Set FPSR bits as appropriate. #
+# #
+#########################################################################
+
+ global fsadd
+fsadd:
+ andi.b &0x30,%d0 # clear rnd prec
+ ori.b &s_mode*0x10,%d0 # insert sgl prec
+ bra.b fadd
+
+ global fdadd
+fdadd:
+ andi.b &0x30,%d0 # clear rnd prec
+ ori.b &d_mode*0x10,%d0 # insert dbl prec
+
+ global fadd
+fadd:
+ mov.l %d0,L_SCR3(%a6) # store rnd info
+
+ clr.w %d1
+ mov.b DTAG(%a6),%d1
+ lsl.b &0x3,%d1
+ or.b STAG(%a6),%d1 # combine src tags
+
+ bne.w fadd_not_norm # optimize on non-norm input
+
+#
+# ADD: norms and denorms
+#
+fadd_norm:
+ bsr.l addsub_scaler2 # scale exponents
+
+fadd_zero_entry:
+ fmovm.x FP_SCR1(%a6),&0x80 # load dst op
+
+ fmov.l &0x0,%fpsr # clear FPSR
+ fmov.l L_SCR3(%a6),%fpcr # set FPCR
+
+ fadd.x FP_SCR0(%a6),%fp0 # execute add
+
+ fmov.l &0x0,%fpcr # clear FPCR
+ fmov.l %fpsr,%d1 # fetch INEX2,N,Z
+
+ or.l %d1,USER_FPSR(%a6) # save exc and ccode bits
+
+ fbeq.w fadd_zero_exit # if result is zero, end now
+
+ mov.l %d2,-(%sp) # save d2
+
+ fmovm.x &0x01,-(%sp) # save result to stack
+
+ mov.w 2+L_SCR3(%a6),%d1
+ lsr.b &0x6,%d1
+
+ mov.w (%sp),%d2 # fetch new sign, exp
+ andi.l &0x7fff,%d2 # strip sign
+ sub.l %d0,%d2 # add scale factor
+
+ cmp.l %d2,(tbl_fadd_ovfl.b,%pc,%d1.w*4) # is it an overflow?
+ bge.b fadd_ovfl # yes
+
+ cmp.l %d2,(tbl_fadd_unfl.b,%pc,%d1.w*4) # is it an underflow?
+ blt.w fadd_unfl # yes
+ beq.w fadd_may_unfl # maybe; go find out
+
+fadd_normal:
+ mov.w (%sp),%d1
+ andi.w &0x8000,%d1 # keep sign
+ or.w %d2,%d1 # concat sign,new exp
+ mov.w %d1,(%sp) # insert new exponent
+
+ fmovm.x (%sp)+,&0x80 # return result in fp0
+
+ mov.l (%sp)+,%d2 # restore d2
+ rts
+
+fadd_zero_exit:
+# fmov.s &0x00000000,%fp0 # return zero in fp0
+ rts
+
+tbl_fadd_ovfl:
+ long 0x7fff # ext ovfl
+ long 0x407f # sgl ovfl
+ long 0x43ff # dbl ovfl
+
+tbl_fadd_unfl:
+ long 0x0000 # ext unfl
+ long 0x3f81 # sgl unfl
+ long 0x3c01 # dbl unfl
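+
+# (decoding the limits above against the 0x3fff extended bias, illustrative:
+#  sgl ovfl 0x407f <=> 2^+128, sgl unfl 0x3f81 <=> 2^-126;
+#  dbl ovfl 0x43ff <=> 2^+1024, dbl unfl 0x3c01 <=> 2^-1022;
+#  ext ovfl 0x7fff is the max biased exponent, ext unfl 0x0000 a denorm.)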
+
+fadd_ovfl:
+ or.l &ovfl_inx_mask,USER_FPSR(%a6) # set ovfl/aovfl/ainex
+
+ mov.b FPCR_ENABLE(%a6),%d1
+ andi.b &0x13,%d1 # is OVFL or INEX enabled?
+ bne.b fadd_ovfl_ena # yes
+
+ add.l &0xc,%sp
+fadd_ovfl_dis:
+ btst &neg_bit,FPSR_CC(%a6) # is result negative?
+ sne %d1 # set sign param accordingly
+ mov.l L_SCR3(%a6),%d0 # pass prec:rnd
+ bsr.l ovf_res # calculate default result
+ or.b %d0,FPSR_CC(%a6) # set INF,N if applicable
+ fmovm.x (%a0),&0x80 # return default result in fp0
+ mov.l (%sp)+,%d2 # restore d2
+ rts
+
+fadd_ovfl_ena:
+ mov.b L_SCR3(%a6),%d1
+ andi.b &0xc0,%d1 # is precision extended?
+ bne.b fadd_ovfl_ena_sd # no; prec = sgl or dbl
+
+fadd_ovfl_ena_cont:
+ mov.w (%sp),%d1
+ andi.w &0x8000,%d1 # keep sign
+ subi.l &0x6000,%d2 # add extra bias
+ andi.w &0x7fff,%d2
+ or.w %d2,%d1 # concat sign,new exp
+ mov.w %d1,(%sp) # insert new exponent
+
+ fmovm.x (%sp)+,&0x40 # return EXOP in fp1
+ bra.b fadd_ovfl_dis
+
+fadd_ovfl_ena_sd:
+ fmovm.x FP_SCR1(%a6),&0x80 # load dst op
+
+ mov.l L_SCR3(%a6),%d1
+ andi.b &0x30,%d1 # keep rnd mode
+ fmov.l %d1,%fpcr # set FPCR
+
+ fadd.x FP_SCR0(%a6),%fp0 # execute add
+
+ fmov.l &0x0,%fpcr # clear FPCR
+
+ add.l &0xc,%sp
+ fmovm.x &0x01,-(%sp)
+ bra.b fadd_ovfl_ena_cont
+
+fadd_unfl:
+ bset &unfl_bit,FPSR_EXCEPT(%a6) # set unfl exc bit
+
+ add.l &0xc,%sp
+
+ fmovm.x FP_SCR1(%a6),&0x80 # load dst op
+
+ fmov.l &rz_mode*0x10,%fpcr # set FPCR
+ fmov.l &0x0,%fpsr # clear FPSR
+
+ fadd.x FP_SCR0(%a6),%fp0 # execute add
+
+ fmov.l &0x0,%fpcr # clear FPCR
+ fmov.l %fpsr,%d1 # save status
+
+ or.l %d1,USER_FPSR(%a6) # save INEX,N
+
+ mov.b FPCR_ENABLE(%a6),%d1
+ andi.b &0x0b,%d1 # is UNFL or INEX enabled?
+ bne.b fadd_unfl_ena # yes
+
+fadd_unfl_dis:
+ fmovm.x &0x80,FP_SCR0(%a6) # store out result
+
+ lea FP_SCR0(%a6),%a0 # pass: result addr
+ mov.l L_SCR3(%a6),%d1 # pass: rnd prec,mode
+ bsr.l unf_res # calculate default result
+ or.b %d0,FPSR_CC(%a6) # 'Z' bit may have been set
+ fmovm.x FP_SCR0(%a6),&0x80 # return default result in fp0
+ mov.l (%sp)+,%d2 # restore d2
+ rts
+
+fadd_unfl_ena:
+ fmovm.x FP_SCR1(%a6),&0x40 # load dst op
+
+ mov.l L_SCR3(%a6),%d1
+ andi.b &0xc0,%d1 # is precision extended?
+ bne.b fadd_unfl_ena_sd # no; sgl or dbl
+
+ fmov.l L_SCR3(%a6),%fpcr # set FPCR
+
+fadd_unfl_ena_cont:
+ fmov.l &0x0,%fpsr # clear FPSR
+
+ fadd.x FP_SCR0(%a6),%fp1 # execute add
+
+ fmov.l &0x0,%fpcr # clear FPCR
+
+ fmovm.x &0x40,FP_SCR0(%a6) # save result to stack
+ mov.w FP_SCR0_EX(%a6),%d1 # fetch {sgn,exp}
+ mov.l %d1,%d2 # make a copy
+ andi.l &0x7fff,%d1 # strip sign
+ andi.w &0x8000,%d2 # keep old sign
+ sub.l %d0,%d1 # add scale factor
+ addi.l &0x6000,%d1 # add new bias
+ andi.w &0x7fff,%d1 # clear top bit
+ or.w %d2,%d1 # concat sign,new exp
+ mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent
+ fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
+ bra.w fadd_unfl_dis
+
+fadd_unfl_ena_sd:
+ mov.l L_SCR3(%a6),%d1
+ andi.b &0x30,%d1 # use only rnd mode
+ fmov.l %d1,%fpcr # set FPCR
+
+ bra.b fadd_unfl_ena_cont
+
+#
+# result is equal to the smallest normalized number in the selected precision
+# if the precision is extended, this result could not have come from an
+# underflow that rounded up.
+#
+fadd_may_unfl:
+ mov.l L_SCR3(%a6),%d1
+ andi.b &0xc0,%d1
+ beq.w fadd_normal # yes; no underflow occurred
+
+ mov.l 0x4(%sp),%d1 # extract hi(man)
+ cmpi.l %d1,&0x80000000 # is hi(man) = 0x80000000?
+ bne.w fadd_normal # no; no underflow occurred
+
+ tst.l 0x8(%sp) # is lo(man) = 0x0?
+ bne.w fadd_normal # no; no underflow occurred
+
+ btst &inex2_bit,FPSR_EXCEPT(%a6) # is INEX2 set?
+ beq.w fadd_normal # no; no underflow occurred
+
+#
+# ok, so now the result has an exponent equal to the smallest normalized
+# exponent for the selected precision. also, the mantissa is equal to
+# 0x8000000000000000 and this mantissa is the result of rounding non-zero
+# g,r,s.
+# now, we must determine whether the pre-rounded result was an underflow
+# rounded "up" or a normalized number rounded "down".
+# so, we do this by re-executing the add using RZ as the rounding mode and
+# seeing if the new result is smaller or equal to the current result.
+#
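+# (comparison logic, a sketch: RZ can only round magnitude down, so the RZ
+#  sum is <= the first sum in magnitude; |first| > |RZ| means the first
+#  result was rounded up from below the smallest norm ==> underflow, while
+#  equality means the pre-rounded sum already sat on the norm boundary.)
+#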
+ fmovm.x FP_SCR1(%a6),&0x40 # load dst op into fp1
+
+ mov.l L_SCR3(%a6),%d1
+ andi.b &0xc0,%d1 # keep rnd prec
+ ori.b &rz_mode*0x10,%d1 # insert rnd mode
+ fmov.l %d1,%fpcr # set FPCR
+ fmov.l &0x0,%fpsr # clear FPSR
+
+ fadd.x FP_SCR0(%a6),%fp1 # execute add
+
+ fmov.l &0x0,%fpcr # clear FPCR
+
+ fabs.x %fp0 # compare absolute values
+ fabs.x %fp1
+ fcmp.x %fp0,%fp1 # is first result > second?
+
+ fbgt.w fadd_unfl # yes; it's an underflow
+ bra.w fadd_normal # no; it's not an underflow
+
+##########################################################################
+
+#
+# Add: inputs are not both normalized; what are they?
+#
+fadd_not_norm:
+ mov.w (tbl_fadd_op.b,%pc,%d1.w*2),%d1
+ jmp (tbl_fadd_op.b,%pc,%d1.w*1)
+
+ swbeg &48
+tbl_fadd_op:
+ short fadd_norm - tbl_fadd_op # NORM + NORM
+ short fadd_zero_src - tbl_fadd_op # NORM + ZERO
+ short fadd_inf_src - tbl_fadd_op # NORM + INF
+ short fadd_res_qnan - tbl_fadd_op # NORM + QNAN
+ short fadd_norm - tbl_fadd_op # NORM + DENORM
+ short fadd_res_snan - tbl_fadd_op # NORM + SNAN
+ short tbl_fadd_op - tbl_fadd_op #
+ short tbl_fadd_op - tbl_fadd_op #
+
+ short fadd_zero_dst - tbl_fadd_op # ZERO + NORM
+ short fadd_zero_2 - tbl_fadd_op # ZERO + ZERO
+ short fadd_inf_src - tbl_fadd_op # ZERO + INF
+ short fadd_res_qnan - tbl_fadd_op # ZERO + QNAN
+ short fadd_zero_dst - tbl_fadd_op # ZERO + DENORM
+ short fadd_res_snan - tbl_fadd_op # ZERO + SNAN
+ short tbl_fadd_op - tbl_fadd_op #
+ short tbl_fadd_op - tbl_fadd_op #
+
+ short fadd_inf_dst - tbl_fadd_op # INF + NORM
+ short fadd_inf_dst - tbl_fadd_op # INF + ZERO
+ short fadd_inf_2 - tbl_fadd_op # INF + INF
+ short fadd_res_qnan - tbl_fadd_op # INF + QNAN
+ short fadd_inf_dst - tbl_fadd_op # INF + DENORM
+ short fadd_res_snan - tbl_fadd_op # INF + SNAN
+ short tbl_fadd_op - tbl_fadd_op #
+ short tbl_fadd_op - tbl_fadd_op #
+
+ short fadd_res_qnan - tbl_fadd_op # QNAN + NORM
+ short fadd_res_qnan - tbl_fadd_op # QNAN + ZERO
+ short fadd_res_qnan - tbl_fadd_op # QNAN + INF
+ short fadd_res_qnan - tbl_fadd_op # QNAN + QNAN
+ short fadd_res_qnan - tbl_fadd_op # QNAN + DENORM
+ short fadd_res_snan - tbl_fadd_op # QNAN + SNAN
+ short tbl_fadd_op - tbl_fadd_op #
+ short tbl_fadd_op - tbl_fadd_op #
+
+ short fadd_norm - tbl_fadd_op # DENORM + NORM
+ short fadd_zero_src - tbl_fadd_op # DENORM + ZERO
+ short fadd_inf_src - tbl_fadd_op # DENORM + INF
+ short fadd_res_qnan - tbl_fadd_op # DENORM + QNAN
+ short fadd_norm - tbl_fadd_op # DENORM + DENORM
+ short fadd_res_snan - tbl_fadd_op # DENORM + SNAN
+ short tbl_fadd_op - tbl_fadd_op #
+ short tbl_fadd_op - tbl_fadd_op #
+
+ short fadd_res_snan - tbl_fadd_op # SNAN + NORM
+ short fadd_res_snan - tbl_fadd_op # SNAN + ZERO
+ short fadd_res_snan - tbl_fadd_op # SNAN + INF
+ short fadd_res_snan - tbl_fadd_op # SNAN + QNAN
+ short fadd_res_snan - tbl_fadd_op # SNAN + DENORM
+ short fadd_res_snan - tbl_fadd_op # SNAN + SNAN
+ short tbl_fadd_op - tbl_fadd_op #
+ short tbl_fadd_op - tbl_fadd_op #
+
+fadd_res_qnan:
+ bra.l res_qnan
+fadd_res_snan:
+ bra.l res_snan
+
+#
+# both operands are ZEROes
+#
+fadd_zero_2:
+ mov.b SRC_EX(%a0),%d0 # are the signs opposite?
+ mov.b DST_EX(%a1),%d1
+ eor.b %d0,%d1
+ bmi.w fadd_zero_2_chk_rm # weed out (-ZERO)+(+ZERO)
+
+# the signs are the same. so determine whether they are positive or negative
+# and return the appropriately signed zero.
+ tst.b %d0 # are ZEROes positive or negative?
+ bmi.b fadd_zero_rm # negative
+ fmov.s &0x00000000,%fp0 # return +ZERO
+ mov.b &z_bmask,FPSR_CC(%a6) # set Z
+ rts
+
+#
+# the ZEROes have opposite signs:
+# - therefore, we return +ZERO if the rounding modes are RN,RZ, or RP.
+# - -ZERO is returned in the case of RM.
+#
+fadd_zero_2_chk_rm:
+ mov.b 3+L_SCR3(%a6),%d1
+ andi.b &0x30,%d1 # extract rnd mode
+ cmpi.b %d1,&rm_mode*0x10 # is rnd mode == RM?
+ beq.b fadd_zero_rm # yes
+ fmov.s &0x00000000,%fp0 # return +ZERO
+ mov.b &z_bmask,FPSR_CC(%a6) # set Z
+ rts
+
+fadd_zero_rm:
+ fmov.s &0x80000000,%fp0 # return -ZERO
+ mov.b &neg_bmask+z_bmask,FPSR_CC(%a6) # set NEG/Z
+ rts
+
+#
+# one operand is a ZERO and the other is a DENORM or NORM. scale
+# the DENORM or NORM and jump to the regular fadd routine.
+#
+fadd_zero_dst:
+ mov.w SRC_EX(%a0),FP_SCR0_EX(%a6)
+ mov.l SRC_HI(%a0),FP_SCR0_HI(%a6)
+ mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
+ bsr.l scale_to_zero_src # scale the operand
+ clr.w FP_SCR1_EX(%a6)
+ clr.l FP_SCR1_HI(%a6)
+ clr.l FP_SCR1_LO(%a6)
+ bra.w fadd_zero_entry # go execute fadd
+
+fadd_zero_src:
+ mov.w DST_EX(%a1),FP_SCR1_EX(%a6)
+ mov.l DST_HI(%a1),FP_SCR1_HI(%a6)
+ mov.l DST_LO(%a1),FP_SCR1_LO(%a6)
+ bsr.l scale_to_zero_dst # scale the operand
+ clr.w FP_SCR0_EX(%a6)
+ clr.l FP_SCR0_HI(%a6)
+ clr.l FP_SCR0_LO(%a6)
+ bra.w fadd_zero_entry # go execute fadd
+
+#
+# both operands are INFs. an OPERR will result if the INFs have
+# different signs. else, an INF of the same sign is returned
+#
+fadd_inf_2:
+ mov.b SRC_EX(%a0),%d0 # exclusive or the signs
+ mov.b DST_EX(%a1),%d1
+ eor.b %d1,%d0
+ bmi.l res_operr # weed out (-INF)+(+INF)
+
+# ok, so it's not an OPERR. but, we do have to remember to return the
+# src INF since that's where the 881/882 gets the j-bit from...
+
+#
+# operands are INF and one of {ZERO, INF, DENORM, NORM}
+#
+fadd_inf_src:
+ fmovm.x SRC(%a0),&0x80 # return src INF
+ tst.b SRC_EX(%a0) # is INF positive?
+ bpl.b fadd_inf_done # yes; we're done
+ mov.b &neg_bmask+inf_bmask,FPSR_CC(%a6) # set INF/NEG
+ rts
+
+#
+# operands are INF and one of {ZERO, INF, DENORM, NORM}
+#
+fadd_inf_dst:
+ fmovm.x DST(%a1),&0x80 # return dst INF
+ tst.b DST_EX(%a1) # is INF positive?
+ bpl.b fadd_inf_done # yes; we're done
+ mov.b &neg_bmask+inf_bmask,FPSR_CC(%a6) # set INF/NEG
+ rts
+
+fadd_inf_done:
+ mov.b &inf_bmask,FPSR_CC(%a6) # set INF
+ rts
+
+#########################################################################
+# XDEF **************************************************************** #
+# fsub(): emulates the fsub instruction #
+# fssub(): emulates the fssub instruction #
+# fdsub(): emulates the fdsub instruction #
+# #
+# XREF **************************************************************** #
+# addsub_scaler2() - scale the operands so they won't take exc #
+# ovf_res() - return default overflow result #
+# unf_res() - return default underflow result #
+# res_qnan() - set QNAN result #
+# res_snan() - set SNAN result #
+# res_operr() - set OPERR result #
+# scale_to_zero_src() - set src operand exponent equal to zero #
+# scale_to_zero_dst() - set dst operand exponent equal to zero #
+# #
+# INPUT *************************************************************** #
+# a0 = pointer to extended precision source operand #
+# a1 = pointer to extended precision destination operand #
+# #
+# OUTPUT ************************************************************** #
+# fp0 = result #
+# fp1 = EXOP (if exception occurred) #
+# #
+# ALGORITHM *********************************************************** #
+# Handle NANs, infinities, and zeroes as special cases. Divide #
+# norms into extended, single, and double precision. #
+# Do subtraction after scaling exponents such that exception won't#
+# occur. Then, check result exponent to see if exception would have #
+# occurred. If so, return default result and maybe EXOP. Else, insert #
+# the correct result exponent and return. Set FPSR bits as appropriate. #
+# #
+#########################################################################
+
+ global fssub
+fssub:
+ andi.b &0x30,%d0 # clear rnd prec
+ ori.b &s_mode*0x10,%d0 # insert sgl prec
+ bra.b fsub
+
+ global fdsub
+fdsub:
+ andi.b &0x30,%d0 # clear rnd prec
+ ori.b &d_mode*0x10,%d0 # insert dbl prec
+
+ global fsub
+fsub:
+ mov.l %d0,L_SCR3(%a6) # store rnd info
+
+ clr.w %d1
+ mov.b DTAG(%a6),%d1
+ lsl.b &0x3,%d1
+ or.b STAG(%a6),%d1 # combine src tags
+
+ bne.w fsub_not_norm # optimize on non-norm input
+
+#
+# SUB: norms and denorms
+#
+fsub_norm:
+ bsr.l addsub_scaler2 # scale exponents
+
+fsub_zero_entry:
+ fmovm.x FP_SCR1(%a6),&0x80 # load dst op
+
+ fmov.l &0x0,%fpsr # clear FPSR
+ fmov.l L_SCR3(%a6),%fpcr # set FPCR
+
+ fsub.x FP_SCR0(%a6),%fp0 # execute subtract
+
+ fmov.l &0x0,%fpcr # clear FPCR
+ fmov.l %fpsr,%d1 # fetch INEX2, N, Z
+
+ or.l %d1,USER_FPSR(%a6) # save exc and ccode bits
+
+ fbeq.w fsub_zero_exit # if result zero, end now
+
+ mov.l %d2,-(%sp) # save d2
+
+ fmovm.x &0x01,-(%sp) # save result to stack
+
+ mov.w 2+L_SCR3(%a6),%d1
+ lsr.b &0x6,%d1
+
+ mov.w (%sp),%d2 # fetch new exponent
+ andi.l &0x7fff,%d2 # strip sign
+ sub.l %d0,%d2 # add scale factor
+
+ cmp.l %d2,(tbl_fsub_ovfl.b,%pc,%d1.w*4) # is it an overflow?
+ bge.b fsub_ovfl # yes
+
+ cmp.l %d2,(tbl_fsub_unfl.b,%pc,%d1.w*4) # is it an underflow?
+ blt.w fsub_unfl # yes
+ beq.w fsub_may_unfl # maybe; go find out
+
+fsub_normal:
+ mov.w (%sp),%d1
+ andi.w &0x8000,%d1 # keep sign
+ or.w %d2,%d1 # concat sign,new exp
+ mov.w %d1,(%sp) # insert new exponent
+
+ fmovm.x (%sp)+,&0x80 # return result in fp0
+
+ mov.l (%sp)+,%d2 # restore d2
+ rts
+
+fsub_zero_exit:
+# fmov.s &0x00000000,%fp0 # return zero in fp0
+ rts
+
+tbl_fsub_ovfl:
+ long 0x7fff # ext ovfl
+ long 0x407f # sgl ovfl
+ long 0x43ff # dbl ovfl
+
+tbl_fsub_unfl:
+ long 0x0000 # ext unfl
+ long 0x3f81 # sgl unfl
+ long 0x3c01 # dbl unfl
+
+fsub_ovfl:
+ or.l &ovfl_inx_mask,USER_FPSR(%a6) # set ovfl/aovfl/ainex
+
+ mov.b FPCR_ENABLE(%a6),%d1
+ andi.b &0x13,%d1 # is OVFL or INEX enabled?
+ bne.b fsub_ovfl_ena # yes
+
+ add.l &0xc,%sp
+fsub_ovfl_dis:
+ btst &neg_bit,FPSR_CC(%a6) # is result negative?
+ sne %d1 # set sign param accordingly
+ mov.l L_SCR3(%a6),%d0 # pass prec:rnd
+ bsr.l ovf_res # calculate default result
+ or.b %d0,FPSR_CC(%a6) # set INF,N if applicable
+ fmovm.x (%a0),&0x80 # return default result in fp0
+ mov.l (%sp)+,%d2 # restore d2
+ rts
+
+fsub_ovfl_ena:
+ mov.b L_SCR3(%a6),%d1
+ andi.b &0xc0,%d1 # is precision extended?
+ bne.b fsub_ovfl_ena_sd # no
+
+fsub_ovfl_ena_cont:
+ mov.w (%sp),%d1 # fetch {sgn,exp}
+ andi.w &0x8000,%d1 # keep sign
+ subi.l &0x6000,%d2 # subtract new bias
+ andi.w &0x7fff,%d2 # clear top bit
+ or.w %d2,%d1 # concat sign,exp
+ mov.w %d1,(%sp) # insert new exponent
+
+ fmovm.x (%sp)+,&0x40 # return EXOP in fp1
+ bra.b fsub_ovfl_dis
+
+fsub_ovfl_ena_sd:
+ fmovm.x FP_SCR1(%a6),&0x80 # load dst op
+
+ mov.l L_SCR3(%a6),%d1
+ andi.b &0x30,%d1 # clear rnd prec
+ fmov.l %d1,%fpcr # set FPCR
+
+ fsub.x FP_SCR0(%a6),%fp0 # execute subtract
+
+ fmov.l &0x0,%fpcr # clear FPCR
+
+ add.l &0xc,%sp
+ fmovm.x &0x01,-(%sp)
+ bra.b fsub_ovfl_ena_cont
+
+fsub_unfl:
+ bset &unfl_bit,FPSR_EXCEPT(%a6) # set unfl exc bit
+
+ add.l &0xc,%sp
+
+ fmovm.x FP_SCR1(%a6),&0x80 # load dst op
+
+ fmov.l &rz_mode*0x10,%fpcr # set FPCR
+ fmov.l &0x0,%fpsr # clear FPSR
+
+ fsub.x FP_SCR0(%a6),%fp0 # execute subtract
+
+ fmov.l &0x0,%fpcr # clear FPCR
+ fmov.l %fpsr,%d1 # save status
+
+ or.l %d1,USER_FPSR(%a6)
+
+ mov.b FPCR_ENABLE(%a6),%d1
+ andi.b &0x0b,%d1 # is UNFL or INEX enabled?
+ bne.b fsub_unfl_ena # yes
+
+fsub_unfl_dis:
+ fmovm.x &0x80,FP_SCR0(%a6) # store out result
+
+ lea FP_SCR0(%a6),%a0 # pass: result addr
+ mov.l L_SCR3(%a6),%d1 # pass: rnd prec,mode
+ bsr.l unf_res # calculate default result
+ or.b %d0,FPSR_CC(%a6) # 'Z' may have been set
+ fmovm.x FP_SCR0(%a6),&0x80 # return default result in fp0
+ mov.l (%sp)+,%d2 # restore d2
+ rts
+
+fsub_unfl_ena:
+ fmovm.x FP_SCR1(%a6),&0x40
+
+ mov.l L_SCR3(%a6),%d1
+ andi.b &0xc0,%d1 # is precision extended?
+ bne.b fsub_unfl_ena_sd # no
+
+ fmov.l L_SCR3(%a6),%fpcr # set FPCR
+
+fsub_unfl_ena_cont:
+ fmov.l &0x0,%fpsr # clear FPSR
+
+ fsub.x FP_SCR0(%a6),%fp1 # execute subtract
+
+ fmov.l &0x0,%fpcr # clear FPCR
+
+ fmovm.x &0x40,FP_SCR0(%a6) # store result to stack
+ mov.w FP_SCR0_EX(%a6),%d1 # fetch {sgn,exp}
+ mov.l %d1,%d2 # make a copy
+ andi.l &0x7fff,%d1 # strip sign
+ andi.w &0x8000,%d2 # keep old sign
+	sub.l		%d0,%d1		# subtract scale factor
+	addi.l		&0x6000,%d1	# add new bias
+ andi.w &0x7fff,%d1 # clear top bit
+ or.w %d2,%d1 # concat sgn,exp
+ mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent
+ fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
+ bra.w fsub_unfl_dis
+
+fsub_unfl_ena_sd:
+ mov.l L_SCR3(%a6),%d1
+ andi.b &0x30,%d1 # clear rnd prec
+ fmov.l %d1,%fpcr # set FPCR
+
+ bra.b fsub_unfl_ena_cont
+
+#
+# the result is equal to the smallest normalized number in the selected
+# precision. if the precision is extended, this result could not have come
+# from an underflow that rounded up.
+#
+fsub_may_unfl:
+ mov.l L_SCR3(%a6),%d1
+	andi.b		&0xc0,%d1	# is precision extended?
+	beq.w		fsub_normal	# yes; no underflow occurred
+
+ mov.l 0x4(%sp),%d1
+ cmpi.l %d1,&0x80000000 # is hi(man) = 0x80000000?
+ bne.w fsub_normal # no; no underflow occurred
+
+ tst.l 0x8(%sp) # is lo(man) = 0x0?
+ bne.w fsub_normal # no; no underflow occurred
+
+ btst &inex2_bit,FPSR_EXCEPT(%a6) # is INEX2 set?
+ beq.w fsub_normal # no; no underflow occurred
+
+#
+# ok, so now the result has an exponent equal to the smallest normalized
+# exponent for the selected precision. also, the mantissa is equal to
+# 0x8000000000000000 and this mantissa is the result of rounding non-zero
+# g,r,s.
+# now, we must determine whether the pre-rounded result was an underflow
+# rounded "up" or a normalized number rounded "down".
+# so, we do this by re-executing the subtract using RZ as the rounding mode and
+# seeing if the new result is smaller or equal to the current result.
+#
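+#
+# In C terms (illustrative only; hw_fsub_rz() and fabs_cmp() are
+# hypothetical chop-mode subtract and magnitude-compare helpers):
+#
+#	int was_unfl(xfp *src, xfp *dst, xfp *rounded)
+#	{
+#		xfp rz;
+#		hw_fsub_rz(dst, src, &rz);  /* redo the op, chopping  */
+#		/* a chopped |result| below the rounded one means the
+#		 * round crossed up over the smallest norm: underflow */
+#		return fabs_cmp(&rz, rounded) < 0;
+#	}
+#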
+ fmovm.x FP_SCR1(%a6),&0x40 # load dst op into fp1
+
+ mov.l L_SCR3(%a6),%d1
+ andi.b &0xc0,%d1 # keep rnd prec
+ ori.b &rz_mode*0x10,%d1 # insert rnd mode
+ fmov.l %d1,%fpcr # set FPCR
+ fmov.l &0x0,%fpsr # clear FPSR
+
+ fsub.x FP_SCR0(%a6),%fp1 # execute subtract
+
+ fmov.l &0x0,%fpcr # clear FPCR
+
+ fabs.x %fp0 # compare absolute values
+ fabs.x %fp1
+ fcmp.x %fp0,%fp1 # is first result > second?
+
+ fbgt.w fsub_unfl # yes; it's an underflow
+ bra.w fsub_normal # no; it's not an underflow
+
+##########################################################################
+
+#
+# Sub: inputs are not both normalized; what are they?
+#
+fsub_not_norm:
+ mov.w (tbl_fsub_op.b,%pc,%d1.w*2),%d1
+ jmp (tbl_fsub_op.b,%pc,%d1.w*1)
+
+ swbeg &48
+tbl_fsub_op:
+ short fsub_norm - tbl_fsub_op # NORM - NORM
+ short fsub_zero_src - tbl_fsub_op # NORM - ZERO
+ short fsub_inf_src - tbl_fsub_op # NORM - INF
+ short fsub_res_qnan - tbl_fsub_op # NORM - QNAN
+ short fsub_norm - tbl_fsub_op # NORM - DENORM
+ short fsub_res_snan - tbl_fsub_op # NORM - SNAN
+ short tbl_fsub_op - tbl_fsub_op #
+ short tbl_fsub_op - tbl_fsub_op #
+
+ short fsub_zero_dst - tbl_fsub_op # ZERO - NORM
+ short fsub_zero_2 - tbl_fsub_op # ZERO - ZERO
+ short fsub_inf_src - tbl_fsub_op # ZERO - INF
+	short		fsub_res_qnan - tbl_fsub_op # ZERO - QNAN
+ short fsub_zero_dst - tbl_fsub_op # ZERO - DENORM
+	short		fsub_res_snan - tbl_fsub_op # ZERO - SNAN
+ short tbl_fsub_op - tbl_fsub_op #
+ short tbl_fsub_op - tbl_fsub_op #
+
+ short fsub_inf_dst - tbl_fsub_op # INF - NORM
+ short fsub_inf_dst - tbl_fsub_op # INF - ZERO
+ short fsub_inf_2 - tbl_fsub_op # INF - INF
+	short		fsub_res_qnan - tbl_fsub_op # INF - QNAN
+ short fsub_inf_dst - tbl_fsub_op # INF - DENORM
+	short		fsub_res_snan - tbl_fsub_op # INF - SNAN
+ short tbl_fsub_op - tbl_fsub_op #
+ short tbl_fsub_op - tbl_fsub_op #
+
+ short fsub_res_qnan - tbl_fsub_op # QNAN - NORM
+ short fsub_res_qnan - tbl_fsub_op # QNAN - ZERO
+ short fsub_res_qnan - tbl_fsub_op # QNAN - INF
+ short fsub_res_qnan - tbl_fsub_op # QNAN - QNAN
+ short fsub_res_qnan - tbl_fsub_op # QNAN - DENORM
+ short fsub_res_snan - tbl_fsub_op # QNAN - SNAN
+ short tbl_fsub_op - tbl_fsub_op #
+ short tbl_fsub_op - tbl_fsub_op #
+
+ short fsub_norm - tbl_fsub_op # DENORM - NORM
+ short fsub_zero_src - tbl_fsub_op # DENORM - ZERO
+ short fsub_inf_src - tbl_fsub_op # DENORM - INF
+	short		fsub_res_qnan - tbl_fsub_op # DENORM - QNAN
+ short fsub_norm - tbl_fsub_op # DENORM - DENORM
+	short		fsub_res_snan - tbl_fsub_op # DENORM - SNAN
+ short tbl_fsub_op - tbl_fsub_op #
+ short tbl_fsub_op - tbl_fsub_op #
+
+ short fsub_res_snan - tbl_fsub_op # SNAN - NORM
+ short fsub_res_snan - tbl_fsub_op # SNAN - ZERO
+ short fsub_res_snan - tbl_fsub_op # SNAN - INF
+ short fsub_res_snan - tbl_fsub_op # SNAN - QNAN
+ short fsub_res_snan - tbl_fsub_op # SNAN - DENORM
+ short fsub_res_snan - tbl_fsub_op # SNAN - SNAN
+ short tbl_fsub_op - tbl_fsub_op #
+ short tbl_fsub_op - tbl_fsub_op #
+
+fsub_res_qnan:
+ bra.l res_qnan
+fsub_res_snan:
+ bra.l res_snan
+
+#
+# both operands are ZEROes
+#
+fsub_zero_2:
+ mov.b SRC_EX(%a0),%d0
+ mov.b DST_EX(%a1),%d1
+ eor.b %d1,%d0
+ bpl.b fsub_zero_2_chk_rm
+
+# the signs are opposite, so, return a ZERO w/ the sign of the dst ZERO
+ tst.b %d0 # is dst negative?
+ bmi.b fsub_zero_2_rm # yes
+ fmov.s &0x00000000,%fp0 # no; return +ZERO
+ mov.b &z_bmask,FPSR_CC(%a6) # set Z
+ rts
+
+#
+# the ZEROes have the same signs:
+# - therefore, we return +ZERO if the rounding mode is RN, RZ, or RP
+# - -ZERO is returned in the case of RM.
+#
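+#
+# In C terms (illustrative only; mode encoding 0-3 = RN,RZ,RM,RP as in
+# the FPCR rounding mode field):
+#
+#	double zero_sub_zero_same_sign(int rnd_mode)
+#	{
+#		return (rnd_mode == 2 /* RM */) ? -0.0 : +0.0;
+#	}
+#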
+fsub_zero_2_chk_rm:
+ mov.b 3+L_SCR3(%a6),%d1
+ andi.b &0x30,%d1 # extract rnd mode
+ cmpi.b %d1,&rm_mode*0x10 # is rnd mode = RM?
+ beq.b fsub_zero_2_rm # yes
+ fmov.s &0x00000000,%fp0 # no; return +ZERO
+ mov.b &z_bmask,FPSR_CC(%a6) # set Z
+ rts
+
+fsub_zero_2_rm:
+ fmov.s &0x80000000,%fp0 # return -ZERO
+ mov.b &z_bmask+neg_bmask,FPSR_CC(%a6) # set Z/NEG
+ rts
+
+#
+# one operand is a ZERO and the other is a DENORM or a NORM.
+# scale the DENORM or NORM and jump to the regular fsub routine.
+#
+fsub_zero_dst:
+ mov.w SRC_EX(%a0),FP_SCR0_EX(%a6)
+ mov.l SRC_HI(%a0),FP_SCR0_HI(%a6)
+ mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
+ bsr.l scale_to_zero_src # scale the operand
+ clr.w FP_SCR1_EX(%a6)
+ clr.l FP_SCR1_HI(%a6)
+ clr.l FP_SCR1_LO(%a6)
+ bra.w fsub_zero_entry # go execute fsub
+
+fsub_zero_src:
+ mov.w DST_EX(%a1),FP_SCR1_EX(%a6)
+ mov.l DST_HI(%a1),FP_SCR1_HI(%a6)
+ mov.l DST_LO(%a1),FP_SCR1_LO(%a6)
+ bsr.l scale_to_zero_dst # scale the operand
+ clr.w FP_SCR0_EX(%a6)
+ clr.l FP_SCR0_HI(%a6)
+ clr.l FP_SCR0_LO(%a6)
+ bra.w fsub_zero_entry # go execute fsub
+
+#
+# both operands are INFs. an OPERR will result if the INFs have the
+# same signs. else, the result is the src INF with its sign inverted.
+#
+fsub_inf_2:
+ mov.b SRC_EX(%a0),%d0 # exclusive or the signs
+ mov.b DST_EX(%a1),%d1
+ eor.b %d1,%d0
+	bpl.l		res_operr # weed out INF-INF of like sign
+
+# ok, so it's not an OPERR. but we do have to remember to return
+# the src INF since that's where the 881/882 gets the j-bit.
+
+fsub_inf_src:
+ fmovm.x SRC(%a0),&0x80 # return src INF
+ fneg.x %fp0 # invert sign
+ fbge.w fsub_inf_done # sign is now positive
+ mov.b &neg_bmask+inf_bmask,FPSR_CC(%a6) # set INF/NEG
+ rts
+
+fsub_inf_dst:
+ fmovm.x DST(%a1),&0x80 # return dst INF
+ tst.b DST_EX(%a1) # is INF negative?
+ bpl.b fsub_inf_done # no
+ mov.b &neg_bmask+inf_bmask,FPSR_CC(%a6) # set INF/NEG
+ rts
+
+fsub_inf_done:
+ mov.b &inf_bmask,FPSR_CC(%a6) # set INF
+ rts
+
+#########################################################################
+# XDEF **************************************************************** #
+# fsqrt(): emulates the fsqrt instruction #
+# fssqrt(): emulates the fssqrt instruction #
+# fdsqrt(): emulates the fdsqrt instruction #
+# #
+# XREF **************************************************************** #
+# scale_sqrt() - scale the source operand #
+# unf_res() - return default underflow result #
+# ovf_res() - return default overflow result #
+# res_qnan_1op() - return QNAN result #
+# res_snan_1op() - return SNAN result #
+# #
+# INPUT *************************************************************** #
+# a0 = pointer to extended precision source operand #
+# d0 = rnd prec,mode #
+# #
+# OUTPUT ************************************************************** #
+# fp0 = result #
+# fp1 = EXOP (if exception occurred) #
+# #
+# ALGORITHM *********************************************************** #
+# Handle NANs, infinities, and zeroes as special cases. Divide #
+# norms/denorms into ext/sgl/dbl precision. #
+# For norms/denorms, scale the exponents such that a sqrt #
+# instruction won't cause an exception. Use the regular fsqrt to #
+# compute a result. Check if the original operands would have taken #
+# an exception. If so, return the default overflow/underflow result #
+# and return the EXOP if exceptions are enabled. Else, scale the #
+# result operand to the proper exponent. #
+# #
+#########################################################################
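+#
+# Why scaling keeps the sqrt itself exception-free, as a C sketch
+# (illustrative only; it relies on sqrt(m * 2^2k) == sqrt(m) * 2^k):
+#
+#	#include <math.h>
+#
+#	double scaled_sqrt(double m, int k2)   /* k2 = even exponent */
+#	{
+#		return ldexp(sqrt(m), k2 / 2); /* == sqrt(ldexp(m, k2)) */
+#	}
+#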
+
+ global fssqrt
+fssqrt:
+ andi.b &0x30,%d0 # clear rnd prec
+ ori.b &s_mode*0x10,%d0 # insert sgl precision
+ bra.b fsqrt
+
+ global fdsqrt
+fdsqrt:
+ andi.b &0x30,%d0 # clear rnd prec
+ ori.b &d_mode*0x10,%d0 # insert dbl precision
+
+ global fsqrt
+fsqrt:
+ mov.l %d0,L_SCR3(%a6) # store rnd info
+ clr.w %d1
+ mov.b STAG(%a6),%d1
+ bne.w fsqrt_not_norm # optimize on non-norm input
+
+#
+# SQUARE ROOT: norms and denorms ONLY!
+#
+fsqrt_norm:
+ tst.b SRC_EX(%a0) # is operand negative?
+ bmi.l res_operr # yes
+
+ andi.b &0xc0,%d0 # is precision extended?
+ bne.b fsqrt_not_ext # no; go handle sgl or dbl
+
+ fmov.l L_SCR3(%a6),%fpcr # set FPCR
+ fmov.l &0x0,%fpsr # clear FPSR
+
+ fsqrt.x (%a0),%fp0 # execute square root
+
+ fmov.l %fpsr,%d1
+ or.l %d1,USER_FPSR(%a6) # set N,INEX
+
+ rts
+
+fsqrt_denorm:
+ tst.b SRC_EX(%a0) # is operand negative?
+ bmi.l res_operr # yes
+
+ andi.b &0xc0,%d0 # is precision extended?
+ bne.b fsqrt_not_ext # no; go handle sgl or dbl
+
+ mov.w SRC_EX(%a0),FP_SCR0_EX(%a6)
+ mov.l SRC_HI(%a0),FP_SCR0_HI(%a6)
+ mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
+
+ bsr.l scale_sqrt # calculate scale factor
+
+ bra.w fsqrt_sd_normal
+
+#
+# operand is either single or double
+#
+fsqrt_not_ext:
+ cmpi.b %d0,&s_mode*0x10 # separate sgl/dbl prec
+ bne.w fsqrt_dbl
+
+#
+# operand is to be rounded to single precision
+#
+fsqrt_sgl:
+ mov.w SRC_EX(%a0),FP_SCR0_EX(%a6)
+ mov.l SRC_HI(%a0),FP_SCR0_HI(%a6)
+ mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
+
+ bsr.l scale_sqrt # calculate scale factor
+
+ cmpi.l %d0,&0x3fff-0x3f81 # will move in underflow?
+ beq.w fsqrt_sd_may_unfl
+ bgt.w fsqrt_sd_unfl # yes; go handle underflow
+ cmpi.l %d0,&0x3fff-0x407f # will move in overflow?
+ beq.w fsqrt_sd_may_ovfl # maybe; go check
+ blt.w fsqrt_sd_ovfl # yes; go handle overflow
+
+#
+# operand will NOT overflow or underflow when moved into the fp reg file
+#
+fsqrt_sd_normal:
+ fmov.l &0x0,%fpsr # clear FPSR
+ fmov.l L_SCR3(%a6),%fpcr # set FPCR
+
+	fsqrt.x		FP_SCR0(%a6),%fp0	# perform square root
+
+ fmov.l %fpsr,%d1 # save FPSR
+ fmov.l &0x0,%fpcr # clear FPCR
+
+ or.l %d1,USER_FPSR(%a6) # save INEX2,N
+
+fsqrt_sd_normal_exit:
+ mov.l %d2,-(%sp) # save d2
+ fmovm.x &0x80,FP_SCR0(%a6) # store out result
+ mov.w FP_SCR0_EX(%a6),%d1 # load sgn,exp
+ mov.l %d1,%d2 # make a copy
+ andi.l &0x7fff,%d1 # strip sign
+	sub.l		%d0,%d1		# subtract scale factor
+ andi.w &0x8000,%d2 # keep old sign
+ or.w %d1,%d2 # concat old sign,new exp
+ mov.w %d2,FP_SCR0_EX(%a6) # insert new exponent
+ mov.l (%sp)+,%d2 # restore d2
+ fmovm.x FP_SCR0(%a6),&0x80 # return result in fp0
+ rts
+
+#
+# operand is to be rounded to double precision
+#
+fsqrt_dbl:
+ mov.w SRC_EX(%a0),FP_SCR0_EX(%a6)
+ mov.l SRC_HI(%a0),FP_SCR0_HI(%a6)
+ mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
+
+ bsr.l scale_sqrt # calculate scale factor
+
+ cmpi.l %d0,&0x3fff-0x3c01 # will move in underflow?
+ beq.w fsqrt_sd_may_unfl
+ bgt.b fsqrt_sd_unfl # yes; go handle underflow
+ cmpi.l %d0,&0x3fff-0x43ff # will move in overflow?
+ beq.w fsqrt_sd_may_ovfl # maybe; go check
+ blt.w fsqrt_sd_ovfl # yes; go handle overflow
+	bra.w		fsqrt_sd_normal	# no; go handle normalized op
+
+# we're on the line here and the distinguishing characteristic is whether
+# the exponent is 3fff or 3ffe. if it's 3ffe, then it's a safe number;
+# otherwise, fall through to underflow.
+fsqrt_sd_may_unfl:
+ btst &0x0,1+FP_SCR0_EX(%a6) # is exponent 0x3fff?
+ bne.w fsqrt_sd_normal # yes, so no underflow
+
+#
+# operand WILL underflow when moved into the fp register file
+#
+fsqrt_sd_unfl:
+ bset &unfl_bit,FPSR_EXCEPT(%a6) # set unfl exc bit
+
+ fmov.l &rz_mode*0x10,%fpcr # set FPCR
+ fmov.l &0x0,%fpsr # clear FPSR
+
+ fsqrt.x FP_SCR0(%a6),%fp0 # execute square root
+
+ fmov.l %fpsr,%d1 # save status
+ fmov.l &0x0,%fpcr # clear FPCR
+
+ or.l %d1,USER_FPSR(%a6) # save INEX2,N
+
+# if underflow or inexact is enabled, go calculate EXOP first.
+ mov.b FPCR_ENABLE(%a6),%d1
+ andi.b &0x0b,%d1 # is UNFL or INEX enabled?
+ bne.b fsqrt_sd_unfl_ena # yes
+
+fsqrt_sd_unfl_dis:
+ fmovm.x &0x80,FP_SCR0(%a6) # store out result
+
+ lea FP_SCR0(%a6),%a0 # pass: result addr
+ mov.l L_SCR3(%a6),%d1 # pass: rnd prec,mode
+ bsr.l unf_res # calculate default result
+ or.b %d0,FPSR_CC(%a6) # set possible 'Z' ccode
+ fmovm.x FP_SCR0(%a6),&0x80 # return default result in fp0
+ rts
+
+#
+# operand will underflow AND underflow is enabled.
+# therefore, we must return the result rounded to extended precision.
+#
+fsqrt_sd_unfl_ena:
+ mov.l FP_SCR0_HI(%a6),FP_SCR1_HI(%a6)
+ mov.l FP_SCR0_LO(%a6),FP_SCR1_LO(%a6)
+ mov.w FP_SCR0_EX(%a6),%d1 # load current exponent
+
+ mov.l %d2,-(%sp) # save d2
+ mov.l %d1,%d2 # make a copy
+ andi.l &0x7fff,%d1 # strip sign
+ andi.w &0x8000,%d2 # keep old sign
+ sub.l %d0,%d1 # subtract scale factor
+ addi.l &0x6000,%d1 # add new bias
+ andi.w &0x7fff,%d1
+ or.w %d2,%d1 # concat new sign,new exp
+ mov.w %d1,FP_SCR1_EX(%a6) # insert new exp
+ fmovm.x FP_SCR1(%a6),&0x40 # return EXOP in fp1
+ mov.l (%sp)+,%d2 # restore d2
+ bra.b fsqrt_sd_unfl_dis
+
+#
+# operand WILL overflow.
+#
+fsqrt_sd_ovfl:
+ fmov.l &0x0,%fpsr # clear FPSR
+ fmov.l L_SCR3(%a6),%fpcr # set FPCR
+
+ fsqrt.x FP_SCR0(%a6),%fp0 # perform square root
+
+ fmov.l &0x0,%fpcr # clear FPCR
+ fmov.l %fpsr,%d1 # save FPSR
+
+ or.l %d1,USER_FPSR(%a6) # save INEX2,N
+
+fsqrt_sd_ovfl_tst:
+ or.l &ovfl_inx_mask,USER_FPSR(%a6) # set ovfl/aovfl/ainex
+
+ mov.b FPCR_ENABLE(%a6),%d1
+ andi.b &0x13,%d1 # is OVFL or INEX enabled?
+ bne.b fsqrt_sd_ovfl_ena # yes
+
+#
+# OVFL is not enabled; therefore, we must create the default result by
+# calling ovf_res().
+#
+fsqrt_sd_ovfl_dis:
+ btst &neg_bit,FPSR_CC(%a6) # is result negative?
+ sne %d1 # set sign param accordingly
+ mov.l L_SCR3(%a6),%d0 # pass: prec,mode
+ bsr.l ovf_res # calculate default result
+ or.b %d0,FPSR_CC(%a6) # set INF,N if applicable
+ fmovm.x (%a0),&0x80 # return default result in fp0
+ rts
+
+#
+# OVFL is enabled.
+# the INEX2 bit has already been updated by the round to the correct precision.
+# now, round to extended(and don't alter the FPSR).
+#
+fsqrt_sd_ovfl_ena:
+ mov.l %d2,-(%sp) # save d2
+ mov.w FP_SCR0_EX(%a6),%d1 # fetch {sgn,exp}
+ mov.l %d1,%d2 # make a copy
+ andi.l &0x7fff,%d1 # strip sign
+ andi.w &0x8000,%d2 # keep old sign
+	sub.l		%d0,%d1		# subtract scale factor
+ subi.l &0x6000,%d1 # subtract bias
+ andi.w &0x7fff,%d1
+ or.w %d2,%d1 # concat sign,exp
+ mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent
+ fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
+ mov.l (%sp)+,%d2 # restore d2
+ bra.b fsqrt_sd_ovfl_dis
+
+#
+# the move in MAY overflow. so...
+#
+fsqrt_sd_may_ovfl:
+ btst &0x0,1+FP_SCR0_EX(%a6) # is exponent 0x3fff?
+ bne.w fsqrt_sd_ovfl # yes, so overflow
+
+ fmov.l &0x0,%fpsr # clear FPSR
+ fmov.l L_SCR3(%a6),%fpcr # set FPCR
+
+	fsqrt.x		FP_SCR0(%a6),%fp0	# perform square root
+
+ fmov.l %fpsr,%d1 # save status
+ fmov.l &0x0,%fpcr # clear FPCR
+
+ or.l %d1,USER_FPSR(%a6) # save INEX2,N
+
+ fmov.x %fp0,%fp1 # make a copy of result
+ fcmp.b %fp1,&0x1 # is |result| >= 1.b?
+ fbge.w fsqrt_sd_ovfl_tst # yes; overflow has occurred
+
+# no, it didn't overflow; we have correct result
+ bra.w fsqrt_sd_normal_exit
+
+##########################################################################
+
+#
+# input is not normalized; what is it?
+#
+fsqrt_not_norm:
+ cmpi.b %d1,&DENORM # weed out DENORM
+ beq.w fsqrt_denorm
+ cmpi.b %d1,&ZERO # weed out ZERO
+ beq.b fsqrt_zero
+ cmpi.b %d1,&INF # weed out INF
+ beq.b fsqrt_inf
+ cmpi.b %d1,&SNAN # weed out SNAN
+ beq.l res_snan_1op
+ bra.l res_qnan_1op
+
+#
+# fsqrt(+0) = +0
+# fsqrt(-0) = -0
+# fsqrt(+INF) = +INF
+# fsqrt(-INF) = OPERR
+#
+fsqrt_zero:
+ tst.b SRC_EX(%a0) # is ZERO positive or negative?
+ bmi.b fsqrt_zero_m # negative
+fsqrt_zero_p:
+ fmov.s &0x00000000,%fp0 # return +ZERO
+ mov.b &z_bmask,FPSR_CC(%a6) # set 'Z' ccode bit
+ rts
+fsqrt_zero_m:
+ fmov.s &0x80000000,%fp0 # return -ZERO
+ mov.b &z_bmask+neg_bmask,FPSR_CC(%a6) # set 'Z','N' ccode bits
+ rts
+
+fsqrt_inf:
+ tst.b SRC_EX(%a0) # is INF positive or negative?
+ bmi.l res_operr # negative
+fsqrt_inf_p:
+ fmovm.x SRC(%a0),&0x80 # return +INF in fp0
+ mov.b &inf_bmask,FPSR_CC(%a6) # set 'I' ccode bit
+ rts
+
+##########################################################################
+
+#########################################################################
+# XDEF **************************************************************** #
+# addsub_scaler2(): scale inputs to fadd/fsub such that no #
+# OVFL/UNFL exceptions will result #
+# #
+# XREF **************************************************************** #
+# norm() - normalize mantissa after adjusting exponent #
+# #
+# INPUT *************************************************************** #
+# FP_SRC(a6) = fp op1(src) #
+# FP_DST(a6) = fp op2(dst) #
+# #
+# OUTPUT ************************************************************** #
+# FP_SRC(a6) = fp op1 scaled(src) #
+# FP_DST(a6) = fp op2 scaled(dst) #
+# d0 = scale amount #
+# #
+# ALGORITHM *********************************************************** #
+# If the DST exponent is > the SRC exponent, set the DST exponent #
+# equal to 0x3fff and scale the SRC exponent by the value that the #
+# DST exponent was scaled by. If the SRC exponent is greater or equal, #
+# do the opposite. Return this scale factor in d0. #
+# If the two exponents differ by > the number of mantissa bits #
+# plus two, then set the smallest exponent to a very small value as a #
+# quick shortcut. #
+# #
+#########################################################################
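+#
+# In C terms (illustrative only; exponents are biased and mant_len is
+# 64 for extended precision):
+#
+#	int addsub_scale(int *big, int *small, int mant_len)
+#	{
+#		int scale = 0x3fff - *big;  /* shift applied to both  */
+#		if (*big - *small >= mant_len + 2)
+#			*small = 1;         /* only a sticky bit left */
+#		else
+#			*small += scale;
+#		*big = 0x3fff;
+#		return scale;
+#	}
+#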
+
+ global addsub_scaler2
+addsub_scaler2:
+ mov.l SRC_HI(%a0),FP_SCR0_HI(%a6)
+ mov.l DST_HI(%a1),FP_SCR1_HI(%a6)
+ mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
+ mov.l DST_LO(%a1),FP_SCR1_LO(%a6)
+ mov.w SRC_EX(%a0),%d0
+ mov.w DST_EX(%a1),%d1
+ mov.w %d0,FP_SCR0_EX(%a6)
+ mov.w %d1,FP_SCR1_EX(%a6)
+
+ andi.w &0x7fff,%d0
+ andi.w &0x7fff,%d1
+ mov.w %d0,L_SCR1(%a6) # store src exponent
+ mov.w %d1,2+L_SCR1(%a6) # store dst exponent
+
+ cmp.w %d0, %d1 # is src exp >= dst exp?
+ bge.l src_exp_ge2
+
+# dst exp is > src exp; scale dst to exp = 0x3fff
+dst_exp_gt2:
+ bsr.l scale_to_zero_dst
+ mov.l %d0,-(%sp) # save scale factor
+
+	cmpi.b		STAG(%a6),&DENORM	# is src denormalized?
+ bne.b cmpexp12
+
+ lea FP_SCR0(%a6),%a0
+ bsr.l norm # normalize the denorm; result is new exp
+ neg.w %d0 # new exp = -(shft val)
+	mov.w		%d0,L_SCR1(%a6)	# insert new exp
+
+cmpexp12:
+ mov.w 2+L_SCR1(%a6),%d0
+ subi.w &mantissalen+2,%d0 # subtract mantissalen+2 from larger exp
+
+ cmp.w %d0,L_SCR1(%a6) # is difference >= len(mantissa)+2?
+ bge.b quick_scale12
+
+ mov.w L_SCR1(%a6),%d0
+ add.w 0x2(%sp),%d0 # scale src exponent by scale factor
+ mov.w FP_SCR0_EX(%a6),%d1
+ and.w &0x8000,%d1
+ or.w %d1,%d0 # concat {sgn,new exp}
+	mov.w		%d0,FP_SCR0_EX(%a6)	# insert new src exponent
+
+ mov.l (%sp)+,%d0 # return SCALE factor
+ rts
+
+quick_scale12:
+ andi.w &0x8000,FP_SCR0_EX(%a6) # zero src exponent
+ bset &0x0,1+FP_SCR0_EX(%a6) # set exp = 1
+
+ mov.l (%sp)+,%d0 # return SCALE factor
+ rts
+
+# src exp is >= dst exp; scale src to exp = 0x3fff
+src_exp_ge2:
+ bsr.l scale_to_zero_src
+ mov.l %d0,-(%sp) # save scale factor
+
+ cmpi.b DTAG(%a6),&DENORM # is dst denormalized?
+ bne.b cmpexp22
+ lea FP_SCR1(%a6),%a0
+ bsr.l norm # normalize the denorm; result is new exp
+ neg.w %d0 # new exp = -(shft val)
+	mov.w		%d0,2+L_SCR1(%a6)	# insert new exp
+
+cmpexp22:
+ mov.w L_SCR1(%a6),%d0
+ subi.w &mantissalen+2,%d0 # subtract mantissalen+2 from larger exp
+
+ cmp.w %d0,2+L_SCR1(%a6) # is difference >= len(mantissa)+2?
+ bge.b quick_scale22
+
+ mov.w 2+L_SCR1(%a6),%d0
+ add.w 0x2(%sp),%d0 # scale dst exponent by scale factor
+ mov.w FP_SCR1_EX(%a6),%d1
+ andi.w &0x8000,%d1
+ or.w %d1,%d0 # concat {sgn,new exp}
+ mov.w %d0,FP_SCR1_EX(%a6) # insert new dst exponent
+
+ mov.l (%sp)+,%d0 # return SCALE factor
+ rts
+
+quick_scale22:
+ andi.w &0x8000,FP_SCR1_EX(%a6) # zero dst exponent
+ bset &0x0,1+FP_SCR1_EX(%a6) # set exp = 1
+
+ mov.l (%sp)+,%d0 # return SCALE factor
+ rts
+
+##########################################################################
+
+#########################################################################
+# XDEF **************************************************************** #
+# scale_to_zero_src(): scale the exponent of extended precision #
+# value at FP_SCR0(a6). #
+# #
+# XREF **************************************************************** #
+# norm() - normalize the mantissa if the operand was a DENORM #
+# #
+# INPUT *************************************************************** #
+# FP_SCR0(a6) = extended precision operand to be scaled #
+# #
+# OUTPUT ************************************************************** #
+# FP_SCR0(a6) = scaled extended precision operand #
+# d0 = scale value #
+# #
+# ALGORITHM *********************************************************** #
+# Set the exponent of the input operand to 0x3fff. Save the value #
+# of the difference between the original and new exponent. Then, #
+# normalize the operand if it was a DENORM. Add this normalization #
+# value to the previous value. Return the result. #
+# #
+#########################################################################
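+#
+# In C terms (illustrative only; for a DENORM, norm() has already run
+# and its shift count yields a negative effective exponent):
+#
+#	int scale_to_zero(int *biased_exp)
+#	{
+#		int old = *biased_exp;    /* may be <= 0 for a denorm */
+#		*biased_exp = 0x3fff;
+#		return 0x3fff - old;      /* scale = BIAS + (-exp)    */
+#	}
+#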
+
+ global scale_to_zero_src
+scale_to_zero_src:
+ mov.w FP_SCR0_EX(%a6),%d1 # extract operand's {sgn,exp}
+ mov.w %d1,%d0 # make a copy
+
+ andi.l &0x7fff,%d1 # extract operand's exponent
+
+ andi.w &0x8000,%d0 # extract operand's sgn
+ or.w &0x3fff,%d0 # insert new operand's exponent(=0)
+
+ mov.w %d0,FP_SCR0_EX(%a6) # insert biased exponent
+
+	cmpi.b		STAG(%a6),&DENORM	# is operand a DENORM?
+ beq.b stzs_denorm # normalize the DENORM
+
+stzs_norm:
+ mov.l &0x3fff,%d0
+ sub.l %d1,%d0 # scale = BIAS + (-exp)
+
+ rts
+
+stzs_denorm:
+ lea FP_SCR0(%a6),%a0 # pass ptr to src op
+ bsr.l norm # normalize denorm
+ neg.l %d0 # new exponent = -(shft val)
+	mov.l		%d0,%d1			# pass new exp for scale calc
+ bra.b stzs_norm # finish scaling
+
+###
+
+#########################################################################
+# XDEF **************************************************************** #
+# scale_sqrt(): scale the input operand exponent so a subsequent #
+# fsqrt operation won't take an exception. #
+# #
+# XREF **************************************************************** #
+# norm() - normalize the mantissa if the operand was a DENORM #
+# #
+# INPUT *************************************************************** #
+# FP_SCR0(a6) = extended precision operand to be scaled #
+# #
+# OUTPUT ************************************************************** #
+# FP_SCR0(a6) = scaled extended precision operand #
+# d0 = scale value #
+# #
+# ALGORITHM *********************************************************** #
+# If the input operand is a DENORM, normalize it. #
+# If the exponent of the input operand is even, set the exponent #
+# to 0x3ffe and return a scale factor of "(exp-0x3ffe)/2". If the #
+# exponent of the input operand is odd, set the exponent to 0x3fff and #
+# return a scale factor of "(exp-0x3fff)/2". #
+# #
+#########################################################################
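+#
+# The even/odd split in C (illustrative only): 0x3fff is odd and 0x3ffe
+# is even, so (new_exp - exp) is always even and halves exactly:
+#
+#	int scale_sqrt_factor(int exp, int *new_exp)  /* biased exp */
+#	{
+#		*new_exp = (exp & 1) ? 0x3fff : 0x3ffe;
+#		return (*new_exp - exp) / 2;  /* applied after sqrt */
+#	}
+#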
+
+ global scale_sqrt
+scale_sqrt:
+	cmpi.b		STAG(%a6),&DENORM	# is operand a DENORM?
+ beq.b ss_denorm # normalize the DENORM
+
+ mov.w FP_SCR0_EX(%a6),%d1 # extract operand's {sgn,exp}
+ andi.l &0x7fff,%d1 # extract operand's exponent
+
+ andi.w &0x8000,FP_SCR0_EX(%a6) # extract operand's sgn
+
+ btst &0x0,%d1 # is exp even or odd?
+ beq.b ss_norm_even
+
+ ori.w &0x3fff,FP_SCR0_EX(%a6) # insert new operand's exponent(=0)
+
+ mov.l &0x3fff,%d0
+ sub.l %d1,%d0 # scale = BIAS + (-exp)
+ asr.l &0x1,%d0 # divide scale factor by 2
+ rts
+
+ss_norm_even:
+	ori.w		&0x3ffe,FP_SCR0_EX(%a6)	# insert new operand's exponent(=-1)
+
+ mov.l &0x3ffe,%d0
+ sub.l %d1,%d0 # scale = BIAS + (-exp)
+ asr.l &0x1,%d0 # divide scale factor by 2
+ rts
+
+ss_denorm:
+ lea FP_SCR0(%a6),%a0 # pass ptr to src op
+ bsr.l norm # normalize denorm
+
+ btst &0x0,%d0 # is exp even or odd?
+ beq.b ss_denorm_even
+
+ ori.w &0x3fff,FP_SCR0_EX(%a6) # insert new operand's exponent(=0)
+
+ add.l &0x3fff,%d0
+ asr.l &0x1,%d0 # divide scale factor by 2
+ rts
+
+ss_denorm_even:
+	ori.w		&0x3ffe,FP_SCR0_EX(%a6)	# insert new operand's exponent(=-1)
+
+ add.l &0x3ffe,%d0
+ asr.l &0x1,%d0 # divide scale factor by 2
+ rts
+
+###
+
+#########################################################################
+# XDEF **************************************************************** #
+# scale_to_zero_dst(): scale the exponent of extended precision #
+# value at FP_SCR1(a6). #
+# #
+# XREF **************************************************************** #
+# norm() - normalize the mantissa if the operand was a DENORM #
+# #
+# INPUT *************************************************************** #
+# FP_SCR1(a6) = extended precision operand to be scaled #
+# #
+# OUTPUT ************************************************************** #
+# FP_SCR1(a6) = scaled extended precision operand #
+# d0 = scale value #
+# #
+# ALGORITHM *********************************************************** #
+# Set the exponent of the input operand to 0x3fff. Save the value #
+# of the difference between the original and new exponent. Then, #
+# normalize the operand if it was a DENORM. Add this normalization #
+# value to the previous value. Return the result. #
+# #
+#########################################################################
+
+ global scale_to_zero_dst
+scale_to_zero_dst:
+ mov.w FP_SCR1_EX(%a6),%d1 # extract operand's {sgn,exp}
+ mov.w %d1,%d0 # make a copy
+
+ andi.l &0x7fff,%d1 # extract operand's exponent
+
+ andi.w &0x8000,%d0 # extract operand's sgn
+ or.w &0x3fff,%d0 # insert new operand's exponent(=0)
+
+ mov.w %d0,FP_SCR1_EX(%a6) # insert biased exponent
+
+	cmpi.b		DTAG(%a6),&DENORM	# is operand a DENORM?
+ beq.b stzd_denorm # normalize the DENORM
+
+stzd_norm:
+ mov.l &0x3fff,%d0
+ sub.l %d1,%d0 # scale = BIAS + (-exp)
+ rts
+
+stzd_denorm:
+ lea FP_SCR1(%a6),%a0 # pass ptr to dst op
+ bsr.l norm # normalize denorm
+ neg.l %d0 # new exponent = -(shft val)
+	mov.l		%d0,%d1			# pass new exp for scale calc
+ bra.b stzd_norm # finish scaling
+
+##########################################################################
+
+#########################################################################
+# XDEF **************************************************************** #
+# res_qnan(): return default result w/ QNAN operand for dyadic #
+# res_snan(): return default result w/ SNAN operand for dyadic #
+# res_qnan_1op(): return dflt result w/ QNAN operand for monadic #
+# res_snan_1op(): return dflt result w/ SNAN operand for monadic #
+# #
+# XREF **************************************************************** #
+# None #
+# #
+# INPUT *************************************************************** #
+# FP_SRC(a6) = pointer to extended precision src operand #
+# FP_DST(a6) = pointer to extended precision dst operand #
+# #
+# OUTPUT ************************************************************** #
+# fp0 = default result #
+# #
+# ALGORITHM *********************************************************** #
+# If either operand (but not both operands) of an operation is a #
+# nonsignalling NAN, then that NAN is returned as the result. If both #
+# operands are nonsignalling NANs, then the destination operand #
+# nonsignalling NAN is returned as the result. #
+# If either operand to an operation is a signalling NAN (SNAN), #
+# then, the SNAN bit is set in the FPSR EXC byte. If the SNAN trap #
+# enable bit is set in the FPCR, then the trap is taken and the #
+# destination is not modified. If the SNAN trap enable bit is not set, #
+# then the SNAN is converted to a nonsignalling NAN (by setting the #
+# SNAN bit in the operand to one), and the operation continues as #
+# described in the preceding paragraph, for nonsignalling NANs. #
+# Make sure the appropriate FPSR bits are set before exiting. #
+# #
+#########################################################################
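+#
+# The dyadic NAN rules in C (illustrative only; the is_*() and quiet()
+# helpers are hypothetical):
+#
+#	xfp *pick_nan(xfp *src, xfp *dst, unsigned long *fpsr)
+#	{
+#		xfp *r = is_nan(dst) ? dst : src; /* dst NAN wins    */
+#		*fpsr |= nan_mask;
+#		if (is_snan(src) || is_snan(dst))
+#			*fpsr |= snan_mask | aiop_mask;
+#		quiet(r);  /* set the SNAN bit; a no-op on a QNAN     */
+#		return r;
+#	}
+#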
+
+ global res_qnan
+ global res_snan
+res_qnan:
+res_snan:
+ cmp.b DTAG(%a6), &SNAN # is the dst an SNAN?
+ beq.b dst_snan2
+ cmp.b DTAG(%a6), &QNAN # is the dst a QNAN?
+ beq.b dst_qnan2
+src_nan:
+ cmp.b STAG(%a6), &QNAN
+ beq.b src_qnan2
+ global res_snan_1op
+res_snan_1op:
+src_snan2:
+ bset &0x6, FP_SRC_HI(%a6) # set SNAN bit
+ or.l &nan_mask+aiop_mask+snan_mask, USER_FPSR(%a6)
+ lea FP_SRC(%a6), %a0
+ bra.b nan_comp
+ global res_qnan_1op
+res_qnan_1op:
+src_qnan2:
+ or.l &nan_mask, USER_FPSR(%a6)
+ lea FP_SRC(%a6), %a0
+ bra.b nan_comp
+dst_snan2:
+ or.l &nan_mask+aiop_mask+snan_mask, USER_FPSR(%a6)
+ bset &0x6, FP_DST_HI(%a6) # set SNAN bit
+ lea FP_DST(%a6), %a0
+ bra.b nan_comp
+dst_qnan2:
+ lea FP_DST(%a6), %a0
+ cmp.b STAG(%a6), &SNAN
+ bne nan_done
+ or.l &aiop_mask+snan_mask, USER_FPSR(%a6)
+nan_done:
+ or.l &nan_mask, USER_FPSR(%a6)
+nan_comp:
+ btst &0x7, FTEMP_EX(%a0) # is NAN neg?
+ beq.b nan_not_neg
+ or.l &neg_mask, USER_FPSR(%a6)
+nan_not_neg:
+ fmovm.x (%a0), &0x80
+ rts
+
+#########################################################################
+# XDEF **************************************************************** #
+# res_operr(): return default result during operand error #
+# #
+# XREF **************************************************************** #
+# None #
+# #
+# INPUT *************************************************************** #
+# None #
+# #
+# OUTPUT ************************************************************** #
+# fp0 = default operand error result #
+# #
+# ALGORITHM *********************************************************** #
+# A nonsignalling NAN is returned as the default result when #
+# an operand error occurs for the following cases: #
+# #
+# Multiply: (Infinity x Zero) #
+# Divide : (Zero / Zero) || (Infinity / Infinity) #
+# #
+#########################################################################
+
+ global res_operr
+res_operr:
+ or.l &nan_mask+operr_mask+aiop_mask, USER_FPSR(%a6)
+ fmovm.x nan_return(%pc), &0x80
+ rts
+
+nan_return:
+ long 0x7fff0000, 0xffffffff, 0xffffffff
+
+#########################################################################
+# fdbcc(): routine to emulate the fdbcc instruction #
+# #
+# XDEF **************************************************************** #
+# _fdbcc() #
+# #
+# XREF **************************************************************** #
+# fetch_dreg() - fetch Dn value #
+# store_dreg_l() - store updated Dn value #
+# #
+# INPUT *************************************************************** #
+# d0 = displacement #
+# #
+# OUTPUT ************************************************************** #
+# none #
+# #
+# ALGORITHM *********************************************************** #
+# This routine checks which conditional predicate is specified by #
+# the stacked fdbcc instruction opcode and then branches to a routine #
+# for that predicate. The corresponding fbcc instruction is then used #
+# to see whether the condition (specified by the stacked FPSR) is true #
+# or false. #
+# If a BSUN exception should be indicated, the BSUN and ABSUN #
+# bits are set in the stacked FPSR. If the BSUN exception is enabled, #
+# the fbsun_flg is set in the SPCOND_FLG location on the stack. If an #
+# enabled BSUN should not be flagged and the predicate is false, then #
+# Dn is fetched and decremented by one. If Dn is not equal to -1, add #
+# the displacement value to the stacked PC so that when an "rte" is #
+# finally executed, the branch occurs. #
+# #
+#########################################################################
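+#
+# The fdbcc semantics being emulated, in C terms (illustrative only):
+#
+#	void fdbcc(int cond, short *dn, short disp, unsigned long *pc)
+#	{
+#		if (cond)
+#			return;          /* true: fall through        */
+#		if (--(*dn) != -1)
+#			*pc += disp;     /* counter live: branch back */
+#	}
+#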
+ global _fdbcc
+_fdbcc:
+ mov.l %d0,L_SCR1(%a6) # save displacement
+
+ mov.w EXC_CMDREG(%a6),%d0 # fetch predicate
+
+ clr.l %d1 # clear scratch reg
+ mov.b FPSR_CC(%a6),%d1 # fetch fp ccodes
+ ror.l &0x8,%d1 # rotate to top byte
+ fmov.l %d1,%fpsr # insert into FPSR
+
+ mov.w (tbl_fdbcc.b,%pc,%d0.w*2),%d1 # load table
+ jmp (tbl_fdbcc.b,%pc,%d1.w) # jump to fdbcc routine
+
+tbl_fdbcc:
+ short fdbcc_f - tbl_fdbcc # 00
+ short fdbcc_eq - tbl_fdbcc # 01
+ short fdbcc_ogt - tbl_fdbcc # 02
+ short fdbcc_oge - tbl_fdbcc # 03
+ short fdbcc_olt - tbl_fdbcc # 04
+ short fdbcc_ole - tbl_fdbcc # 05
+ short fdbcc_ogl - tbl_fdbcc # 06
+ short fdbcc_or - tbl_fdbcc # 07
+ short fdbcc_un - tbl_fdbcc # 08
+ short fdbcc_ueq - tbl_fdbcc # 09
+ short fdbcc_ugt - tbl_fdbcc # 10
+ short fdbcc_uge - tbl_fdbcc # 11
+ short fdbcc_ult - tbl_fdbcc # 12
+ short fdbcc_ule - tbl_fdbcc # 13
+ short fdbcc_neq - tbl_fdbcc # 14
+ short fdbcc_t - tbl_fdbcc # 15
+ short fdbcc_sf - tbl_fdbcc # 16
+ short fdbcc_seq - tbl_fdbcc # 17
+ short fdbcc_gt - tbl_fdbcc # 18
+ short fdbcc_ge - tbl_fdbcc # 19
+ short fdbcc_lt - tbl_fdbcc # 20
+ short fdbcc_le - tbl_fdbcc # 21
+ short fdbcc_gl - tbl_fdbcc # 22
+ short fdbcc_gle - tbl_fdbcc # 23
+ short fdbcc_ngle - tbl_fdbcc # 24
+ short fdbcc_ngl - tbl_fdbcc # 25
+ short fdbcc_nle - tbl_fdbcc # 26
+ short fdbcc_nlt - tbl_fdbcc # 27
+ short fdbcc_nge - tbl_fdbcc # 28
+ short fdbcc_ngt - tbl_fdbcc # 29
+ short fdbcc_sneq - tbl_fdbcc # 30
+ short fdbcc_st - tbl_fdbcc # 31
+
+#########################################################################
+# #
+# IEEE Nonaware tests #
+# #
+# For the IEEE nonaware tests, only the false branch changes the #
+# counter. However, the true branch may set bsun so we check to see #
+# if the NAN bit is set, in which case BSUN and AIOP will be set. #
+# #
+# The cases EQ and NE are shared by the Aware and Nonaware groups #
+# and are incapable of setting the BSUN exception bit. #
+# #
+# Typically, only one of the two possible branch directions could #
+# have the NAN bit set. #
+# (This is assuming the mutual exclusiveness of FPSR cc bit groupings #
+# is preserved.) #
+# #
+#########################################################################
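+#
+# The BSUN check repeated by each nonaware test below, in C terms
+# (illustrative only; a nonzero return means take the bsun handler):
+#
+#	int chk_bsun(unsigned char cc, unsigned long *fpsr,
+#		     unsigned char enable)
+#	{
+#		if (!(cc & (1 << nan_bit)))
+#			return 0;                  /* no NAN      */
+#		*fpsr |= bsun_mask | aiop_mask;    /* always flag */
+#		return enable & (1 << bsun_bit);   /* trap if ena */
+#	}
+#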
+
+#
+# equal:
+#
+# Z
+#
+fdbcc_eq:
+ fbeq.w fdbcc_eq_yes # equal?
+fdbcc_eq_no:
+ bra.w fdbcc_false # no; go handle counter
+fdbcc_eq_yes:
+ rts
+
+#
+# not equal:
+# _
+# Z
+#
+fdbcc_neq:
+ fbneq.w fdbcc_neq_yes # not equal?
+fdbcc_neq_no:
+ bra.w fdbcc_false # no; go handle counter
+fdbcc_neq_yes:
+ rts
+
+#
+# greater than:
+# _______
+# NANvZvN
+#
+fdbcc_gt:
+ fbgt.w fdbcc_gt_yes # greater than?
+ btst &nan_bit, FPSR_CC(%a6) # is NAN set in cc?
+ beq.w fdbcc_false # no;go handle counter
+ ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+ btst &bsun_bit, FPCR_ENABLE(%a6) # is BSUN enabled?
+ bne.w fdbcc_bsun # yes; we have an exception
+ bra.w fdbcc_false # no; go handle counter
+fdbcc_gt_yes:
+ rts # do nothing
+
+#
+# not greater than:
+#
+# NANvZvN
+#
+fdbcc_ngt:
+ fbngt.w fdbcc_ngt_yes # not greater than?
+fdbcc_ngt_no:
+ bra.w fdbcc_false # no; go handle counter
+fdbcc_ngt_yes:
+ btst &nan_bit, FPSR_CC(%a6) # is NAN set in cc?
+ beq.b fdbcc_ngt_done # no;go finish
+ ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+ btst &bsun_bit, FPCR_ENABLE(%a6) # is BSUN enabled?
+ bne.w fdbcc_bsun # yes; we have an exception
+fdbcc_ngt_done:
+ rts # no; do nothing
+
+#
+# greater than or equal:
+# _____
+# Zv(NANvN)
+#
+fdbcc_ge:
+ fbge.w fdbcc_ge_yes # greater than or equal?
+fdbcc_ge_no:
+ btst &nan_bit, FPSR_CC(%a6) # is NAN set in cc?
+ beq.w fdbcc_false # no;go handle counter
+ ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+ btst &bsun_bit, FPCR_ENABLE(%a6) # is BSUN enabled?
+ bne.w fdbcc_bsun # yes; we have an exception
+ bra.w fdbcc_false # no; go handle counter
+fdbcc_ge_yes:
+ btst &nan_bit, FPSR_CC(%a6) # is NAN set in cc?
+ beq.b fdbcc_ge_yes_done # no;go do nothing
+ ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+ btst &bsun_bit, FPCR_ENABLE(%a6) # is BSUN enabled?
+ bne.w fdbcc_bsun # yes; we have an exception
+fdbcc_ge_yes_done:
+ rts # do nothing
+
+#
+# not (greater than or equal):
+# _
+# NANv(N^Z)
+#
+fdbcc_nge:
+ fbnge.w fdbcc_nge_yes # not (greater than or equal)?
+fdbcc_nge_no:
+ bra.w fdbcc_false # no; go handle counter
+fdbcc_nge_yes:
+ btst &nan_bit, FPSR_CC(%a6) # is NAN set in cc?
+ beq.b fdbcc_nge_done # no;go finish
+ ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+ btst &bsun_bit, FPCR_ENABLE(%a6) # is BSUN enabled?
+ bne.w fdbcc_bsun # yes; we have an exception
+fdbcc_nge_done:
+ rts # no; do nothing
+
+#
+# less than:
+# _____
+# N^(NANvZ)
+#
+fdbcc_lt:
+ fblt.w fdbcc_lt_yes # less than?
+fdbcc_lt_no:
+ btst &nan_bit, FPSR_CC(%a6) # is NAN set in cc?
+ beq.w fdbcc_false # no; go handle counter
+ ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+ btst &bsun_bit, FPCR_ENABLE(%a6) # is BSUN enabled?
+ bne.w fdbcc_bsun # yes; we have an exception
+ bra.w fdbcc_false # no; go handle counter
+fdbcc_lt_yes:
+ rts # do nothing
+
+#
+# not less than:
+# _
+# NANv(ZvN)
+#
+fdbcc_nlt:
+ fbnlt.w fdbcc_nlt_yes # not less than?
+fdbcc_nlt_no:
+ bra.w fdbcc_false # no; go handle counter
+fdbcc_nlt_yes:
+ btst &nan_bit, FPSR_CC(%a6) # is NAN set in cc?
+ beq.b fdbcc_nlt_done # no;go finish
+ ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+ btst &bsun_bit, FPCR_ENABLE(%a6) # is BSUN enabled?
+ bne.w fdbcc_bsun # yes; we have an exception
+fdbcc_nlt_done:
+ rts # no; do nothing
+
+#
+# less than or equal:
+# ___
+# Zv(N^NAN)
+#
+fdbcc_le:
+ fble.w fdbcc_le_yes # less than or equal?
+fdbcc_le_no:
+ btst &nan_bit, FPSR_CC(%a6) # is NAN set in cc?
+ beq.w fdbcc_false # no; go handle counter
+ ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+ btst &bsun_bit, FPCR_ENABLE(%a6) # is BSUN enabled?
+ bne.w fdbcc_bsun # yes; we have an exception
+ bra.w fdbcc_false # no; go handle counter
+fdbcc_le_yes:
+ btst &nan_bit, FPSR_CC(%a6) # is NAN set in cc?
+ beq.b fdbcc_le_yes_done # no; go do nothing
+ ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+ btst &bsun_bit, FPCR_ENABLE(%a6) # is BSUN enabled?
+ bne.w fdbcc_bsun # yes; we have an exception
+fdbcc_le_yes_done:
+ rts # do nothing
+
+#
+# not (less than or equal):
+# ___
+# NANv(NvZ)
+#
+fdbcc_nle:
+ fbnle.w fdbcc_nle_yes # not (less than or equal)?
+fdbcc_nle_no:
+ bra.w fdbcc_false # no; go handle counter
+fdbcc_nle_yes:
+ btst &nan_bit, FPSR_CC(%a6) # is NAN set in cc?
+ beq.w fdbcc_nle_done # no; go finish
+ ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+ btst &bsun_bit, FPCR_ENABLE(%a6) # is BSUN enabled?
+ bne.w fdbcc_bsun # yes; we have an exception
+fdbcc_nle_done:
+ rts # no; do nothing
+
+#
+# greater or less than:
+# _____
+# NANvZ
+#
+fdbcc_gl:
+ fbgl.w fdbcc_gl_yes # greater or less than?
+fdbcc_gl_no:
+ btst &nan_bit, FPSR_CC(%a6) # is NAN set in cc?
+ beq.w fdbcc_false # no; handle counter
+ ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+ btst &bsun_bit, FPCR_ENABLE(%a6) # is BSUN enabled?
+ bne.w fdbcc_bsun # yes; we have an exception
+ bra.w fdbcc_false # no; go handle counter
+fdbcc_gl_yes:
+ rts # do nothing
+
+#
+# not (greater or less than):
+#
+# NANvZ
+#
+fdbcc_ngl:
+ fbngl.w fdbcc_ngl_yes # not (greater or less than)?
+fdbcc_ngl_no:
+ bra.w fdbcc_false # no; go handle counter
+fdbcc_ngl_yes:
+ btst &nan_bit, FPSR_CC(%a6) # is NAN set in cc?
+ beq.b fdbcc_ngl_done # no; go finish
+ ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+ btst &bsun_bit, FPCR_ENABLE(%a6) # is BSUN enabled?
+ bne.w fdbcc_bsun # yes; we have an exception
+fdbcc_ngl_done:
+ rts # no; do nothing
+
+#
+# greater, less, or equal:
+# ___
+# NAN
+#
+fdbcc_gle:
+ fbgle.w fdbcc_gle_yes # greater, less, or equal?
+fdbcc_gle_no:
+ ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+ btst &bsun_bit, FPCR_ENABLE(%a6) # is BSUN enabled?
+ bne.w fdbcc_bsun # yes; we have an exception
+ bra.w fdbcc_false # no; go handle counter
+fdbcc_gle_yes:
+ rts # do nothing
+
+#
+# not (greater, less, or equal):
+#
+# NAN
+#
+fdbcc_ngle:
+ fbngle.w fdbcc_ngle_yes # not (greater, less, or equal)?
+fdbcc_ngle_no:
+ bra.w fdbcc_false # no; go handle counter
+fdbcc_ngle_yes:
+ ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+ btst &bsun_bit, FPCR_ENABLE(%a6) # is BSUN enabled?
+ bne.w fdbcc_bsun # yes; we have an exception
+ rts # no; do nothing
+
+#########################################################################
+# #
+# Miscellaneous tests #
+# #
+# For the IEEE miscellaneous tests, all but fdbf and fdbt can set bsun. #
+# #
+#########################################################################
+
+#
+# false:
+#
+# False
+#
+fdbcc_f: # no bsun possible
+ bra.w fdbcc_false # go handle counter
+
+#
+# true:
+#
+# True
+#
+fdbcc_t: # no bsun possible
+ rts # do nothing
+
+#
+# signalling false:
+#
+# False
+#
+fdbcc_sf:
+ btst &nan_bit, FPSR_CC(%a6) # is NAN set?
+ beq.w fdbcc_false # no;go handle counter
+ ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+ btst &bsun_bit, FPCR_ENABLE(%a6) # is BSUN enabled?
+ bne.w fdbcc_bsun # yes; we have an exception
+ bra.w fdbcc_false # go handle counter
+
+#
+# signalling true:
+#
+# True
+#
+fdbcc_st:
+ btst &nan_bit, FPSR_CC(%a6) # is NAN set?
+ beq.b fdbcc_st_done # no;go finish
+ ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+ btst &bsun_bit, FPCR_ENABLE(%a6) # is BSUN enabled?
+ bne.w fdbcc_bsun # yes; we have an exception
+fdbcc_st_done:
+ rts
+
+#
+# signalling equal:
+#
+# Z
+#
+fdbcc_seq:
+ fbseq.w fdbcc_seq_yes # signalling equal?
+fdbcc_seq_no:
+ btst &nan_bit, FPSR_CC(%a6) # is NAN set?
+ beq.w fdbcc_false # no;go handle counter
+ ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+ btst &bsun_bit, FPCR_ENABLE(%a6) # is BSUN enabled?
+ bne.w fdbcc_bsun # yes; we have an exception
+ bra.w fdbcc_false # go handle counter
+fdbcc_seq_yes:
+ btst &nan_bit, FPSR_CC(%a6) # is NAN set?
+ beq.b fdbcc_seq_yes_done # no;go do nothing
+ ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+ btst &bsun_bit, FPCR_ENABLE(%a6) # is BSUN enabled?
+ bne.w fdbcc_bsun # yes; we have an exception
+fdbcc_seq_yes_done:
+ rts # yes; do nothing
+
+#
+# signalling not equal:
+# _
+# Z
+#
+fdbcc_sneq:
+ fbsneq.w fdbcc_sneq_yes # signalling not equal?
+fdbcc_sneq_no:
+ btst &nan_bit, FPSR_CC(%a6) # is NAN set?
+ beq.w fdbcc_false # no;go handle counter
+ ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+ btst &bsun_bit, FPCR_ENABLE(%a6) # is BSUN enabled?
+ bne.w fdbcc_bsun # yes; we have an exception
+ bra.w fdbcc_false # go handle counter
+fdbcc_sneq_yes:
+	btst		&nan_bit, FPSR_CC(%a6)	# is NAN set?
+ beq.w fdbcc_sneq_done # no;go finish
+ ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+ btst &bsun_bit, FPCR_ENABLE(%a6) # is BSUN enabled?
+ bne.w fdbcc_bsun # yes; we have an exception
+fdbcc_sneq_done:
+ rts
+
+#########################################################################
+# #
+# IEEE Aware tests #
+# #
+# For the IEEE aware tests, action is only taken if the result is false.#
+# Therefore, the opposite branch type is used to jump to the decrement #
+# routine. #
+# The BSUN exception will not be set for any of these tests. #
+# #
+#########################################################################
+
+#
+# ordered greater than:
+# _______
+# NANvZvN
+#
+fdbcc_ogt:
+ fbogt.w fdbcc_ogt_yes # ordered greater than?
+fdbcc_ogt_no:
+ bra.w fdbcc_false # no; go handle counter
+fdbcc_ogt_yes:
+ rts # yes; do nothing
+
+#
+# unordered or less or equal:
+#
+# NANvZvN
+#
+fdbcc_ule:
+ fbule.w fdbcc_ule_yes # unordered or less or equal?
+fdbcc_ule_no:
+ bra.w fdbcc_false # no; go handle counter
+fdbcc_ule_yes:
+ rts # yes; do nothing
+
+#
+# ordered greater than or equal:
+# _____
+# Zv(NANvN)
+#
+fdbcc_oge:
+ fboge.w fdbcc_oge_yes # ordered greater than or equal?
+fdbcc_oge_no:
+ bra.w fdbcc_false # no; go handle counter
+fdbcc_oge_yes:
+ rts # yes; do nothing
+
+#
+# unordered or less than:
+# _
+# NANv(N^Z)
+#
+fdbcc_ult:
+ fbult.w fdbcc_ult_yes # unordered or less than?
+fdbcc_ult_no:
+ bra.w fdbcc_false # no; go handle counter
+fdbcc_ult_yes:
+ rts # yes; do nothing
+
+#
+# ordered less than:
+# _____
+# N^(NANvZ)
+#
+fdbcc_olt:
+ fbolt.w fdbcc_olt_yes # ordered less than?
+fdbcc_olt_no:
+ bra.w fdbcc_false # no; go handle counter
+fdbcc_olt_yes:
+ rts # yes; do nothing
+
+#
+# unordered or greater or equal:
+#
+# NANvZvN
+#
+fdbcc_uge:
+	fbuge.w		fdbcc_uge_yes		# unordered or greater or equal?
+fdbcc_uge_no:
+ bra.w fdbcc_false # no; go handle counter
+fdbcc_uge_yes:
+ rts # yes; do nothing
+
+#
+# ordered less than or equal:
+# ___
+# Zv(N^NAN)
+#
+fdbcc_ole:
+	fbole.w		fdbcc_ole_yes		# ordered less than or equal?
+fdbcc_ole_no:
+ bra.w fdbcc_false # no; go handle counter
+fdbcc_ole_yes:
+ rts # yes; do nothing
+
+#
+# unordered or greater than:
+# ___
+# NANv(NvZ)
+#
+fdbcc_ugt:
+ fbugt.w fdbcc_ugt_yes # unordered or greater than?
+fdbcc_ugt_no:
+ bra.w fdbcc_false # no; go handle counter
+fdbcc_ugt_yes:
+ rts # yes; do nothing
+
+#
+# ordered greater or less than:
+# _____
+# NANvZ
+#
+fdbcc_ogl:
+ fbogl.w fdbcc_ogl_yes # ordered greater or less than?
+fdbcc_ogl_no:
+ bra.w fdbcc_false # no; go handle counter
+fdbcc_ogl_yes:
+ rts # yes; do nothing
+
+#
+# unordered or equal:
+#
+# NANvZ
+#
+fdbcc_ueq:
+ fbueq.w fdbcc_ueq_yes # unordered or equal?
+fdbcc_ueq_no:
+ bra.w fdbcc_false # no; go handle counter
+fdbcc_ueq_yes:
+ rts # yes; do nothing
+
+#
+# ordered:
+# ___
+# NAN
+#
+fdbcc_or:
+ fbor.w fdbcc_or_yes # ordered?
+fdbcc_or_no:
+ bra.w fdbcc_false # no; go handle counter
+fdbcc_or_yes:
+ rts # yes; do nothing
+
+#
+# unordered:
+#
+# NAN
+#
+fdbcc_un:
+ fbun.w fdbcc_un_yes # unordered?
+fdbcc_un_no:
+ bra.w fdbcc_false # no; go handle counter
+fdbcc_un_yes:
+ rts # yes; do nothing
+
+#######################################################################
+
+#
+# the bsun exception bit was not set.
+#
+# (1) subtract 1 from the count register
+# (2) if (cr == -1) then
+# pc = pc of next instruction
+# else
+# pc += sign_ext(16-bit displacement)
+#
+fdbcc_false:
+ mov.b 1+EXC_OPWORD(%a6), %d1 # fetch lo opword
+ andi.w &0x7, %d1 # extract count register
+
+ bsr.l fetch_dreg # fetch count value
+# make sure that d0 isn't corrupted between calls...
+
+ subq.w &0x1, %d0 # Dn - 1 -> Dn
+
+ bsr.l store_dreg_l # store new count value
+
+ cmpi.w %d0, &-0x1 # is (Dn == -1)?
+ bne.b fdbcc_false_cont # no;
+ rts
+
+fdbcc_false_cont:
+ mov.l L_SCR1(%a6),%d0 # fetch displacement
+ add.l USER_FPIAR(%a6),%d0 # add instruction PC
+ addq.l &0x4,%d0 # add instruction length
+ mov.l %d0,EXC_PC(%a6) # set new PC
+ rts
+
+# the emulation routine set bsun and BSUN was enabled. have to
+# fix stack and jump to the bsun handler.
+# let the caller of this routine shift the stack frame up to
+# eliminate the effective address field.
+fdbcc_bsun:
+ mov.b &fbsun_flg,SPCOND_FLG(%a6)
+ rts
+
+#########################################################################
+# ftrapcc(): routine to emulate the ftrapcc instruction #
+# #
+# XDEF **************************************************************** #
+# _ftrapcc() #
+# #
+# XREF **************************************************************** #
+# none #
+# #
+# INPUT *************************************************************** #
+# none #
+# #
+# OUTPUT ************************************************************** #
+# none #
+# #
+# ALGORITHM *********************************************************** #
+# This routine checks which conditional predicate is specified by #
+# the stacked ftrapcc instruction opcode and then branches to a routine #
+# for that predicate. The corresponding fbcc instruction is then used #
+# to see whether the condition (specified by the stacked FPSR) is true #
+# or false. #
+# If a BSUN exception should be indicated, the BSUN and ABSUN #
+# bits are set in the stacked FPSR. If the BSUN exception is enabled, #
+# the fbsun_flg is set in the SPCOND_FLG location on the stack. If an #
+# enabled BSUN should not be flagged and the predicate is true, then #
+# the ftrapcc_flg is set in the SPCOND_FLG location. These special #
+# flags indicate to the calling routine to emulate the exceptional #
+# condition. #
+# #
+#########################################################################
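+#
+# Unlike fdbcc there is no counter to manage; in C terms (illustrative
+# only):
+#
+#	void ftrapcc(int cond, unsigned char *spcond_flg)
+#	{
+#		if (cond)
+#			*spcond_flg = ftrapcc_flg; /* caller takes trap */
+#	}
+#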
+
+ global _ftrapcc
+_ftrapcc:
+ mov.w EXC_CMDREG(%a6),%d0 # fetch predicate
+
+ clr.l %d1 # clear scratch reg
+ mov.b FPSR_CC(%a6),%d1 # fetch fp ccodes
+ ror.l &0x8,%d1 # rotate to top byte
+ fmov.l %d1,%fpsr # insert into FPSR
+
+ mov.w (tbl_ftrapcc.b,%pc,%d0.w*2), %d1 # load table
+ jmp (tbl_ftrapcc.b,%pc,%d1.w) # jump to ftrapcc routine
+
+tbl_ftrapcc:
+ short ftrapcc_f - tbl_ftrapcc # 00
+ short ftrapcc_eq - tbl_ftrapcc # 01
+ short ftrapcc_ogt - tbl_ftrapcc # 02
+ short ftrapcc_oge - tbl_ftrapcc # 03
+ short ftrapcc_olt - tbl_ftrapcc # 04
+ short ftrapcc_ole - tbl_ftrapcc # 05
+ short ftrapcc_ogl - tbl_ftrapcc # 06
+ short ftrapcc_or - tbl_ftrapcc # 07
+ short ftrapcc_un - tbl_ftrapcc # 08
+ short ftrapcc_ueq - tbl_ftrapcc # 09
+ short ftrapcc_ugt - tbl_ftrapcc # 10
+ short ftrapcc_uge - tbl_ftrapcc # 11
+ short ftrapcc_ult - tbl_ftrapcc # 12
+ short ftrapcc_ule - tbl_ftrapcc # 13
+ short ftrapcc_neq - tbl_ftrapcc # 14
+ short ftrapcc_t - tbl_ftrapcc # 15
+ short ftrapcc_sf - tbl_ftrapcc # 16
+ short ftrapcc_seq - tbl_ftrapcc # 17
+ short ftrapcc_gt - tbl_ftrapcc # 18
+ short ftrapcc_ge - tbl_ftrapcc # 19
+ short ftrapcc_lt - tbl_ftrapcc # 20
+ short ftrapcc_le - tbl_ftrapcc # 21
+ short ftrapcc_gl - tbl_ftrapcc # 22
+ short ftrapcc_gle - tbl_ftrapcc # 23
+ short ftrapcc_ngle - tbl_ftrapcc # 24
+ short ftrapcc_ngl - tbl_ftrapcc # 25
+ short ftrapcc_nle - tbl_ftrapcc # 26
+ short ftrapcc_nlt - tbl_ftrapcc # 27
+ short ftrapcc_nge - tbl_ftrapcc # 28
+ short ftrapcc_ngt - tbl_ftrapcc # 29
+ short ftrapcc_sneq - tbl_ftrapcc # 30
+ short ftrapcc_st - tbl_ftrapcc # 31
+
+#########################################################################
+# #
+# IEEE Nonaware tests #
+# #
+# For the IEEE nonaware tests, we set the result based on the #
+# floating point condition codes. In addition, we check to see #
+# if the NAN bit is set, in which case BSUN and AIOP will be set. #
+# #
+# The cases EQ and NE are shared by the Aware and Nonaware groups #
+# and are incapable of setting the BSUN exception bit. #
+# #
+# Typically, only one of the two possible branch directions could #
+# have the NAN bit set. #
+# #
+#########################################################################
+
+#
+# equal:
+#
+# Z
+#
+ftrapcc_eq:
+ fbeq.w ftrapcc_trap # equal?
+ftrapcc_eq_no:
+ rts # do nothing
+
+#
+# not equal:
+# _
+# Z
+#
+ftrapcc_neq:
+ fbneq.w ftrapcc_trap # not equal?
+ftrapcc_neq_no:
+ rts # do nothing
+
+#
+# greater than:
+# _______
+# NANvZvN
+#
+ftrapcc_gt:
+ fbgt.w ftrapcc_trap # greater than?
+ftrapcc_gt_no:
+ btst &nan_bit, FPSR_CC(%a6) # is NAN set in cc?
+ beq.b ftrapcc_gt_done # no
+ ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+ btst &bsun_bit, FPCR_ENABLE(%a6) # was BSUN set?
+ bne.w ftrapcc_bsun # yes
+ftrapcc_gt_done:
+ rts # no; do nothing
+
+#
+# not greater than:
+#
+# NANvZvN
+#
+ftrapcc_ngt:
+ fbngt.w ftrapcc_ngt_yes # not greater than?
+ftrapcc_ngt_no:
+ rts # do nothing
+ftrapcc_ngt_yes:
+ btst &nan_bit, FPSR_CC(%a6) # is NAN set in cc?
+ beq.w ftrapcc_trap # no; go take trap
+ ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+ btst &bsun_bit, FPCR_ENABLE(%a6) # was BSUN set?
+ bne.w ftrapcc_bsun # yes
+ bra.w ftrapcc_trap # no; go take trap
+
+#
+# greater than or equal:
+# _____
+# Zv(NANvN)
+#
+ftrapcc_ge:
+ fbge.w ftrapcc_ge_yes # greater than or equal?
+ftrapcc_ge_no:
+ btst &nan_bit, FPSR_CC(%a6) # is NAN set in cc?
+ beq.b ftrapcc_ge_done # no; go finish
+ ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+ btst &bsun_bit, FPCR_ENABLE(%a6) # was BSUN set?
+ bne.w ftrapcc_bsun # yes
+ftrapcc_ge_done:
+ rts # no; do nothing
+ftrapcc_ge_yes:
+ btst &nan_bit, FPSR_CC(%a6) # is NAN set in cc?
+ beq.w ftrapcc_trap # no; go take trap
+ ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+ btst &bsun_bit, FPCR_ENABLE(%a6) # was BSUN set?
+ bne.w ftrapcc_bsun # yes
+ bra.w ftrapcc_trap # no; go take trap
+
+#
+# not (greater than or equal):
+# _
+# NANv(N^Z)
+#
+ftrapcc_nge:
+ fbnge.w ftrapcc_nge_yes # not (greater than or equal)?
+ftrapcc_nge_no:
+ rts # do nothing
+ftrapcc_nge_yes:
+ btst &nan_bit, FPSR_CC(%a6) # is NAN set in cc?
+ beq.w ftrapcc_trap # no; go take trap
+ ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+ btst &bsun_bit, FPCR_ENABLE(%a6) # was BSUN set?
+ bne.w ftrapcc_bsun # yes
+ bra.w ftrapcc_trap # no; go take trap
+
+#
+# less than:
+# _____
+# N^(NANvZ)
+#
+ftrapcc_lt:
+ fblt.w ftrapcc_trap # less than?
+ftrapcc_lt_no:
+ btst &nan_bit, FPSR_CC(%a6) # is NAN set in cc?
+ beq.b ftrapcc_lt_done # no; go finish
+ ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+ btst &bsun_bit, FPCR_ENABLE(%a6) # was BSUN set?
+ bne.w ftrapcc_bsun # yes
+ftrapcc_lt_done:
+ rts # no; do nothing
+
+#
+# not less than:
+# _
+# NANv(ZvN)
+#
+ftrapcc_nlt:
+ fbnlt.w ftrapcc_nlt_yes # not less than?
+ftrapcc_nlt_no:
+ rts # do nothing
+ftrapcc_nlt_yes:
+ btst &nan_bit, FPSR_CC(%a6) # is NAN set in cc?
+ beq.w ftrapcc_trap # no; go take trap
+ ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+ btst &bsun_bit, FPCR_ENABLE(%a6) # was BSUN set?
+ bne.w ftrapcc_bsun # yes
+ bra.w ftrapcc_trap # no; go take trap
+
+#
+# less than or equal:
+# ___
+# Zv(N^NAN)
+#
+ftrapcc_le:
+ fble.w ftrapcc_le_yes # less than or equal?
+ftrapcc_le_no:
+ btst &nan_bit, FPSR_CC(%a6) # is NAN set in cc?
+ beq.b ftrapcc_le_done # no; go finish
+ ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+ btst &bsun_bit, FPCR_ENABLE(%a6) # was BSUN set?
+ bne.w ftrapcc_bsun # yes
+ftrapcc_le_done:
+ rts # no; do nothing
+ftrapcc_le_yes:
+ btst &nan_bit, FPSR_CC(%a6) # is NAN set in cc?
+ beq.w ftrapcc_trap # no; go take trap
+ ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+ btst &bsun_bit, FPCR_ENABLE(%a6) # was BSUN set?
+ bne.w ftrapcc_bsun # yes
+ bra.w ftrapcc_trap # no; go take trap
+
+#
+# not (less than or equal):
+# ___
+# NANv(NvZ)
+#
+ftrapcc_nle:
+ fbnle.w ftrapcc_nle_yes # not (less than or equal)?
+ftrapcc_nle_no:
+ rts # do nothing
+ftrapcc_nle_yes:
+ btst &nan_bit, FPSR_CC(%a6) # is NAN set in cc?
+ beq.w ftrapcc_trap # no; go take trap
+ ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+ btst &bsun_bit, FPCR_ENABLE(%a6) # was BSUN set?
+ bne.w ftrapcc_bsun # yes
+ bra.w ftrapcc_trap # no; go take trap
+
+#
+# greater or less than:
+# _____
+# NANvZ
+#
+ftrapcc_gl:
+ fbgl.w ftrapcc_trap # greater or less than?
+ftrapcc_gl_no:
+ btst &nan_bit, FPSR_CC(%a6) # is NAN set in cc?
+ beq.b ftrapcc_gl_done # no; go finish
+ ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+ btst &bsun_bit, FPCR_ENABLE(%a6) # was BSUN set?
+ bne.w ftrapcc_bsun # yes
+ftrapcc_gl_done:
+ rts # no; do nothing
+
+#
+# not (greater or less than):
+#
+# NANvZ
+#
+ftrapcc_ngl:
+ fbngl.w ftrapcc_ngl_yes # not (greater or less than)?
+ftrapcc_ngl_no:
+ rts # do nothing
+ftrapcc_ngl_yes:
+ btst &nan_bit, FPSR_CC(%a6) # is NAN set in cc?
+ beq.w ftrapcc_trap # no; go take trap
+ ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+ btst &bsun_bit, FPCR_ENABLE(%a6) # was BSUN set?
+ bne.w ftrapcc_bsun # yes
+ bra.w ftrapcc_trap # no; go take trap
+
+#
+# greater, less, or equal:
+# ___
+# NAN
+#
+ftrapcc_gle:
+ fbgle.w ftrapcc_trap # greater, less, or equal?
+ftrapcc_gle_no:
+ ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+ btst &bsun_bit, FPCR_ENABLE(%a6) # was BSUN set?
+ bne.w ftrapcc_bsun # yes
+ rts # no; do nothing
+
+#
+# not (greater, less, or equal):
+#
+# NAN
+#
+ftrapcc_ngle:
+ fbngle.w ftrapcc_ngle_yes # not (greater, less, or equal)?
+ftrapcc_ngle_no:
+ rts # do nothing
+ftrapcc_ngle_yes:
+ ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+ btst &bsun_bit, FPCR_ENABLE(%a6) # was BSUN set?
+ bne.w ftrapcc_bsun # yes
+ bra.w ftrapcc_trap # no; go take trap
+
+#########################################################################
+# #
+# Miscellaneous tests #
+# #
+# These tests (f, t, sf, st, seq, sneq) decide whether to trap based #
+# on the floating point condition codes. The signalling variants also #
+# flag a BSUN exception whenever the NAN condition code bit is set. #
+# #
+#########################################################################
+
+#
+# false:
+#
+# False
+#
+ftrapcc_f:
+ rts # do nothing
+
+#
+# true:
+#
+# True
+#
+ftrapcc_t:
+ bra.w ftrapcc_trap # go take trap
+
+#
+# signalling false:
+#
+# False
+#
+ftrapcc_sf:
+ btst &nan_bit, FPSR_CC(%a6) # is NAN set in cc?
+ beq.b ftrapcc_sf_done # no; go finish
+ ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+ btst &bsun_bit, FPCR_ENABLE(%a6) # was BSUN set?
+ bne.w ftrapcc_bsun # yes
+ftrapcc_sf_done:
+ rts # no; do nothing
+
+#
+# signalling true:
+#
+# True
+#
+ftrapcc_st:
+ btst &nan_bit, FPSR_CC(%a6) # is NAN set in cc?
+ beq.w ftrapcc_trap # no; go take trap
+ ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+ btst &bsun_bit, FPCR_ENABLE(%a6) # was BSUN set?
+ bne.w ftrapcc_bsun # yes
+ bra.w ftrapcc_trap # no; go take trap
+
+#
+# signalling equal:
+#
+# Z
+#
+ftrapcc_seq:
+ fbseq.w ftrapcc_seq_yes # signalling equal?
+ftrapcc_seq_no:
+ btst &nan_bit, FPSR_CC(%a6) # is NAN set in cc?
+ beq.w ftrapcc_seq_done # no; go finish
+ ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+ btst &bsun_bit, FPCR_ENABLE(%a6) # was BSUN set?
+ bne.w ftrapcc_bsun # yes
+ftrapcc_seq_done:
+ rts # no; do nothing
+ftrapcc_seq_yes:
+ btst &nan_bit, FPSR_CC(%a6) # is NAN set in cc?
+ beq.w ftrapcc_trap # no; go take trap
+ ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+ btst &bsun_bit, FPCR_ENABLE(%a6) # was BSUN set?
+ bne.w ftrapcc_bsun # yes
+ bra.w ftrapcc_trap # no; go take trap
+
+#
+# signalling not equal:
+# _
+# Z
+#
+ftrapcc_sneq:
+ fbsneq.w ftrapcc_sneq_yes # signalling not equal?
+ftrapcc_sneq_no:
+ btst &nan_bit, FPSR_CC(%a6) # is NAN set in cc?
+ beq.w ftrapcc_sneq_no_done # no; go finish
+ ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+ btst &bsun_bit, FPCR_ENABLE(%a6) # was BSUN set?
+ bne.w ftrapcc_bsun # yes
+ftrapcc_sneq_no_done:
+ rts # do nothing
+ftrapcc_sneq_yes:
+ btst &nan_bit, FPSR_CC(%a6) # is NAN set in cc?
+ beq.w ftrapcc_trap # no; go take trap
+ ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+ btst &bsun_bit, FPCR_ENABLE(%a6) # was BSUN set?
+ bne.w ftrapcc_bsun # yes
+ bra.w ftrapcc_trap # no; go take trap
+
+#########################################################################
+# #
+# IEEE Aware tests #
+# #
+# For the IEEE aware tests, we only have to set the result based on the #
+# floating point condition codes. The BSUN exception will not be #
+# set for any of these tests. #
+# #
+#########################################################################
+
+#
+# ordered greater than:
+# _______
+# NANvZvN
+#
+ftrapcc_ogt:
+ fbogt.w ftrapcc_trap # ordered greater than?
+ftrapcc_ogt_no:
+ rts # do nothing
+
+#
+# unordered or less or equal:
+#
+# NANvZvN
+#
+ftrapcc_ule:
+ fbule.w ftrapcc_trap # unordered or less or equal?
+ftrapcc_ule_no:
+ rts # do nothing
+
+#
+# ordered greater than or equal:
+# _____
+# Zv(NANvN)
+#
+ftrapcc_oge:
+ fboge.w ftrapcc_trap # ordered greater than or equal?
+ftrapcc_oge_no:
+ rts # do nothing
+
+#
+# unordered or less than:
+# _
+# NANv(N^Z)
+#
+ftrapcc_ult:
+ fbult.w ftrapcc_trap # unordered or less than?
+ftrapcc_ult_no:
+ rts # do nothing
+
+#
+# ordered less than:
+# _____
+# N^(NANvZ)
+#
+ftrapcc_olt:
+ fbolt.w ftrapcc_trap # ordered less than?
+ftrapcc_olt_no:
+ rts # do nothing
+
+#
+# unordered or greater or equal:
+#       _
+# NANvZvN
+#
+ftrapcc_uge:
+ fbuge.w ftrapcc_trap # unordered or greater or equal?
+ftrapcc_uge_no:
+ rts # do nothing
+
+#
+# ordered less than or equal:
+# ___
+# Zv(N^NAN)
+#
+ftrapcc_ole:
+ fbole.w ftrapcc_trap # ordered less than or equal?
+ftrapcc_ole_no:
+ rts # do nothing
+
+#
+# unordered or greater than:
+# ___
+# NANv(NvZ)
+#
+ftrapcc_ugt:
+ fbugt.w ftrapcc_trap # unordered or greater than?
+ftrapcc_ugt_no:
+ rts # do nothing
+
+#
+# ordered greater or less than:
+# _____
+# NANvZ
+#
+ftrapcc_ogl:
+ fbogl.w ftrapcc_trap # ordered greater or less than?
+ftrapcc_ogl_no:
+ rts # do nothing
+
+#
+# unordered or equal:
+#
+# NANvZ
+#
+ftrapcc_ueq:
+ fbueq.w ftrapcc_trap # unordered or equal?
+ftrapcc_ueq_no:
+ rts # do nothing
+
+#
+# ordered:
+# ___
+# NAN
+#
+ftrapcc_or:
+ fbor.w ftrapcc_trap # ordered?
+ftrapcc_or_no:
+ rts # do nothing
+
+#
+# unordered:
+#
+# NAN
+#
+ftrapcc_un:
+ fbun.w ftrapcc_trap # unordered?
+ftrapcc_un_no:
+ rts # do nothing
+
+#######################################################################
+
+# the bsun exception bit was not set.
+# we will need to jump to the ftrapcc vector. the stack frame
+# is the same size as that of the fp unimp instruction. the
+# only difference is that the <ea> field should hold the PC
+# of the ftrapcc instruction and the vector offset field
+# should denote the ftrapcc trap.
+ftrapcc_trap:
+ mov.b &ftrapcc_flg,SPCOND_FLG(%a6)
+ rts
+
+# the emulation routine set bsun and BSUN was enabled. have to
+# fix stack and jump to the bsun handler.
+# let the caller of this routine shift the stack frame up to
+# eliminate the effective address field.
+ftrapcc_bsun:
+ mov.b &fbsun_flg,SPCOND_FLG(%a6)
+ rts
+
+#########################################################################
+# fscc(): routine to emulate the fscc instruction #
+# #
+# XDEF **************************************************************** #
+# _fscc() #
+# #
+# XREF **************************************************************** #
+# store_dreg_b() - store result to data register file #
+# dec_areg() - decrement an areg for -(an) mode #
+# inc_areg() - increment an areg for (an)+ mode #
+# _dmem_write_byte() - store result to memory #
+# #
+# INPUT *************************************************************** #
+# none #
+# #
+# OUTPUT ************************************************************** #
+# none #
+# #
+# ALGORITHM *********************************************************** #
+# This routine checks which conditional predicate is specified by #
+# the stacked fscc instruction opcode and then branches to a routine #
+# for that predicate. The corresponding fbcc instruction is then used #
+# to see whether the condition (specified by the stacked FPSR) is true #
+# or false. #
+# If a BSUN exception should be indicated, the BSUN and AIOP #
+# bits are set in the stacked FPSR. If the BSUN exception is enabled, #
+# the fbsun_flg is set in the SPCOND_FLG location on the stack. If no #
+# enabled BSUN exception is flagged, the result (true or false) is #
+# stored to the data register file or memory. #
+# #
+#########################################################################
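+
+# ex: (illustration) "fsneq d3" stores 0xff in the low byte of d3 if
+# the Z condition code is clear and 0x00 if it is set; the routines
+# below compute that byte in d0 and then store it.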
+
+ global _fscc
+_fscc:
+ mov.w EXC_CMDREG(%a6),%d0 # fetch predicate
+
+ clr.l %d1 # clear scratch reg
+ mov.b FPSR_CC(%a6),%d1 # fetch fp ccodes
+ ror.l &0x8,%d1 # rotate to top byte
+ fmov.l %d1,%fpsr # insert into FPSR
+
+ mov.w (tbl_fscc.b,%pc,%d0.w*2),%d1 # load table
+ jmp (tbl_fscc.b,%pc,%d1.w) # jump to fscc routine
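+
+# ex: for predicate 0x02 (ogt), the mov.w above loads the word at
+# (tbl_fscc + 0x2*2), i.e. "fscc_ogt - tbl_fscc", so the jmp lands
+# on fscc_ogt. every entry below is a 16-bit offset from tbl_fscc.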
+
+tbl_fscc:
+ short fscc_f - tbl_fscc # 00
+ short fscc_eq - tbl_fscc # 01
+ short fscc_ogt - tbl_fscc # 02
+ short fscc_oge - tbl_fscc # 03
+ short fscc_olt - tbl_fscc # 04
+ short fscc_ole - tbl_fscc # 05
+ short fscc_ogl - tbl_fscc # 06
+ short fscc_or - tbl_fscc # 07
+ short fscc_un - tbl_fscc # 08
+ short fscc_ueq - tbl_fscc # 09
+ short fscc_ugt - tbl_fscc # 10
+ short fscc_uge - tbl_fscc # 11
+ short fscc_ult - tbl_fscc # 12
+ short fscc_ule - tbl_fscc # 13
+ short fscc_neq - tbl_fscc # 14
+ short fscc_t - tbl_fscc # 15
+ short fscc_sf - tbl_fscc # 16
+ short fscc_seq - tbl_fscc # 17
+ short fscc_gt - tbl_fscc # 18
+ short fscc_ge - tbl_fscc # 19
+ short fscc_lt - tbl_fscc # 20
+ short fscc_le - tbl_fscc # 21
+ short fscc_gl - tbl_fscc # 22
+ short fscc_gle - tbl_fscc # 23
+ short fscc_ngle - tbl_fscc # 24
+ short fscc_ngl - tbl_fscc # 25
+ short fscc_nle - tbl_fscc # 26
+ short fscc_nlt - tbl_fscc # 27
+ short fscc_nge - tbl_fscc # 28
+ short fscc_ngt - tbl_fscc # 29
+ short fscc_sneq - tbl_fscc # 30
+ short fscc_st - tbl_fscc # 31
+
+#########################################################################
+# #
+# IEEE Nonaware tests #
+# #
+# For the IEEE nonaware tests, we set the result based on the #
+# floating point condition codes. In addition, we check to see #
+# if the NAN bit is set, in which case BSUN and AIOP will be set. #
+# #
+# The cases EQ and NE are shared by the Aware and Nonaware groups #
+# and are incapable of setting the BSUN exception bit. #
+# #
+# Typically, only one of the two possible branch directions could #
+# have the NAN bit set. #
+# #
+#########################################################################
+
+#
+# equal:
+#
+# Z
+#
+fscc_eq:
+ fbeq.w fscc_eq_yes # equal?
+fscc_eq_no:
+ clr.b %d0 # set false
+ bra.w fscc_done # go finish
+fscc_eq_yes:
+ st %d0 # set true
+ bra.w fscc_done # go finish
+
+#
+# not equal:
+# _
+# Z
+#
+fscc_neq:
+ fbneq.w fscc_neq_yes # not equal?
+fscc_neq_no:
+ clr.b %d0 # set false
+ bra.w fscc_done # go finish
+fscc_neq_yes:
+ st %d0 # set true
+ bra.w fscc_done # go finish
+
+#
+# greater than:
+# _______
+# NANvZvN
+#
+fscc_gt:
+ fbgt.w fscc_gt_yes # greater than?
+fscc_gt_no:
+ clr.b %d0 # set false
+ btst &nan_bit, FPSR_CC(%a6) # is NAN set in cc?
+ beq.w fscc_done # no;go finish
+ ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+ bra.w fscc_chk_bsun # go finish
+fscc_gt_yes:
+ st %d0 # set true
+ bra.w fscc_done # go finish
+
+#
+# not greater than:
+#
+# NANvZvN
+#
+fscc_ngt:
+ fbngt.w fscc_ngt_yes # not greater than?
+fscc_ngt_no:
+ clr.b %d0 # set false
+ bra.w fscc_done # go finish
+fscc_ngt_yes:
+ st %d0 # set true
+ btst &nan_bit, FPSR_CC(%a6) # is NAN set in cc?
+ beq.w fscc_done # no;go finish
+ ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+ bra.w fscc_chk_bsun # go finish
+
+#
+# greater than or equal:
+# _____
+# Zv(NANvN)
+#
+fscc_ge:
+ fbge.w fscc_ge_yes # greater than or equal?
+fscc_ge_no:
+ clr.b %d0 # set false
+ btst &nan_bit, FPSR_CC(%a6) # is NAN set in cc?
+ beq.w fscc_done # no;go finish
+ ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+ bra.w fscc_chk_bsun # go finish
+fscc_ge_yes:
+ st %d0 # set true
+ btst &nan_bit, FPSR_CC(%a6) # is NAN set in cc?
+ beq.w fscc_done # no;go finish
+ ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+ bra.w fscc_chk_bsun # go finish
+
+#
+# not (greater than or equal):
+# _
+# NANv(N^Z)
+#
+fscc_nge:
+ fbnge.w fscc_nge_yes # not (greater than or equal)?
+fscc_nge_no:
+ clr.b %d0 # set false
+ bra.w fscc_done # go finish
+fscc_nge_yes:
+ st %d0 # set true
+ btst &nan_bit, FPSR_CC(%a6) # is NAN set in cc?
+ beq.w fscc_done # no;go finish
+ ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+ bra.w fscc_chk_bsun # go finish
+
+#
+# less than:
+# _____
+# N^(NANvZ)
+#
+fscc_lt:
+ fblt.w fscc_lt_yes # less than?
+fscc_lt_no:
+ clr.b %d0 # set false
+ btst &nan_bit, FPSR_CC(%a6) # is NAN set in cc?
+ beq.w fscc_done # no;go finish
+ ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+ bra.w fscc_chk_bsun # go finish
+fscc_lt_yes:
+ st %d0 # set true
+ bra.w fscc_done # go finish
+
+#
+# not less than:
+# _
+# NANv(ZvN)
+#
+fscc_nlt:
+ fbnlt.w fscc_nlt_yes # not less than?
+fscc_nlt_no:
+ clr.b %d0 # set false
+ bra.w fscc_done # go finish
+fscc_nlt_yes:
+ st %d0 # set true
+ btst &nan_bit, FPSR_CC(%a6) # is NAN set in cc?
+ beq.w fscc_done # no;go finish
+ ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+ bra.w fscc_chk_bsun # go finish
+
+#
+# less than or equal:
+# ___
+# Zv(N^NAN)
+#
+fscc_le:
+ fble.w fscc_le_yes # less than or equal?
+fscc_le_no:
+ clr.b %d0 # set false
+ btst &nan_bit, FPSR_CC(%a6) # is NAN set in cc?
+ beq.w fscc_done # no;go finish
+ ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+ bra.w fscc_chk_bsun # go finish
+fscc_le_yes:
+ st %d0 # set true
+ btst &nan_bit, FPSR_CC(%a6) # is NAN set in cc?
+ beq.w fscc_done # no;go finish
+ ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+ bra.w fscc_chk_bsun # go finish
+
+#
+# not (less than or equal):
+# ___
+# NANv(NvZ)
+#
+fscc_nle:
+ fbnle.w fscc_nle_yes # not (less than or equal)?
+fscc_nle_no:
+ clr.b %d0 # set false
+ bra.w fscc_done # go finish
+fscc_nle_yes:
+ st %d0 # set true
+ btst &nan_bit, FPSR_CC(%a6) # is NAN set in cc?
+ beq.w fscc_done # no;go finish
+ ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+ bra.w fscc_chk_bsun # go finish
+
+#
+# greater or less than:
+# _____
+# NANvZ
+#
+fscc_gl:
+ fbgl.w fscc_gl_yes # greater or less than?
+fscc_gl_no:
+ clr.b %d0 # set false
+ btst &nan_bit, FPSR_CC(%a6) # is NAN set in cc?
+ beq.w fscc_done # no;go finish
+ ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+ bra.w fscc_chk_bsun # go finish
+fscc_gl_yes:
+ st %d0 # set true
+ bra.w fscc_done # go finish
+
+#
+# not (greater or less than):
+#
+# NANvZ
+#
+fscc_ngl:
+ fbngl.w fscc_ngl_yes # not (greater or less than)?
+fscc_ngl_no:
+ clr.b %d0 # set false
+ bra.w fscc_done # go finish
+fscc_ngl_yes:
+ st %d0 # set true
+ btst &nan_bit, FPSR_CC(%a6) # is NAN set in cc?
+ beq.w fscc_done # no;go finish
+ ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+ bra.w fscc_chk_bsun # go finish
+
+#
+# greater, less, or equal:
+# ___
+# NAN
+#
+fscc_gle:
+ fbgle.w fscc_gle_yes # greater, less, or equal?
+fscc_gle_no:
+ clr.b %d0 # set false
+ ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+ bra.w fscc_chk_bsun # go finish
+fscc_gle_yes:
+ st %d0 # set true
+ bra.w fscc_done # go finish
+
+#
+# not (greater, less, or equal):
+#
+# NAN
+#
+fscc_ngle:
+ fbngle.w fscc_ngle_yes # not (greater, less, or equal)?
+fscc_ngle_no:
+ clr.b %d0 # set false
+ bra.w fscc_done # go finish
+fscc_ngle_yes:
+ st %d0 # set true
+ ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+ bra.w fscc_chk_bsun # go finish
+
+#########################################################################
+# #
+# Miscellaneous tests #
+# #
+# These tests (f, t, sf, st, seq, sneq) set the result based on the #
+# floating point condition codes. The signalling variants also flag #
+# a BSUN exception whenever the NAN condition code bit is set. #
+# #
+#########################################################################
+
+#
+# false:
+#
+# False
+#
+fscc_f:
+ clr.b %d0 # set false
+ bra.w fscc_done # go finish
+
+#
+# true:
+#
+# True
+#
+fscc_t:
+ st %d0 # set true
+ bra.w fscc_done # go finish
+
+#
+# signalling false:
+#
+# False
+#
+fscc_sf:
+ clr.b %d0 # set false
+ btst &nan_bit, FPSR_CC(%a6) # is NAN set in cc?
+ beq.w fscc_done # no;go finish
+ ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+ bra.w fscc_chk_bsun # go finish
+
+#
+# signalling true:
+#
+# True
+#
+fscc_st:
+ st %d0 # set true
+ btst &nan_bit, FPSR_CC(%a6) # is NAN set in cc?
+ beq.w fscc_done # no;go finish
+ ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+ bra.w fscc_chk_bsun # go finish
+
+#
+# signalling equal:
+#
+# Z
+#
+fscc_seq:
+ fbseq.w fscc_seq_yes # signalling equal?
+fscc_seq_no:
+ clr.b %d0 # set false
+ btst &nan_bit, FPSR_CC(%a6) # is NAN set in cc?
+ beq.w fscc_done # no;go finish
+ ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+ bra.w fscc_chk_bsun # go finish
+fscc_seq_yes:
+ st %d0 # set true
+ btst &nan_bit, FPSR_CC(%a6) # is NAN set in cc?
+ beq.w fscc_done # no;go finish
+ ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+ bra.w fscc_chk_bsun # go finish
+
+#
+# signalling not equal:
+# _
+# Z
+#
+fscc_sneq:
+ fbsneq.w fscc_sneq_yes # signalling not equal?
+fscc_sneq_no:
+ clr.b %d0 # set false
+ btst &nan_bit, FPSR_CC(%a6) # is NAN set in cc?
+ beq.w fscc_done # no;go finish
+ ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+ bra.w fscc_chk_bsun # go finish
+fscc_sneq_yes:
+ st %d0 # set true
+ btst &nan_bit, FPSR_CC(%a6) # is NAN set in cc?
+ beq.w fscc_done # no;go finish
+ ori.l &bsun_mask+aiop_mask, USER_FPSR(%a6) # set BSUN exc bit
+ bra.w fscc_chk_bsun # go finish
+
+#########################################################################
+# #
+# IEEE Aware tests #
+# #
+# For the IEEE aware tests, we only have to set the result based on the #
+# floating point condition codes. The BSUN exception will not be #
+# set for any of these tests. #
+# #
+#########################################################################
+
+#
+# ordered greater than:
+# _______
+# NANvZvN
+#
+fscc_ogt:
+ fbogt.w fscc_ogt_yes # ordered greater than?
+fscc_ogt_no:
+ clr.b %d0 # set false
+ bra.w fscc_done # go finish
+fscc_ogt_yes:
+ st %d0 # set true
+ bra.w fscc_done # go finish
+
+#
+# unordered or less or equal:
+#
+# NANvZvN
+#
+fscc_ule:
+ fbule.w fscc_ule_yes # unordered or less or equal?
+fscc_ule_no:
+ clr.b %d0 # set false
+ bra.w fscc_done # go finish
+fscc_ule_yes:
+ st %d0 # set true
+ bra.w fscc_done # go finish
+
+#
+# ordered greater than or equal:
+# _____
+# Zv(NANvN)
+#
+fscc_oge:
+ fboge.w fscc_oge_yes # ordered greater than or equal?
+fscc_oge_no:
+ clr.b %d0 # set false
+ bra.w fscc_done # go finish
+fscc_oge_yes:
+ st %d0 # set true
+ bra.w fscc_done # go finish
+
+#
+# unordered or less than:
+# _
+# NANv(N^Z)
+#
+fscc_ult:
+ fbult.w fscc_ult_yes # unordered or less than?
+fscc_ult_no:
+ clr.b %d0 # set false
+ bra.w fscc_done # go finish
+fscc_ult_yes:
+ st %d0 # set true
+ bra.w fscc_done # go finish
+
+#
+# ordered less than:
+# _____
+# N^(NANvZ)
+#
+fscc_olt:
+ fbolt.w fscc_olt_yes # ordered less than?
+fscc_olt_no:
+ clr.b %d0 # set false
+ bra.w fscc_done # go finish
+fscc_olt_yes:
+ st %d0 # set true
+ bra.w fscc_done # go finish
+
+#
+# unordered or greater or equal:
+#       _
+# NANvZvN
+#
+fscc_uge:
+ fbuge.w fscc_uge_yes # unordered or greater or equal?
+fscc_uge_no:
+ clr.b %d0 # set false
+ bra.w fscc_done # go finish
+fscc_uge_yes:
+ st %d0 # set true
+ bra.w fscc_done # go finish
+
+#
+# ordered less than or equal:
+# ___
+# Zv(N^NAN)
+#
+fscc_ole:
+ fbole.w fscc_ole_yes # ordered less than or equal?
+fscc_ole_no:
+ clr.b %d0 # set false
+ bra.w fscc_done # go finish
+fscc_ole_yes:
+ st %d0 # set true
+ bra.w fscc_done # go finish
+
+#
+# unordered or greater than:
+# ___
+# NANv(NvZ)
+#
+fscc_ugt:
+ fbugt.w fscc_ugt_yes # unordered or greater than?
+fscc_ugt_no:
+ clr.b %d0 # set false
+ bra.w fscc_done # go finish
+fscc_ugt_yes:
+ st %d0 # set true
+ bra.w fscc_done # go finish
+
+#
+# ordered greater or less than:
+# _____
+# NANvZ
+#
+fscc_ogl:
+ fbogl.w fscc_ogl_yes # ordered greater or less than?
+fscc_ogl_no:
+ clr.b %d0 # set false
+ bra.w fscc_done # go finish
+fscc_ogl_yes:
+ st %d0 # set true
+ bra.w fscc_done # go finish
+
+#
+# unordered or equal:
+#
+# NANvZ
+#
+fscc_ueq:
+ fbueq.w fscc_ueq_yes # unordered or equal?
+fscc_ueq_no:
+ clr.b %d0 # set false
+ bra.w fscc_done # go finish
+fscc_ueq_yes:
+ st %d0 # set true
+ bra.w fscc_done # go finish
+
+#
+# ordered:
+# ___
+# NAN
+#
+fscc_or:
+ fbor.w fscc_or_yes # ordered?
+fscc_or_no:
+ clr.b %d0 # set false
+ bra.w fscc_done # go finish
+fscc_or_yes:
+ st %d0 # set true
+ bra.w fscc_done # go finish
+
+#
+# unordered:
+#
+# NAN
+#
+fscc_un:
+ fbun.w fscc_un_yes # unordered?
+fscc_un_no:
+ clr.b %d0 # set false
+ bra.w fscc_done # go finish
+fscc_un_yes:
+ st %d0 # set true
+ bra.w fscc_done # go finish
+
+#######################################################################
+
+#
+# the bsun exception bit was set. now, check to see if BSUN
+# is enabled. if so, don't store the result; instead, correct the
+# stack frame for a bsun exception.
+#
+fscc_chk_bsun:
+ btst &bsun_bit,FPCR_ENABLE(%a6) # was BSUN set?
+ bne.w fscc_bsun
+
+#
+# the bsun exception bit was not set.
+# the result has been selected.
+# now, check to see if the result is to be stored in the data register
+# file or in memory.
+#
+fscc_done:
+ mov.l %d0,%a0 # save result for a moment
+
+ mov.b 1+EXC_OPWORD(%a6),%d1 # fetch lo opword
+ mov.l %d1,%d0 # make a copy
+ andi.b &0x38,%d1 # extract src mode
+
+ bne.b fscc_mem_op # it's a memory operation
+
+ mov.l %d0,%d1
+ andi.w &0x7,%d1 # pass index in d1
+ mov.l %a0,%d0 # pass result in d0
+ bsr.l store_dreg_b # save result in regfile
+ rts
+
+#
+# the stacked <ea> is correct with the exception of:
+# -> Dn : <ea> is garbage
+#
+# if the addressing mode is post-increment or pre-decrement,
+# then the address registers have not been updated.
+#
+fscc_mem_op:
+ cmpi.b %d1,&0x18 # is <ea> (An)+ ?
+ beq.b fscc_mem_inc # yes
+ cmpi.b %d1,&0x20 # is <ea> -(An) ?
+ beq.b fscc_mem_dec # yes
+
+ mov.l %a0,%d0 # pass result in d0
+ mov.l EXC_EA(%a6),%a0 # fetch <ea>
+ bsr.l _dmem_write_byte # write result byte
+
+ tst.l %d1 # did dstore fail?
+ bne.w fscc_err # yes
+
+ rts
+
+# addressing mode is post-increment. write the result byte. if the write
+# fails then don't update the address register. if the write passes then
+# call inc_areg() to update the address register.
+fscc_mem_inc:
+ mov.l %a0,%d0 # pass result in d0
+ mov.l EXC_EA(%a6),%a0 # fetch <ea>
+ bsr.l _dmem_write_byte # write result byte
+
+ tst.l %d1 # did dstore fail?
+ bne.w fscc_err # yes
+
+ mov.b 0x1+EXC_OPWORD(%a6),%d1 # fetch opword
+ andi.w &0x7,%d1 # pass index in d1
+ movq.l &0x1,%d0 # pass amt to inc by
+ bsr.l inc_areg # increment address register
+
+ rts
+
+# addressing mode is pre-decrement. write the result byte. if the write
+# fails then don't update the address register. if the write passes then
+# call dec_areg() to update the address register.
+fscc_mem_dec:
+ mov.l %a0,%d0 # pass result in d0
+ mov.l EXC_EA(%a6),%a0 # fetch <ea>
+ bsr.l _dmem_write_byte # write result byte
+
+ tst.l %d1 # did dstore fail?
+ bne.w fscc_err # yes
+
+ mov.b 0x1+EXC_OPWORD(%a6),%d1 # fetch opword
+ andi.w &0x7,%d1 # pass index in d1
+ movq.l &0x1,%d0 # pass amt to dec by
+ bsr.l dec_areg # decrement address register
+
+ rts
+
+# the emulation routine set bsun and BSUN was enabled. have to
+# fix stack and jump to the bsun handler.
+# let the caller of this routine shift the stack frame up to
+# eliminate the effective address field.
+fscc_bsun:
+ mov.b &fbsun_flg,SPCOND_FLG(%a6)
+ rts
+
+# the byte write to memory has failed. pass the failing effective address
+# and a FSLW to funimp_dacc().
+fscc_err:
+ mov.w &0x00a1,EXC_VOFF(%a6)
+ bra.l facc_finish
+
+#########################################################################
+# XDEF **************************************************************** #
+# fmovm_dynamic(): emulate "fmovm" dynamic instruction #
+# #
+# XREF **************************************************************** #
+# fetch_dreg() - fetch data register #
+# {i,d,}mem_read() - fetch data from memory #
+# _mem_write() - write data to memory #
+# iea_iacc() - instruction memory access error occurred #
+# iea_dacc() - data memory access error occurred #
+# restore() - restore An index regs if access error occurred #
+# #
+# INPUT *************************************************************** #
+# None #
+# #
+# OUTPUT ************************************************************** #
+# If instr is "fmovm Dn,-(A7)" from supervisor mode, #
+# d0 = size of dump #
+# d1 = Dn #
+# Else if instruction access error, #
+# d0 = FSLW #
+# Else if data access error, #
+# d0 = FSLW #
+# a0 = address of fault #
+# Else #
+# none. #
+# #
+# ALGORITHM *********************************************************** #
+# The effective address must be calculated since this is entered #
+# from an "Unimplemented Effective Address" exception handler. So, we #
+# have our own fcalc_ea() routine here. If an access error is flagged #
+# by a _{i,d,}mem_read() call, we must exit through the special #
+# handler. #
+# The data register is determined and its value loaded to get the #
+# string of FP registers affected. This value is used as an index into #
+# a lookup table such that we can determine the number of bytes #
+# involved. #
+# If the instruction is "fmovm.x <ea>,Dn", a _mem_read() is used #
+# to read in all FP values. Again, _mem_read() may fail and require a #
+# special exit. #
+# If the instruction is "fmovm.x DN,<ea>", a _mem_write() is used #
+# to write all FP values. _mem_write() may also fail. #
+# If the instruction is "fmovm.x DN,-(a7)" from supervisor mode, #
+# then we return the size of the dump and the string to the caller #
+# so that the move can occur outside of this routine. This special #
+# case is required so that moves to the system stack are handled #
+# correctly. #
+# #
+# DYNAMIC: #
+# fmovm.x dn, <ea> #
+# fmovm.x <ea>, dn #
+# #
+# <WORD 1> <WORD2> #
+# 1111 0010 00 |<ea>| 11@& 1000 0$$$ 0000 #
+# #
+# & = (0): predecrement addressing mode #
+# (1): postincrement or control addressing mode #
+# @ = (0): move listed regs from memory to the FPU #
+# (1): move listed regs from the FPU to memory #
+# $$$ : index of data register holding reg select mask #
+# #
+# NOTES: #
+# If the data register holds a zero, then the #
+# instruction is a nop. #
+# #
+#########################################################################
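+
+# ex: (encoding shown for illustration) "fmovm.x d0,-(a0)" assembles
+# to 0xf220,0xe800: word one holds <ea> mode %100 (predecrement),
+# reg %000; word two has @=1 (FPU to memory), &=0 (predecrement),
+# and $$$=000 (register select mask taken from d0).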
+
+ global fmovm_dynamic
+fmovm_dynamic:
+
+# extract the data register in which the bit string resides...
+ mov.b 1+EXC_EXTWORD(%a6),%d1 # fetch extword
+ andi.w &0x70,%d1 # extract reg bits
+ lsr.b &0x4,%d1 # shift into lo bits
+
+# fetch the bit string into d0...
+ bsr.l fetch_dreg # fetch reg string
+
+ andi.l &0x000000ff,%d0 # keep only lo byte
+
+ mov.l %d0,-(%sp) # save strg
+ mov.b (tbl_fmovm_size.w,%pc,%d0),%d0
+ mov.l %d0,-(%sp) # save size
+ bsr.l fmovm_calc_ea # calculate <ea>
+ mov.l (%sp)+,%d0 # restore size
+ mov.l (%sp)+,%d1 # restore strg
+
+# if the bit string is a zero, then the operation is a no-op
+# but, make sure that we've calculated ea and advanced the opword pointer
+ beq.w fmovm_data_done
+
+# separate move ins from move outs...
+ btst &0x5,EXC_EXTWORD(%a6) # is it a move in or out?
+ beq.w fmovm_data_in # it's a move out
+
+#############
+# MOVE OUT: #
+#############
+fmovm_data_out:
+ btst &0x4,EXC_EXTWORD(%a6) # control or predecrement?
+ bne.w fmovm_out_ctrl # control
+
+############################
+fmovm_out_predec:
+# for predecrement mode, the bit string is the opposite of both control
+# operations and postincrement mode. (bit7 = FP7 ... bit0 = FP0)
+# here, we convert it to be just like the others...
+ mov.b (tbl_fmovm_convert.w,%pc,%d1.w*1),%d1
+
+ btst &0x5,EXC_SR(%a6) # user or supervisor mode?
+ beq.b fmovm_out_ctrl # user
+
+fmovm_out_predec_s:
+ cmpi.b SPCOND_FLG(%a6),&mda7_flg # is <ea> mode -(a7)?
+ bne.b fmovm_out_ctrl
+
+# the operation was, unfortunately, an "fmovm.x dn,-(sp)"
+# called from supervisor mode.
+# we're also passing "size" and "strg" back to the calling routine
+ rts
+
+############################
+fmovm_out_ctrl:
+ mov.l %a0,%a1 # move <ea> to a1
+
+ sub.l %d0,%sp # subtract size of dump
+ lea (%sp),%a0
+
+ tst.b %d1 # should FP0 be moved?
+ bpl.b fmovm_out_ctrl_fp1 # no
+
+ mov.l 0x0+EXC_FP0(%a6),(%a0)+ # yes
+ mov.l 0x4+EXC_FP0(%a6),(%a0)+
+ mov.l 0x8+EXC_FP0(%a6),(%a0)+
+
+fmovm_out_ctrl_fp1:
+ lsl.b &0x1,%d1 # should FP1 be moved?
+ bpl.b fmovm_out_ctrl_fp2 # no
+
+ mov.l 0x0+EXC_FP1(%a6),(%a0)+ # yes
+ mov.l 0x4+EXC_FP1(%a6),(%a0)+
+ mov.l 0x8+EXC_FP1(%a6),(%a0)+
+
+fmovm_out_ctrl_fp2:
+ lsl.b &0x1,%d1 # should FP2 be moved?
+ bpl.b fmovm_out_ctrl_fp3 # no
+
+ fmovm.x &0x20,(%a0) # yes
+ add.l &0xc,%a0
+
+fmovm_out_ctrl_fp3:
+ lsl.b &0x1,%d1 # should FP3 be moved?
+ bpl.b fmovm_out_ctrl_fp4 # no
+
+ fmovm.x &0x10,(%a0) # yes
+ add.l &0xc,%a0
+
+fmovm_out_ctrl_fp4:
+ lsl.b &0x1,%d1 # should FP4 be moved?
+ bpl.b fmovm_out_ctrl_fp5 # no
+
+ fmovm.x &0x08,(%a0) # yes
+ add.l &0xc,%a0
+
+fmovm_out_ctrl_fp5:
+ lsl.b &0x1,%d1 # should FP5 be moved?
+ bpl.b fmovm_out_ctrl_fp6 # no
+
+ fmovm.x &0x04,(%a0) # yes
+ add.l &0xc,%a0
+
+fmovm_out_ctrl_fp6:
+ lsl.b &0x1,%d1 # should FP6 be moved?
+ bpl.b fmovm_out_ctrl_fp7 # no
+
+ fmovm.x &0x02,(%a0) # yes
+ add.l &0xc,%a0
+
+fmovm_out_ctrl_fp7:
+ lsl.b &0x1,%d1 # should FP7 be moved?
+ bpl.b fmovm_out_ctrl_done # no
+
+ fmovm.x &0x01,(%a0) # yes
+ add.l &0xc,%a0
+
+fmovm_out_ctrl_done:
+ mov.l %a1,L_SCR1(%a6)
+
+ lea (%sp),%a0 # pass: supervisor src
+ mov.l %d0,-(%sp) # save size
+ bsr.l _dmem_write # copy data to user mem
+
+ mov.l (%sp)+,%d0
+ add.l %d0,%sp # clear fpreg data from stack
+
+ tst.l %d1 # did dstore err?
+ bne.w fmovm_out_err # yes
+
+ rts
+
+############
+# MOVE IN: #
+############
+fmovm_data_in:
+ mov.l %a0,L_SCR1(%a6)
+
+ sub.l %d0,%sp # make room for fpregs
+ lea (%sp),%a1
+
+ mov.l %d1,-(%sp) # save bit string for later
+ mov.l %d0,-(%sp) # save # of bytes
+
+ bsr.l _dmem_read # copy data from user mem
+
+ mov.l (%sp)+,%d0 # retrieve # of bytes
+
+ tst.l %d1 # did dfetch fail?
+ bne.w fmovm_in_err # yes
+
+ mov.l (%sp)+,%d1 # load bit string
+
+ lea (%sp),%a0 # addr of stack
+
+ tst.b %d1 # should FP0 be moved?
+ bpl.b fmovm_data_in_fp1 # no
+
+ mov.l (%a0)+,0x0+EXC_FP0(%a6) # yes
+ mov.l (%a0)+,0x4+EXC_FP0(%a6)
+ mov.l (%a0)+,0x8+EXC_FP0(%a6)
+
+fmovm_data_in_fp1:
+ lsl.b &0x1,%d1 # should FP1 be moved?
+ bpl.b fmovm_data_in_fp2 # no
+
+ mov.l (%a0)+,0x0+EXC_FP1(%a6) # yes
+ mov.l (%a0)+,0x4+EXC_FP1(%a6)
+ mov.l (%a0)+,0x8+EXC_FP1(%a6)
+
+fmovm_data_in_fp2:
+ lsl.b &0x1,%d1 # should FP2 be moved?
+ bpl.b fmovm_data_in_fp3 # no
+
+ fmovm.x (%a0)+,&0x20 # yes
+
+fmovm_data_in_fp3:
+ lsl.b &0x1,%d1 # should FP3 be moved?
+ bpl.b fmovm_data_in_fp4 # no
+
+ fmovm.x (%a0)+,&0x10 # yes
+
+fmovm_data_in_fp4:
+ lsl.b &0x1,%d1 # should FP4 be moved?
+ bpl.b fmovm_data_in_fp5 # no
+
+ fmovm.x (%a0)+,&0x08 # yes
+
+fmovm_data_in_fp5:
+ lsl.b &0x1,%d1 # should FP5 be moved?
+ bpl.b fmovm_data_in_fp6 # no
+
+ fmovm.x (%a0)+,&0x04 # yes
+
+fmovm_data_in_fp6:
+ lsl.b &0x1,%d1 # should FP6 be moved?
+ bpl.b fmovm_data_in_fp7 # no
+
+ fmovm.x (%a0)+,&0x02 # yes
+
+fmovm_data_in_fp7:
+ lsl.b &0x1,%d1 # should FP7 be moved?
+ bpl.b fmovm_data_in_done # no
+
+ fmovm.x (%a0)+,&0x01 # yes
+
+fmovm_data_in_done:
+ add.l %d0,%sp # remove fpregs from stack
+ rts
+
+#####################################
+
+fmovm_data_done:
+ rts
+
+##############################################################################
+
+#
+# table indexed by the operation's bit string that gives the number
+# of bytes that will be moved.
+#
+# number of bytes = (# of 1's in bit string) * 12(bytes/fpreg)
+#
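+# ex: strg = 0x0b (%00001011) has three bits set, so the entry at
+# index 0x0b below is 3 * 12 = 0x24 bytes.
+#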
+tbl_fmovm_size:
+ byte 0x00,0x0c,0x0c,0x18,0x0c,0x18,0x18,0x24
+ byte 0x0c,0x18,0x18,0x24,0x18,0x24,0x24,0x30
+ byte 0x0c,0x18,0x18,0x24,0x18,0x24,0x24,0x30
+ byte 0x18,0x24,0x24,0x30,0x24,0x30,0x30,0x3c
+ byte 0x0c,0x18,0x18,0x24,0x18,0x24,0x24,0x30
+ byte 0x18,0x24,0x24,0x30,0x24,0x30,0x30,0x3c
+ byte 0x18,0x24,0x24,0x30,0x24,0x30,0x30,0x3c
+ byte 0x24,0x30,0x30,0x3c,0x30,0x3c,0x3c,0x48
+ byte 0x0c,0x18,0x18,0x24,0x18,0x24,0x24,0x30
+ byte 0x18,0x24,0x24,0x30,0x24,0x30,0x30,0x3c
+ byte 0x18,0x24,0x24,0x30,0x24,0x30,0x30,0x3c
+ byte 0x24,0x30,0x30,0x3c,0x30,0x3c,0x3c,0x48
+ byte 0x18,0x24,0x24,0x30,0x24,0x30,0x30,0x3c
+ byte 0x24,0x30,0x30,0x3c,0x30,0x3c,0x3c,0x48
+ byte 0x24,0x30,0x30,0x3c,0x30,0x3c,0x3c,0x48
+ byte 0x30,0x3c,0x3c,0x48,0x3c,0x48,0x48,0x54
+ byte 0x0c,0x18,0x18,0x24,0x18,0x24,0x24,0x30
+ byte 0x18,0x24,0x24,0x30,0x24,0x30,0x30,0x3c
+ byte 0x18,0x24,0x24,0x30,0x24,0x30,0x30,0x3c
+ byte 0x24,0x30,0x30,0x3c,0x30,0x3c,0x3c,0x48
+ byte 0x18,0x24,0x24,0x30,0x24,0x30,0x30,0x3c
+ byte 0x24,0x30,0x30,0x3c,0x30,0x3c,0x3c,0x48
+ byte 0x24,0x30,0x30,0x3c,0x30,0x3c,0x3c,0x48
+ byte 0x30,0x3c,0x3c,0x48,0x3c,0x48,0x48,0x54
+ byte 0x18,0x24,0x24,0x30,0x24,0x30,0x30,0x3c
+ byte 0x24,0x30,0x30,0x3c,0x30,0x3c,0x3c,0x48
+ byte 0x24,0x30,0x30,0x3c,0x30,0x3c,0x3c,0x48
+ byte 0x30,0x3c,0x3c,0x48,0x3c,0x48,0x48,0x54
+ byte 0x24,0x30,0x30,0x3c,0x30,0x3c,0x3c,0x48
+ byte 0x30,0x3c,0x3c,0x48,0x3c,0x48,0x48,0x54
+ byte 0x30,0x3c,0x3c,0x48,0x3c,0x48,0x48,0x54
+ byte 0x3c,0x48,0x48,0x54,0x48,0x54,0x54,0x60
+
+#
+# table to convert a pre-decrement bit string into a post-increment
+# or control bit string.
+# ex: 0x00 ==> 0x00
+# 0x01 ==> 0x80
+# 0x02 ==> 0x40
+# .
+# .
+# 0xfd ==> 0xbf
+# 0xfe ==> 0x7f
+# 0xff ==> 0xff
+#
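+# i.e., each entry is its own index with the bit order reversed
+# (bit 0 <-> bit 7, bit 1 <-> bit 6, ...); for instance,
+# 0x25 (%00100101) ==> 0xa4 (%10100100).
+#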
+tbl_fmovm_convert:
+ byte 0x00,0x80,0x40,0xc0,0x20,0xa0,0x60,0xe0
+ byte 0x10,0x90,0x50,0xd0,0x30,0xb0,0x70,0xf0
+ byte 0x08,0x88,0x48,0xc8,0x28,0xa8,0x68,0xe8
+ byte 0x18,0x98,0x58,0xd8,0x38,0xb8,0x78,0xf8
+ byte 0x04,0x84,0x44,0xc4,0x24,0xa4,0x64,0xe4
+ byte 0x14,0x94,0x54,0xd4,0x34,0xb4,0x74,0xf4
+ byte 0x0c,0x8c,0x4c,0xcc,0x2c,0xac,0x6c,0xec
+ byte 0x1c,0x9c,0x5c,0xdc,0x3c,0xbc,0x7c,0xfc
+ byte 0x02,0x82,0x42,0xc2,0x22,0xa2,0x62,0xe2
+ byte 0x12,0x92,0x52,0xd2,0x32,0xb2,0x72,0xf2
+ byte 0x0a,0x8a,0x4a,0xca,0x2a,0xaa,0x6a,0xea
+ byte 0x1a,0x9a,0x5a,0xda,0x3a,0xba,0x7a,0xfa
+ byte 0x06,0x86,0x46,0xc6,0x26,0xa6,0x66,0xe6
+ byte 0x16,0x96,0x56,0xd6,0x36,0xb6,0x76,0xf6
+ byte 0x0e,0x8e,0x4e,0xce,0x2e,0xae,0x6e,0xee
+ byte 0x1e,0x9e,0x5e,0xde,0x3e,0xbe,0x7e,0xfe
+ byte 0x01,0x81,0x41,0xc1,0x21,0xa1,0x61,0xe1
+ byte 0x11,0x91,0x51,0xd1,0x31,0xb1,0x71,0xf1
+ byte 0x09,0x89,0x49,0xc9,0x29,0xa9,0x69,0xe9
+ byte 0x19,0x99,0x59,0xd9,0x39,0xb9,0x79,0xf9
+ byte 0x05,0x85,0x45,0xc5,0x25,0xa5,0x65,0xe5
+ byte 0x15,0x95,0x55,0xd5,0x35,0xb5,0x75,0xf5
+ byte 0x0d,0x8d,0x4d,0xcd,0x2d,0xad,0x6d,0xed
+ byte 0x1d,0x9d,0x5d,0xdd,0x3d,0xbd,0x7d,0xfd
+ byte 0x03,0x83,0x43,0xc3,0x23,0xa3,0x63,0xe3
+ byte 0x13,0x93,0x53,0xd3,0x33,0xb3,0x73,0xf3
+ byte 0x0b,0x8b,0x4b,0xcb,0x2b,0xab,0x6b,0xeb
+ byte 0x1b,0x9b,0x5b,0xdb,0x3b,0xbb,0x7b,0xfb
+ byte 0x07,0x87,0x47,0xc7,0x27,0xa7,0x67,0xe7
+ byte 0x17,0x97,0x57,0xd7,0x37,0xb7,0x77,0xf7
+ byte 0x0f,0x8f,0x4f,0xcf,0x2f,0xaf,0x6f,0xef
+ byte 0x1f,0x9f,0x5f,0xdf,0x3f,0xbf,0x7f,0xff
+
+ global fmovm_calc_ea
+###############################################
+# _fmovm_calc_ea: calculate effective address #
+###############################################
+fmovm_calc_ea:
+ mov.l %d0,%a0 # move # bytes to a0
+
+# currently, MODE and REG are taken from the EXC_OPWORD. this could be
+# easily changed if they were inputs passed in registers.
+ mov.w EXC_OPWORD(%a6),%d0 # fetch opcode word
+ mov.w %d0,%d1 # make a copy
+
+ andi.w &0x3f,%d0 # extract mode field
+ andi.l &0x7,%d1 # extract reg field
+
+# jump to the corresponding function for each {MODE,REG} pair.
+ mov.w (tbl_fea_mode.b,%pc,%d0.w*2),%d0 # fetch jmp distance
+ jmp (tbl_fea_mode.b,%pc,%d0.w*1) # jmp to correct ea mode
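+
+# ex: for <ea> = (a3), the low opword bits are mode %010, reg %011,
+# so d0 = 0x13 and entry 19 of the table below is fetched, which
+# holds "faddr_ind_a3 - tbl_fea_mode".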
+
+ swbeg &64
+tbl_fea_mode:
+ short tbl_fea_mode - tbl_fea_mode
+ short tbl_fea_mode - tbl_fea_mode
+ short tbl_fea_mode - tbl_fea_mode
+ short tbl_fea_mode - tbl_fea_mode
+ short tbl_fea_mode - tbl_fea_mode
+ short tbl_fea_mode - tbl_fea_mode
+ short tbl_fea_mode - tbl_fea_mode
+ short tbl_fea_mode - tbl_fea_mode
+
+ short tbl_fea_mode - tbl_fea_mode
+ short tbl_fea_mode - tbl_fea_mode
+ short tbl_fea_mode - tbl_fea_mode
+ short tbl_fea_mode - tbl_fea_mode
+ short tbl_fea_mode - tbl_fea_mode
+ short tbl_fea_mode - tbl_fea_mode
+ short tbl_fea_mode - tbl_fea_mode
+ short tbl_fea_mode - tbl_fea_mode
+
+ short faddr_ind_a0 - tbl_fea_mode
+ short faddr_ind_a1 - tbl_fea_mode
+ short faddr_ind_a2 - tbl_fea_mode
+ short faddr_ind_a3 - tbl_fea_mode
+ short faddr_ind_a4 - tbl_fea_mode
+ short faddr_ind_a5 - tbl_fea_mode
+ short faddr_ind_a6 - tbl_fea_mode
+ short faddr_ind_a7 - tbl_fea_mode
+
+ short faddr_ind_p_a0 - tbl_fea_mode
+ short faddr_ind_p_a1 - tbl_fea_mode
+ short faddr_ind_p_a2 - tbl_fea_mode
+ short faddr_ind_p_a3 - tbl_fea_mode
+ short faddr_ind_p_a4 - tbl_fea_mode
+ short faddr_ind_p_a5 - tbl_fea_mode
+ short faddr_ind_p_a6 - tbl_fea_mode
+ short faddr_ind_p_a7 - tbl_fea_mode
+
+ short faddr_ind_m_a0 - tbl_fea_mode
+ short faddr_ind_m_a1 - tbl_fea_mode
+ short faddr_ind_m_a2 - tbl_fea_mode
+ short faddr_ind_m_a3 - tbl_fea_mode
+ short faddr_ind_m_a4 - tbl_fea_mode
+ short faddr_ind_m_a5 - tbl_fea_mode
+ short faddr_ind_m_a6 - tbl_fea_mode
+ short faddr_ind_m_a7 - tbl_fea_mode
+
+ short faddr_ind_disp_a0 - tbl_fea_mode
+ short faddr_ind_disp_a1 - tbl_fea_mode
+ short faddr_ind_disp_a2 - tbl_fea_mode
+ short faddr_ind_disp_a3 - tbl_fea_mode
+ short faddr_ind_disp_a4 - tbl_fea_mode
+ short faddr_ind_disp_a5 - tbl_fea_mode
+ short faddr_ind_disp_a6 - tbl_fea_mode
+ short faddr_ind_disp_a7 - tbl_fea_mode
+
+ short faddr_ind_ext - tbl_fea_mode
+ short faddr_ind_ext - tbl_fea_mode
+ short faddr_ind_ext - tbl_fea_mode
+ short faddr_ind_ext - tbl_fea_mode
+ short faddr_ind_ext - tbl_fea_mode
+ short faddr_ind_ext - tbl_fea_mode
+ short faddr_ind_ext - tbl_fea_mode
+ short faddr_ind_ext - tbl_fea_mode
+
+ short fabs_short - tbl_fea_mode
+ short fabs_long - tbl_fea_mode
+ short fpc_ind - tbl_fea_mode
+ short fpc_ind_ext - tbl_fea_mode
+ short tbl_fea_mode - tbl_fea_mode
+ short tbl_fea_mode - tbl_fea_mode
+ short tbl_fea_mode - tbl_fea_mode
+ short tbl_fea_mode - tbl_fea_mode
+
+###################################
+# Address register indirect: (An) #
+###################################
+faddr_ind_a0:
+ mov.l EXC_DREGS+0x8(%a6),%a0 # Get current a0
+ rts
+
+faddr_ind_a1:
+ mov.l EXC_DREGS+0xc(%a6),%a0 # Get current a1
+ rts
+
+faddr_ind_a2:
+ mov.l %a2,%a0 # Get current a2
+ rts
+
+faddr_ind_a3:
+ mov.l %a3,%a0 # Get current a3
+ rts
+
+faddr_ind_a4:
+ mov.l %a4,%a0 # Get current a4
+ rts
+
+faddr_ind_a5:
+ mov.l %a5,%a0 # Get current a5
+ rts
+
+faddr_ind_a6:
+ mov.l (%a6),%a0 # Get current a6
+ rts
+
+faddr_ind_a7:
+ mov.l EXC_A7(%a6),%a0 # Get current a7
+ rts
+
+#####################################################
+# Address register indirect w/ postincrement: (An)+ #
+#####################################################
+faddr_ind_p_a0:
+ mov.l EXC_DREGS+0x8(%a6),%d0 # Get current a0
+ mov.l %d0,%d1
+ add.l %a0,%d1 # Increment
+ mov.l %d1,EXC_DREGS+0x8(%a6) # Save incr value
+ mov.l %d0,%a0
+ rts
+
+faddr_ind_p_a1:
+ mov.l EXC_DREGS+0xc(%a6),%d0 # Get current a1
+ mov.l %d0,%d1
+ add.l %a0,%d1 # Increment
+ mov.l %d1,EXC_DREGS+0xc(%a6) # Save incr value
+ mov.l %d0,%a0
+ rts
+
+faddr_ind_p_a2:
+ mov.l %a2,%d0 # Get current a2
+ mov.l %d0,%d1
+ add.l %a0,%d1 # Increment
+ mov.l %d1,%a2 # Save incr value
+ mov.l %d0,%a0
+ rts
+
+faddr_ind_p_a3:
+ mov.l %a3,%d0 # Get current a3
+ mov.l %d0,%d1
+ add.l %a0,%d1 # Increment
+ mov.l %d1,%a3 # Save incr value
+ mov.l %d0,%a0
+ rts
+
+faddr_ind_p_a4:
+ mov.l %a4,%d0 # Get current a4
+ mov.l %d0,%d1
+ add.l %a0,%d1 # Increment
+ mov.l %d1,%a4 # Save incr value
+ mov.l %d0,%a0
+ rts
+
+faddr_ind_p_a5:
+ mov.l %a5,%d0 # Get current a5
+ mov.l %d0,%d1
+ add.l %a0,%d1 # Increment
+ mov.l %d1,%a5 # Save incr value
+ mov.l %d0,%a0
+ rts
+
+faddr_ind_p_a6:
+ mov.l (%a6),%d0 # Get current a6
+ mov.l %d0,%d1
+ add.l %a0,%d1 # Increment
+ mov.l %d1,(%a6) # Save incr value
+ mov.l %d0,%a0
+ rts
+
+faddr_ind_p_a7:
+ mov.b &mia7_flg,SPCOND_FLG(%a6) # set "special case" flag
+
+ mov.l EXC_A7(%a6),%d0 # Get current a7
+ mov.l %d0,%d1
+ add.l %a0,%d1 # Increment
+ mov.l %d1,EXC_A7(%a6) # Save incr value
+ mov.l %d0,%a0
+ rts
+
+####################################################
+# Address register indirect w/ predecrement: -(An) #
+####################################################
+faddr_ind_m_a0:
+ mov.l EXC_DREGS+0x8(%a6),%d0 # Get current a0
+ sub.l %a0,%d0 # Decrement
+ mov.l %d0,EXC_DREGS+0x8(%a6) # Save decr value
+ mov.l %d0,%a0
+ rts
+
+faddr_ind_m_a1:
+ mov.l EXC_DREGS+0xc(%a6),%d0 # Get current a1
+ sub.l %a0,%d0 # Decrement
+ mov.l %d0,EXC_DREGS+0xc(%a6) # Save decr value
+ mov.l %d0,%a0
+ rts
+
+faddr_ind_m_a2:
+ mov.l %a2,%d0 # Get current a2
+ sub.l %a0,%d0 # Decrement
+ mov.l %d0,%a2 # Save decr value
+ mov.l %d0,%a0
+ rts
+
+faddr_ind_m_a3:
+ mov.l %a3,%d0 # Get current a3
+ sub.l %a0,%d0 # Decrement
+ mov.l %d0,%a3 # Save decr value
+ mov.l %d0,%a0
+ rts
+
+faddr_ind_m_a4:
+ mov.l %a4,%d0 # Get current a4
+ sub.l %a0,%d0 # Decrement
+ mov.l %d0,%a4 # Save decr value
+ mov.l %d0,%a0
+ rts
+
+faddr_ind_m_a5:
+ mov.l %a5,%d0 # Get current a5
+ sub.l %a0,%d0 # Decrement
+ mov.l %d0,%a5 # Save decr value
+ mov.l %d0,%a0
+ rts
+
+faddr_ind_m_a6:
+ mov.l (%a6),%d0 # Get current a6
+ sub.l %a0,%d0 # Decrement
+ mov.l %d0,(%a6) # Save decr value
+ mov.l %d0,%a0
+ rts
+
+faddr_ind_m_a7:
+ mov.b &mda7_flg,SPCOND_FLG(%a6) # set "special case" flag
+
+ mov.l EXC_A7(%a6),%d0 # Get current a7
+ sub.l %a0,%d0 # Decrement
+ mov.l %d0,EXC_A7(%a6) # Save decr value
+ mov.l %d0,%a0
+ rts
+
+########################################################
+# Address register indirect w/ displacement: (d16, An) #
+########################################################
+faddr_ind_disp_a0:
+ mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
+ addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
+ bsr.l _imem_read_word
+
+ tst.l %d1 # did ifetch fail?
+ bne.l iea_iacc # yes
+
+ mov.w %d0,%a0 # sign extend displacement
+
+ add.l EXC_DREGS+0x8(%a6),%a0 # a0 + d16
+ rts
+
+faddr_ind_disp_a1:
+ mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
+ addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
+ bsr.l _imem_read_word
+
+ tst.l %d1 # did ifetch fail?
+ bne.l iea_iacc # yes
+
+ mov.w %d0,%a0 # sign extend displacement
+
+ add.l EXC_DREGS+0xc(%a6),%a0 # a1 + d16
+ rts
+
+faddr_ind_disp_a2:
+ mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
+ addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
+ bsr.l _imem_read_word
+
+ tst.l %d1 # did ifetch fail?
+ bne.l iea_iacc # yes
+
+ mov.w %d0,%a0 # sign extend displacement
+
+ add.l %a2,%a0 # a2 + d16
+ rts
+
+faddr_ind_disp_a3:
+ mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
+ addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
+ bsr.l _imem_read_word
+
+ tst.l %d1 # did ifetch fail?
+ bne.l iea_iacc # yes
+
+ mov.w %d0,%a0 # sign extend displacement
+
+ add.l %a3,%a0 # a3 + d16
+ rts
+
+faddr_ind_disp_a4:
+ mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
+ addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
+ bsr.l _imem_read_word
+
+ tst.l %d1 # did ifetch fail?
+ bne.l iea_iacc # yes
+
+ mov.w %d0,%a0 # sign extend displacement
+
+ add.l %a4,%a0 # a4 + d16
+ rts
+
+faddr_ind_disp_a5:
+ mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
+ addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
+ bsr.l _imem_read_word
+
+ tst.l %d1 # did ifetch fail?
+ bne.l iea_iacc # yes
+
+ mov.w %d0,%a0 # sign extend displacement
+
+ add.l %a5,%a0 # a5 + d16
+ rts
+
+faddr_ind_disp_a6:
+ mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
+ addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
+ bsr.l _imem_read_word
+
+ tst.l %d1 # did ifetch fail?
+ bne.l iea_iacc # yes
+
+ mov.w %d0,%a0 # sign extend displacement
+
+ add.l (%a6),%a0 # a6 + d16
+ rts
+
+faddr_ind_disp_a7:
+ mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
+ addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
+ bsr.l _imem_read_word
+
+ tst.l %d1 # did ifetch fail?
+ bne.l iea_iacc # yes
+
+ mov.w %d0,%a0 # sign extend displacement
+
+ add.l EXC_A7(%a6),%a0 # a7 + d16
+ rts
+
+########################################################################
+# Address register indirect w/ index(8-bit displacement): (d8, An, Xn) #
+# " " " w/ " (base displacement): (bd, An, Xn) #
+# Memory indirect postindexed: ([bd, An], Xn, od) #
+# Memory indirect preindexed: ([bd, An, Xn], od) #
+########################################################################
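+# extension word layout (standard M68000 family format):
+#   bit 15     : D/A -- index register is data (0) or address (1)
+#   bits 14-12 : index register number
+#   bit 11     : W/L -- index is sign-extended word (0) or longword (1)
+#   bits 10-9  : scale factor (%00=*1, %01=*2, %10=*4, %11=*8)
+#   bit 8      : 0 = brief format (8-bit disp), 1 = full format
+#   bits 7-0   : 8-bit displacement (brief format only)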
+faddr_ind_ext:
+ addq.l &0x8,%d1
+ bsr.l fetch_dreg # fetch base areg
+ mov.l %d0,-(%sp)
+
+ mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
+ addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
+ bsr.l _imem_read_word # fetch extword in d0
+
+ tst.l %d1 # did ifetch fail?
+ bne.l iea_iacc # yes
+
+ mov.l (%sp)+,%a0
+
+ btst &0x8,%d0
+ bne.w fcalc_mem_ind
+
+ mov.l %d0,L_SCR1(%a6) # hold opword
+
+ mov.l %d0,%d1
+ rol.w &0x4,%d1
+ andi.w &0xf,%d1 # extract index regno
+
+# count on fetch_dreg() not to alter a0...
+ bsr.l fetch_dreg # fetch index
+
+ mov.l %d2,-(%sp) # save d2
+ mov.l L_SCR1(%a6),%d2 # fetch opword
+
+ btst &0xb,%d2 # is it word or long?
+ bne.b faii8_long
+ ext.l %d0 # sign extend word index
+faii8_long:
+ mov.l %d2,%d1
+ rol.w &0x7,%d1
+ andi.l &0x3,%d1 # extract scale value
+
+ lsl.l %d1,%d0 # shift index by scale
+
+ extb.l %d2 # sign extend displacement
+ add.l %d2,%d0 # index + disp
+ add.l %d0,%a0 # An + (index + disp)
+
+ mov.l (%sp)+,%d2 # restore old d2
+ rts
+
+###########################
+# Absolute short: (XXX).W #
+###########################
+fabs_short:
+ mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
+ addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
+ bsr.l _imem_read_word # fetch short address
+
+ tst.l %d1 # did ifetch fail?
+ bne.l iea_iacc # yes
+
+ mov.w %d0,%a0 # return <ea> in a0
+ rts
+
+##########################
+# Absolute long: (XXX).L #
+##########################
+fabs_long:
+ mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
+ addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
+ bsr.l _imem_read_long # fetch long address
+
+ tst.l %d1 # did ifetch fail?
+ bne.l iea_iacc # yes
+
+ mov.l %d0,%a0 # return <ea> in a0
+ rts
+
+#######################################################
+# Program counter indirect w/ displacement: (d16, PC) #
+#######################################################
+fpc_ind:
+ mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
+ addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
+ bsr.l _imem_read_word # fetch word displacement
+
+ tst.l %d1 # did ifetch fail?
+ bne.l iea_iacc # yes
+
+ mov.w %d0,%a0 # sign extend displacement
+
+ add.l EXC_EXTWPTR(%a6),%a0 # pc + d16
+
+# _imem_read_word() increased the extwptr by 2. need to adjust here.
+ subq.l &0x2,%a0 # adjust <ea>
+ rts
+
+##########################################################
+# PC indirect w/ index(8-bit displacement): (d8, PC, An) #
+# " " w/ " (base displacement): (bd, PC, An) #
+# PC memory indirect postindexed: ([bd, PC], Xn, od) #
+# PC memory indirect preindexed: ([bd, PC, Xn], od) #
+##########################################################
+fpc_ind_ext:
+ mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
+ addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
+ bsr.l _imem_read_word # fetch ext word
+
+ tst.l %d1 # did ifetch fail?
+ bne.l iea_iacc # yes
+
+ mov.l EXC_EXTWPTR(%a6),%a0 # put base in a0
+ subq.l &0x2,%a0 # adjust base
+
+ btst &0x8,%d0 # is disp only 8 bits?
+ bne.w fcalc_mem_ind # calc memory indirect
+
+ mov.l %d0,L_SCR1(%a6) # store opword
+
+ mov.l %d0,%d1 # make extword copy
+ rol.w &0x4,%d1 # rotate reg num into place
+ andi.w &0xf,%d1 # extract register number
+
+# count on fetch_dreg() not to alter a0...
+ bsr.l fetch_dreg # fetch index
+
+ mov.l %d2,-(%sp) # save d2
+ mov.l L_SCR1(%a6),%d2 # fetch opword
+
+ btst &0xb,%d2 # is index word or long?
+ bne.b fpii8_long # long
+ ext.l %d0 # sign extend word index
+fpii8_long:
+ mov.l %d2,%d1
+ rol.w &0x7,%d1 # rotate scale value into place
+ andi.l &0x3,%d1 # extract scale value
+
+ lsl.l %d1,%d0 # shift index by scale
+
+ extb.l %d2 # sign extend displacement
+ add.l %d2,%d0 # disp + index
+ add.l %d0,%a0 # An + (index + disp)
+
+ mov.l (%sp)+,%d2 # restore temp register
+ rts
+
+# d2 = index
+# d3 = base
+# d4 = od
+# d5 = extword
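+#
+# post-indexed: <ea> = (mem[base + bd]) + index + od
+# pre-indexed: <ea> = mem[base + bd + index] + od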
+fcalc_mem_ind:
+ btst &0x6,%d0 # is the index suppressed?
+ beq.b fcalc_index
+
+ movm.l &0x3c00,-(%sp) # save d2-d5
+
+ mov.l %d0,%d5 # put extword in d5
+ mov.l %a0,%d3 # put base in d3
+
+ clr.l %d2 # yes, so index = 0
+ bra.b fbase_supp_ck
+
+# index:
+fcalc_index:
+ mov.l %d0,L_SCR1(%a6) # save d0 (opword)
+ bfextu %d0{&16:&4},%d1 # fetch dreg index
+ bsr.l fetch_dreg
+
+ movm.l &0x3c00,-(%sp) # save d2-d5
+ mov.l %d0,%d2 # put index in d2
+ mov.l L_SCR1(%a6),%d5
+ mov.l %a0,%d3
+
+ btst &0xb,%d5 # is index word or long?
+ bne.b fno_ext
+ ext.l %d2
+
+fno_ext:
+ bfextu %d5{&21:&2},%d0
+ lsl.l %d0,%d2
+
+# base address (passed as parameter in d3):
+# we clear the value here if it should actually be suppressed.
+fbase_supp_ck:
+ btst &0x7,%d5 # is the base register suppressed?
+ beq.b fno_base_sup
+ clr.l %d3
+
+# base displacement:
+fno_base_sup:
+ bfextu %d5{&26:&2},%d0 # get bd size
+# beq.l fmovm_error # if (size == 0) it's reserved
+
+ cmpi.b %d0,&0x2
+ blt.b fno_bd
+ beq.b fget_word_bd
+
+ mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
+ addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
+ bsr.l _imem_read_long
+
+ tst.l %d1 # did ifetch fail?
+ bne.l fcea_iacc # yes
+
+ bra.b fchk_ind
+
+fget_word_bd:
+ mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
+ addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
+ bsr.l _imem_read_word
+
+ tst.l %d1 # did ifetch fail?
+ bne.l fcea_iacc # yes
+
+ ext.l %d0 # sign extend bd
+
+fchk_ind:
+ add.l %d0,%d3 # base += bd
+
+# outer displacement:
+fno_bd:
+ bfextu %d5{&30:&2},%d0 # is od suppressed?
+ beq.w faii_bd
+
+ cmpi.b %d0,&0x2
+ blt.b fnull_od
+ beq.b fword_od
+
+ mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
+ addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
+ bsr.l _imem_read_long
+
+ tst.l %d1 # did ifetch fail?
+ bne.l fcea_iacc # yes
+
+ bra.b fadd_them
+
+fword_od:
+ mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
+ addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
+ bsr.l _imem_read_word
+
+ tst.l %d1 # did ifetch fail?
+ bne.l fcea_iacc # yes
+
+ ext.l %d0 # sign extend od
+ bra.b fadd_them
+
+fnull_od:
+ clr.l %d0
+
+fadd_them:
+ mov.l %d0,%d4
+
+ btst &0x2,%d5 # pre or post indexing?
+ beq.b fpre_indexed
+
+ mov.l %d3,%a0
+ bsr.l _dmem_read_long
+
+ tst.l %d1 # did dfetch fail?
+ bne.w fcea_err # yes
+
+ add.l %d2,%d0 # <ea> += index
+ add.l %d4,%d0 # <ea> += od
+ bra.b fdone_ea
+
+fpre_indexed:
+ add.l %d2,%d3 # preindexing
+ mov.l %d3,%a0
+ bsr.l _dmem_read_long
+
+ tst.l %d1 # did dfetch fail?
+ bne.w fcea_err # yes
+
+ add.l %d4,%d0 # ea += od
+ bra.b fdone_ea
+
+faii_bd:
+ add.l %d2,%d3 # ea = (base + bd) + index
+ mov.l %d3,%d0
+fdone_ea:
+ mov.l %d0,%a0
+
+ movm.l (%sp)+,&0x003c # restore d2-d5
+ rts
+
+#########################################################
+fcea_err:
+ mov.l %d3,%a0
+
+ movm.l (%sp)+,&0x003c # restore d2-d5
+ mov.w &0x0101,%d0
+ bra.l iea_dacc
+
+fcea_iacc:
+ movm.l (%sp)+,&0x003c # restore d2-d5
+ bra.l iea_iacc
+
+fmovm_out_err:
+ bsr.l restore
+ mov.w &0x00e1,%d0
+ bra.b fmovm_err
+
+fmovm_in_err:
+ bsr.l restore
+ mov.w &0x0161,%d0
+
+fmovm_err:
+ mov.l L_SCR1(%a6),%a0
+ bra.l iea_dacc
+
+#########################################################################
+# XDEF **************************************************************** #
+# fmovm_ctrl(): emulate fmovm.l of control registers instr #
+# #
+# XREF **************************************************************** #
+# _imem_read_long() - read longword from memory #
+# iea_iacc() - _imem_read_long() failed; error recovery #
+# #
+# INPUT *************************************************************** #
+# None #
+# #
+# OUTPUT ************************************************************** #
+# If _imem_read_long() doesn't fail: #
+# USER_FPCR(a6) = new FPCR value #
+# USER_FPSR(a6) = new FPSR value #
+# USER_FPIAR(a6) = new FPIAR value #
+# #
+# ALGORITHM *********************************************************** #
+# Decode the instruction type by looking at the extension word #
+# in order to see how many control registers to fetch from memory. #
+# Fetch them using _imem_read_long(). If this fetch fails, exit through #
+# the special access error exit handler iea_iacc(). #
+# #
+# Instruction word decoding: #
+# #
+# fmovem.l #<data>, {FPIAR&|FPCR&|FPSR} #
+# #
+# WORD1 WORD2 #
+# 1111 0010 00 111100 100$ $$00 0000 0000 #
+# #
+# $$$ (100): FPCR #
+# (010): FPSR #
+# (001): FPIAR #
+# (000): FPIAR #
+# #
+#########################################################################
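+
+# ex: an extension word of 0x9800 has select bits $$$ = %110, so the
+# code below takes the "fpcr & fpsr" case (fctrl_in_6) and fetches
+# two longwords from the instruction stream.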
+
+ global fmovm_ctrl
+fmovm_ctrl:
+ mov.b EXC_EXTWORD(%a6),%d0 # fetch reg select bits
+ cmpi.b %d0,&0x9c # fpcr & fpsr & fpiar ?
+ beq.w fctrl_in_7 # yes
+ cmpi.b %d0,&0x98 # fpcr & fpsr ?
+ beq.w fctrl_in_6 # yes
+ cmpi.b %d0,&0x94 # fpcr & fpiar ?
+ beq.b fctrl_in_5 # yes
+
+# fmovem.l #<data>, fpsr/fpiar
+fctrl_in_3:
+ mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
+ addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
+ bsr.l _imem_read_long # fetch FPSR from mem
+
+ tst.l %d1 # did ifetch fail?
+ bne.l iea_iacc # yes
+
+ mov.l %d0,USER_FPSR(%a6) # store new FPSR to stack
+ mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
+ addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
+ bsr.l _imem_read_long # fetch FPIAR from mem
+
+ tst.l %d1 # did ifetch fail?
+ bne.l iea_iacc # yes
+
+ mov.l %d0,USER_FPIAR(%a6) # store new FPIAR to stack
+ rts
+
+# fmovem.l #<data>, fpcr/fpiar
+fctrl_in_5:
+ mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
+ addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
+ bsr.l _imem_read_long # fetch FPCR from mem
+
+ tst.l %d1 # did ifetch fail?
+ bne.l iea_iacc # yes
+
+ mov.l %d0,USER_FPCR(%a6) # store new FPCR to stack
+ mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
+ addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
+ bsr.l _imem_read_long # fetch FPIAR from mem
+
+ tst.l %d1 # did ifetch fail?
+ bne.l iea_iacc # yes
+
+ mov.l %d0,USER_FPIAR(%a6) # store new FPIAR to stack
+ rts
+
+# fmovem.l #<data>, fpcr/fpsr
+fctrl_in_6:
+ mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
+ addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
+ bsr.l _imem_read_long # fetch FPCR from mem
+
+ tst.l %d1 # did ifetch fail?
+ bne.l iea_iacc # yes
+
+ mov.l %d0,USER_FPCR(%a6) # store new FPCR to stack
+ mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
+ addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
+ bsr.l _imem_read_long # fetch FPSR from mem
+
+ tst.l %d1 # did ifetch fail?
+ bne.l iea_iacc # yes
+
+ mov.l %d0,USER_FPSR(%a6) # store new FPSR to stack
+ rts
+
+# fmovem.l #<data>, fpcr/fpsr/fpiar
+fctrl_in_7:
+ mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
+ addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
+ bsr.l _imem_read_long # fetch FPCR from mem
+
+ tst.l %d1 # did ifetch fail?
+ bne.l iea_iacc # yes
+
+ mov.l %d0,USER_FPCR(%a6) # store new FPCR to stack
+ mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
+ addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
+ bsr.l _imem_read_long # fetch FPSR from mem
+
+ tst.l %d1 # did ifetch fail?
+ bne.l iea_iacc # yes
+
+ mov.l %d0,USER_FPSR(%a6) # store new FPSR to stack
+ mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
+ addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
+ bsr.l _imem_read_long # fetch FPIAR from mem
+
+ tst.l %d1 # did ifetch fail?
+ bne.l iea_iacc # yes
+
+ mov.l %d0,USER_FPIAR(%a6) # store new FPIAR to stack
+ rts
+
+#########################################################################
+# XDEF **************************************************************** #
+# _dcalc_ea(): calc correct <ea> from <ea> stacked on exception #
+# #
+# XREF **************************************************************** #
+# inc_areg() - increment an address register #
+# dec_areg() - decrement an address register #
+# #
+# INPUT *************************************************************** #
+# d0 = number of bytes to adjust <ea> by #
+# #
+# OUTPUT ************************************************************** #
+# None #
+# #
+# ALGORITHM *********************************************************** #
+# "Dummy" CALCulate Effective Address: #
+# The stacked <ea> for FP unimplemented instructions and opclass #
+# two packed instructions is correct with the exception of... #
+# #
+# 1) -(An) : The register is not updated regardless of size. #
+# Also, for extended precision and packed, the #
+# stacked <ea> value is 8 bytes too big #
+# 2) (An)+ : The register is not updated. #
+# 3) #<data> : The upper longword of the immediate operand is #
+# stacked. b,w,l and s sizes are completely stacked; #
+# d,x, and p are not. #
+# #
+#########################################################################
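+
+# ex: (addresses chosen for illustration) for "fsin.x -(a2)" with
+# a2 = 0x2000, the extended operand lives at 0x1ff4 (0x2000 - 12),
+# but the stacked <ea> reads 0x1ffc; dcea_pd below decrements a2 by
+# 12 and dcea_pd2 subtracts 8 from the stacked value.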
+
+ global _dcalc_ea
+_dcalc_ea:
+ mov.l %d0, %a0 # move # bytes to %a0
+
+ mov.b 1+EXC_OPWORD(%a6), %d0 # fetch opcode word
+ mov.l %d0, %d1 # make a copy
+
+ andi.w &0x38, %d0 # extract mode field
+ andi.l &0x7, %d1 # extract reg field
+
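+# lo(opword) holds the standard <ea> field: bits 5:3 = mode and
+# bits 2:0 = reg. the mode values tested below are mode<<3:
+# 0x18 = (An)+, 0x20 = -(An); mode,reg = 0x3c = #<data>.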
+ cmpi.b %d0,&0x18 # is mode (An)+ ?
+ beq.b dcea_pi # yes
+
+ cmpi.b %d0,&0x20 # is mode -(An) ?
+ beq.b dcea_pd # yes
+
+ or.w %d1,%d0 # concat mode,reg
+ cmpi.b %d0,&0x3c # is mode #<data>?
+
+ beq.b dcea_imm # yes
+
+ mov.l EXC_EA(%a6),%a0 # return <ea>
+ rts
+
+# need to set immediate data flag here since we'll need to do
+# an imem_read to fetch this later.
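+# the immediate operand itself follows the opword and extension word,
+# so its address is (FPIAR)+0x4; FPIAR holds the address of the
+# exceptional instruction's F-line opword here.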
+dcea_imm:
+ mov.b &immed_flg,SPCOND_FLG(%a6)
+ lea ([USER_FPIAR,%a6],0x4),%a0 # return <ea> = (FPIAR)+0x4
+ rts
+
+# here, the <ea> is stacked correctly. however, we must update the
+# address register...
+dcea_pi:
+ mov.l %a0,%d0 # pass amt to inc by
+ bsr.l inc_areg # inc addr register
+
+ mov.l EXC_EA(%a6),%a0 # stacked <ea> is correct
+ rts
+
+# the <ea> is stacked correctly for all but extended and packed which
+# the <ea>s are 8 bytes too large.
+# it would make no sense to have a pre-decrement to a7 in supervisor
+# mode so we don't even worry about this tricky case here : )
+dcea_pd:
+ mov.l %a0,%d0 # pass amt to dec by
+ bsr.l dec_areg # dec addr register
+
+ mov.l EXC_EA(%a6),%a0 # stacked <ea> is correct
+
+ cmpi.b %d0,&0xc # is opsize ext or packed?
+ beq.b dcea_pd2 # yes
+ rts
+dcea_pd2:
+ sub.l &0x8,%a0 # correct <ea>
+ mov.l %a0,EXC_EA(%a6) # put correct <ea> on stack
+ rts
+
+#########################################################################
+# XDEF **************************************************************** #
+# _calc_ea_fout(): calculate correct stacked <ea> for extended #
+# and packed data opclass 3 operations. #
+# #
+# XREF **************************************************************** #
+# None #
+# #
+# INPUT *************************************************************** #
+# None #
+# #
+# OUTPUT ************************************************************** #
+# a0 = return correct effective address #
+# #
+# ALGORITHM *********************************************************** #
+# For opclass 3 extended and packed data operations, the <ea> #
+# stacked for the exception is incorrect for -(an) and (an)+ addressing #
+# modes. Also, while we're at it, the address register itself must get #
+# updated. #
+# So, for -(an), we must subtract 8 off of the stacked <ea> value #
+# and return that value as the correct <ea> and store that value in An. #
+# For (an)+, the stacked <ea> is correct but we must adjust An by +12. #
+# #
+#########################################################################
+
+# This calc_ea is currently used to retrieve the correct <ea>
+# for fmove outs of type extended and packed.
+ global _calc_ea_fout
+_calc_ea_fout:
+ mov.b 1+EXC_OPWORD(%a6),%d0 # fetch opcode word
+ mov.l %d0,%d1 # make a copy
+
+ andi.w &0x38,%d0 # extract mode field
+ andi.l &0x7,%d1 # extract reg field
+
+ cmpi.b %d0,&0x18 # is mode (An)+ ?
+ beq.b ceaf_pi # yes
+
+ cmpi.b %d0,&0x20 # is mode -(An) ?
+ beq.w ceaf_pd # yes
+
+ mov.l EXC_EA(%a6),%a0 # stacked <ea> is correct
+ rts
+
+# (An)+ : extended and packed fmove out
+# : stacked <ea> is correct
+# : "An" not updated
+ceaf_pi:
+ mov.w (tbl_ceaf_pi.b,%pc,%d1.w*2),%d1
+ mov.l EXC_EA(%a6),%a0
+ jmp (tbl_ceaf_pi.b,%pc,%d1.w*1)
+
+ swbeg &0x8
+tbl_ceaf_pi:
+ short ceaf_pi0 - tbl_ceaf_pi
+ short ceaf_pi1 - tbl_ceaf_pi
+ short ceaf_pi2 - tbl_ceaf_pi
+ short ceaf_pi3 - tbl_ceaf_pi
+ short ceaf_pi4 - tbl_ceaf_pi
+ short ceaf_pi5 - tbl_ceaf_pi
+ short ceaf_pi6 - tbl_ceaf_pi
+ short ceaf_pi7 - tbl_ceaf_pi
+
+ceaf_pi0:
+ addi.l &0xc,EXC_DREGS+0x8(%a6)
+ rts
+ceaf_pi1:
+ addi.l &0xc,EXC_DREGS+0xc(%a6)
+ rts
+ceaf_pi2:
+ add.l &0xc,%a2
+ rts
+ceaf_pi3:
+ add.l &0xc,%a3
+ rts
+ceaf_pi4:
+ add.l &0xc,%a4
+ rts
+ceaf_pi5:
+ add.l &0xc,%a5
+ rts
+ceaf_pi6:
+ addi.l &0xc,EXC_A6(%a6)
+ rts
+ceaf_pi7:
+ mov.b &mia7_flg,SPCOND_FLG(%a6)
+ addi.l &0xc,EXC_A7(%a6)
+ rts
+
+# -(An) : extended and packed fmove out
+# : stacked <ea> = actual <ea> + 8
+# : "An" not updated
+ceaf_pd:
+ mov.w (tbl_ceaf_pd.b,%pc,%d1.w*2),%d1
+ mov.l EXC_EA(%a6),%a0
+ sub.l &0x8,%a0
+ sub.l &0x8,EXC_EA(%a6)
+ jmp (tbl_ceaf_pd.b,%pc,%d1.w*1)
+
+ swbeg &0x8
+tbl_ceaf_pd:
+ short ceaf_pd0 - tbl_ceaf_pd
+ short ceaf_pd1 - tbl_ceaf_pd
+ short ceaf_pd2 - tbl_ceaf_pd
+ short ceaf_pd3 - tbl_ceaf_pd
+ short ceaf_pd4 - tbl_ceaf_pd
+ short ceaf_pd5 - tbl_ceaf_pd
+ short ceaf_pd6 - tbl_ceaf_pd
+ short ceaf_pd7 - tbl_ceaf_pd
+
+ceaf_pd0:
+ mov.l %a0,EXC_DREGS+0x8(%a6)
+ rts
+ceaf_pd1:
+ mov.l %a0,EXC_DREGS+0xc(%a6)
+ rts
+ceaf_pd2:
+ mov.l %a0,%a2
+ rts
+ceaf_pd3:
+ mov.l %a0,%a3
+ rts
+ceaf_pd4:
+ mov.l %a0,%a4
+ rts
+ceaf_pd5:
+ mov.l %a0,%a5
+ rts
+ceaf_pd6:
+ mov.l %a0,EXC_A6(%a6)
+ rts
+ceaf_pd7:
+ mov.l %a0,EXC_A7(%a6)
+ mov.b &mda7_flg,SPCOND_FLG(%a6)
+ rts
+
+#########################################################################
+# XDEF **************************************************************** #
+# _load_fop(): load operand for unimplemented FP exception #
+# #
+# XREF **************************************************************** #
+# set_tag_x() - determine ext prec optype tag #
+# set_tag_s() - determine sgl prec optype tag #
+# set_tag_d() - determine dbl prec optype tag #
+# unnorm_fix() - convert normalized number to denorm or zero #
+# norm() - normalize a denormalized number #
+# get_packed() - fetch a packed operand from memory #
+# _dcalc_ea() - calculate <ea>, fixing An in process #
+# #
+# _imem_read_{word,long}() - read from instruction memory #
+# _dmem_read() - read from data memory #
+# _dmem_read_{byte,word,long}() - read from data memory #
+# #
+# facc_in_{b,w,l,d,x}() - mem read failed; special exit point #
+# #
+# INPUT *************************************************************** #
+# None #
+# #
+# OUTPUT ************************************************************** #
+# If memory access doesn't fail: #
+# FP_SRC(a6) = source operand in extended precision #
+# FP_DST(a6) = destination operand in extended precision #
+# #
+# ALGORITHM *********************************************************** #
+# This is called from the Unimplemented FP exception handler in #
+# order to load the source and maybe destination operand into #
+# FP_SRC(a6) and FP_DST(a6). If the instruction was opclass zero, load #
+# the source and destination from the FP register file. Set the optype #
+# tags for both if dyadic, one for monadic. If a number is an UNNORM, #
+# convert it to a DENORM or a ZERO. #
+# If the instruction is opclass two (memory->reg), then fetch #
+# the destination from the register file and the source operand from #
+# memory. Tag and fix both as above w/ opclass zero instructions. #
+# If the source operand is byte,word,long, or single, it may be #
+# in the data register file. If it's actually out in memory, use one of #
+# the mem_read() routines to fetch it. If the mem_read() access returns #
+# a failing value, exit through the special facc_in() routine which #
+# will create an access error exception frame from the current exception #
+# frame. #
+# Immediate data and regular data accesses are separated because #
+# if an immediate data access fails, the resulting fault status #
+# longword stacked for the access error exception must have the #
+# instruction bit set. #
+# #
+#########################################################################
+
+ global _load_fop
+_load_fop:
+
+#      15      13 12      10 9       7 6                    0
+#      ------------------------------------------------------
+#      | opclass |   RX     |   RY    |     EXTENSION       |
+#      ------------------------------------------------------
+#      (2nd word of general FP instruction)
+#
+
+# bfextu EXC_CMDREG(%a6){&0:&3}, %d0 # extract opclass
+# cmpi.b %d0, &0x2 # which class is it? ('000,'010,'011)
+# beq.w op010 # handle <ea> -> fpn
+# bgt.w op011 # handle fpn -> <ea>
+
+# we're not using op011 for now...
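+# (btst &0x6 of hi(extension word) tests opclass bit 14: clear for
+# opclass '000 (reg -> reg), set for opclass '010 (<ea> -> reg).)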
+ btst &0x6,EXC_CMDREG(%a6)
+ bne.b op010
+
+############################
+# OPCLASS '000: reg -> reg #
+############################
+op000:
+ mov.b 1+EXC_CMDREG(%a6),%d0 # fetch extension word lo
+ btst &0x5,%d0 # testing extension bits
+ beq.b op000_src # (bit 5 == 0) => monadic
+ btst &0x4,%d0 # (bit 5 == 1)
+ beq.b op000_dst # (bit 4 == 0) => dyadic
+ and.w &0x007f,%d0 # extract extension bits {6:0}
+ cmpi.w %d0,&0x0038 # is it an fcmp (dyadic) ?
+ bne.b op000_src # no; not an fcmp => monadic
+
+op000_dst:
+ bfextu EXC_CMDREG(%a6){&6:&3}, %d0 # extract dst field
+ bsr.l load_fpn2 # fetch dst fpreg into FP_DST
+
+ bsr.l set_tag_x # get dst optype tag
+
+ cmpi.b %d0, &UNNORM # is dst fpreg an UNNORM?
+ beq.b op000_dst_unnorm # yes
+op000_dst_cont:
+ mov.b %d0, DTAG(%a6) # store the dst optype tag
+
+op000_src:
+ bfextu EXC_CMDREG(%a6){&3:&3}, %d0 # extract src field
+ bsr.l load_fpn1 # fetch src fpreg into FP_SRC
+
+ bsr.l set_tag_x # get src optype tag
+
+ cmpi.b %d0, &UNNORM # is src fpreg an UNNORM?
+ beq.b op000_src_unnorm # yes
+op000_src_cont:
+ mov.b %d0, STAG(%a6) # store the src optype tag
+ rts
+
+op000_dst_unnorm:
+ bsr.l unnorm_fix # fix the dst UNNORM
+ bra.b op000_dst_cont
+op000_src_unnorm:
+ bsr.l unnorm_fix # fix the src UNNORM
+ bra.b op000_src_cont
+
+#############################
+# OPCLASS '010: <ea> -> reg #
+#############################
+op010:
+ mov.w EXC_CMDREG(%a6),%d0 # fetch extension word
+ btst &0x5,%d0 # testing extension bits
+ beq.b op010_src # (bit 5 == 0) => monadic
+ btst &0x4,%d0 # (bit 5 == 1)
+ beq.b op010_dst # (bit 4 == 0) => dyadic
+ and.w &0x007f,%d0 # extract extension bits {6:0}
+ cmpi.w %d0,&0x0038 # is it an fcmp (dyadic) ?
+ bne.b op010_src # no; not an fcmp => monadic
+
+op010_dst:
+ bfextu EXC_CMDREG(%a6){&6:&3}, %d0 # extract dst field
+ bsr.l load_fpn2 # fetch dst fpreg ptr
+
+ bsr.l set_tag_x # get dst type tag
+
+ cmpi.b %d0, &UNNORM # is dst fpreg an UNNORM?
+ beq.b op010_dst_unnorm # yes
+op010_dst_cont:
+ mov.b %d0, DTAG(%a6) # store the dst optype tag
+
+op010_src:
+ bfextu EXC_CMDREG(%a6){&3:&3}, %d0 # extract src type field
+
+ bfextu EXC_OPWORD(%a6){&10:&3}, %d1 # extract <ea> mode field
+ bne.w fetch_from_mem # src op is in memory
+
+op010_dreg:
+ clr.b STAG(%a6) # either NORM or ZERO
+ bfextu EXC_OPWORD(%a6){&13:&3}, %d1 # extract src reg field
+
+ mov.w (tbl_op010_dreg.b,%pc,%d0.w*2), %d0 # jmp based on optype
+ jmp (tbl_op010_dreg.b,%pc,%d0.w*1) # fetch src from dreg
+
+op010_dst_unnorm:
+ bsr.l unnorm_fix # fix the dst UNNORM
+ bra.b op010_dst_cont
+
+ swbeg &0x8
+tbl_op010_dreg:
+ short opd_long - tbl_op010_dreg
+ short opd_sgl - tbl_op010_dreg
+ short tbl_op010_dreg - tbl_op010_dreg
+ short tbl_op010_dreg - tbl_op010_dreg
+ short opd_word - tbl_op010_dreg
+ short tbl_op010_dreg - tbl_op010_dreg
+ short opd_byte - tbl_op010_dreg
+ short tbl_op010_dreg - tbl_op010_dreg
+
+#
+# LONG: can be either NORM or ZERO...
+#
+opd_long:
+ bsr.l fetch_dreg # fetch long in d0
+ fmov.l %d0, %fp0 # load a long
+ fmovm.x &0x80, FP_SRC(%a6) # return src op in FP_SRC
+ fbeq.w opd_long_zero # long is a ZERO
+ rts
+opd_long_zero:
+ mov.b &ZERO, STAG(%a6) # set ZERO optype flag
+ rts
+
+#
+# WORD: can be either NORM or ZERO...
+#
+opd_word:
+ bsr.l fetch_dreg # fetch word in d0
+ fmov.w %d0, %fp0 # load a word
+ fmovm.x &0x80, FP_SRC(%a6) # return src op in FP_SRC
+ fbeq.w opd_word_zero # WORD is a ZERO
+ rts
+opd_word_zero:
+ mov.b &ZERO, STAG(%a6) # set ZERO optype flag
+ rts
+
+#
+# BYTE: can be either NORM or ZERO...
+#
+opd_byte:
+ bsr.l fetch_dreg # fetch byte in d0
+ fmov.b %d0, %fp0 # load a byte
+ fmovm.x &0x80, FP_SRC(%a6) # return src op in FP_SRC
+ fbeq.w opd_byte_zero # byte is a ZERO
+ rts
+opd_byte_zero:
+ mov.b &ZERO, STAG(%a6) # set ZERO optype flag
+ rts
+
+#
+# SGL: can be either NORM, DENORM, ZERO, INF, QNAN or SNAN but not UNNORM
+#
+# separate SNANs and DENORMs so they can be loaded w/ special care.
+# all others can simply be moved "in" using fmove.
+#
+opd_sgl:
+ bsr.l fetch_dreg # fetch sgl in d0
+ mov.l %d0,L_SCR1(%a6)
+
+ lea L_SCR1(%a6), %a0 # pass: ptr to the sgl
+ bsr.l set_tag_s # determine sgl type
+ mov.b %d0, STAG(%a6) # save the src tag
+
+ cmpi.b %d0, &SNAN # is it an SNAN?
+ beq.w get_sgl_snan # yes
+
+ cmpi.b %d0, &DENORM # is it a DENORM?
+ beq.w get_sgl_denorm # yes
+
+ fmov.s (%a0), %fp0 # no, so can load it regular
+ fmovm.x &0x80, FP_SRC(%a6) # return src op in FP_SRC
+ rts
+
+##############################################################################
+
+#########################################################################
+# fetch_from_mem(): #
+# - src is out in memory. must: #
+# (1) calc ea - must read AFTER you know the src type since #
+# if the ea is -() or ()+, need to know # of bytes. #
+# (2) read it in from either user or supervisor space #
+# (3) if (b || w || l) then simply read in #
+# if (s || d || x) then check for SNAN,UNNORM,DENORM #
+# if (packed) then punt for now #
+# INPUT: #
+# %d0 : src type field #
+#########################################################################
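+# dispatch idiom used by the jump tables here: each table entry is a
+# signed word offset from the table base; the first pc-relative fetch
+# (index scaled by 2) picks up the offset and the jmp adds it back to
+# the table base to reach the handler. swbeg &N marks an N-entry
+# switch table for the assembler.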
+fetch_from_mem:
+ clr.b STAG(%a6) # either NORM or ZERO
+
+ mov.w (tbl_fp_type.b,%pc,%d0.w*2), %d0 # index by src type field
+ jmp (tbl_fp_type.b,%pc,%d0.w*1)
+
+ swbeg &0x8
+tbl_fp_type:
+ short load_long - tbl_fp_type
+ short load_sgl - tbl_fp_type
+ short load_ext - tbl_fp_type
+ short load_packed - tbl_fp_type
+ short load_word - tbl_fp_type
+ short load_dbl - tbl_fp_type
+ short load_byte - tbl_fp_type
+ short tbl_fp_type - tbl_fp_type
+
+#########################################
+# load a LONG into %fp0: #
+# -number can't fault #
+# (1) calc ea #
+# (2) read 4 bytes into L_SCR1 #
+# (3) fmov.l into %fp0 #
+#########################################
+load_long:
+ movq.l &0x4, %d0 # pass: 4 (bytes)
+ bsr.l _dcalc_ea # calc <ea>; <ea> in %a0
+
+ cmpi.b SPCOND_FLG(%a6),&immed_flg
+ beq.b load_long_immed
+
+ bsr.l _dmem_read_long # fetch src operand from memory
+
+ tst.l %d1 # did dfetch fail?
+ bne.l facc_in_l # yes
+
+load_long_cont:
+ fmov.l %d0, %fp0 # read into %fp0;convert to xprec
+ fmovm.x &0x80, FP_SRC(%a6) # return src op in FP_SRC
+
+ fbeq.w load_long_zero # src op is a ZERO
+ rts
+load_long_zero:
+ mov.b &ZERO, STAG(%a6) # set optype tag to ZERO
+ rts
+
+load_long_immed:
+ bsr.l _imem_read_long # fetch src operand immed data
+
+ tst.l %d1 # did ifetch fail?
+ bne.l funimp_iacc # yes
+ bra.b load_long_cont
+
+#########################################
+# load a WORD into %fp0: #
+# -number can't fault #
+# (1) calc ea #
+# (2) read 2 bytes into L_SCR1 #
+# (3) fmov.w into %fp0 #
+#########################################
+load_word:
+ movq.l &0x2, %d0 # pass: 2 (bytes)
+ bsr.l _dcalc_ea # calc <ea>; <ea> in %a0
+
+ cmpi.b SPCOND_FLG(%a6),&immed_flg
+ beq.b load_word_immed
+
+ bsr.l _dmem_read_word # fetch src operand from memory
+
+ tst.l %d1 # did dfetch fail?
+ bne.l facc_in_w # yes
+
+load_word_cont:
+ fmov.w %d0, %fp0 # read into %fp0;convert to xprec
+ fmovm.x &0x80, FP_SRC(%a6) # return src op in FP_SRC
+
+ fbeq.w load_word_zero # src op is a ZERO
+ rts
+load_word_zero:
+ mov.b &ZERO, STAG(%a6) # set optype tag to ZERO
+ rts
+
+load_word_immed:
+ bsr.l _imem_read_word # fetch src operand immed data
+
+ tst.l %d1 # did ifetch fail?
+ bne.l funimp_iacc # yes
+ bra.b load_word_cont
+
+#########################################
+# load a BYTE into %fp0: #
+# -number can't fault #
+# (1) calc ea #
+# (2) read 1 byte into L_SCR1 #
+# (3) fmov.b into %fp0 #
+#########################################
+load_byte:
+ movq.l &0x1, %d0 # pass: 1 (byte)
+ bsr.l _dcalc_ea # calc <ea>; <ea> in %a0
+
+ cmpi.b SPCOND_FLG(%a6),&immed_flg
+ beq.b load_byte_immed
+
+ bsr.l _dmem_read_byte # fetch src operand from memory
+
+ tst.l %d1 # did dfetch fail?
+ bne.l facc_in_b # yes
+
+load_byte_cont:
+ fmov.b %d0, %fp0 # read into %fp0;convert to xprec
+ fmovm.x &0x80, FP_SRC(%a6) # return src op in FP_SRC
+
+ fbeq.w load_byte_zero # src op is a ZERO
+ rts
+load_byte_zero:
+ mov.b &ZERO, STAG(%a6) # set optype tag to ZERO
+ rts
+
+load_byte_immed:
+ bsr.l _imem_read_word # fetch src operand immed data
+
+ tst.l %d1 # did ifetch fail?
+ bne.l funimp_iacc # yes
+ bra.b load_byte_cont
+
+#########################################
+# load a SGL into %fp0: #
+# -number can't fault #
+# (1) calc ea #
+# (2) read 4 bytes into L_SCR1 #
+# (3) fmov.s into %fp0 #
+#########################################
+load_sgl:
+ movq.l &0x4, %d0 # pass: 4 (bytes)
+ bsr.l _dcalc_ea # calc <ea>; <ea> in %a0
+
+ cmpi.b SPCOND_FLG(%a6),&immed_flg
+ beq.b load_sgl_immed
+
+ bsr.l _dmem_read_long # fetch src operand from memory
+ mov.l %d0, L_SCR1(%a6) # store src op on stack
+
+ tst.l %d1 # did dfetch fail?
+ bne.l facc_in_l # yes
+
+load_sgl_cont:
+ lea L_SCR1(%a6), %a0 # pass: ptr to sgl src op
+ bsr.l set_tag_s # determine src type tag
+ mov.b %d0, STAG(%a6) # save src optype tag on stack
+
+ cmpi.b %d0, &DENORM # is it a sgl DENORM?
+ beq.w get_sgl_denorm # yes
+
+ cmpi.b %d0, &SNAN # is it a sgl SNAN?
+ beq.w get_sgl_snan # yes
+
+ fmov.s L_SCR1(%a6), %fp0 # read into %fp0;convert to xprec
+ fmovm.x &0x80, FP_SRC(%a6) # return src op in FP_SRC
+ rts
+
+load_sgl_immed:
+ bsr.l _imem_read_long # fetch src operand immed data
+ mov.l %d0,L_SCR1(%a6) # store src op on stack (load_sgl_cont reads it there)
+
+ tst.l %d1 # did ifetch fail?
+ bne.l funimp_iacc # yes
+ bra.b load_sgl_cont
+
+# must convert sgl denorm format to an Xprec denorm fmt suitable for
+# normalization...
+# %a0 : points to sgl denorm
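+# a sgl denorm represents 0.mant x 2^-126. the 23-bit mantissa is
+# packed just below the ext integer bit; after norm() shifts left by
+# k bits, the biased ext exponent is 0x3f81 - k, since 0x3f81
+# (= 0x3fff - 126) is the biased form of -126. e.g., a leading one in
+# sgl mantissa bit 22 gives k = 1 and exp = 0x3f80 (unbiased -127).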
+get_sgl_denorm:
+ clr.w FP_SRC_EX(%a6)
+ bfextu (%a0){&9:&23}, %d0 # fetch sgl hi(_mantissa)
+ lsl.l &0x8, %d0
+ mov.l %d0, FP_SRC_HI(%a6) # set ext hi(_mantissa)
+ clr.l FP_SRC_LO(%a6) # set ext lo(_mantissa)
+
+ btst &0x7, (%a0) # is sgn bit set?
+ beq.b sgl_dnrm_norm
+ bset &0x7, FP_SRC_EX(%a6) # set sgn of xprec value
+
+sgl_dnrm_norm:
+ lea FP_SRC(%a6), %a0
+ bsr.l norm # normalize number
+ mov.w &0x3f81, %d1 # xprec exp = 0x3f81
+ sub.w %d0, %d1 # exp = 0x3f81 - shft amt.
+ or.w %d1, FP_SRC_EX(%a6) # {sgn,exp}
+
+ mov.b &NORM, STAG(%a6) # fix src type tag
+ rts
+
+# convert sgl to ext SNAN
+# %a0 : points to sgl SNAN
+get_sgl_snan:
+ mov.w &0x7fff, FP_SRC_EX(%a6) # set exp of SNAN
+ bfextu (%a0){&9:&23}, %d0
+ lsl.l &0x8, %d0 # extract and insert hi(man)
+ mov.l %d0, FP_SRC_HI(%a6)
+ clr.l FP_SRC_LO(%a6)
+
+ btst &0x7, (%a0) # see if sign of SNAN is set
+ beq.b no_sgl_snan_sgn
+ bset &0x7, FP_SRC_EX(%a6)
+no_sgl_snan_sgn:
+ rts
+
+#########################################
+# load a DBL into %fp0: #
+# -number can't fault #
+# (1) calc ea #
+# (2) read 8 bytes into L_SCR(1,2)#
+# (3) fmov.d into %fp0 #
+#########################################
+load_dbl:
+ movq.l &0x8, %d0 # pass: 8 (bytes)
+ bsr.l _dcalc_ea # calc <ea>; <ea> in %a0
+
+ cmpi.b SPCOND_FLG(%a6),&immed_flg
+ beq.b load_dbl_immed
+
+ lea L_SCR1(%a6), %a1 # pass: ptr to input dbl tmp space
+ movq.l &0x8, %d0 # pass: # bytes to read
+ bsr.l _dmem_read # fetch src operand from memory
+
+ tst.l %d1 # did dfetch fail?
+ bne.l facc_in_d # yes
+
+load_dbl_cont:
+ lea L_SCR1(%a6), %a0 # pass: ptr to input dbl
+ bsr.l set_tag_d # determine src type tag
+ mov.b %d0, STAG(%a6) # set src optype tag
+
+ cmpi.b %d0, &DENORM # is it a dbl DENORM?
+ beq.w get_dbl_denorm # yes
+
+ cmpi.b %d0, &SNAN # is it a dbl SNAN?
+ beq.w get_dbl_snan # yes
+
+ fmov.d L_SCR1(%a6), %fp0 # read into %fp0;convert to xprec
+ fmovm.x &0x80, FP_SRC(%a6) # return src op in FP_SRC
+ rts
+
+load_dbl_immed:
+ lea L_SCR1(%a6), %a1 # pass: ptr to input dbl tmp space
+ movq.l &0x8, %d0 # pass: # bytes to read
+ bsr.l _imem_read # fetch src operand from memory
+
+ tst.l %d1 # did ifetch fail?
+ bne.l funimp_iacc # yes
+ bra.b load_dbl_cont
+
+# must convert dbl denorm format to an Xprec denorm fmt suitable for
+# normalization...
+# %a0 : loc. of dbl denorm
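+# same scheme as the sgl case above: a dbl denorm is 0.mant x 2^-1022;
+# after norm() shifts left by k bits, the biased ext exponent is
+# 0x3c01 - k, since 0x3c01 (= 0x3fff - 1022) is the biased form
+# of -1022.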
+get_dbl_denorm:
+ clr.w FP_SRC_EX(%a6)
+ bfextu (%a0){&12:&31}, %d0 # fetch hi(_mantissa)
+ mov.l %d0, FP_SRC_HI(%a6)
+ bfextu 4(%a0){&11:&21}, %d0 # fetch lo(_mantissa)
+ mov.l &0xb, %d1
+ lsl.l %d1, %d0
+ mov.l %d0, FP_SRC_LO(%a6)
+
+ btst &0x7, (%a0) # is sgn bit set?
+ beq.b dbl_dnrm_norm
+ bset &0x7, FP_SRC_EX(%a6) # set sgn of xprec value
+
+dbl_dnrm_norm:
+ lea FP_SRC(%a6), %a0
+ bsr.l norm # normalize number
+ mov.w &0x3c01, %d1 # xprec exp = 0x3c01
+ sub.w %d0, %d1 # exp = 0x3c01 - shft amt.
+ or.w %d1, FP_SRC_EX(%a6) # {sgn,exp}
+
+ mov.b &NORM, STAG(%a6) # fix src type tag
+ rts
+
+# convert dbl to ext SNAN
+# %a0 : points to dbl SNAN
+get_dbl_snan:
+ mov.w &0x7fff, FP_SRC_EX(%a6) # set exp of SNAN
+
+ bfextu (%a0){&12:&31}, %d0 # fetch hi(_mantissa)
+ mov.l %d0, FP_SRC_HI(%a6)
+ bfextu 4(%a0){&11:&21}, %d0 # fetch lo(_mantissa)
+ mov.l &0xb, %d1
+ lsl.l %d1, %d0
+ mov.l %d0, FP_SRC_LO(%a6)
+
+ btst &0x7, (%a0) # see if sign of SNAN is set
+ beq.b no_dbl_snan_sgn
+ bset &0x7, FP_SRC_EX(%a6)
+no_dbl_snan_sgn:
+ rts
+
+#################################################
+# load a Xprec into %fp0: #
+# -number can't fault #
+# (1) calc ea #
+# (2) read 12 bytes into L_SCR(1,2) #
+# (3) fmov.x into %fp0 #
+#################################################
+load_ext:
+ mov.l &0xc, %d0 # pass: 12 (bytes)
+ bsr.l _dcalc_ea # calc <ea>
+
+ lea FP_SRC(%a6), %a1 # pass: ptr to input ext tmp space
+ mov.l &0xc, %d0 # pass: # of bytes to read
+ bsr.l _dmem_read # fetch src operand from memory
+
+ tst.l %d1 # did dfetch fail?
+ bne.l facc_in_x # yes
+
+ lea FP_SRC(%a6), %a0 # pass: ptr to src op
+ bsr.l set_tag_x # determine src type tag
+
+ cmpi.b %d0, &UNNORM # is the src op an UNNORM?
+ beq.b load_ext_unnorm # yes
+
+ mov.b %d0, STAG(%a6) # store the src optype tag
+ rts
+
+load_ext_unnorm:
+ bsr.l unnorm_fix # fix the src UNNORM
+ mov.b %d0, STAG(%a6) # store the src optype tag
+ rts
+
+#################################################
+# load a packed into %fp0: #
+# -number can't fault #
+# (1) calc ea #
+# (2) read 12 bytes into L_SCR(1,2,3) #
+# (3) fmov.x into %fp0 #
+#################################################
+load_packed:
+ bsr.l get_packed
+
+ lea FP_SRC(%a6),%a0 # pass ptr to src op
+ bsr.l set_tag_x # determine src type tag
+ cmpi.b %d0,&UNNORM # is the src op an UNNORM ZERO?
+ beq.b load_packed_unnorm # yes
+
+ mov.b %d0,STAG(%a6) # store the src optype tag
+ rts
+
+load_packed_unnorm:
+ bsr.l unnorm_fix # fix the UNNORM ZERO
+ mov.b %d0,STAG(%a6) # store the src optype tag
+ rts
+
+#########################################################################
+# XDEF **************************************************************** #
+# fout(): move from fp register to memory or data register #
+# #
+# XREF **************************************************************** #
+# _round() - needed to create EXOP for sgl/dbl precision #
+# norm() - needed to create EXOP for extended precision #
+# ovf_res() - create default overflow result for sgl/dbl precision#
+# unf_res() - create default underflow result for sgl/dbl prec. #
+# dst_dbl() - create rounded dbl precision result. #
+# dst_sgl() - create rounded sgl precision result. #
+# fetch_dreg() - fetch dynamic k-factor reg for packed. #
+# bindec() - convert FP binary number to packed number. #
+# _mem_write() - write data to memory. #
+# _mem_write2() - write data to memory unless supv mode -(a7) exc.#
+# _dmem_write_{byte,word,long}() - write data to memory. #
+# store_dreg_{b,w,l}() - store data to data register file. #
+# facc_out_{b,w,l,d,x}() - data access error occurred. #
+# #
+# INPUT *************************************************************** #
+# a0 = pointer to extended precision source operand #
+# d0 = round prec,mode #
+# #
+# OUTPUT ************************************************************** #
+# fp0 : intermediate underflow or overflow result if #
+# OVFL/UNFL occurred for a sgl or dbl operand #
+# #
+# ALGORITHM *********************************************************** #
+# This routine is accessed by many handlers that need to do an #
+# opclass three move of an operand out to memory. #
+# Decode an fmove out (opclass 3) instruction to determine if #
+# it's b,w,l,s,d,x, or p in size. b,w,l can be stored to either a data #
+# register or memory. The algorithm uses a standard "fmove" to create #
+# the rounded result. Also, since exceptions are disabled, this also #
+# creates the correct OPERR default result if appropriate. #
+# For sgl or dbl precision, overflow or underflow can occur. If #
+# either occurs and is enabled, an EXOP must be created. #
+# For extended precision, the stacked <ea> must be fixed along #
+# w/ the address index register as appropriate w/ _calc_ea_fout(). If #
+# the source is a denorm and if underflow is enabled, an EXOP must be #
+# created. #
+# For packed, the k-factor must be fetched from the instruction #
+# word or a data register. The <ea> must be fixed as w/ extended #
+# precision. Then, bindec() is called to create the appropriate #
+# packed result. #
+# If at any time an access error is flagged by one of the move- #
+# to-memory routines, then a special exit must be made so that the #
+# access error can be handled properly. #
+# #
+#########################################################################
+
+ global fout
+fout:
+ bfextu EXC_CMDREG(%a6){&3:&3},%d1 # extract dst fmt
+ mov.w (tbl_fout.b,%pc,%d1.w*2),%a1 # use as index
+ jmp (tbl_fout.b,%pc,%a1) # jump to routine
+
+ swbeg &0x8
+tbl_fout:
+ short fout_long - tbl_fout
+ short fout_sgl - tbl_fout
+ short fout_ext - tbl_fout
+ short fout_pack - tbl_fout
+ short fout_word - tbl_fout
+ short fout_dbl - tbl_fout
+ short fout_byte - tbl_fout
+ short fout_pack - tbl_fout
+
+#################################################################
+# fmove.b out ###################################################
+#################################################################
+
+# Only "Unimplemented Data Type" exceptions enter here. The operand
+# is either a DENORM or a NORM.
+fout_byte:
+ tst.b STAG(%a6) # is operand normalized?
+ bne.b fout_byte_denorm # no
+
+ fmovm.x SRC(%a0),&0x80 # load value
+
+fout_byte_norm:
+ fmov.l %d0,%fpcr # insert rnd prec,mode
+
+ fmov.b %fp0,%d0 # exec move out w/ correct rnd mode
+
+ fmov.l &0x0,%fpcr # clear FPCR
+ fmov.l %fpsr,%d1 # fetch FPSR
+ or.w %d1,2+USER_FPSR(%a6) # save new exc,accrued bits
+
+ mov.b 1+EXC_OPWORD(%a6),%d1 # extract dst mode
+ andi.b &0x38,%d1 # is mode == 0? (Dreg dst)
+ beq.b fout_byte_dn # must save to integer regfile
+
+ mov.l EXC_EA(%a6),%a0 # stacked <ea> is correct
+ bsr.l _dmem_write_byte # write byte
+
+ tst.l %d1 # did dstore fail?
+ bne.l facc_out_b # yes
+
+ rts
+
+fout_byte_dn:
+ mov.b 1+EXC_OPWORD(%a6),%d1 # extract Dn
+ andi.w &0x7,%d1
+ bsr.l store_dreg_b
+ rts
+
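+# note: any denorm rounds to an integer (0, +1, or -1) exactly as the
+# smallest-magnitude sgl value of the same sign does, under every
+# rounding mode, and it sets the same inexact bits; so we substitute
+# that value and fall back into the normal store path.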
+fout_byte_denorm:
+ mov.l SRC_EX(%a0),%d1
+ andi.l &0x80000000,%d1 # keep DENORM sign
+ ori.l &0x00800000,%d1 # make smallest sgl
+ fmov.s %d1,%fp0
+ bra.b fout_byte_norm
+
+#################################################################
+# fmove.w out ###################################################
+#################################################################
+
+# Only "Unimplemented Data Type" exceptions enter here. The operand
+# is either a DENORM or a NORM.
+fout_word:
+ tst.b STAG(%a6) # is operand normalized?
+ bne.b fout_word_denorm # no
+
+ fmovm.x SRC(%a0),&0x80 # load value
+
+fout_word_norm:
+ fmov.l %d0,%fpcr # insert rnd prec:mode
+
+ fmov.w %fp0,%d0 # exec move out w/ correct rnd mode
+
+ fmov.l &0x0,%fpcr # clear FPCR
+ fmov.l %fpsr,%d1 # fetch FPSR
+ or.w %d1,2+USER_FPSR(%a6) # save new exc,accrued bits
+
+ mov.b 1+EXC_OPWORD(%a6),%d1 # extract dst mode
+ andi.b &0x38,%d1 # is mode == 0? (Dreg dst)
+ beq.b fout_word_dn # must save to integer regfile
+
+ mov.l EXC_EA(%a6),%a0 # stacked <ea> is correct
+ bsr.l _dmem_write_word # write word
+
+ tst.l %d1 # did dstore fail?
+ bne.l facc_out_w # yes
+
+ rts
+
+fout_word_dn:
+ mov.b 1+EXC_OPWORD(%a6),%d1 # extract Dn
+ andi.w &0x7,%d1
+ bsr.l store_dreg_w
+ rts
+
+fout_word_denorm:
+ mov.l SRC_EX(%a0),%d1
+ andi.l &0x80000000,%d1 # keep DENORM sign
+ ori.l &0x00800000,%d1 # make smallest sgl
+ fmov.s %d1,%fp0
+ bra.b fout_word_norm
+
+#################################################################
+# fmove.l out ###################################################
+#################################################################
+
+# Only "Unimplemented Data Type" exceptions enter here. The operand
+# is either a DENORM or a NORM.
+fout_long:
+ tst.b STAG(%a6) # is operand normalized?
+ bne.b fout_long_denorm # no
+
+ fmovm.x SRC(%a0),&0x80 # load value
+
+fout_long_norm:
+ fmov.l %d0,%fpcr # insert rnd prec:mode
+
+ fmov.l %fp0,%d0 # exec move out w/ correct rnd mode
+
+ fmov.l &0x0,%fpcr # clear FPCR
+ fmov.l %fpsr,%d1 # fetch FPSR
+ or.w %d1,2+USER_FPSR(%a6) # save new exc,accrued bits
+
+fout_long_write:
+ mov.b 1+EXC_OPWORD(%a6),%d1 # extract dst mode
+ andi.b &0x38,%d1 # is mode == 0? (Dreg dst)
+ beq.b fout_long_dn # must save to integer regfile
+
+ mov.l EXC_EA(%a6),%a0 # stacked <ea> is correct
+ bsr.l _dmem_write_long # write long
+
+ tst.l %d1 # did dstore fail?
+ bne.l facc_out_l # yes
+
+ rts
+
+fout_long_dn:
+ mov.b 1+EXC_OPWORD(%a6),%d1 # extract Dn
+ andi.w &0x7,%d1
+ bsr.l store_dreg_l
+ rts
+
+fout_long_denorm:
+ mov.l SRC_EX(%a0),%d1
+ andi.l &0x80000000,%d1 # keep DENORM sign
+ ori.l &0x00800000,%d1 # make smallest sgl
+ fmov.s %d1,%fp0
+ bra.b fout_long_norm
+
+#################################################################
+# fmove.x out ###################################################
+#################################################################
+
+# Only "Unimplemented Data Type" exceptions enter here. The operand
+# is either a DENORM or a NORM.
+# The DENORM causes an Underflow exception.
+fout_ext:
+
+# we copy the extended precision result to FP_SCR0 so that the reserved
+# 16-bit field gets zeroed. we do this since we promise not to disturb
+# what's at SRC(a0).
+ mov.w SRC_EX(%a0),FP_SCR0_EX(%a6)
+ clr.w 2+FP_SCR0_EX(%a6) # clear reserved field
+ mov.l SRC_HI(%a0),FP_SCR0_HI(%a6)
+ mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
+
+ fmovm.x SRC(%a0),&0x80 # return result
+
+ bsr.l _calc_ea_fout # fix stacked <ea>
+
+ mov.l %a0,%a1 # pass: dst addr
+ lea FP_SCR0(%a6),%a0 # pass: src addr
+ mov.l &0xc,%d0 # pass: opsize is 12 bytes
+
+# we must not yet write the extended precision data to the stack
+# in the pre-decrement case from supervisor mode or else we'll corrupt
+# the stack frame. so, leave it in FP_SRC for now and deal with it later...
+ cmpi.b SPCOND_FLG(%a6),&mda7_flg
+ beq.b fout_ext_a7
+
+ bsr.l _dmem_write # write ext prec number to memory
+
+ tst.l %d1 # did dstore fail?
+ bne.w fout_ext_err # yes
+
+ tst.b STAG(%a6) # is operand normalized?
+ bne.b fout_ext_denorm # no
+ rts
+
+# the number is a DENORM. must set the underflow exception bit
+fout_ext_denorm:
+ bset &unfl_bit,FPSR_EXCEPT(%a6) # set underflow exc bit
+
+ mov.b FPCR_ENABLE(%a6),%d0
+ andi.b &0x0a,%d0 # is UNFL or INEX enabled?
+ bne.b fout_ext_exc # yes
+ rts
+
+# we don't want to do the write if the exception occurred in supervisor mode
+# so _mem_write2() handles this for us.
+fout_ext_a7:
+ bsr.l _mem_write2 # write ext prec number to memory
+
+ tst.l %d1 # did dstore fail?
+ bne.w fout_ext_err # yes
+
+ tst.b STAG(%a6) # is operand normalized?
+ bne.b fout_ext_denorm # no
+ rts
+
+fout_ext_exc:
+ lea FP_SCR0(%a6),%a0
+ bsr.l norm # normalize the mantissa
+ neg.w %d0 # new exp = -(shft amt)
+ andi.w &0x7fff,%d0
+ andi.w &0x8000,FP_SCR0_EX(%a6) # keep only old sign
+ or.w %d0,FP_SCR0_EX(%a6) # insert new exponent
+ fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
+ rts
+
+fout_ext_err:
+ mov.l EXC_A6(%a6),(%a6) # fix stacked a6
+ bra.l facc_out_x
+
+#########################################################################
+# fmove.s out ###########################################################
+#########################################################################
+fout_sgl:
+ andi.b &0x30,%d0 # clear rnd prec
+ ori.b &s_mode*0x10,%d0 # insert sgl prec
+ mov.l %d0,L_SCR3(%a6) # save rnd prec,mode on stack
+
+#
+# operand is a normalized number. first, we check to see if the move out
+# would cause either an underflow or overflow. these cases are handled
+# separately. otherwise, set the FPCR to the proper rounding mode and
+# execute the move.
+#
+ mov.w SRC_EX(%a0),%d0 # extract exponent
+ andi.w &0x7fff,%d0 # strip sign
+
+ cmpi.w %d0,&SGL_HI # will operand overflow?
+ bgt.w fout_sgl_ovfl # yes; go handle OVFL
+ beq.w fout_sgl_may_ovfl # maybe; go handle possible OVFL
+ cmpi.w %d0,&SGL_LO # will operand underflow?
+ blt.w fout_sgl_unfl # yes; go handle underflow
+
+#
+# NORMs(in range) can be stored out by a simple "fmov.s"
+# Unnormalized inputs can come through this point.
+#
+fout_sgl_exg:
+ fmovm.x SRC(%a0),&0x80 # fetch fop from stack
+
+ fmov.l L_SCR3(%a6),%fpcr # set FPCR
+ fmov.l &0x0,%fpsr # clear FPSR
+
+ fmov.s %fp0,%d0 # store does convert and round
+
+ fmov.l &0x0,%fpcr # clear FPCR
+ fmov.l %fpsr,%d1 # save FPSR
+
+ or.w %d1,2+USER_FPSR(%a6) # set possible inex2/ainex
+
+fout_sgl_exg_write:
+ mov.b 1+EXC_OPWORD(%a6),%d1 # extract dst mode
+ andi.b &0x38,%d1 # is mode == 0? (Dreg dst)
+ beq.b fout_sgl_exg_write_dn # must save to integer regfile
+
+ mov.l EXC_EA(%a6),%a0 # stacked <ea> is correct
+ bsr.l _dmem_write_long # write long
+
+ tst.l %d1 # did dstore fail?
+ bne.l facc_out_l # yes
+
+ rts
+
+fout_sgl_exg_write_dn:
+ mov.b 1+EXC_OPWORD(%a6),%d1 # extract Dn
+ andi.w &0x7,%d1
+ bsr.l store_dreg_l
+ rts
+
+#
+# here, we know that the operand would UNFL if moved out to single prec,
+# so, denorm and round and then use generic store single routine to
+# write the value to memory.
+#
+fout_sgl_unfl:
+ bset &unfl_bit,FPSR_EXCEPT(%a6) # set UNFL
+
+ mov.w SRC_EX(%a0),FP_SCR0_EX(%a6)
+ mov.l SRC_HI(%a0),FP_SCR0_HI(%a6)
+ mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
+ mov.l %a0,-(%sp)
+
+ clr.l %d0 # pass: S.F. = 0
+
+ cmpi.b STAG(%a6),&DENORM # fetch src optype tag
+ bne.b fout_sgl_unfl_cont # let DENORMs fall through
+
+ lea FP_SCR0(%a6),%a0
+ bsr.l norm # normalize the DENORM
+
+fout_sgl_unfl_cont:
+ lea FP_SCR0(%a6),%a0 # pass: ptr to operand
+ mov.l L_SCR3(%a6),%d1 # pass: rnd prec,mode
+ bsr.l unf_res # calc default underflow result
+
+ lea FP_SCR0(%a6),%a0 # pass: ptr to fop
+ bsr.l dst_sgl # convert to single prec
+
+ mov.b 1+EXC_OPWORD(%a6),%d1 # extract dst mode
+ andi.b &0x38,%d1 # is mode == 0? (Dreg dst)
+ beq.b fout_sgl_unfl_dn # must save to integer regfile
+
+ mov.l EXC_EA(%a6),%a0 # stacked <ea> is correct
+ bsr.l _dmem_write_long # write long
+
+ tst.l %d1 # did dstore fail?
+ bne.l facc_out_l # yes
+
+ bra.b fout_sgl_unfl_chkexc
+
+fout_sgl_unfl_dn:
+ mov.b 1+EXC_OPWORD(%a6),%d1 # extract Dn
+ andi.w &0x7,%d1
+ bsr.l store_dreg_l
+
+fout_sgl_unfl_chkexc:
+ mov.b FPCR_ENABLE(%a6),%d1
+ andi.b &0x0a,%d1 # is UNFL or INEX enabled?
+ bne.w fout_sd_exc_unfl # yes
+ addq.l &0x4,%sp
+ rts
+
+#
+# it's definitely an overflow so call ovf_res to get the correct answer
+#
+fout_sgl_ovfl:
+ tst.b 3+SRC_HI(%a0) # is result inexact?
+ bne.b fout_sgl_ovfl_inex2
+ tst.l SRC_LO(%a0) # is result inexact?
+ bne.b fout_sgl_ovfl_inex2
+ ori.w &ovfl_inx_mask,2+USER_FPSR(%a6) # set ovfl/aovfl/ainex
+ bra.b fout_sgl_ovfl_cont
+fout_sgl_ovfl_inex2:
+ ori.w &ovfinx_mask,2+USER_FPSR(%a6) # set ovfl/aovfl/ainex/inex2
+
+fout_sgl_ovfl_cont:
+ mov.l %a0,-(%sp)
+
+# call ovf_res() w/ sgl prec and the correct rnd mode to create the default
+# overflow result. DON'T save the returned ccodes from ovf_res() since
+# fmove out doesn't alter them.
+ tst.b SRC_EX(%a0) # is operand negative?
+ smi %d1 # set if so
+ mov.l L_SCR3(%a6),%d0 # pass: sgl prec,rnd mode
+ bsr.l ovf_res # calc OVFL result
+ fmovm.x (%a0),&0x80 # load default overflow result
+ fmov.s %fp0,%d0 # store to single
+
+ mov.b 1+EXC_OPWORD(%a6),%d1 # extract dst mode
+ andi.b &0x38,%d1 # is mode == 0? (Dreg dst)
+ beq.b fout_sgl_ovfl_dn # must save to integer regfile
+
+ mov.l EXC_EA(%a6),%a0 # stacked <ea> is correct
+ bsr.l _dmem_write_long # write long
+
+ tst.l %d1 # did dstore fail?
+ bne.l facc_out_l # yes
+
+ bra.b fout_sgl_ovfl_chkexc
+
+fout_sgl_ovfl_dn:
+ mov.b 1+EXC_OPWORD(%a6),%d1 # extract Dn
+ andi.w &0x7,%d1
+ bsr.l store_dreg_l
+
+fout_sgl_ovfl_chkexc:
+ mov.b FPCR_ENABLE(%a6),%d1
+ andi.b &0x0a,%d1 # is UNFL or INEX enabled?
+ bne.w fout_sd_exc_ovfl # yes
+ addq.l &0x4,%sp
+ rts
+
+#
+# move out MAY overflow:
+# (1) force the exp to 0x3fff
+# (2) do a move w/ appropriate rnd mode
+# (3) if exp still equals zero, then insert original exponent
+# for the correct result.
+# if exp now equals one, then it overflowed so call ovf_res.
+#
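+# e.g., with a biased exponent of exactly SGL_HI, the operand
+# overflows only if rounding carries out of the mantissa. rescaling
+# to unbiased exp 0 (biased 0x3fff) and rounding leaves a magnitude
+# in [1.0,2.0) if no carry occurred, or exactly 2.0 if it did; the
+# fcmp against 2 below tells the two cases apart.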
+fout_sgl_may_ovfl:
+ mov.w SRC_EX(%a0),%d1 # fetch current sign
+ andi.w &0x8000,%d1 # keep it,clear exp
+ ori.w &0x3fff,%d1 # insert exp = 0
+ mov.w %d1,FP_SCR0_EX(%a6) # insert scaled exp
+ mov.l SRC_HI(%a0),FP_SCR0_HI(%a6) # copy hi(man)
+ mov.l SRC_LO(%a0),FP_SCR0_LO(%a6) # copy lo(man)
+
+ fmov.l L_SCR3(%a6),%fpcr # set FPCR
+
+ fmov.x FP_SCR0(%a6),%fp0 # force fop to be rounded
+ fmov.l &0x0,%fpcr # clear FPCR
+
+ fabs.x %fp0 # need absolute value
+ fcmp.b %fp0,&0x2 # did exponent increase?
+ fblt.w fout_sgl_exg # no; go finish NORM
+ bra.w fout_sgl_ovfl # yes; go handle overflow
+
+################
+
+fout_sd_exc_unfl:
+ mov.l (%sp)+,%a0
+
+ mov.w SRC_EX(%a0),FP_SCR0_EX(%a6)
+ mov.l SRC_HI(%a0),FP_SCR0_HI(%a6)
+ mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
+
+ cmpi.b STAG(%a6),&DENORM # was src a DENORM?
+ bne.b fout_sd_exc_cont # no
+
+ lea FP_SCR0(%a6),%a0
+ bsr.l norm
+ neg.l %d0
+ andi.w &0x7fff,%d0
+ bfins %d0,FP_SCR0_EX(%a6){&1:&15}
+ bra.b fout_sd_exc_cont
+
+fout_sd_exc:
+fout_sd_exc_ovfl:
+ mov.l (%sp)+,%a0 # restore a0
+
+ mov.w SRC_EX(%a0),FP_SCR0_EX(%a6)
+ mov.l SRC_HI(%a0),FP_SCR0_HI(%a6)
+ mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
+
+fout_sd_exc_cont:
+ bclr &0x7,FP_SCR0_EX(%a6) # clear sign bit
+ sne.b 2+FP_SCR0_EX(%a6) # set internal sign bit
+ lea FP_SCR0(%a6),%a0 # pass: ptr to DENORM
+
+ mov.b 3+L_SCR3(%a6),%d1 # fetch rnd prec,mode byte
+ lsr.b &0x4,%d1
+ andi.w &0x0c,%d1 # extract rnd prec (bits 7:6)
+ swap %d1 # prec in hi word
+ mov.b 3+L_SCR3(%a6),%d1 # refetch rnd prec,mode byte
+ lsr.b &0x4,%d1
+ andi.w &0x03,%d1 # extract rnd mode (bits 5:4)
+ clr.l %d0 # pass: zero g,r,s
+ bsr.l _round # round the DENORM
+
+ tst.b 2+FP_SCR0_EX(%a6) # is EXOP negative?
+ beq.b fout_sd_exc_done # no
+ bset &0x7,FP_SCR0_EX(%a6) # yes
+
+fout_sd_exc_done:
+ fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
+ rts
+
+#################################################################
+# fmove.d out ###################################################
+#################################################################
+fout_dbl:
+ andi.b &0x30,%d0 # clear rnd prec
+ ori.b &d_mode*0x10,%d0 # insert dbl prec
+ mov.l %d0,L_SCR3(%a6) # save rnd prec,mode on stack
+
+#
+# operand is a normalized number. first, we check to see if the move out
+# would cause either an underflow or overflow. these cases are handled
+# separately. otherwise, set the FPCR to the proper rounding mode and
+# execute the move.
+#
+ mov.w SRC_EX(%a0),%d0 # extract exponent
+ andi.w &0x7fff,%d0 # strip sign
+
+ cmpi.w %d0,&DBL_HI # will operand overflow?
+ bgt.w fout_dbl_ovfl # yes; go handle OVFL
+ beq.w fout_dbl_may_ovfl # maybe; go handle possible OVFL
+ cmpi.w %d0,&DBL_LO # will operand underflow?
+ blt.w fout_dbl_unfl # yes; go handle underflow
+
+#
+# NORMs(in range) can be stored out by a simple "fmov.d"
+# Unnormalized inputs can come through this point.
+#
+fout_dbl_exg:
+ fmovm.x SRC(%a0),&0x80 # fetch fop from stack
+
+ fmov.l L_SCR3(%a6),%fpcr # set FPCR
+ fmov.l &0x0,%fpsr # clear FPSR
+
+ fmov.d %fp0,L_SCR1(%a6) # store does convert and round
+
+ fmov.l &0x0,%fpcr # clear FPCR
+ fmov.l %fpsr,%d0 # save FPSR
+
+ or.w %d0,2+USER_FPSR(%a6) # set possible inex2/ainex
+
+ mov.l EXC_EA(%a6),%a1 # pass: dst addr
+ lea L_SCR1(%a6),%a0 # pass: src addr
+ movq.l &0x8,%d0 # pass: opsize is 8 bytes
+ bsr.l _dmem_write # store dbl fop to memory
+
+ tst.l %d1 # did dstore fail?
+ bne.l facc_out_d # yes
+
+ rts # no; so we're finished
+
+#
+# here, we know that the operand would UNFL if moved out to double prec,
+# so, denorm and round and then use generic store double routine to
+# write the value to memory.
+#
+fout_dbl_unfl:
+ bset &unfl_bit,FPSR_EXCEPT(%a6) # set UNFL
+
+ mov.w SRC_EX(%a0),FP_SCR0_EX(%a6)
+ mov.l SRC_HI(%a0),FP_SCR0_HI(%a6)
+ mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
+ mov.l %a0,-(%sp)
+
+ clr.l %d0 # pass: S.F. = 0
+
+ cmpi.b STAG(%a6),&DENORM # fetch src optype tag
+ bne.b fout_dbl_unfl_cont # let DENORMs fall through
+
+ lea FP_SCR0(%a6),%a0
+ bsr.l norm # normalize the DENORM
+
+fout_dbl_unfl_cont:
+ lea FP_SCR0(%a6),%a0 # pass: ptr to operand
+ mov.l L_SCR3(%a6),%d1 # pass: rnd prec,mode
+ bsr.l unf_res # calc default underflow result
+
+ lea FP_SCR0(%a6),%a0 # pass: ptr to fop
+ bsr.l dst_dbl # convert to double prec
+ mov.l %d0,L_SCR1(%a6)
+ mov.l %d1,L_SCR2(%a6)
+
+ mov.l EXC_EA(%a6),%a1 # pass: dst addr
+ lea L_SCR1(%a6),%a0 # pass: src addr
+ movq.l &0x8,%d0 # pass: opsize is 8 bytes
+ bsr.l _dmem_write # store dbl fop to memory
+
+ tst.l %d1 # did dstore fail?
+ bne.l facc_out_d # yes
+
+ mov.b FPCR_ENABLE(%a6),%d1
+ andi.b &0x0a,%d1 # is UNFL or INEX enabled?
+ bne.w fout_sd_exc_unfl # yes
+ addq.l &0x4,%sp
+ rts
+
+#
+# it's definitely an overflow so call ovf_res to get the correct answer
+#
+fout_dbl_ovfl:
+ mov.w 2+SRC_LO(%a0),%d0
+ andi.w &0x7ff,%d0
+ bne.b fout_dbl_ovfl_inex2
+
+ ori.w &ovfl_inx_mask,2+USER_FPSR(%a6) # set ovfl/aovfl/ainex
+ bra.b fout_dbl_ovfl_cont
+fout_dbl_ovfl_inex2:
+ ori.w &ovfinx_mask,2+USER_FPSR(%a6) # set ovfl/aovfl/ainex/inex2
+
+fout_dbl_ovfl_cont:
+ mov.l %a0,-(%sp)
+
+# call ovf_res() w/ dbl prec and the correct rnd mode to create the default
+# overflow result. DON'T save the returned ccodes from ovf_res() since
+# fmove out doesn't alter them.
+ tst.b SRC_EX(%a0) # is operand negative?
+ smi %d1 # set if so
+ mov.l L_SCR3(%a6),%d0 # pass: dbl prec,rnd mode
+ bsr.l ovf_res # calc OVFL result
+ fmovm.x (%a0),&0x80 # load default overflow result
+ fmov.d %fp0,L_SCR1(%a6) # store to double
+
+ mov.l EXC_EA(%a6),%a1 # pass: dst addr
+ lea L_SCR1(%a6),%a0 # pass: src addr
+ movq.l &0x8,%d0 # pass: opsize is 8 bytes
+ bsr.l _dmem_write # store dbl fop to memory
+
+ tst.l %d1 # did dstore fail?
+ bne.l facc_out_d # yes
+
+ mov.b FPCR_ENABLE(%a6),%d1
+ andi.b &0x0a,%d1 # is UNFL or INEX enabled?
+ bne.w fout_sd_exc_ovfl # yes
+ addq.l &0x4,%sp
+ rts
+
+#
+# move out MAY overflow:
+# (1) force the exp to 0x3fff
+# (2) do a move w/ appropriate rnd mode
+# (3) if exp still equals zero, then insert original exponent
+# for the correct result.
+# if exp now equals one, then it overflowed so call ovf_res.
+#
+fout_dbl_may_ovfl:
+ mov.w SRC_EX(%a0),%d1 # fetch current sign
+ andi.w &0x8000,%d1 # keep it,clear exp
+ ori.w &0x3fff,%d1 # insert exp = 0
+ mov.w %d1,FP_SCR0_EX(%a6) # insert scaled exp
+ mov.l SRC_HI(%a0),FP_SCR0_HI(%a6) # copy hi(man)
+ mov.l SRC_LO(%a0),FP_SCR0_LO(%a6) # copy lo(man)
+
+ fmov.l L_SCR3(%a6),%fpcr # set FPCR
+
+ fmov.x FP_SCR0(%a6),%fp0 # force fop to be rounded
+ fmov.l &0x0,%fpcr # clear FPCR
+
+ fabs.x %fp0 # need absolute value
+ fcmp.b %fp0,&0x2 # did exponent increase?
+ fblt.w fout_dbl_exg # no; go finish NORM
+ bra.w fout_dbl_ovfl # yes; go handle overflow
+
+#########################################################################
+# XDEF **************************************************************** #
+# dst_dbl(): create double precision value from extended prec. #
+# #
+# XREF **************************************************************** #
+# None #
+# #
+# INPUT *************************************************************** #
+# a0 = pointer to source operand in extended precision #
+# #
+# OUTPUT ************************************************************** #
+# d0 = hi(double precision result) #
+# d1 = lo(double precision result) #
+# #
+# ALGORITHM *********************************************************** #
+# #
+# Changes extended precision to double precision. #
+# Note: no attempt is made to round the extended value to double. #
+# dbl_sign = ext_sign #
+# dbl_exp = ext_exp - $3fff(ext bias) + $3ff(dbl bias) #
+# get rid of ext integer bit #
+# dbl_mant = ext_mant{62:12} #
+# #
+#              ---------------  ---------------  ---------------      #
+#  extended -> |s|    exp    |  |1|  ms mant  |  |   ls mant   |      #
+#              ---------------  ---------------  ---------------      #
+#              95           64  63 62       32   31    11      0      #
+#                                     |                  |            #
+#                                     |                  |            #
+#                                     |                  |            #
+#                                     v                  v            #
+#                               ---------------  ---------------      #
+#  double   ->                  |s|exp|  mant |  |     mant    |      #
+#                               ---------------  ---------------      #
+#                               63   51     32   31            0      #
+# #
+#########################################################################
+
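+# worked example of the rebias: ext exp $4000 (unbiased +1) becomes
+# $4000 - $3fff + $3ff = $400, i.e. dbl biased +1. the swap + lsl.l
+# of 4 below then slide the 11-bit exponent into dbl bits 30:20,
+# leaving bit 31 free for the sign.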
+dst_dbl:
+ clr.l %d0 # clear d0
+ mov.w FTEMP_EX(%a0),%d0 # get exponent
+ subi.w &EXT_BIAS,%d0 # subtract extended precision bias
+ addi.w &DBL_BIAS,%d0 # add double precision bias
+ tst.b FTEMP_HI(%a0) # is number a denorm?
+ bmi.b dst_get_dupper # no
+ subq.w &0x1,%d0 # yes; denorm bias = DBL_BIAS - 1
+dst_get_dupper:
+ swap %d0 # d0 now in upper word
+ lsl.l &0x4,%d0 # d0 in proper place for dbl prec exp
+ tst.b FTEMP_EX(%a0) # test sign
+ bpl.b dst_get_dman # if positive, go process mantissa
+ bset &0x1f,%d0 # if negative, set sign
+dst_get_dman:
+ mov.l FTEMP_HI(%a0),%d1 # get ms mantissa
+ bfextu %d1{&1:&20},%d1 # get upper 20 bits of ms
+ or.l %d1,%d0 # put these bits in ms word of double
+ mov.l %d0,L_SCR1(%a6) # put the new exp back on the stack
+ mov.l FTEMP_HI(%a0),%d1 # get ms mantissa
+ mov.l &21,%d0 # load shift count
+ lsl.l %d0,%d1 # put lower 11 bits in upper bits
+ mov.l %d1,L_SCR2(%a6) # build lower lword in memory
+ mov.l FTEMP_LO(%a0),%d1 # get ls mantissa
+ bfextu %d1{&0:&21},%d0 # get ls 21 bits of double
+ mov.l L_SCR2(%a6),%d1
+ or.l %d0,%d1 # put them in double result
+ mov.l L_SCR1(%a6),%d0
+ rts
+
+#########################################################################
+# XDEF **************************************************************** #
+# dst_sgl(): create single precision value from extended prec #
+# #
+# XREF **************************************************************** #
+# None #
+# INPUT *************************************************************** #
+# a0 = pointer to source operand in extended precision #
+# #
+# OUTPUT ************************************************************** #
+# d0 = single precision result #
+# #
+# ALGORITHM *********************************************************** #
+# #
+# Changes extended precision to single precision. #
+# sgl_sign = ext_sign #
+# sgl_exp = ext_exp - $3fff(ext bias) + $7f(sgl bias) #
+# get rid of ext integer bit #
+# sgl_mant = ext_mant{62:40} #
+# #
+#              ---------------  ---------------  ---------------      #
+#  extended -> |s|    exp    |  |1|  ms mant  |  |   ls mant   |      #
+#              ---------------  ---------------  ---------------      #
+#              95           64  63 62    40 32   31   12       0      #
+#                                   |      |                          #
+#                                   |      |                          #
+#                                   |      |                          #
+#                                   v      v                          #
+#                               ---------------                       #
+#  single   ->                  |s|exp|  mant |                       #
+#                               ---------------                       #
+#                               31   22      0                        #
+# #
+#########################################################################
+
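+# as in dst_dbl above, swap + lsl.l of 7 slide the 8-bit sgl exponent
+# into bits 30:23; e.g. ext exp $4000 (unbiased +1) rebiases to
+# $4000 - $3fff + $7f = $80, sgl biased +1.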
+dst_sgl:
+ clr.l %d0
+ mov.w FTEMP_EX(%a0),%d0 # get exponent
+ subi.w &EXT_BIAS,%d0 # subtract extended precision bias
+ addi.w &SGL_BIAS,%d0 # add single precision bias
+ tst.b FTEMP_HI(%a0) # is number a denorm?
+ bmi.b dst_get_supper # no
+ subq.w &0x1,%d0 # yes; denorm bias = SGL_BIAS - 1
+dst_get_supper:
+ swap %d0 # put exp in upper word of d0
+ lsl.l &0x7,%d0 # shift it into single exp bits
+ tst.b FTEMP_EX(%a0) # test sign
+ bpl.b dst_get_sman # if positive, continue
+ bset &0x1f,%d0 # if negative, put in sign first
+dst_get_sman:
+ mov.l FTEMP_HI(%a0),%d1 # get ms mantissa
+ andi.l &0x7fffff00,%d1 # get upper 23 bits of ms
+ lsr.l &0x8,%d1 # and put them flush right
+ or.l %d1,%d0 # put these bits in ms word of single
+ rts
+
+##############################################################################
+fout_pack:
+ bsr.l _calc_ea_fout # fetch the <ea>
+ mov.l %a0,-(%sp)
+
+ mov.b STAG(%a6),%d0 # fetch input type
+ bne.w fout_pack_not_norm # input is not NORM
+
+fout_pack_norm:
+ btst &0x4,EXC_CMDREG(%a6) # static or dynamic?
+ beq.b fout_pack_s # static
+
+fout_pack_d:
+ mov.b 1+EXC_CMDREG(%a6),%d1 # fetch dynamic reg
+ lsr.b &0x4,%d1
+ andi.w &0x7,%d1
+
+ bsr.l fetch_dreg # fetch Dn w/ k-factor
+
+ bra.b fout_pack_type
+fout_pack_s:
+ mov.b 1+EXC_CMDREG(%a6),%d0 # fetch static field
+
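+# the k-factor is a signed 7-bit value (as on the 881/882): k > 0
+# requests k significant digits in the packed result, while k <= 0
+# requests |k| digits to the right of the decimal point.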
+fout_pack_type:
+ bfexts %d0{&25:&7},%d0 # extract k-factor
+ mov.l %d0,-(%sp)
+
+ lea FP_SRC(%a6),%a0 # pass: ptr to input
+
+# bindec is currently scrambling FP_SRC for denorm inputs.
+# we'll have to change this, but for now, tough luck!!!
+ bsr.l bindec # convert xprec to packed
+
+# andi.l &0xcfff000f,FP_SCR0(%a6) # clear unused fields
+ andi.l &0xcffff00f,FP_SCR0(%a6) # clear unused fields
+
+ mov.l (%sp)+,%d0
+
+ tst.b 3+FP_SCR0_EX(%a6)
+ bne.b fout_pack_set
+ tst.l FP_SCR0_HI(%a6)
+ bne.b fout_pack_set
+ tst.l FP_SCR0_LO(%a6)
+ bne.b fout_pack_set
+
+# add the extra condition that only if the k-factor was zero, too, should
+# we zero the exponent
+ tst.l %d0
+ bne.b fout_pack_set
+# "mantissa" is all zero which means that the answer is zero. but, the '040
+# algorithm allows the exponent to be non-zero. the 881/2 do not. therefore,
+# if the mantissa is zero, I will zero the exponent, too.
+# the question now is whether the exponent's sign bit is allowed to be non-zero
+# for a zero, also...
+ andi.w &0xf000,FP_SCR0(%a6)
+
+fout_pack_set:
+
+ lea FP_SCR0(%a6),%a0 # pass: src addr
+
+fout_pack_write:
+ mov.l (%sp)+,%a1 # pass: dst addr
+ mov.l &0xc,%d0 # pass: opsize is 12 bytes
+
+ cmpi.b SPCOND_FLG(%a6),&mda7_flg
+ beq.b fout_pack_a7
+
+ bsr.l _dmem_write # write ext prec number to memory
+
+ tst.l %d1 # did dstore fail?
+ bne.w fout_ext_err # yes
+
+ rts
+
+# we don't want to do the write if the exception occurred in supervisor mode
+# so _mem_write2() handles this for us.
+fout_pack_a7:
+ bsr.l _mem_write2 # write ext prec number to memory
+
+ tst.l %d1 # did dstore fail?
+ bne.w fout_ext_err # yes
+
+ rts
+
+fout_pack_not_norm:
+ cmpi.b %d0,&DENORM # is it a DENORM?
+ beq.w fout_pack_norm # yes
+ lea FP_SRC(%a6),%a0
+ clr.w 2+FP_SRC_EX(%a6)
+ cmpi.b %d0,&SNAN # is it an SNAN?
+ beq.b fout_pack_snan # yes
+ bra.b fout_pack_write # no
+
+fout_pack_snan:
+ ori.w &snaniop2_mask,FPSR_EXCEPT(%a6) # set SNAN/AIOP
+ bset &0x6,FP_SRC_HI(%a6) # set snan bit
+ bra.b fout_pack_write
+
+#########################################################################
+# XDEF **************************************************************** #
+# fetch_dreg(): fetch register according to index in d1 #
+# #
+# XREF **************************************************************** #
+# None #
+# #
+# INPUT *************************************************************** #
+# d1 = index of register to fetch from #
+# #
+# OUTPUT ************************************************************** #
+# d0 = value of register fetched #
+# #
+# ALGORITHM *********************************************************** #
+# According to the index value in d1 which can range from zero #
+# to fifteen, load the corresponding register file value (where #
+# address register indexes start at 8). D0/D1/A0/A1/A6/A7 are on the #
+# stack. The rest should still be in their original places. #
+# #
+#########################################################################
+
+# this routine leaves d1 intact for subsequent store_dreg calls.
+ global fetch_dreg
+fetch_dreg:
+ mov.w (tbl_fdreg.b,%pc,%d1.w*2),%d0
+ jmp (tbl_fdreg.b,%pc,%d0.w*1)
+
+tbl_fdreg:
+ short fdreg0 - tbl_fdreg
+ short fdreg1 - tbl_fdreg
+ short fdreg2 - tbl_fdreg
+ short fdreg3 - tbl_fdreg
+ short fdreg4 - tbl_fdreg
+ short fdreg5 - tbl_fdreg
+ short fdreg6 - tbl_fdreg
+ short fdreg7 - tbl_fdreg
+ short fdreg8 - tbl_fdreg
+ short fdreg9 - tbl_fdreg
+ short fdrega - tbl_fdreg
+ short fdregb - tbl_fdreg
+ short fdregc - tbl_fdreg
+ short fdregd - tbl_fdreg
+ short fdrege - tbl_fdreg
+ short fdregf - tbl_fdreg
+
+fdreg0:
+ mov.l EXC_DREGS+0x0(%a6),%d0
+ rts
+fdreg1:
+ mov.l EXC_DREGS+0x4(%a6),%d0
+ rts
+fdreg2:
+ mov.l %d2,%d0
+ rts
+fdreg3:
+ mov.l %d3,%d0
+ rts
+fdreg4:
+ mov.l %d4,%d0
+ rts
+fdreg5:
+ mov.l %d5,%d0
+ rts
+fdreg6:
+ mov.l %d6,%d0
+ rts
+fdreg7:
+ mov.l %d7,%d0
+ rts
+fdreg8:
+ mov.l EXC_DREGS+0x8(%a6),%d0
+ rts
+fdreg9:
+ mov.l EXC_DREGS+0xc(%a6),%d0
+ rts
+fdrega:
+ mov.l %a2,%d0
+ rts
+fdregb:
+ mov.l %a3,%d0
+ rts
+fdregc:
+ mov.l %a4,%d0
+ rts
+fdregd:
+ mov.l %a5,%d0
+ rts
+fdrege:
+ mov.l (%a6),%d0
+ rts
+fdregf:
+ mov.l EXC_A7(%a6),%d0
+ rts
+
+#########################################################################
+# XDEF **************************************************************** #
+# store_dreg_l(): store longword to data register specified by d1 #
+# #
+# XREF **************************************************************** #
+# None #
+# #
+# INPUT *************************************************************** #
+# d0 = longword value to store #
+# d1 = index of data register to store to #
+# #
+# OUTPUT ************************************************************** #
+# (data register is updated) #
+# #
+# ALGORITHM *********************************************************** #
+# According to the index value in d1, store the longword value #
+# in d0 to the corresponding data register. D0/D1 are on the stack #
+# while the rest are in their initial places. #
+# #
+#########################################################################
+
+ global store_dreg_l
+store_dreg_l:
+ mov.w (tbl_sdregl.b,%pc,%d1.w*2),%d1
+ jmp (tbl_sdregl.b,%pc,%d1.w*1)
+
+tbl_sdregl:
+ short sdregl0 - tbl_sdregl
+ short sdregl1 - tbl_sdregl
+ short sdregl2 - tbl_sdregl
+ short sdregl3 - tbl_sdregl
+ short sdregl4 - tbl_sdregl
+ short sdregl5 - tbl_sdregl
+ short sdregl6 - tbl_sdregl
+ short sdregl7 - tbl_sdregl
+
+sdregl0:
+ mov.l %d0,EXC_DREGS+0x0(%a6)
+ rts
+sdregl1:
+ mov.l %d0,EXC_DREGS+0x4(%a6)
+ rts
+sdregl2:
+ mov.l %d0,%d2
+ rts
+sdregl3:
+ mov.l %d0,%d3
+ rts
+sdregl4:
+ mov.l %d0,%d4
+ rts
+sdregl5:
+ mov.l %d0,%d5
+ rts
+sdregl6:
+ mov.l %d0,%d6
+ rts
+sdregl7:
+ mov.l %d0,%d7
+ rts
+
+#########################################################################
+# XDEF **************************************************************** #
+# store_dreg_w(): store word to data register specified by d1 #
+# #
+# XREF **************************************************************** #
+# None #
+# #
+# INPUT *************************************************************** #
+# d0 = word value to store #
+# d1 = index of data register to store to #
+# #
+# OUTPUT ************************************************************** #
+# (data register is updated) #
+# #
+# ALGORITHM *********************************************************** #
+# According to the index value in d1, store the word value #
+# in d0 to the corresponding data register. D0/D1 are on the stack #
+# while the rest are in their initial places. #
+# #
+#########################################################################
+
+ global store_dreg_w
+store_dreg_w:
+ mov.w (tbl_sdregw.b,%pc,%d1.w*2),%d1
+ jmp (tbl_sdregw.b,%pc,%d1.w*1)
+
+tbl_sdregw:
+ short sdregw0 - tbl_sdregw
+ short sdregw1 - tbl_sdregw
+ short sdregw2 - tbl_sdregw
+ short sdregw3 - tbl_sdregw
+ short sdregw4 - tbl_sdregw
+ short sdregw5 - tbl_sdregw
+ short sdregw6 - tbl_sdregw
+ short sdregw7 - tbl_sdregw
+
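+# note: d0/d1 live in the exception frame, so a word store must hit
+# the low-order word of the saved 32-bit image, which on the
+# big-endian 68k is at offset +2 (offset +3 for the byte stores in
+# store_dreg_b below). for d2-d7, mov.w/mov.b already write only the
+# low-order part of the live register.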
+sdregw0:
+ mov.w %d0,2+EXC_DREGS+0x0(%a6)
+ rts
+sdregw1:
+ mov.w %d0,2+EXC_DREGS+0x4(%a6)
+ rts
+sdregw2:
+ mov.w %d0,%d2
+ rts
+sdregw3:
+ mov.w %d0,%d3
+ rts
+sdregw4:
+ mov.w %d0,%d4
+ rts
+sdregw5:
+ mov.w %d0,%d5
+ rts
+sdregw6:
+ mov.w %d0,%d6
+ rts
+sdregw7:
+ mov.w %d0,%d7
+ rts
+
+#########################################################################
+# XDEF **************************************************************** #
+# store_dreg_b(): store byte to data register specified by d1 #
+# #
+# XREF **************************************************************** #
+# None #
+# #
+# INPUT *************************************************************** #
+# d0 = byte value to store #
+# d1 = index of data register to store to #
+# #
+# OUTPUT ************************************************************** #
+# (data register is updated) #
+# #
+# ALGORITHM *********************************************************** #
+# According to the index value in d1, store the byte value #
+# in d0 to the corresponding data register. D0/D1 are on the stack #
+# while the rest are in their initial places. #
+# #
+#########################################################################
+
+ global store_dreg_b
+store_dreg_b:
+ mov.w (tbl_sdregb.b,%pc,%d1.w*2),%d1
+ jmp (tbl_sdregb.b,%pc,%d1.w*1)
+
+tbl_sdregb:
+ short sdregb0 - tbl_sdregb
+ short sdregb1 - tbl_sdregb
+ short sdregb2 - tbl_sdregb
+ short sdregb3 - tbl_sdregb
+ short sdregb4 - tbl_sdregb
+ short sdregb5 - tbl_sdregb
+ short sdregb6 - tbl_sdregb
+ short sdregb7 - tbl_sdregb
+
+sdregb0:
+ mov.b %d0,3+EXC_DREGS+0x0(%a6)
+ rts
+sdregb1:
+ mov.b %d0,3+EXC_DREGS+0x4(%a6)
+ rts
+sdregb2:
+ mov.b %d0,%d2
+ rts
+sdregb3:
+ mov.b %d0,%d3
+ rts
+sdregb4:
+ mov.b %d0,%d4
+ rts
+sdregb5:
+ mov.b %d0,%d5
+ rts
+sdregb6:
+ mov.b %d0,%d6
+ rts
+sdregb7:
+ mov.b %d0,%d7
+ rts
+
+#########################################################################
+# XDEF **************************************************************** #
+# inc_areg(): increment an address register by the value in d0 #
+# #
+# XREF **************************************************************** #
+# None #
+# #
+# INPUT *************************************************************** #
+# d0 = amount to increment by #
+# d1 = index of address register to increment #
+# #
+# OUTPUT ************************************************************** #
+# (address register is updated) #
+# #
+# ALGORITHM *********************************************************** #
+# Typically used for an instruction w/ a post-increment <ea>, #
+# this routine adds the increment value in d0 to the address register #
+# specified by d1. A0/A1/A6/A7 reside on the stack. The rest reside #
+# in their original places. #
+# For a7, if the increment amount is one, then we have to #
+# increment by two. For any a7 update, set the mia7_flag so that if #
+# an access error exception occurs later in emulation, this address #
+# register update can be undone. #
+# #
+#########################################################################
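+#
+# example (illustrative values): a byte-sized post-increment through
+# (a7)+ arrives here with d0 = 1 and d1 = 7; iareg7 below records
+# mia7_flg in SPCOND_FLG(%a6) and bumps the saved a7 by 2 instead of 1
+# so the supervisor stack pointer stays word aligned.
+#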
+
+ global inc_areg
+inc_areg:
+ mov.w (tbl_iareg.b,%pc,%d1.w*2),%d1
+ jmp (tbl_iareg.b,%pc,%d1.w*1)
+
+tbl_iareg:
+ short iareg0 - tbl_iareg
+ short iareg1 - tbl_iareg
+ short iareg2 - tbl_iareg
+ short iareg3 - tbl_iareg
+ short iareg4 - tbl_iareg
+ short iareg5 - tbl_iareg
+ short iareg6 - tbl_iareg
+ short iareg7 - tbl_iareg
+
+iareg0: add.l %d0,EXC_DREGS+0x8(%a6)
+ rts
+iareg1: add.l %d0,EXC_DREGS+0xc(%a6)
+ rts
+iareg2: add.l %d0,%a2
+ rts
+iareg3: add.l %d0,%a3
+ rts
+iareg4: add.l %d0,%a4
+ rts
+iareg5: add.l %d0,%a5
+ rts
+iareg6: add.l %d0,(%a6)
+ rts
+iareg7: mov.b &mia7_flg,SPCOND_FLG(%a6)
+ cmpi.b %d0,&0x1
+ beq.b iareg7b
+ add.l %d0,EXC_A7(%a6)
+ rts
+iareg7b:
+ addq.l &0x2,EXC_A7(%a6)
+ rts
+
+#########################################################################
+# XDEF **************************************************************** #
+# dec_areg(): decrement an address register by the value in d0 #
+# #
+# XREF **************************************************************** #
+# None #
+# #
+# INPUT *************************************************************** #
+# d0 = amount to decrement by #
+# d1 = index of address register to decrement #
+# #
+# OUTPUT ************************************************************** #
+# (address register is updated) #
+# #
+# ALGORITHM *********************************************************** #
+# Typically used for an instruction w/ a pre-decrement <ea>, #
+# this routine subtracts the decrement value in d0 from the register	#
+# specified by d1. A0/A1/A6/A7 reside on the stack. The rest reside #
+# in their original places. #
+# For a7, if the decrement amount is one, then we have to #
+# decrement by two. For any a7 update, set the mda7_flag so that if #
+# an access error exception occurs later in emulation, this address #
+# register update can be undone. #
+# #
+#########################################################################
+
+ global dec_areg
+dec_areg:
+ mov.w (tbl_dareg.b,%pc,%d1.w*2),%d1
+ jmp (tbl_dareg.b,%pc,%d1.w*1)
+
+tbl_dareg:
+ short dareg0 - tbl_dareg
+ short dareg1 - tbl_dareg
+ short dareg2 - tbl_dareg
+ short dareg3 - tbl_dareg
+ short dareg4 - tbl_dareg
+ short dareg5 - tbl_dareg
+ short dareg6 - tbl_dareg
+ short dareg7 - tbl_dareg
+
+dareg0: sub.l %d0,EXC_DREGS+0x8(%a6)
+ rts
+dareg1: sub.l %d0,EXC_DREGS+0xc(%a6)
+ rts
+dareg2: sub.l %d0,%a2
+ rts
+dareg3: sub.l %d0,%a3
+ rts
+dareg4: sub.l %d0,%a4
+ rts
+dareg5: sub.l %d0,%a5
+ rts
+dareg6: sub.l %d0,(%a6)
+ rts
+dareg7: mov.b &mda7_flg,SPCOND_FLG(%a6)
+ cmpi.b %d0,&0x1
+ beq.b dareg7b
+ sub.l %d0,EXC_A7(%a6)
+ rts
+dareg7b:
+ subq.l &0x2,EXC_A7(%a6)
+ rts
+
+##############################################################################
+
+#########################################################################
+# XDEF **************************************************************** #
+# load_fpn1(): load FP register value into FP_SRC(a6). #
+# #
+# XREF **************************************************************** #
+# None #
+# #
+# INPUT *************************************************************** #
+# d0 = index of FP register to load #
+# #
+# OUTPUT ************************************************************** #
+# FP_SRC(a6) = value loaded from FP register file #
+# #
+# ALGORITHM *********************************************************** #
+# Using the index in d0, load FP_SRC(a6) with a number from the #
+# FP register file. #
+# #
+#########################################################################
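+#
+# for fp2-fp7, the fmovm.x static register masks below select one
+# register apiece: bit 7 corresponds to fp0 down through bit 0 for
+# fp7, so &0x20 moves fp2, &0x10 moves fp3, ..., &0x01 moves fp7.
+# fp0/fp1 live in the exception frame instead, so those two cases are
+# copied longword-by-longword from EXC_FP0/EXC_FP1.
+#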
+
+ global load_fpn1
+load_fpn1:
+ mov.w (tbl_load_fpn1.b,%pc,%d0.w*2), %d0
+ jmp (tbl_load_fpn1.b,%pc,%d0.w*1)
+
+tbl_load_fpn1:
+ short load_fpn1_0 - tbl_load_fpn1
+ short load_fpn1_1 - tbl_load_fpn1
+ short load_fpn1_2 - tbl_load_fpn1
+ short load_fpn1_3 - tbl_load_fpn1
+ short load_fpn1_4 - tbl_load_fpn1
+ short load_fpn1_5 - tbl_load_fpn1
+ short load_fpn1_6 - tbl_load_fpn1
+ short load_fpn1_7 - tbl_load_fpn1
+
+load_fpn1_0:
+ mov.l 0+EXC_FP0(%a6), 0+FP_SRC(%a6)
+ mov.l 4+EXC_FP0(%a6), 4+FP_SRC(%a6)
+ mov.l 8+EXC_FP0(%a6), 8+FP_SRC(%a6)
+ lea FP_SRC(%a6), %a0
+ rts
+load_fpn1_1:
+ mov.l 0+EXC_FP1(%a6), 0+FP_SRC(%a6)
+ mov.l 4+EXC_FP1(%a6), 4+FP_SRC(%a6)
+ mov.l 8+EXC_FP1(%a6), 8+FP_SRC(%a6)
+ lea FP_SRC(%a6), %a0
+ rts
+load_fpn1_2:
+ fmovm.x &0x20, FP_SRC(%a6)
+ lea FP_SRC(%a6), %a0
+ rts
+load_fpn1_3:
+ fmovm.x &0x10, FP_SRC(%a6)
+ lea FP_SRC(%a6), %a0
+ rts
+load_fpn1_4:
+ fmovm.x &0x08, FP_SRC(%a6)
+ lea FP_SRC(%a6), %a0
+ rts
+load_fpn1_5:
+ fmovm.x &0x04, FP_SRC(%a6)
+ lea FP_SRC(%a6), %a0
+ rts
+load_fpn1_6:
+ fmovm.x &0x02, FP_SRC(%a6)
+ lea FP_SRC(%a6), %a0
+ rts
+load_fpn1_7:
+ fmovm.x &0x01, FP_SRC(%a6)
+ lea FP_SRC(%a6), %a0
+ rts
+
+#############################################################################
+
+#########################################################################
+# XDEF **************************************************************** #
+# load_fpn2(): load FP register value into FP_DST(a6). #
+# #
+# XREF **************************************************************** #
+# None #
+# #
+# INPUT *************************************************************** #
+# d0 = index of FP register to load #
+# #
+# OUTPUT ************************************************************** #
+# FP_DST(a6) = value loaded from FP register file #
+# #
+# ALGORITHM *********************************************************** #
+# Using the index in d0, load FP_DST(a6) with a number from the #
+# FP register file. #
+# #
+#########################################################################
+
+ global load_fpn2
+load_fpn2:
+ mov.w (tbl_load_fpn2.b,%pc,%d0.w*2), %d0
+ jmp (tbl_load_fpn2.b,%pc,%d0.w*1)
+
+tbl_load_fpn2:
+ short load_fpn2_0 - tbl_load_fpn2
+ short load_fpn2_1 - tbl_load_fpn2
+ short load_fpn2_2 - tbl_load_fpn2
+ short load_fpn2_3 - tbl_load_fpn2
+ short load_fpn2_4 - tbl_load_fpn2
+ short load_fpn2_5 - tbl_load_fpn2
+ short load_fpn2_6 - tbl_load_fpn2
+ short load_fpn2_7 - tbl_load_fpn2
+
+load_fpn2_0:
+ mov.l 0+EXC_FP0(%a6), 0+FP_DST(%a6)
+ mov.l 4+EXC_FP0(%a6), 4+FP_DST(%a6)
+ mov.l 8+EXC_FP0(%a6), 8+FP_DST(%a6)
+ lea FP_DST(%a6), %a0
+ rts
+load_fpn2_1:
+ mov.l 0+EXC_FP1(%a6), 0+FP_DST(%a6)
+ mov.l 4+EXC_FP1(%a6), 4+FP_DST(%a6)
+ mov.l 8+EXC_FP1(%a6), 8+FP_DST(%a6)
+ lea FP_DST(%a6), %a0
+ rts
+load_fpn2_2:
+ fmovm.x &0x20, FP_DST(%a6)
+ lea FP_DST(%a6), %a0
+ rts
+load_fpn2_3:
+ fmovm.x &0x10, FP_DST(%a6)
+ lea FP_DST(%a6), %a0
+ rts
+load_fpn2_4:
+ fmovm.x &0x08, FP_DST(%a6)
+ lea FP_DST(%a6), %a0
+ rts
+load_fpn2_5:
+ fmovm.x &0x04, FP_DST(%a6)
+ lea FP_DST(%a6), %a0
+ rts
+load_fpn2_6:
+ fmovm.x &0x02, FP_DST(%a6)
+ lea FP_DST(%a6), %a0
+ rts
+load_fpn2_7:
+ fmovm.x &0x01, FP_DST(%a6)
+ lea FP_DST(%a6), %a0
+ rts
+
+#############################################################################
+
+#########################################################################
+# XDEF **************************************************************** #
+# store_fpreg(): store an fp value to the fpreg designated d0. #
+# #
+# XREF **************************************************************** #
+# None #
+# #
+# INPUT *************************************************************** #
+# fp0 = extended precision value to store #
+# d0 = index of floating-point register #
+# #
+# OUTPUT ************************************************************** #
+# None #
+# #
+# ALGORITHM *********************************************************** #
+# Store the value in fp0 to the FP register designated by the #
+# value in d0. The FP number can be DENORM or SNAN so we have to be #
+# careful that we don't take an exception here. #
+# #
+#########################################################################
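+#
+# the value to store arrives in fp0. for destinations fp2-fp7 it is
+# pushed onto the stack and popped directly into the target register
+# with fmovm.x, which moves extended-precision data unconverted; a
+# DENORM or SNAN therefore passes through without raising an
+# exception, which is why fmovm rather than an arithmetic fmove is
+# used here (per the note above, the value may be a DENORM or SNAN).
+#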
+
+ global store_fpreg
+store_fpreg:
+ mov.w (tbl_store_fpreg.b,%pc,%d0.w*2), %d0
+ jmp (tbl_store_fpreg.b,%pc,%d0.w*1)
+
+tbl_store_fpreg:
+ short store_fpreg_0 - tbl_store_fpreg
+ short store_fpreg_1 - tbl_store_fpreg
+ short store_fpreg_2 - tbl_store_fpreg
+ short store_fpreg_3 - tbl_store_fpreg
+ short store_fpreg_4 - tbl_store_fpreg
+ short store_fpreg_5 - tbl_store_fpreg
+ short store_fpreg_6 - tbl_store_fpreg
+ short store_fpreg_7 - tbl_store_fpreg
+
+store_fpreg_0:
+ fmovm.x &0x80, EXC_FP0(%a6)
+ rts
+store_fpreg_1:
+ fmovm.x &0x80, EXC_FP1(%a6)
+ rts
+store_fpreg_2:
+ fmovm.x &0x01, -(%sp)
+ fmovm.x (%sp)+, &0x20
+ rts
+store_fpreg_3:
+ fmovm.x &0x01, -(%sp)
+ fmovm.x (%sp)+, &0x10
+ rts
+store_fpreg_4:
+ fmovm.x &0x01, -(%sp)
+ fmovm.x (%sp)+, &0x08
+ rts
+store_fpreg_5:
+ fmovm.x &0x01, -(%sp)
+ fmovm.x (%sp)+, &0x04
+ rts
+store_fpreg_6:
+ fmovm.x &0x01, -(%sp)
+ fmovm.x (%sp)+, &0x02
+ rts
+store_fpreg_7:
+ fmovm.x &0x01, -(%sp)
+ fmovm.x (%sp)+, &0x01
+ rts
+
+#########################################################################
+# XDEF **************************************************************** #
+# _denorm(): denormalize an intermediate result #
+# #
+# XREF **************************************************************** #
+# None #
+# #
+# INPUT *************************************************************** #
+# a0 = points to the operand to be denormalized #
+# (in the internal extended format) #
+# #
+# d0 = rounding precision #
+# #
+# OUTPUT ************************************************************** #
+# a0 = pointer to the denormalized result #
+# (in the internal extended format) #
+# #
+# d0 = guard,round,sticky #
+# #
+# ALGORITHM *********************************************************** #
+# According to the exponent underflow threshold for the given #
+#	precision, shift the mantissa bits to the right in order to	#
+#	raise the							#
+# exponent of the operand to the threshold value. While shifting the #
+# mantissa bits right, maintain the value of the guard, round, and #
+# sticky bits. #
+# other notes: #
+# (1) _denorm() is called by the underflow routines #
+# (2) _denorm() does NOT affect the status register #
+# #
+#########################################################################
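+#
+# example (illustrative values): with single rounding precision the
+# threshold loaded below is sgl_thresh; if (sgl_thresh - exponent)
+# were 70 (> 65), every mantissa bit plus the guard and round bits
+# would be shifted off, so denorm_set_stky simply zeroes the mantissa,
+# writes the threshold into the exponent, and returns with only the
+# sticky bit set.
+#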
+
+#
+# table of exponent threshold values for each precision
+#
+tbl_thresh:
+ short 0x0
+ short sgl_thresh
+ short dbl_thresh
+
+ global _denorm
+_denorm:
+#
+# Load the exponent threshold for the precision selected and check
+# to see if (threshold - exponent) is > 65 in which case we can
+# simply calculate the sticky bit and zero the mantissa. otherwise
+# we have to call the denormalization routine.
+#
+ lsr.b &0x2, %d0 # shift prec to lo bits
+ mov.w (tbl_thresh.b,%pc,%d0.w*2), %d1 # load prec threshold
+ mov.w %d1, %d0 # copy d1 into d0
+ sub.w FTEMP_EX(%a0), %d0 # diff = threshold - exp
+ cmpi.w %d0, &66 # is diff > 65? (mant + g,r bits)
+ bpl.b denorm_set_stky # yes; just calc sticky
+
+ clr.l %d0 # clear g,r,s
+	btst		&inex2_bit, FPSR_EXCEPT(%a6) # was INEX2 set?
+ beq.b denorm_call # no; don't change anything
+ bset &29, %d0 # yes; set sticky bit
+
+denorm_call:
+ bsr.l dnrm_lp # denormalize the number
+ rts
+
+#
+# all bits would have been shifted off during the denorm so simply
+# calculate if the sticky should be set and clear the entire mantissa.
+#
+denorm_set_stky:
+ mov.l &0x20000000, %d0 # set sticky bit in return value
+ mov.w %d1, FTEMP_EX(%a0) # load exp with threshold
+	clr.l		FTEMP_HI(%a0)		# zero hi(mantissa)
+	clr.l		FTEMP_LO(%a0)		# zero lo(mantissa)
+ rts
+
+# #
+# dnrm_lp(): denormalize exponent/mantissa to the specified threshold	#
+# #
+# INPUT: #
+# %a0 : points to the operand to be denormalized #
+# %d0{31:29} : initial guard,round,sticky #
+# %d1{15:0} : denormalization threshold #
+# OUTPUT: #
+# %a0 : points to the denormalized operand #
+# %d0{31:29} : final guard,round,sticky #
+# #
+
+# *** Local Equates *** #
+set GRS, L_SCR2 # g,r,s temp storage
+set FTEMP_LO2, L_SCR1 # FTEMP_LO copy
+
+ global dnrm_lp
+dnrm_lp:
+
+#
+# make a copy of FTEMP_LO and place the g,r,s bits directly after it
+# in memory so as to make the bitfield extraction for denormalization easier.
+#
+ mov.l FTEMP_LO(%a0), FTEMP_LO2(%a6) # make FTEMP_LO copy
+ mov.l %d0, GRS(%a6) # place g,r,s after it
+
+#
+# check to see how much less than the underflow threshold the operand
+# exponent is.
+#
+ mov.l %d1, %d0 # copy the denorm threshold
+ sub.w FTEMP_EX(%a0), %d1 # d1 = threshold - uns exponent
+ ble.b dnrm_no_lp # d1 <= 0
+ cmpi.w %d1, &0x20 # is ( 0 <= d1 < 32) ?
+ blt.b case_1 # yes
+ cmpi.w %d1, &0x40 # is (32 <= d1 < 64) ?
+ blt.b case_2 # yes
+ bra.w case_3 # (d1 >= 64)
+
+#
+# No denormalization necessary
+#
+dnrm_no_lp:
+ mov.l GRS(%a6), %d0 # restore original g,r,s
+ rts
+
+#
+# case (0<d1<32)
+#
+# %d0 = denorm threshold
+# %d1 = "n" = amt to shift
+#
+# ---------------------------------------------------------
+# | FTEMP_HI | FTEMP_LO |grs000.........000|
+# ---------------------------------------------------------
+# <-(32 - n)-><-(n)-><-(32 - n)-><-(n)-><-(32 - n)-><-(n)->
+# \ \ \ \
+# \ \ \ \
+# \ \ \ \
+# \ \ \ \
+# \ \ \ \
+# \ \ \ \
+# \ \ \ \
+# \ \ \ \
+# <-(n)-><-(32 - n)-><------(32)-------><------(32)------->
+# ---------------------------------------------------------
+# |0.....0| NEW_HI | NEW_FTEMP_LO |grs |
+# ---------------------------------------------------------
+#
+case_1:
+ mov.l %d2, -(%sp) # create temp storage
+
+ mov.w %d0, FTEMP_EX(%a0) # exponent = denorm threshold
+ mov.l &32, %d0
+ sub.w %d1, %d0 # %d0 = 32 - %d1
+
+ cmpi.w %d1, &29 # is shft amt >= 29
+ blt.b case1_extract # no; no fix needed
+ mov.b GRS(%a6), %d2
+ or.b %d2, 3+FTEMP_LO2(%a6)
+
+case1_extract:
+ bfextu FTEMP_HI(%a0){&0:%d0}, %d2 # %d2 = new FTEMP_HI
+ bfextu FTEMP_HI(%a0){%d0:&32}, %d1 # %d1 = new FTEMP_LO
+ bfextu FTEMP_LO2(%a6){%d0:&32}, %d0 # %d0 = new G,R,S
+
+ mov.l %d2, FTEMP_HI(%a0) # store new FTEMP_HI
+ mov.l %d1, FTEMP_LO(%a0) # store new FTEMP_LO
+
+ bftst %d0{&2:&30} # were bits shifted off?
+ beq.b case1_sticky_clear # no; go finish
+ bset &rnd_stky_bit, %d0 # yes; set sticky bit
+
+case1_sticky_clear:
+ and.l &0xe0000000, %d0 # clear all but G,R,S
+ mov.l (%sp)+, %d2 # restore temp register
+ rts
+
+#
+# case (32<=d1<64)
+#
+# %d0 = denorm threshold
+# %d1 = "n" = amt to shift
+#
+# ---------------------------------------------------------
+# | FTEMP_HI | FTEMP_LO |grs000.........000|
+# ---------------------------------------------------------
+# <-(32 - n)-><-(n)-><-(32 - n)-><-(n)-><-(32 - n)-><-(n)->
+# \ \ \
+# \ \ \
+# \ \ -------------------
+# \ -------------------- \
+# ------------------- \ \
+# \ \ \
+# \ \ \
+# \ \ \
+# <-------(32)------><-(n)-><-(32 - n)-><------(32)------->
+# ---------------------------------------------------------
+# |0...............0|0....0| NEW_LO |grs |
+# ---------------------------------------------------------
+#
+case_2:
+ mov.l %d2, -(%sp) # create temp storage
+
+ mov.w %d0, FTEMP_EX(%a0) # exponent = denorm threshold
+ subi.w &0x20, %d1 # %d1 now between 0 and 32
+ mov.l &0x20, %d0
+ sub.w %d1, %d0 # %d0 = 32 - %d1
+
+# subtle step here; or in the g,r,s at the bottom of FTEMP_LO to minimize
+# the number of bits to check for the sticky detect.
+# it only plays a role in shift amounts of 61-63.
+ mov.b GRS(%a6), %d2
+ or.b %d2, 3+FTEMP_LO2(%a6)
+
+ bfextu FTEMP_HI(%a0){&0:%d0}, %d2 # %d2 = new FTEMP_LO
+ bfextu FTEMP_HI(%a0){%d0:&32}, %d1 # %d1 = new G,R,S
+
+ bftst %d1{&2:&30} # were any bits shifted off?
+ bne.b case2_set_sticky # yes; set sticky bit
+ bftst FTEMP_LO2(%a6){%d0:&31} # were any bits shifted off?
+ bne.b case2_set_sticky # yes; set sticky bit
+
+ mov.l %d1, %d0 # move new G,R,S to %d0
+ bra.b case2_end
+
+case2_set_sticky:
+ mov.l %d1, %d0 # move new G,R,S to %d0
+ bset &rnd_stky_bit, %d0 # set sticky bit
+
+case2_end:
+ clr.l FTEMP_HI(%a0) # store FTEMP_HI = 0
+ mov.l %d2, FTEMP_LO(%a0) # store FTEMP_LO
+ and.l &0xe0000000, %d0 # clear all but G,R,S
+
+ mov.l (%sp)+,%d2 # restore temp register
+ rts
+
+#
+# case (d1>=64)
+#
+# %d0 = denorm threshold
+# %d1 = amt to shift
+#
+case_3:
+ mov.w %d0, FTEMP_EX(%a0) # insert denorm threshold
+
+ cmpi.w %d1, &65 # is shift amt > 65?
+ blt.b case3_64 # no; it's == 64
+ beq.b case3_65 # no; it's == 65
+
+#
+# case (d1>65)
+#
+# Shift value is > 65 and out of range. All bits are shifted off.
+# Return a zero mantissa with the sticky bit set
+#
+ clr.l FTEMP_HI(%a0) # clear hi(mantissa)
+ clr.l FTEMP_LO(%a0) # clear lo(mantissa)
+ mov.l &0x20000000, %d0 # set sticky bit
+ rts
+
+#
+# case (d1 == 64)
+#
+# ---------------------------------------------------------
+# | FTEMP_HI | FTEMP_LO |grs000.........000|
+# ---------------------------------------------------------
+# <-------(32)------>
+# \ \
+# \ \
+# \ \
+# \ ------------------------------
+# ------------------------------- \
+# \ \
+# \ \
+# \ \
+# <-------(32)------>
+# ---------------------------------------------------------
+# |0...............0|0................0|grs |
+# ---------------------------------------------------------
+#
+case3_64:
+ mov.l FTEMP_HI(%a0), %d0 # fetch hi(mantissa)
+ mov.l %d0, %d1 # make a copy
+ and.l &0xc0000000, %d0 # extract G,R
+ and.l &0x3fffffff, %d1 # extract other bits
+
+ bra.b case3_complete
+
+#
+# case (d1 == 65)
+#
+# ---------------------------------------------------------
+# | FTEMP_HI | FTEMP_LO |grs000.........000|
+# ---------------------------------------------------------
+# <-------(32)------>
+# \ \
+# \ \
+# \ \
+# \ ------------------------------
+# -------------------------------- \
+# \ \
+# \ \
+# \ \
+# <-------(31)----->
+# ---------------------------------------------------------
+# |0...............0|0................0|0rs |
+# ---------------------------------------------------------
+#
+case3_65:
+	mov.l		FTEMP_HI(%a0), %d0	# fetch hi(mantissa)
+	mov.l		%d0, %d1		# make a copy (as in case3_64)
+	and.l		&0x80000000, %d0	# extract R bit
+	lsr.l		&0x1, %d0		# shift high bit into R bit
+	and.l		&0x7fffffff, %d1	# extract other bits
+
+case3_complete:
+# the last operation was an "and" of the bits shifted off, so the
+# condition codes already say whether any were set; branch accordingly.
+ bne.b case3_set_sticky # yes; go set new sticky
+ tst.l FTEMP_LO(%a0) # were any bits shifted off?
+ bne.b case3_set_sticky # yes; go set new sticky
+ tst.b GRS(%a6) # were any bits shifted off?
+ bne.b case3_set_sticky # yes; go set new sticky
+
+#
+# no bits were shifted off so don't set the sticky bit.
+# the guard and round bits are already in %d0;
+# the entire mantissa is zeroed.
+#
+ clr.l FTEMP_HI(%a0) # clear hi(mantissa)
+ clr.l FTEMP_LO(%a0) # clear lo(mantissa)
+ rts
+
+#
+# some bits were shifted off so set the sticky bit.
+# the entire mantissa is zero.
+#
+case3_set_sticky:
+ bset &rnd_stky_bit,%d0 # set new sticky bit
+ clr.l FTEMP_HI(%a0) # clear hi(mantissa)
+ clr.l FTEMP_LO(%a0) # clear lo(mantissa)
+ rts
+
+#########################################################################
+# XDEF **************************************************************** #
+# _round(): round result according to precision/mode #
+# #
+# XREF **************************************************************** #
+# None #
+# #
+# INPUT *************************************************************** #
+# a0 = ptr to input operand in internal extended format #
+# d1(hi) = contains rounding precision: #
+# ext = $0000xxxx #
+# sgl = $0004xxxx #
+# dbl = $0008xxxx #
+# d1(lo) = contains rounding mode: #
+# RN = $xxxx0000 #
+# RZ = $xxxx0001 #
+# RM = $xxxx0002 #
+# RP = $xxxx0003 #
+# d0{31:29} = contains the g,r,s bits (extended) #
+# #
+# OUTPUT ************************************************************** #
+# a0 = pointer to rounded result #
+# #
+# ALGORITHM *********************************************************** #
+# On return the value pointed to by a0 is correctly rounded, #
+# a0 is preserved and the g-r-s bits in d0 are cleared. #
+# The result is not typed - the tag field is invalid. The #
+# result is still in the internal extended format. #
+# #
+# The INEX bit of USER_FPSR will be set if the rounded result was #
+# inexact (i.e. if any of the g-r-s bits were set). #
+# #
+#########################################################################
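+#
+# round-to-nearest worked example (illustrative g,r,s values): with
+# g,r,s = 100 (an exact tie) the code below adds 1 to the l-bit and
+# then, because r and s are both clear, forces the l-bit back to zero;
+# the net effect is round-to-even. With g,r,s = 110 the add stands
+# and the value rounds up.
+#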
+
+ global _round
+_round:
+#
+# ext_grs() looks at the rounding precision and sets the appropriate
+# G,R,S bits.
+# If (G,R,S == 0) then result is exact and round is done, else set
+# the inex flag in status reg and continue.
+#
+ bsr.l ext_grs # extract G,R,S
+
+ tst.l %d0 # are G,R,S zero?
+ beq.w truncate # yes; round is complete
+
+ or.w &inx2a_mask, 2+USER_FPSR(%a6) # set inex2/ainex
+
+#
+# Use rounding mode as an index into a jump table for these modes.
+# All of the following assumes grs != 0.
+#
+ mov.w (tbl_mode.b,%pc,%d1.w*2), %a1 # load jump offset
+ jmp (tbl_mode.b,%pc,%a1) # jmp to rnd mode handler
+
+tbl_mode:
+ short rnd_near - tbl_mode
+ short truncate - tbl_mode # RZ always truncates
+ short rnd_mnus - tbl_mode
+ short rnd_plus - tbl_mode
+
+#################################################################
+# ROUND PLUS INFINITY #
+# #
+# If sign of fp number = 0 (positive), then add 1 to l. #
+#################################################################
+rnd_plus:
+ tst.b FTEMP_SGN(%a0) # check for sign
+	bmi.w		truncate		# if negative then truncate
+
+ mov.l &0xffffffff, %d0 # force g,r,s to be all f's
+ swap %d1 # set up d1 for round prec.
+
+ cmpi.b %d1, &s_mode # is prec = sgl?
+ beq.w add_sgl # yes
+ bgt.w add_dbl # no; it's dbl
+ bra.w add_ext # no; it's ext
+
+#################################################################
+# ROUND MINUS INFINITY #
+# #
+# If sign of fp number = 1 (negative), then add 1 to l. #
+#################################################################
+rnd_mnus:
+ tst.b FTEMP_SGN(%a0) # check for sign
+	bpl.w		truncate		# if positive then truncate
+
+ mov.l &0xffffffff, %d0 # force g,r,s to be all f's
+ swap %d1 # set up d1 for round prec.
+
+ cmpi.b %d1, &s_mode # is prec = sgl?
+ beq.w add_sgl # yes
+ bgt.w add_dbl # no; it's dbl
+ bra.w add_ext # no; it's ext
+
+#################################################################
+# ROUND NEAREST #
+# #
+# If (g=1), then add 1 to l and if (r=s=0), then clear l #
+# Note that this will round to even in case of a tie. #
+#################################################################
+rnd_near:
+ asl.l &0x1, %d0 # shift g-bit to c-bit
+	bcc.w		truncate		# if (g=0) then truncate
+
+ swap %d1 # set up d1 for round prec.
+
+ cmpi.b %d1, &s_mode # is prec = sgl?
+ beq.w add_sgl # yes
+ bgt.w add_dbl # no; it's dbl
+ bra.w add_ext # no; it's ext
+
+# *** LOCAL EQUATES ***
+set ad_1_sgl, 0x00000100 # constant to add 1 to l-bit in sgl prec
+set ad_1_dbl, 0x00000800 # constant to add 1 to l-bit in dbl prec
+
+#########################
+# ADD SINGLE #
+#########################
+add_sgl:
+ add.l &ad_1_sgl, FTEMP_HI(%a0)
+ bcc.b scc_clr # no mantissa overflow
+ roxr.w FTEMP_HI(%a0) # shift v-bit back in
+ roxr.w FTEMP_HI+2(%a0) # shift v-bit back in
+ add.w &0x1, FTEMP_EX(%a0) # and incr exponent
+scc_clr:
+ tst.l %d0 # test for rs = 0
+ bne.b sgl_done
+ and.w &0xfe00, FTEMP_HI+2(%a0) # clear the l-bit
+sgl_done:
+ and.l &0xffffff00, FTEMP_HI(%a0) # truncate bits beyond sgl limit
+	clr.l		FTEMP_LO(%a0)		# clear lo(mantissa)
+ rts
+
+#########################
+# ADD EXTENDED #
+#########################
+add_ext:
+ addq.l &1,FTEMP_LO(%a0) # add 1 to l-bit
+ bcc.b xcc_clr # test for carry out
+ addq.l &1,FTEMP_HI(%a0) # propagate carry
+ bcc.b xcc_clr
+ roxr.w FTEMP_HI(%a0) # mant is 0 so restore v-bit
+ roxr.w FTEMP_HI+2(%a0) # mant is 0 so restore v-bit
+ roxr.w FTEMP_LO(%a0)
+ roxr.w FTEMP_LO+2(%a0)
+ add.w &0x1,FTEMP_EX(%a0) # and inc exp
+xcc_clr:
+ tst.l %d0 # test rs = 0
+ bne.b add_ext_done
+ and.b &0xfe,FTEMP_LO+3(%a0) # clear the l bit
+add_ext_done:
+ rts
+
+#########################
+# ADD DOUBLE #
+#########################
+add_dbl:
+ add.l &ad_1_dbl, FTEMP_LO(%a0) # add 1 to lsb
+ bcc.b dcc_clr # no carry
+ addq.l &0x1, FTEMP_HI(%a0) # propagate carry
+ bcc.b dcc_clr # no carry
+
+ roxr.w FTEMP_HI(%a0) # mant is 0 so restore v-bit
+ roxr.w FTEMP_HI+2(%a0) # mant is 0 so restore v-bit
+ roxr.w FTEMP_LO(%a0)
+ roxr.w FTEMP_LO+2(%a0)
+ addq.w &0x1, FTEMP_EX(%a0) # incr exponent
+dcc_clr:
+ tst.l %d0 # test for rs = 0
+ bne.b dbl_done
+ and.w &0xf000, FTEMP_LO+2(%a0) # clear the l-bit
+
+dbl_done:
+ and.l &0xfffff800,FTEMP_LO(%a0) # truncate bits beyond dbl limit
+ rts
+
+###########################
+# Truncate all other bits #
+###########################
+truncate:
+ swap %d1 # select rnd prec
+
+ cmpi.b %d1, &s_mode # is prec sgl?
+ beq.w sgl_done # yes
+ bgt.b dbl_done # no; it's dbl
+ rts # no; it's ext
+
+
+#
+# ext_grs(): extract guard, round and sticky bits according to
+# rounding precision.
+#
+# INPUT
+# d0 = extended precision g,r,s (in d0{31:29})
+# d1 = {PREC,ROUND}
+# OUTPUT
+# d0{31:29} = guard, round, sticky
+#
+# ext_grs() extracts the guard/round/sticky bits according to the
+# selected rounding precision. It is called only by the _round()
+# subroutine. All registers except d0 are kept intact; d0 returns the
+# updated guard,round,sticky in d0{31:29}.
+#
+# Notes: ext_grs() works on the rounding PRECision, and therefore has
+#	 to swap d1 prior to use and restore d1 afterward. This routine
+#	 is tightly tied to _round() and is not meant to uphold standard
+#	 subroutine calling conventions.
+#
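+#
+# example for single precision (illustrative): the 24-bit sgl mantissa
+# occupies the upper bits of FTEMP_HI, so ext_grs_sgl pulls guard and
+# round from the two bits just past it ({&24:&2}) and computes sticky
+# as the OR of the remaining 6 bits of FTEMP_HI, all of FTEMP_LO, and
+# the incoming extended g,r,s in d0.
+#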
+
+ext_grs:
+ swap %d1 # have d1.w point to round precision
+ tst.b %d1 # is rnd prec = extended?
+ bne.b ext_grs_not_ext # no; go handle sgl or dbl
+
+#
+# %d0 actually already holds g,r,s since _round() had it before calling
+# this function. So, as long as we don't disturb it, we are "returning" it.
+#
+ext_grs_ext:
+ swap %d1 # yes; return to correct positions
+ rts
+
+ext_grs_not_ext:
+ movm.l &0x3000, -(%sp) # make some temp registers {d2/d3}
+
+ cmpi.b %d1, &s_mode # is rnd prec = sgl?
+ bne.b ext_grs_dbl # no; go handle dbl
+
+#
+# sgl:
+# 96 64 40 32 0
+# -----------------------------------------------------
+# | EXP |XXXXXXX| |xx | |grs|
+# -----------------------------------------------------
+# <--(24)--->nn\ /
+# ee ---------------------
+# ww |
+# v
+# gr new sticky
+#
+ext_grs_sgl:
+ bfextu FTEMP_HI(%a0){&24:&2}, %d3 # sgl prec. g-r are 2 bits right
+ mov.l &30, %d2 # of the sgl prec. limits
+ lsl.l %d2, %d3 # shift g-r bits to MSB of d3
+ mov.l FTEMP_HI(%a0), %d2 # get word 2 for s-bit test
+ and.l &0x0000003f, %d2 # s bit is the or of all other
+ bne.b ext_grs_st_stky # bits to the right of g-r
+ tst.l FTEMP_LO(%a0) # test lower mantissa
+ bne.b ext_grs_st_stky # if any are set, set sticky
+ tst.l %d0 # test original g,r,s
+ bne.b ext_grs_st_stky # if any are set, set sticky
+ bra.b ext_grs_end_sd # if words 3 and 4 are clr, exit
+
+#
+# dbl:
+# 96 64 32 11 0
+# -----------------------------------------------------
+# | EXP |XXXXXXX| | |xx |grs|
+# -----------------------------------------------------
+# nn\ /
+# ee -------
+# ww |
+# v
+# gr new sticky
+#
+ext_grs_dbl:
+ bfextu FTEMP_LO(%a0){&21:&2}, %d3 # dbl-prec. g-r are 2 bits right
+ mov.l &30, %d2 # of the dbl prec. limits
+ lsl.l %d2, %d3 # shift g-r bits to the MSB of d3
+ mov.l FTEMP_LO(%a0), %d2 # get lower mantissa for s-bit test
+ and.l &0x000001ff, %d2 # s bit is the or-ing of all
+ bne.b ext_grs_st_stky # other bits to the right of g-r
+ tst.l %d0 # test word original g,r,s
+ bne.b ext_grs_st_stky # if any are set, set sticky
+ bra.b ext_grs_end_sd # if clear, exit
+
+ext_grs_st_stky:
+ bset &rnd_stky_bit, %d3 # set sticky bit
+ext_grs_end_sd:
+ mov.l %d3, %d0 # return grs to d0
+
+ movm.l (%sp)+, &0xc # restore scratch registers {d2/d3}
+
+ swap %d1 # restore d1 to original
+ rts
+
+#########################################################################
+# norm(): normalize the mantissa of an extended precision input. the #
+# input operand should not be normalized already. #
+# #
+# XDEF **************************************************************** #
+# norm() #
+# #
+# XREF **************************************************************** #
+# none #
+# #
+# INPUT *************************************************************** #
+# a0 = pointer fp extended precision operand to normalize #
+# #
+# OUTPUT ************************************************************** #
+# d0 = number of bit positions the mantissa was shifted #
+# a0 = the input operand's mantissa is normalized; the exponent #
+# is unchanged. #
+# #
+#########################################################################
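+#
+# example (illustrative values): for hi(man) = 0x00012345 the bfffo
+# below returns 15, so the mantissa is shifted left 15 places with the
+# top 15 bits of lo(man) pulled up into hi(man), and d0 returns 15.
+# if hi(man) is zero, norm_lo shifts lo(man) up instead and adds 32
+# to the returned shift count.
+#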
+ global norm
+norm:
+ mov.l %d2, -(%sp) # create some temp regs
+ mov.l %d3, -(%sp)
+
+ mov.l FTEMP_HI(%a0), %d0 # load hi(mantissa)
+ mov.l FTEMP_LO(%a0), %d1 # load lo(mantissa)
+
+ bfffo %d0{&0:&32}, %d2 # how many places to shift?
+ beq.b norm_lo # hi(man) is all zeroes!
+
+norm_hi:
+ lsl.l %d2, %d0 # left shift hi(man)
+ bfextu %d1{&0:%d2}, %d3 # extract lo bits
+
+ or.l %d3, %d0 # create hi(man)
+ lsl.l %d2, %d1 # create lo(man)
+
+ mov.l %d0, FTEMP_HI(%a0) # store new hi(man)
+ mov.l %d1, FTEMP_LO(%a0) # store new lo(man)
+
+ mov.l %d2, %d0 # return shift amount
+
+ mov.l (%sp)+, %d3 # restore temp regs
+ mov.l (%sp)+, %d2
+
+ rts
+
+norm_lo:
+ bfffo %d1{&0:&32}, %d2 # how many places to shift?
+ lsl.l %d2, %d1 # shift lo(man)
+ add.l &32, %d2 # add 32 to shft amount
+
+ mov.l %d1, FTEMP_HI(%a0) # store hi(man)
+ clr.l FTEMP_LO(%a0) # lo(man) is now zero
+
+ mov.l %d2, %d0 # return shift amount
+
+ mov.l (%sp)+, %d3 # restore temp regs
+ mov.l (%sp)+, %d2
+
+ rts
+
+#########################################################################
+# unnorm_fix(): - changes an UNNORM to one of NORM, DENORM, or ZERO #
+# - returns corresponding optype tag #
+# #
+# XDEF **************************************************************** #
+# unnorm_fix() #
+# #
+# XREF **************************************************************** #
+# norm() - normalize the mantissa #
+# #
+# INPUT *************************************************************** #
+# a0 = pointer to unnormalized extended precision number #
+# #
+# OUTPUT ************************************************************** #
+# d0 = optype tag - is corrected to one of NORM, DENORM, or ZERO #
+# a0 = input operand has been converted to a norm, denorm, or #
+# zero; both the exponent and mantissa are changed. #
+# #
+#########################################################################
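+#
+# example (illustrative values): an UNNORM with hi(man) = 0x01000000
+# needs 7 shifts; if its (unsigned) exponent is, say, 0x4000, 7 can be
+# subtracted without going below zero, so the number is renormalized
+# and tagged NORM. If instead the exponent were only 5, the mantissa
+# is shifted left just 5 places, the exponent forced to zero, and the
+# result tagged DENORM.
+#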
+
+ global unnorm_fix
+unnorm_fix:
+ bfffo FTEMP_HI(%a0){&0:&32}, %d0 # how many shifts are needed?
+ bne.b unnorm_shift # hi(man) is not all zeroes
+
+#
+# hi(man) is all zeroes so see if any bits in lo(man) are set
+#
+unnorm_chk_lo:
+ bfffo FTEMP_LO(%a0){&0:&32}, %d0 # is operand really a zero?
+ beq.w unnorm_zero # yes
+
+ add.w &32, %d0 # no; fix shift distance
+
+#
+# d0 = # shifts needed for complete normalization
+#
+unnorm_shift:
+ clr.l %d1 # clear top word
+ mov.w FTEMP_EX(%a0), %d1 # extract exponent
+ and.w &0x7fff, %d1 # strip off sgn
+
+ cmp.w %d0, %d1 # will denorm push exp < 0?
+ bgt.b unnorm_nrm_zero # yes; denorm only until exp = 0
+
+#
+# exponent would not go < 0. therefore, number stays normalized
+#
+ sub.w %d0, %d1 # shift exponent value
+ mov.w FTEMP_EX(%a0), %d0 # load old exponent
+ and.w &0x8000, %d0 # save old sign
+ or.w %d0, %d1 # {sgn,new exp}
+ mov.w %d1, FTEMP_EX(%a0) # insert new exponent
+
+ bsr.l norm # normalize UNNORM
+
+ mov.b &NORM, %d0 # return new optype tag
+ rts
+
+#
+# exponent would go < 0, so only denormalize until exp = 0
+#
+unnorm_nrm_zero:
+ cmp.b %d1, &32 # is exp <= 32?
+ bgt.b unnorm_nrm_zero_lrg # no; go handle large exponent
+
+ bfextu FTEMP_HI(%a0){%d1:&32}, %d0 # extract new hi(man)
+ mov.l %d0, FTEMP_HI(%a0) # save new hi(man)
+
+ mov.l FTEMP_LO(%a0), %d0 # fetch old lo(man)
+ lsl.l %d1, %d0 # extract new lo(man)
+ mov.l %d0, FTEMP_LO(%a0) # save new lo(man)
+
+ and.w &0x8000, FTEMP_EX(%a0) # set exp = 0
+
+ mov.b &DENORM, %d0 # return new optype tag
+ rts
+
+#
+# only mantissa bits set are in lo(man)
+#
+unnorm_nrm_zero_lrg:
+ sub.w &32, %d1 # adjust shft amt by 32
+
+ mov.l FTEMP_LO(%a0), %d0 # fetch old lo(man)
+ lsl.l %d1, %d0 # left shift lo(man)
+
+ mov.l %d0, FTEMP_HI(%a0) # store new hi(man)
+ clr.l FTEMP_LO(%a0) # lo(man) = 0
+
+ and.w &0x8000, FTEMP_EX(%a0) # set exp = 0
+
+ mov.b &DENORM, %d0 # return new optype tag
+ rts
+
+#
+# whole mantissa is zero so this UNNORM is actually a zero
+#
+unnorm_zero:
+ and.w &0x8000, FTEMP_EX(%a0) # force exponent to zero
+
+ mov.b &ZERO, %d0 # fix optype tag
+ rts
+
+#########################################################################
+# XDEF **************************************************************** #
+# set_tag_x(): return the optype of the input ext fp number #
+# #
+# XREF **************************************************************** #
+# None #
+# #
+# INPUT *************************************************************** #
+# a0 = pointer to extended precision operand #
+# #
+# OUTPUT ************************************************************** #
+# d0 = value of type tag #
+# one of: NORM, INF, QNAN, SNAN, DENORM, UNNORM, ZERO #
+# #
+# ALGORITHM *********************************************************** #
+# Simply test the exponent, j-bit, and mantissa values to #
+# determine the type of operand. #
+# If it's an unnormalized zero, alter the operand and force it #
+# to be a normal zero. #
+# #
+#########################################################################
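+#
+# example (illustrative values): FTEMP_EX = 0x7fff with hi(man) =
+# 0x40000000 and lo(man) = 0 takes the inf_or_nan_x path; the mantissa
+# is non-zero once the don't-care msb is masked, and bit 6 of the
+# first mantissa byte (the quiet bit) is set, so the tag comes back
+# QNAN. The same exponent with an all-zero mantissa would tag INF.
+#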
+
+ global set_tag_x
+set_tag_x:
+ mov.w FTEMP_EX(%a0), %d0 # extract exponent
+ andi.w &0x7fff, %d0 # strip off sign
+ cmpi.w %d0, &0x7fff # is (EXP == MAX)?
+ beq.b inf_or_nan_x
+not_inf_or_nan_x:
+ btst &0x7,FTEMP_HI(%a0)
+ beq.b not_norm_x
+is_norm_x:
+ mov.b &NORM, %d0
+ rts
+not_norm_x:
+ tst.w %d0 # is exponent = 0?
+ bne.b is_unnorm_x
+not_unnorm_x:
+ tst.l FTEMP_HI(%a0)
+ bne.b is_denorm_x
+ tst.l FTEMP_LO(%a0)
+ bne.b is_denorm_x
+is_zero_x:
+ mov.b &ZERO, %d0
+ rts
+is_denorm_x:
+ mov.b &DENORM, %d0
+ rts
+# we must now distinguish "unnormalized zeroes", which we
+# must convert to true zeroes.
+is_unnorm_x:
+ tst.l FTEMP_HI(%a0)
+ bne.b is_unnorm_reg_x
+ tst.l FTEMP_LO(%a0)
+ bne.b is_unnorm_reg_x
+# it's an "unnormalized zero". let's convert it to an actual zero...
+ andi.w &0x8000,FTEMP_EX(%a0) # clear exponent
+ mov.b &ZERO, %d0
+ rts
+is_unnorm_reg_x:
+ mov.b &UNNORM, %d0
+ rts
+inf_or_nan_x:
+ tst.l FTEMP_LO(%a0)
+ bne.b is_nan_x
+ mov.l FTEMP_HI(%a0), %d0
+ and.l &0x7fffffff, %d0 # msb is a don't care!
+ bne.b is_nan_x
+is_inf_x:
+ mov.b &INF, %d0
+ rts
+is_nan_x:
+ btst &0x6, FTEMP_HI(%a0)
+ beq.b is_snan_x
+ mov.b &QNAN, %d0
+ rts
+is_snan_x:
+ mov.b &SNAN, %d0
+ rts
+
+#########################################################################
+# XDEF **************************************************************** #
+# set_tag_d(): return the optype of the input dbl fp number #
+# #
+# XREF **************************************************************** #
+# None #
+# #
+# INPUT *************************************************************** #
+# a0 = points to double precision operand #
+# #
+# OUTPUT ************************************************************** #
+# d0 = value of type tag #
+# one of: NORM, INF, QNAN, SNAN, DENORM, ZERO #
+# #
+# ALGORITHM *********************************************************** #
+# Simply test the exponent, j-bit, and mantissa values to #
+# determine the type of operand. #
+# #
+#########################################################################
+
+ global set_tag_d
+set_tag_d:
+ mov.l FTEMP(%a0), %d0
+ mov.l %d0, %d1
+
+ andi.l &0x7ff00000, %d0
+ beq.b zero_or_denorm_d
+
+ cmpi.l %d0, &0x7ff00000
+ beq.b inf_or_nan_d
+
+is_norm_d:
+ mov.b &NORM, %d0
+ rts
+zero_or_denorm_d:
+ and.l &0x000fffff, %d1
+ bne is_denorm_d
+ tst.l 4+FTEMP(%a0)
+ bne is_denorm_d
+is_zero_d:
+ mov.b &ZERO, %d0
+ rts
+is_denorm_d:
+ mov.b &DENORM, %d0
+ rts
+inf_or_nan_d:
+ and.l &0x000fffff, %d1
+ bne is_nan_d
+ tst.l 4+FTEMP(%a0)
+ bne is_nan_d
+is_inf_d:
+ mov.b &INF, %d0
+ rts
+is_nan_d:
+ btst &19, %d1
+ bne is_qnan_d
+is_snan_d:
+ mov.b &SNAN, %d0
+ rts
+is_qnan_d:
+ mov.b &QNAN, %d0
+ rts
+
+#########################################################################
+# XDEF **************************************************************** #
+# set_tag_s(): return the optype of the input sgl fp number #
+# #
+# XREF **************************************************************** #
+# None #
+# #
+# INPUT *************************************************************** #
+# a0 = pointer to single precision operand #
+# #
+# OUTPUT ************************************************************** #
+# d0 = value of type tag #
+# one of: NORM, INF, QNAN, SNAN, DENORM, ZERO #
+# #
+# ALGORITHM *********************************************************** #
+# Simply test the exponent, j-bit, and mantissa values to #
+# determine the type of operand. #
+# #
+#########################################################################
+
+ global set_tag_s
+set_tag_s:
+ mov.l FTEMP(%a0), %d0
+ mov.l %d0, %d1
+
+ andi.l &0x7f800000, %d0
+ beq.b zero_or_denorm_s
+
+ cmpi.l %d0, &0x7f800000
+ beq.b inf_or_nan_s
+
+is_norm_s:
+ mov.b &NORM, %d0
+ rts
+zero_or_denorm_s:
+ and.l &0x007fffff, %d1
+ bne is_denorm_s
+is_zero_s:
+ mov.b &ZERO, %d0
+ rts
+is_denorm_s:
+ mov.b &DENORM, %d0
+ rts
+inf_or_nan_s:
+ and.l &0x007fffff, %d1
+ bne is_nan_s
+is_inf_s:
+ mov.b &INF, %d0
+ rts
+is_nan_s:
+ btst &22, %d1
+ bne is_qnan_s
+is_snan_s:
+ mov.b &SNAN, %d0
+ rts
+is_qnan_s:
+ mov.b &QNAN, %d0
+ rts
+
+#########################################################################
+# XDEF **************************************************************** #
+# unf_res(): routine to produce default underflow result of a #
+# scaled extended precision number; this is used by #
+# fadd/fdiv/fmul/etc. emulation routines. #
+# unf_res4(): same as above but for fsglmul/fsgldiv which use #
+# single round prec and extended prec mode. #
+# #
+# XREF **************************************************************** #
+# _denorm() - denormalize according to scale factor #
+# _round() - round denormalized number according to rnd prec #
+# #
+# INPUT *************************************************************** #
+# a0 = pointer to extended precison operand #
+# d0 = scale factor #
+# d1 = rounding precision/mode #
+# #
+# OUTPUT ************************************************************** #
+# a0 = pointer to default underflow result in extended precision #
+# d0.b = result FPSR_cc which caller may or may not want to save #
+# #
+# ALGORITHM *********************************************************** #
+# Convert the input operand to "internal format" which means the #
+# exponent is extended to 16 bits and the sign is stored in the unused #
+# portion of the extended precison operand. Denormalize the number #
+# according to the scale factor passed in d0. Then, round the #
+# denormalized result. #
+# Set the FPSR_exc bits as appropriate but return the cc bits in #
+# d0 in case the caller doesn't want to save them (as is the case for #
+# fmove out). #
+# unf_res4() for fsglmul/fsgldiv forces the denorm to extended #
+# precision and the rounding mode to single. #
+# #
+#########################################################################
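+#
+# example (illustrative values): for an operand with biased exponent
+# 0x0003 and a scale factor of 0x000a, the subtract below leaves
+# 0xfff9 (-7) in the 16-bit "internal format" exponent field, with the
+# operand's sign parked in FTEMP_SGN; _denorm then shifts the mantissa
+# right until the exponent climbs back up to the underflow threshold.
+#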
+ global unf_res
+unf_res:
+ mov.l %d1, -(%sp) # save rnd prec,mode on stack
+
+ btst &0x7, FTEMP_EX(%a0) # make "internal" format
+ sne FTEMP_SGN(%a0)
+
+ mov.w FTEMP_EX(%a0), %d1 # extract exponent
+ and.w &0x7fff, %d1
+ sub.w %d0, %d1
+ mov.w %d1, FTEMP_EX(%a0) # insert 16 bit exponent
+
+ mov.l %a0, -(%sp) # save operand ptr during calls
+
+ mov.l 0x4(%sp),%d0 # pass rnd prec.
+ andi.w &0x00c0,%d0
+ lsr.w &0x4,%d0
+ bsr.l _denorm # denorm result
+
+ mov.l (%sp),%a0
+ mov.w 0x6(%sp),%d1 # load prec:mode into %d1
+ andi.w &0xc0,%d1 # extract rnd prec
+ lsr.w &0x4,%d1
+ swap %d1
+	mov.w		0x6(%sp),%d1		# reload prec:mode
+	andi.w		&0x30,%d1		# extract rnd mode
+	lsr.w		&0x4,%d1
+ bsr.l _round # round the denorm
+
+ mov.l (%sp)+, %a0
+
+# result is now rounded properly. convert back to normal format
+ bclr &0x7, FTEMP_EX(%a0) # clear sgn first; may have residue
+ tst.b FTEMP_SGN(%a0) # is "internal result" sign set?
+ beq.b unf_res_chkifzero # no; result is positive
+ bset &0x7, FTEMP_EX(%a0) # set result sgn
+ clr.b FTEMP_SGN(%a0) # clear temp sign
+
+# the number may have become zero after rounding. set ccodes accordingly.
+unf_res_chkifzero:
+ clr.l %d0
+ tst.l FTEMP_HI(%a0) # is value now a zero?
+ bne.b unf_res_cont # no
+ tst.l FTEMP_LO(%a0)
+ bne.b unf_res_cont # no
+# bset &z_bit, FPSR_CC(%a6) # yes; set zero ccode bit
+ bset &z_bit, %d0 # yes; set zero ccode bit
+
+unf_res_cont:
+
+#
+# can inex1 also be set along with unfl and inex2???
+#
+# we know that underflow has occurred. aunfl should be set if INEX2 is also set.
+#
+ btst &inex2_bit, FPSR_EXCEPT(%a6) # is INEX2 set?
+ beq.b unf_res_end # no
+ bset &aunfl_bit, FPSR_AEXCEPT(%a6) # yes; set aunfl
+
+unf_res_end:
+ add.l &0x4, %sp # clear stack
+ rts
+
+# unf_res() for fsglmul() and fsgldiv().
+ global unf_res4
+unf_res4:
+ mov.l %d1,-(%sp) # save rnd prec,mode on stack
+
+ btst &0x7,FTEMP_EX(%a0) # make "internal" format
+ sne FTEMP_SGN(%a0)
+
+ mov.w FTEMP_EX(%a0),%d1 # extract exponent
+ and.w &0x7fff,%d1
+ sub.w %d0,%d1
+ mov.w %d1,FTEMP_EX(%a0) # insert 16 bit exponent
+
+ mov.l %a0,-(%sp) # save operand ptr during calls
+
+ clr.l %d0 # force rnd prec = ext
+ bsr.l _denorm # denorm result
+
+ mov.l (%sp),%a0
+ mov.w &s_mode,%d1 # force rnd prec = sgl
+ swap %d1
+ mov.w 0x6(%sp),%d1 # load rnd mode
+	andi.w		&0x30,%d1		# extract rnd mode
+ lsr.w &0x4,%d1
+ bsr.l _round # round the denorm
+
+ mov.l (%sp)+,%a0
+
+# result is now rounded properly. convert back to normal format
+ bclr &0x7,FTEMP_EX(%a0) # clear sgn first; may have residue
+ tst.b FTEMP_SGN(%a0) # is "internal result" sign set?
+ beq.b unf_res4_chkifzero # no; result is positive
+ bset &0x7,FTEMP_EX(%a0) # set result sgn
+ clr.b FTEMP_SGN(%a0) # clear temp sign
+
+# the number may have become zero after rounding. set ccodes accordingly.
+unf_res4_chkifzero:
+ clr.l %d0
+ tst.l FTEMP_HI(%a0) # is value now a zero?
+ bne.b unf_res4_cont # no
+ tst.l FTEMP_LO(%a0)
+ bne.b unf_res4_cont # no
+# bset &z_bit,FPSR_CC(%a6) # yes; set zero ccode bit
+ bset &z_bit,%d0 # yes; set zero ccode bit
+
+unf_res4_cont:
+
+#
+# can inex1 also be set along with unfl and inex2???
+#
+# we know that underflow has occurred. aunfl should be set if INEX2 is also set.
+#
+ btst &inex2_bit,FPSR_EXCEPT(%a6) # is INEX2 set?
+ beq.b unf_res4_end # no
+ bset &aunfl_bit,FPSR_AEXCEPT(%a6) # yes; set aunfl
+
+unf_res4_end:
+ add.l &0x4,%sp # clear stack
+ rts
+
+#########################################################################
+# XDEF **************************************************************** #
+# ovf_res(): routine to produce the default overflow result of #
+# an overflowing number. #
+# ovf_res2(): same as above but the rnd mode/prec are passed #
+# differently. #
+# #
+# XREF **************************************************************** #
+# none #
+# #
+# INPUT *************************************************************** #
+# d1.b = '-1' => (-); '0' => (+) #
+# ovf_res(): #
+# d0 = rnd mode/prec #
+# ovf_res2(): #
+# hi(d0) = rnd prec #
+# lo(d0) = rnd mode #
+# #
+# OUTPUT ************************************************************** #
+# a0 = points to extended precision result #
+# d0.b = condition code bits #
+# #
+# ALGORITHM *********************************************************** #
+# The default overflow result can be determined by the sign of #
+# the result and the rounding mode/prec in effect. These bits are #
+# concatenated together to create an index into the default result #
+# table. A pointer to the correct result is returned in a0. The #
+# resulting condition codes are returned in d0 in case the caller #
+# doesn't want FPSR_cc altered (as is the case for fmove out). #
+# #
+#########################################################################
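+#
+# worked example (illustrative values): a negative overflow (d1.b = -1)
+# in extended precision with RM rounding (d0 = 0x20 in the FPCR-style
+# prec/mode byte assumed by the lsr below) makes the index
+# 0x10 | 0x2 = 0x12 = 18; entry 18 of tbl_ovfl_result below is -INF
+# (correct for round-to-minus on a negative overflow) and byte 18 of
+# tbl_ovfl_cc is 0x2+0x8, i.e. the I and N condition-code bits.
+#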
+
+ global ovf_res
+ovf_res:
+ andi.w &0x10,%d1 # keep result sign
+ lsr.b &0x4,%d0 # shift prec/mode
+ or.b %d0,%d1 # concat the two
+ mov.w %d1,%d0 # make a copy
+ lsl.b &0x1,%d1 # multiply d1 by 2
+ bra.b ovf_res_load
+
+ global ovf_res2
+ovf_res2:
+ and.w &0x10, %d1 # keep result sign
+ or.b %d0, %d1 # insert rnd mode
+ swap %d0
+ or.b %d0, %d1 # insert rnd prec
+ mov.w %d1, %d0 # make a copy
+ lsl.b &0x1, %d1 # shift left by 1
+
+#
+# use the rounding mode, precision, and result sign as in index into the
+# two tables below to fetch the default result and the result ccodes.
+#
+ovf_res_load:
+ mov.b (tbl_ovfl_cc.b,%pc,%d0.w*1), %d0 # fetch result ccodes
+ lea (tbl_ovfl_result.b,%pc,%d1.w*8), %a0 # return result ptr
+
+ rts
+
+tbl_ovfl_cc:
+ byte 0x2, 0x0, 0x0, 0x2
+ byte 0x2, 0x0, 0x0, 0x2
+ byte 0x2, 0x0, 0x0, 0x2
+ byte 0x0, 0x0, 0x0, 0x0
+ byte 0x2+0x8, 0x8, 0x2+0x8, 0x8
+ byte 0x2+0x8, 0x8, 0x2+0x8, 0x8
+ byte 0x2+0x8, 0x8, 0x2+0x8, 0x8
+
+tbl_ovfl_result:
+ long 0x7fff0000,0x00000000,0x00000000,0x00000000 # +INF; RN
+ long 0x7ffe0000,0xffffffff,0xffffffff,0x00000000 # +EXT; RZ
+ long 0x7ffe0000,0xffffffff,0xffffffff,0x00000000 # +EXT; RM
+ long 0x7fff0000,0x00000000,0x00000000,0x00000000 # +INF; RP
+
+ long 0x7fff0000,0x00000000,0x00000000,0x00000000 # +INF; RN
+ long 0x407e0000,0xffffff00,0x00000000,0x00000000 # +SGL; RZ
+ long 0x407e0000,0xffffff00,0x00000000,0x00000000 # +SGL; RM
+ long 0x7fff0000,0x00000000,0x00000000,0x00000000 # +INF; RP
+
+ long 0x7fff0000,0x00000000,0x00000000,0x00000000 # +INF; RN
+ long 0x43fe0000,0xffffffff,0xfffff800,0x00000000 # +DBL; RZ
+ long 0x43fe0000,0xffffffff,0xfffff800,0x00000000 # +DBL; RM
+ long 0x7fff0000,0x00000000,0x00000000,0x00000000 # +INF; RP
+
+ long 0x00000000,0x00000000,0x00000000,0x00000000
+ long 0x00000000,0x00000000,0x00000000,0x00000000
+ long 0x00000000,0x00000000,0x00000000,0x00000000
+ long 0x00000000,0x00000000,0x00000000,0x00000000
+
+ long 0xffff0000,0x00000000,0x00000000,0x00000000 # -INF; RN
+ long 0xfffe0000,0xffffffff,0xffffffff,0x00000000 # -EXT; RZ
+ long 0xffff0000,0x00000000,0x00000000,0x00000000 # -INF; RM
+ long 0xfffe0000,0xffffffff,0xffffffff,0x00000000 # -EXT; RP
+
+ long 0xffff0000,0x00000000,0x00000000,0x00000000 # -INF; RN
+ long 0xc07e0000,0xffffff00,0x00000000,0x00000000 # -SGL; RZ
+ long 0xffff0000,0x00000000,0x00000000,0x00000000 # -INF; RM
+ long 0xc07e0000,0xffffff00,0x00000000,0x00000000 # -SGL; RP
+
+ long 0xffff0000,0x00000000,0x00000000,0x00000000 # -INF; RN
+ long 0xc3fe0000,0xffffffff,0xfffff800,0x00000000 # -DBL; RZ
+ long 0xffff0000,0x00000000,0x00000000,0x00000000 # -INF; RM
+ long 0xc3fe0000,0xffffffff,0xfffff800,0x00000000 # -DBL; RP
+
+#########################################################################
+# XDEF **************************************************************** #
+# get_packed(): fetch a packed operand from memory and then #
+# convert it to a floating-point binary number. #
+# #
+# XREF **************************************************************** #
+# _dcalc_ea() - calculate the correct <ea> #
+# _mem_read() - fetch the packed operand from memory #
+# facc_in_x() - the fetch failed so jump to special exit code #
+# decbin() - convert packed to binary extended precision #
+# #
+# INPUT *************************************************************** #
+# None #
+# #
+# OUTPUT ************************************************************** #
+# If no failure on _mem_read(): #
+# FP_SRC(a6) = packed operand now as a binary FP number #
+# #
+# ALGORITHM *********************************************************** #
+# Get the correct <ea> whihc is the value on the exception stack #
+# frame w/ maybe a correction factor if the <ea> is -(an) or (an)+. #
+# Then, fetch the operand from memory. If the fetch fails, exit #
+# through facc_in_x(). #
+#	If the packed operand is a ZERO, NAN, or INF, convert it to	#
+# its binary representation here. Else, call decbin() which will #
+# convert the packed value to an extended precision binary value. #
+# #
+#########################################################################
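+#
+# packed layout as used below: the {1:15} field of the first longword
+# covers SE and the exponent digits (all ones flags INF or NAN), the
+# low nybble of byte 4 holds the single integer digit, and the next
+# two longwords (FP_SRC_HI/FP_SRC_LO) hold the 16 fraction digits.
+#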
+
+# the stacked <ea> for packed is correct except for -(An).
+# the base reg must be updated for both -(An) and (An)+.
+ global get_packed
+get_packed:
+ mov.l &0xc,%d0 # packed is 12 bytes
+ bsr.l _dcalc_ea # fetch <ea>; correct An
+
+ lea FP_SRC(%a6),%a1 # pass: ptr to super dst
+ mov.l &0xc,%d0 # pass: 12 bytes
+ bsr.l _dmem_read # read packed operand
+
+ tst.l %d1 # did dfetch fail?
+ bne.l facc_in_x # yes
+
+# The packed operand is an INF or a NAN if the exponent field is all ones.
+ bfextu FP_SRC(%a6){&1:&15},%d0 # get exp
+ cmpi.w %d0,&0x7fff # INF or NAN?
+ bne.b gp_try_zero # no
+ rts # operand is an INF or NAN
+
+# The packed operand is a zero if the mantissa is all zero, else it's
+# a normal packed op.
+gp_try_zero:
+ mov.b 3+FP_SRC(%a6),%d0 # get byte 4
+ andi.b &0x0f,%d0 # clear all but last nybble
+ bne.b gp_not_spec # not a zero
+ tst.l FP_SRC_HI(%a6) # is lw 2 zero?
+ bne.b gp_not_spec # not a zero
+ tst.l FP_SRC_LO(%a6) # is lw 3 zero?
+ bne.b gp_not_spec # not a zero
+ rts # operand is a ZERO
+gp_not_spec:
+ lea FP_SRC(%a6),%a0 # pass: ptr to packed op
+ bsr.l decbin # convert to extended
+ fmovm.x &0x80,FP_SRC(%a6) # make this the srcop
+ rts
+
+#########################################################################
+# decbin(): Converts normalized packed bcd value pointed to by register #
+# a0 to extended-precision value in fp0. #
+# #
+# INPUT *************************************************************** #
+# a0 = pointer to normalized packed bcd value #
+# #
+# OUTPUT ************************************************************** #
+# fp0 = exact fp representation of the packed bcd value. #
+# #
+# ALGORITHM *********************************************************** #
+# Expected is a normal bcd (i.e. non-exceptional; all inf, zero, #
+# and NaN operands are dispatched without entering this routine) #
+# value in 68881/882 format at location (a0). #
+# #
+# A1. Convert the bcd exponent to binary by successive adds and #
+# muls. Set the sign according to SE. Subtract 16 to compensate #
+# for the mantissa which is to be interpreted as 17 integer #
+# digits, rather than 1 integer and 16 fraction digits. #
+# Note: this operation can never overflow. #
+# #
+# A2. Convert the bcd mantissa to binary by successive #
+# adds and muls in FP0. Set the sign according to SM. #
+# The mantissa digits will be converted with the decimal point #
+# assumed following the least-significant digit. #
+# Note: this operation can never overflow. #
+# #
+# A3. Count the number of leading/trailing zeros in the #
+# bcd string. If SE is positive, count the leading zeros; #
+# if negative, count the trailing zeros. Set the adjusted #
+# exponent equal to the exponent from A1 and the zero count #
+# added if SM = 1 and subtracted if SM = 0. Scale the #
+# mantissa the equivalent of forcing in the bcd value: #
+# #
+# SM = 0 a non-zero digit in the integer position #
+# SM = 1 a non-zero digit in Mant0, lsd of the fraction #
+# #
+#	this will ensure that any value, regardless of its		#
+# representation (ex. 0.1E2, 1E1, 10E0, 100E-1), is converted #
+# consistently. #
+# #
+# A4. Calculate the factor 10^exp in FP1 using a table of #
+# 10^(2^n) values. To reduce the error in forming factors #
+# greater than 10^27, a directed rounding scheme is used with #
+# tables rounded to RN, RM, and RP, according to the table #
+# in the comments of the pwrten section. #
+# #
+# A5. Form the final binary number by scaling the mantissa by #
+# the exponent factor. This is done by multiplying the #
+# mantissa in FP0 by the factor in FP1 if the adjusted #
+# exponent sign is positive, and dividing FP0 by FP1 if #
+# it is negative. #
+# #
+# Clean up and return. Check if the final mul or div was inexact. #
+# If so, set INEX1 in USER_FPSR. #
+# #
+#########################################################################
+
+#
+# PTENRN, PTENRM, and PTENRP are arrays of powers of 10 rounded
+# to nearest, minus, and plus, respectively. The tables include
+# 10**{1,2,4,8,16,32,64,128,256,512,1024,2048,4096}. No rounding
+# is required until the power is greater than 27; however, all
+# tables include the first 5 entries for ease of indexing.
+#
+RTABLE:
+ byte 0,0,0,0
+ byte 2,3,2,3
+ byte 2,3,3,2
+ byte 3,2,2,3
+
+ set FNIBS,7
+ set FSTRT,0
+
+ set ESTRT,4
+ set EDIGITS,2
+
+ global decbin
+decbin:
+ mov.l 0x0(%a0),FP_SCR0_EX(%a6) # make a copy of input
+ mov.l 0x4(%a0),FP_SCR0_HI(%a6) # so we don't alter it
+ mov.l 0x8(%a0),FP_SCR0_LO(%a6)
+
+ lea FP_SCR0(%a6),%a0
+
+ movm.l &0x3c00,-(%sp) # save d2-d5
+ fmovm.x &0x1,-(%sp) # save fp1
+#
+# Calculate exponent:
+# 1. Copy bcd value in memory for use as a working copy.
+# 2. Calculate absolute value of exponent in d1 by mul and add.
+# 3. Correct for exponent sign.
+# 4. Subtract 16 to compensate for interpreting the mant as all integer digits.
+# (i.e., all digits assumed left of the decimal point.)
+#
+# Register usage:
+#
+# calc_e:
+# (*) d0: temp digit storage
+# (*) d1: accumulator for binary exponent
+# (*) d2: digit count
+# (*) d3: offset pointer
+# ( ) d4: first word of bcd
+# ( ) a0: pointer to working bcd value
+# ( ) a6: pointer to original bcd value
+# (*) FP_SCR1: working copy of original bcd value
+# (*) L_SCR1: copy of original exponent word
+#
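+#
+# worked example (illustrative digits): exponent digits 1,2,3 give
+# d1 = ((0*10+1)*10+2)*10+3 = 123; if SE is set this is negated, and
+# the final subtract of 16 (compensating for the all-integer mantissa
+# interpretation) leaves 123 - 16 = 107.
+#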
+calc_e:
+	mov.l		&EDIGITS,%d2		# # of exp digits less one (dbf count)
+	mov.l		&ESTRT,%d3		# bit offset of first exp digit
+ mov.l (%a0),%d4 # get first word of bcd
+ clr.l %d1 # zero d1 for accumulator
+e_gd:
+ mulu.l &0xa,%d1 # mul partial product by one digit place
+ bfextu %d4{%d3:&4},%d0 # get the digit and zero extend into d0
+ add.l %d0,%d1 # d1 = d1 + d0
+ addq.b &4,%d3 # advance d3 to the next digit
+ dbf.w %d2,e_gd # if we have used all 3 digits, exit loop
+ btst &30,%d4 # get SE
+ beq.b e_pos # don't negate if pos
+ neg.l %d1 # negate before subtracting
+e_pos:
+ sub.l &16,%d1 # sub to compensate for shift of mant
+ bge.b e_save # if still pos, do not neg
+ neg.l %d1 # now negative, make pos and set SE
+ or.l &0x40000000,%d4 # set SE in d4,
+ or.l &0x40000000,(%a0) # and in working bcd
+e_save:
+ mov.l %d1,-(%sp) # save exp on stack
+#
+#
+# Calculate mantissa:
+# 1. Calculate absolute value of mantissa in fp0 by mul and add.
+# 2. Correct for mantissa sign.
+# (i.e., all digits assumed left of the decimal point.)
+#
+# Register usage:
+#
+# calc_m:
+# (*) d0: temp digit storage
+# (*) d1: lword counter
+# (*) d2: digit count
+# (*) d3: offset pointer
+# ( ) d4: words 2 and 3 of bcd
+# ( ) a0: pointer to working bcd value
+# ( ) a6: pointer to original bcd value
+# (*) fp0: mantissa accumulator
+# ( ) FP_SCR1: working copy of original bcd value
+# ( ) L_SCR1: copy of original exponent word
+#
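+#
+# worked example (illustrative digits): for mantissa digits 7,2,...
+# the accumulator runs fp0 = 0, fp0 + 7 = 7, 7*10 + 2 = 72, and so on
+# through all 17 digits; every step is exact since 17 decimal digits
+# fit within the 64-bit extended mantissa.
+#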
+calc_m:
+ mov.l &1,%d1 # word counter, init to 1
+ fmov.s &0x00000000,%fp0 # accumulator
+#
+#
+# Since the packed number has a long word between the first & second parts,
+# get the integer digit then skip down & get the rest of the
+# mantissa. We will unroll the loop once.
+#
+ bfextu (%a0){&28:&4},%d0 # integer part is ls digit in long word
+ fadd.b %d0,%fp0 # add digit to sum in fp0
+#
+#
+# Get the rest of the mantissa.
+#
+loadlw:
+	mov.l		(%a0,%d1.L*4),%d4	# load mantissa longword into d4
+	mov.l		&FSTRT,%d3		# bit offset of first digit
+	mov.l		&FNIBS,%d2		# digits per longword, less one (dbf)
+md2b:
+ fmul.s &0x41200000,%fp0 # fp0 = fp0 * 10
+ bfextu %d4{%d3:&4},%d0 # get the digit and zero extend
+ fadd.b %d0,%fp0 # fp0 = fp0 + digit
+#
+#
+# If all the digits (8) in that long word have been converted (d2=0),
+# then inc d1 (=2) to point to the next long word and reset d3 to 0
+# to initialize the digit offset, and set d2 to 7 for the digit count;
+# else continue with this long word.
+#
+ addq.b &4,%d3 # advance d3 to the next digit
+ dbf.w %d2,md2b # check for last digit in this lw
+nextlw:
+ addq.l &1,%d1 # inc lw pointer in mantissa
+ cmp.l %d1,&2 # test for last lw
+ ble.b loadlw # if not, get last one
+#
+# Check the sign of the mant and make the value in fp0 the same sign.
+#
+m_sign:
+ btst &31,(%a0) # test sign of the mantissa
+ beq.b ap_st_z # if clear, go to append/strip zeros
+ fneg.x %fp0 # if set, negate fp0
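+#
+# Roughly, in C (a sketch; nibble(i) stands in for the bfextu digit
+# fetch from the working bcd copy):
+#
+#	double m = 0.0;
+#	for (int i = 0; i < 17; i++)	/* 17 bcd mantissa digits */
+#		m = m * 10.0 + nibble(i);
+#	if (sm)				/* mantissa sign bit set? */
+#		m = -m;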
+#
+# Append/strip zeros:
+#
+# For adjusted exponents which have an absolute value greater than 27*,
+# this routine calculates the amount needed to normalize the mantissa
+# for the adjusted exponent. That number is subtracted from the exp
+# if the exp was positive, and added if it was negative. The purpose
+# of this is to reduce the value of the exponent and the possibility
+# of error in calculation of pwrten.
+#
+# 1. Branch on the sign of the adjusted exponent.
+# 2p.(positive exp)
+# 2. Check M16 and the digits in lwords 2 and 3 in descending order.
+# 3. Add one for each zero encountered until a non-zero digit.
+# 4. Subtract the count from the exp.
+# 5. Check if the exp has crossed zero in #3 above; make the exp abs
+# and set SE.
+# 6. Multiply the mantissa by 10**count.
+# 2n.(negative exp)
+# 2. Check the digits in lwords 3 and 2 in descending order.
+# 3. Add one for each zero encountered until a non-zero digit.
+# 4. Add the count to the exp.
+# 5. Check if the exp has crossed zero in #3 above; clear SE.
+# 6. Divide the mantissa by 10**count.
+#
+# *Why 27? If the adjusted exponent is within -28 < expA < 28, then
+# any adjustment due to append/strip zeros will drive the resultant
+# exponent towards zero. Since all pwrten constants with a power
+# of 27 or less are exact, there is no need to use this routine to
+# attempt to lessen the resultant exponent.
+#
+# Register usage:
+#
+# ap_st_z:
+# (*) d0: temp digit storage
+# (*) d1: zero count
+# (*) d2: digit count
+# (*) d3: offset pointer
+# ( ) d4: first word of bcd
+# (*) d5: lword counter
+# ( ) a0: pointer to working bcd value
+# ( ) FP_SCR1: working copy of original bcd value
+# ( ) L_SCR1: copy of original exponent word
+#
+#
+# First check the absolute value of the exponent to see if this
+# routine is necessary. If so, then check the sign of the exponent
+# and do append (+) or strip (-) zeros accordingly.
+# This section handles a positive adjusted exponent.
+#
+ap_st_z:
+ mov.l (%sp),%d1 # load expA for range test
+	cmp.l		%d1,&27		# compare expA with 27
+ ble.w pwrten # if abs(expA) <28, skip ap/st zeros
+ btst &30,(%a0) # check sign of exp
+ bne.b ap_st_n # if neg, go to neg side
+ clr.l %d1 # zero count reg
+ mov.l (%a0),%d4 # load lword 1 to d4
+ bfextu %d4{&28:&4},%d0 # get M16 in d0
+ bne.b ap_p_fx # if M16 is non-zero, go fix exp
+ addq.l &1,%d1 # inc zero count
+ mov.l &1,%d5 # init lword counter
+ mov.l (%a0,%d5.L*4),%d4 # get lword 2 to d4
+	bne.b		ap_p_cl		# if lw 2 is non-zero, go check digits
+	addq.l		&8,%d1		# else inc count by 8
+ addq.l &1,%d5 # inc lword counter
+ mov.l (%a0,%d5.L*4),%d4 # get lword 3 to d4
+ap_p_cl:
+ clr.l %d3 # init offset reg
+ mov.l &7,%d2 # init digit counter
+ap_p_gd:
+ bfextu %d4{%d3:&4},%d0 # get digit
+ bne.b ap_p_fx # if non-zero, go to fix exp
+ addq.l &4,%d3 # point to next digit
+ addq.l &1,%d1 # inc digit counter
+ dbf.w %d2,ap_p_gd # get next digit
+ap_p_fx:
+	mov.l		%d1,%d0		# copy counter to d0
+ mov.l (%sp),%d1 # get adjusted exp from memory
+ sub.l %d0,%d1 # subtract count from exp
+ bge.b ap_p_fm # if still pos, go to pwrten
+	neg.l		%d1		# now it's neg; get abs
+ mov.l (%a0),%d4 # load lword 1 to d4
+ or.l &0x40000000,%d4 # and set SE in d4
+ or.l &0x40000000,(%a0) # and in memory
+#
+# Calculate the mantissa multiplier to compensate for the stripping of
+# zeros from the mantissa.
+#
+ap_p_fm:
+ lea.l PTENRN(%pc),%a1 # get address of power-of-ten table
+ clr.l %d3 # init table index
+ fmov.s &0x3f800000,%fp1 # init fp1 to 1
+ mov.l &3,%d2 # init d2 to count bits in counter
+ap_p_el:
+ asr.l &1,%d0 # shift lsb into carry
+	bcc.b		ap_p_en		# if bit was 0, skip the mul
+ fmul.x (%a1,%d3),%fp1 # mul by 10**(d3_bit_no)
+ap_p_en:
+ add.l &12,%d3 # inc d3 to next rtable entry
+ tst.l %d0 # check if d0 is zero
+ bne.b ap_p_el # if not, get next bit
+ fmul.x %fp1,%fp0 # mul mantissa by 10**(no_bits_shifted)
+ bra.b pwrten # go calc pwrten
+#
+# This section handles a negative adjusted exponent.
+#
+ap_st_n:
+ clr.l %d1 # clr counter
+ mov.l &2,%d5 # set up d5 to point to lword 3
+ mov.l (%a0,%d5.L*4),%d4 # get lword 3
+ bne.b ap_n_cl # if not zero, check digits
+ sub.l &1,%d5 # dec d5 to point to lword 2
+ addq.l &8,%d1 # inc counter by 8
+ mov.l (%a0,%d5.L*4),%d4 # get lword 2
+ap_n_cl:
+ mov.l &28,%d3 # point to last digit
+ mov.l &7,%d2 # init digit counter
+ap_n_gd:
+ bfextu %d4{%d3:&4},%d0 # get digit
+ bne.b ap_n_fx # if non-zero, go to exp fix
+ subq.l &4,%d3 # point to previous digit
+ addq.l &1,%d1 # inc digit counter
+ dbf.w %d2,ap_n_gd # get next digit
+ap_n_fx:
+ mov.l %d1,%d0 # copy counter to d0
+ mov.l (%sp),%d1 # get adjusted exp from memory
+ sub.l %d0,%d1 # subtract count from exp
+ bgt.b ap_n_fm # if still pos, go fix mantissa
+ neg.l %d1 # take abs of exp and clr SE
+ mov.l (%a0),%d4 # load lword 1 to d4
+ and.l &0xbfffffff,%d4 # and clr SE in d4
+ and.l &0xbfffffff,(%a0) # and in memory
+#
+# Calculate the mantissa multiplier to compensate for the appending of
+# zeros to the mantissa.
+#
+ap_n_fm:
+ lea.l PTENRN(%pc),%a1 # get address of power-of-ten table
+ clr.l %d3 # init table index
+ fmov.s &0x3f800000,%fp1 # init fp1 to 1
+ mov.l &3,%d2 # init d2 to count bits in counter
+ap_n_el:
+ asr.l &1,%d0 # shift lsb into carry
+	bcc.b		ap_n_en		# if bit was 0, skip the mul
+ fmul.x (%a1,%d3),%fp1 # mul by 10**(d3_bit_no)
+ap_n_en:
+ add.l &12,%d3 # inc d3 to next rtable entry
+ tst.l %d0 # check if d0 is zero
+ bne.b ap_n_el # if not, get next bit
+ fdiv.x %fp1,%fp0 # div mantissa by 10**(no_bits_shifted)
+#
+#
+# Calculate power-of-ten factor from adjusted and shifted exponent.
+#
+# Register usage:
+#
+# pwrten:
+# (*) d0: temp
+# ( ) d1: exponent
+# (*) d2: {FPCR[6:5],SM,SE} as index in RTABLE; temp
+# (*) d3: FPCR work copy
+# ( ) d4: first word of bcd
+# (*) a1: RTABLE pointer
+# calc_p:
+# (*) d0: temp
+# ( ) d1: exponent
+# (*) d3: PWRTxx table index
+# ( ) a0: pointer to working copy of bcd
+# (*) a1: PWRTxx pointer
+# (*) fp1: power-of-ten accumulator
+#
+# Pwrten calculates the exponent factor in the selected rounding mode
+# according to the following table:
+#
+# Sign of Mant Sign of Exp Rounding Mode PWRTEN Rounding Mode
+#
+# ANY ANY RN RN
+#
+# + + RP RP
+# - + RP RM
+# + - RP RM
+# - - RP RP
+#
+# + + RM RM
+# - + RM RP
+# + - RM RP
+# - - RM RM
+#
+# + + RZ RM
+# - + RZ RM
+# + - RZ RP
+# - - RZ RP
+#
+#
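+# In C terms (a sketch, not part of this package): RN is unchanged,
+# RZ picks the directed mode that drives the final result toward
+# zero, and RP/RM flip whenever exactly one of SM, SE is set, since
+# the divide by 10^|exp| inverts the rounding sense:
+#
+#	switch (rmode) {
+#	case RN: pw_rmode = RN; break;
+#	case RZ: pw_rmode = se ? RP : RM; break;
+#	case RP: pw_rmode = (sm ^ se) ? RM : RP; break;
+#	case RM: pw_rmode = (sm ^ se) ? RP : RM; break;
+#	}
+#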
+pwrten:
+ mov.l USER_FPCR(%a6),%d3 # get user's FPCR
+ bfextu %d3{&26:&2},%d2 # isolate rounding mode bits
+ mov.l (%a0),%d4 # reload 1st bcd word to d4
+ asl.l &2,%d2 # format d2 to be
+ bfextu %d4{&0:&2},%d0 # {FPCR[6],FPCR[5],SM,SE}
+ add.l %d0,%d2 # in d2 as index into RTABLE
+ lea.l RTABLE(%pc),%a1 # load rtable base
+ mov.b (%a1,%d2),%d0 # load new rounding bits from table
+ clr.l %d3 # clear d3 to force no exc and extended
+ bfins %d0,%d3{&26:&2} # stuff new rounding bits in FPCR
+ fmov.l %d3,%fpcr # write new FPCR
+ asr.l &1,%d0 # write correct PTENxx table
+ bcc.b not_rp # to a1
+ lea.l PTENRP(%pc),%a1 # it is RP
+ bra.b calc_p # go to init section
+not_rp:
+ asr.l &1,%d0 # keep checking
+ bcc.b not_rm
+ lea.l PTENRM(%pc),%a1 # it is RM
+ bra.b calc_p # go to init section
+not_rm:
+ lea.l PTENRN(%pc),%a1 # it is RN
+calc_p:
+ mov.l %d1,%d0 # copy exp to d0;use d0
+ bpl.b no_neg # if exp is negative,
+ neg.l %d0 # invert it
+ or.l &0x40000000,(%a0) # and set SE bit
+no_neg:
+ clr.l %d3 # table index
+ fmov.s &0x3f800000,%fp1 # init fp1 to 1
+e_loop:
+ asr.l &1,%d0 # shift next bit into carry
+ bcc.b e_next # if zero, skip the mul
+ fmul.x (%a1,%d3),%fp1 # mul by 10**(d3_bit_no)
+e_next:
+ add.l &12,%d3 # inc d3 to next rtable entry
+ tst.l %d0 # check if d0 is zero
+ bne.b e_loop # not zero, continue shifting
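+#
+# This bit-scan loop (like ap_p_el and ap_n_el above) builds 10^n by
+# binary decomposition of n against a table of 10^(2^k) entries, each
+# 12 bytes (one extended-precision value) apart. A hedged C sketch:
+#
+#	long double factor = 1.0L;
+#	for (int k = 0; n != 0; k++, n >>= 1)
+#		if (n & 1)
+#			factor *= pten[k];	/* pten[k] = 10^(2^k) */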
+#
+#
+# Check the sign of the adjusted exp and make the value in fp0 the
+# same sign. If the exp was pos then multiply fp1*fp0;
+# else divide fp0/fp1.
+#
+# Register Usage:
+# norm:
+# ( ) a0: pointer to working bcd value
+# (*) fp0: mantissa accumulator
+# ( ) fp1: scaling factor - 10**(abs(exp))
+#
+pnorm:
+ btst &30,(%a0) # test the sign of the exponent
+ beq.b mul # if clear, go to multiply
+div:
+	fdiv.x		%fp1,%fp0	# exp is negative, so divide mant by 10^|exp|
+ bra.b end_dec
+mul:
+	fmul.x		%fp1,%fp0	# exp is positive, so multiply mant by 10^exp
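+#
+# Net effect, as a C one-liner sketch:
+#
+#	result = se ? mant / scale : mant * scale;	/* scale = 10^|exp| */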
+#
+#
+# Clean up and return with result in fp0.
+#
+# If the final mul/div in decbin incurred an inex exception,
+# it will be inex2, but will be reported as inex1 by get_op.
+#
+end_dec:
+ fmov.l %fpsr,%d0 # get status register
+ bclr &inex2_bit+8,%d0 # test for inex2 and clear it
+ beq.b no_exc # skip this if no exc
+ ori.w &inx1a_mask,2+USER_FPSR(%a6) # set INEX1/AINEX
+no_exc:
+ add.l &0x4,%sp # clear 1 lw param
+ fmovm.x (%sp)+,&0x40 # restore fp1
+ movm.l (%sp)+,&0x3c # restore d2-d5
+ fmov.l &0x0,%fpcr
+ fmov.l &0x0,%fpsr
+ rts
+
+#########################################################################
+# bindec(): Converts an input in extended precision format to bcd format#
+# #
+# INPUT *************************************************************** #
+# a0 = pointer to the input extended precision value in memory. #
+# the input may be either normalized, unnormalized, or #
+# denormalized. #
+# d0 = contains the k-factor sign-extended to 32-bits. #
+# #
+# OUTPUT ************************************************************** #
+# FP_SCR0(a6) = bcd format result on the stack. #
+# #
+# ALGORITHM *********************************************************** #
+# #
+# A1. Set RM and size ext; Set SIGMA = sign of input. #
+# The k-factor is saved for use in d7. Clear the #
+# BINDEC_FLG for separating normalized/denormalized #
+# input. If input is unnormalized or denormalized, #
+# normalize it. #
+# #
+# A2. Set X = abs(input). #
+# #
+# A3. Compute ILOG. #
+# ILOG is the log base 10 of the input value. It is #
+# approximated by adding e + 0.f when the original #
+# value is viewed as 2^^e * 1.f in extended precision. #
+# This value is stored in d6. #
+# #
+# A4. Clr INEX bit. #
+# The operation in A3 above may have set INEX2. #
+# #
+# A5. Set ICTR = 0; #
+# ICTR is a flag used in A13. It must be set before the #
+# loop entry A6. #
+# #
+# A6. Calculate LEN. #
+# LEN is the number of digits to be displayed. The #
+# k-factor can dictate either the total number of digits, #
+# if it is a positive number, or the number of digits #
+# after the decimal point which are to be included as #
+# significant. See the 68882 manual for examples. #
+# If LEN is computed to be greater than 17, set OPERR in #
+# USER_FPSR. LEN is stored in d4. #
+# #
+# A7. Calculate SCALE. #
+# SCALE is equal to 10^ISCALE, where ISCALE is the number #
+# of decimal places needed to insure LEN integer digits #
+# in the output before conversion to bcd. LAMBDA is the #
+# sign of ISCALE, used in A9. Fp1 contains #
+# 10^^(abs(ISCALE)) using a rounding mode which is a #
+# function of the original rounding mode and the signs #
+# of ISCALE and X. A table is given in the code. #
+# #
+# A8. Clr INEX; Force RZ. #
+# The operation in A3 above may have set INEX2. #
+# RZ mode is forced for the scaling operation to insure #
+# only one rounding error. The grs bits are collected in #
+# the INEX flag for use in A10. #
+# #
+# A9. Scale X -> Y. #
+# The mantissa is scaled to the desired number of #
+# significant digits. The excess digits are collected #
+# in INEX2. #
+# #
+# A10. Or in INEX. #
+# If INEX is set, round error occurred. This is #
+# compensated for by 'or-ing' in the INEX2 flag to #
+# the lsb of Y. #
+# #
+# A11. Restore original FPCR; set size ext. #
+# Perform FINT operation in the user's rounding mode. #
+# Keep the size to extended. #
+# #
+# A12. Calculate YINT = FINT(Y) according to user's rounding #
+# mode. The FPSP routine sintd0 is used. The output #
+# is in fp0. #
+# #
+# A13. Check for LEN digits. #
+# If the int operation results in more than LEN digits, #
+# or less than LEN -1 digits, adjust ILOG and repeat from #
+# A6. This test occurs only on the first pass. If the #
+# result is exactly 10^LEN, decrement ILOG and divide #
+# the mantissa by 10. #
+# #
+# A14. Convert the mantissa to bcd. #
+# The binstr routine is used to convert the LEN digit #
+# mantissa to bcd in memory. The input to binstr is #
+# to be a fraction; i.e. (mantissa)/10^LEN and adjusted #
+# such that the decimal point is to the left of bit 63. #
+# The bcd digits are stored in the correct position in #
+# the final string area in memory. #
+# #
+# A15. Convert the exponent to bcd. #
+# As in A14 above, the exp is converted to bcd and the #
+# digits are stored in the final string. #
+# Test the length of the final exponent string. If the #
+# length is 4, set operr. #
+# #
+# A16. Write sign bits to final string. #
+# #
+#########################################################################
+
+set BINDEC_FLG, EXC_TEMP # DENORM flag
+
+# Constants in extended precision
+PLOG2:
+ long 0x3FFD0000,0x9A209A84,0xFBCFF798,0x00000000
+PLOG2UP1:
+ long 0x3FFD0000,0x9A209A84,0xFBCFF799,0x00000000
+
+# Constants in single precision
+FONE:
+ long 0x3F800000,0x00000000,0x00000000,0x00000000
+FTWO:
+ long 0x40000000,0x00000000,0x00000000,0x00000000
+FTEN:
+ long 0x41200000,0x00000000,0x00000000,0x00000000
+F4933:
+ long 0x459A2800,0x00000000,0x00000000,0x00000000
+
+RBDTBL:
+ byte 0,0,0,0
+ byte 3,3,2,2
+ byte 3,2,2,3
+ byte 2,3,3,2
+
+# Implementation Notes:
+#
+# The registers are used as follows:
+#
+# d0: scratch; LEN input to binstr
+# d1: scratch
+# d2: upper 32-bits of mantissa for binstr
+# d3: scratch;lower 32-bits of mantissa for binstr
+# d4: LEN
+# d5: LAMBDA/ICTR
+# d6: ILOG
+# d7: k-factor
+# a0: ptr for original operand/final result
+# a1: scratch pointer
+# a2: pointer to FP_X; abs(original value) in ext
+# fp0: scratch
+# fp1: scratch
+# fp2: scratch
+# F_SCR1:
+# F_SCR2:
+# L_SCR1:
+# L_SCR2:
+
+ global bindec
+bindec:
+ movm.l &0x3f20,-(%sp) # {%d2-%d7/%a2}
+ fmovm.x &0x7,-(%sp) # {%fp0-%fp2}
+
+# A1. Set RM and size ext. Set SIGMA = sign input;
+# The k-factor is saved for use in d7. Clear BINDEC_FLG for
+# separating normalized/denormalized input. If the input
+# is a denormalized number, set the BINDEC_FLG memory word
+# to signal denorm. If the input is unnormalized, normalize
+# the input and test for denormalized result.
+#
+ fmov.l &rm_mode*0x10,%fpcr # set RM and ext
+ mov.l (%a0),L_SCR2(%a6) # save exponent for sign check
+ mov.l %d0,%d7 # move k-factor to d7
+
+ clr.b BINDEC_FLG(%a6) # clr norm/denorm flag
+ cmpi.b STAG(%a6),&DENORM # is input a DENORM?
+ bne.w A2_str # no; input is a NORM
+
+#
+# Normalize the denorm
+#
+un_de_norm:
+ mov.w (%a0),%d0
+ and.w &0x7fff,%d0 # strip sign of normalized exp
+ mov.l 4(%a0),%d1
+ mov.l 8(%a0),%d2
+norm_loop:
+ sub.w &1,%d0
+ lsl.l &1,%d2
+ roxl.l &1,%d1
+ tst.l %d1
+ bge.b norm_loop
+#
+# Test if the normalized input is denormalized
+#
+ tst.w %d0
+ bgt.b pos_exp # if greater than zero, it is a norm
+ st BINDEC_FLG(%a6) # set flag for denorm
+pos_exp:
+ and.w &0x7fff,%d0 # strip sign of normalized exp
+ mov.w %d0,(%a0)
+ mov.l %d1,4(%a0)
+ mov.l %d2,8(%a0)
+
+# A2. Set X = abs(input).
+#
+A2_str:
+ mov.l (%a0),FP_SCR1(%a6) # move input to work space
+ mov.l 4(%a0),FP_SCR1+4(%a6) # move input to work space
+ mov.l 8(%a0),FP_SCR1+8(%a6) # move input to work space
+ and.l &0x7fffffff,FP_SCR1(%a6) # create abs(X)
+
+# A3. Compute ILOG.
+# ILOG is the log base 10 of the input value. It is approx-
+# imated by adding e + 0.f when the original value is viewed
+# as 2^^e * 1.f in extended precision. This value is stored
+# in d6.
+#
+# Register usage:
+# Input/Output
+# d0: k-factor/exponent
+# d2: x/x
+# d3: x/x
+# d4: x/x
+# d5: x/x
+# d6: x/ILOG
+# d7: k-factor/Unchanged
+# a0: ptr for original operand/final result
+# a1: x/x
+# a2: x/x
+# fp0: x/float(ILOG)
+# fp1: x/x
+# fp2: x/x
+# F_SCR1:x/x
+# F_SCR2:Abs(X)/Abs(X) with $3fff exponent
+# L_SCR1:x/x
+# L_SCR2:first word of X packed/Unchanged
+
+ tst.b BINDEC_FLG(%a6) # check for denorm
+ beq.b A3_cont # if clr, continue with norm
+ mov.l &-4933,%d6 # force ILOG = -4933
+ bra.b A4_str
+A3_cont:
+ mov.w FP_SCR1(%a6),%d0 # move exp to d0
+ mov.w &0x3fff,FP_SCR1(%a6) # replace exponent with 0x3fff
+ fmov.x FP_SCR1(%a6),%fp0 # now fp0 has 1.f
+ sub.w &0x3fff,%d0 # strip off bias
+ fadd.w %d0,%fp0 # add in exp
+ fsub.s FONE(%pc),%fp0 # subtract off 1.0
+ fbge.w pos_res # if pos, branch
+ fmul.x PLOG2UP1(%pc),%fp0 # if neg, mul by LOG2UP1
+ fmov.l %fp0,%d6 # put ILOG in d6 as a lword
+ bra.b A4_str # go move out ILOG
+pos_res:
+ fmul.x PLOG2(%pc),%fp0 # if pos, mul by LOG2
+ fmov.l %fp0,%d6 # put ILOG in d6 as a lword
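+
+# In C terms, the approximation above is roughly (a sketch; PLOG2 and
+# PLOG2UP1 hold log10(2) rounded down and up, picked by the sign of
+# the operand so the truncation always errs the same way):
+#
+#	/* input viewed as 2^e * 1.f, so log10(x) ~= (e + 0.f)*log10(2) */
+#	ilog = (int)(((double)e + f) * log10_2);	/* f in [0,1) */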
+
+
+# A4. Clr INEX bit.
+# The operation in A3 above may have set INEX2.
+
+A4_str:
+ fmov.l &0,%fpsr # zero all of fpsr - nothing needed
+
+
+# A5. Set ICTR = 0;
+# ICTR is a flag used in A13. It must be set before the
+# loop entry A6. The lower word of d5 is used for ICTR.
+
+ clr.w %d5 # clear ICTR
+
+# A6. Calculate LEN.
+# LEN is the number of digits to be displayed. The k-factor
+# can dictate either the total number of digits, if it is
+# a positive number, or the number of digits after the
+# original decimal point which are to be included as
+# significant. See the 68882 manual for examples.
+# If LEN is computed to be greater than 17, set OPERR in
+# USER_FPSR. LEN is stored in d4.
+#
+# Register usage:
+# Input/Output
+# d0: exponent/Unchanged
+# d2: x/x/scratch
+# d3: x/x
+# d4: exc picture/LEN
+# d5: ICTR/Unchanged
+# d6: ILOG/Unchanged
+# d7: k-factor/Unchanged
+# a0: ptr for original operand/final result
+# a1: x/x
+# a2: x/x
+# fp0: float(ILOG)/Unchanged
+# fp1: x/x
+# fp2: x/x
+# F_SCR1:x/x
+# F_SCR2:Abs(X) with $3fff exponent/Unchanged
+# L_SCR1:x/x
+# L_SCR2:first word of X packed/Unchanged
+
+A6_str:
+ tst.l %d7 # branch on sign of k
+ ble.b k_neg # if k <= 0, LEN = ILOG + 1 - k
+ mov.l %d7,%d4 # if k > 0, LEN = k
+ bra.b len_ck # skip to LEN check
+k_neg:
+ mov.l %d6,%d4 # first load ILOG to d4
+ sub.l %d7,%d4 # subtract off k
+ addq.l &1,%d4 # add in the 1
+len_ck:
+ tst.l %d4 # LEN check: branch on sign of LEN
+ ble.b LEN_ng # if neg, set LEN = 1
+ cmp.l %d4,&17 # test if LEN > 17
+ ble.b A7_str # if not, forget it
+ mov.l &17,%d4 # set max LEN = 17
+ tst.l %d7 # if negative, never set OPERR
+ ble.b A7_str # if positive, continue
+ or.l &opaop_mask,USER_FPSR(%a6) # set OPERR & AIOP in USER_FPSR
+ bra.b A7_str # finished here
+LEN_ng:
+ mov.l &1,%d4 # min LEN is 1
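+
+# Compactly, as a C sketch (names mirror the register comments):
+#
+#	len = (k > 0) ? k : ilog + 1 - k;
+#	if (len <= 0)
+#		len = 1;
+#	else if (len > 17) {
+#		len = 17;
+#		if (k > 0)
+#			operr = 1;	/* flag OPERR/AIOP in USER_FPSR */
+#	}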
+
+
+# A7. Calculate SCALE.
+# SCALE is equal to 10^ISCALE, where ISCALE is the number
+# of decimal places needed to insure LEN integer digits
+# in the output before conversion to bcd. LAMBDA is the sign
+# of ISCALE, used in A9. Fp1 contains 10^^(abs(ISCALE)) using
+# the rounding mode as given in the following table (see
+# Coonen, p. 7.23 as ref.; however, the SCALE variable is
+# of opposite sign in bindec.sa from Coonen).
+#
+# Initial USE
+# FPCR[6:5] LAMBDA SIGN(X) FPCR[6:5]
+# ----------------------------------------------
+# RN 00 0 0 00/0 RN
+# RN 00 0 1 00/0 RN
+# RN 00 1 0 00/0 RN
+# RN 00 1 1 00/0 RN
+# RZ 01 0 0 11/3 RP
+# RZ 01 0 1 11/3 RP
+# RZ 01 1 0 10/2 RM
+# RZ 01 1 1 10/2 RM
+# RM 10 0 0 11/3 RP
+# RM 10 0 1 10/2 RM
+# RM 10 1 0 10/2 RM
+# RM 10 1 1 11/3 RP
+# RP 11 0 0 10/2 RM
+# RP 11 0 1 11/3 RP
+# RP 11 1 0 11/3 RP
+# RP 11 1 1 10/2 RM
+#
+# Register usage:
+# Input/Output
+# d0: exponent/scratch - final is 0
+# d2: x/0 or 24 for A9
+# d3: x/scratch - offset ptr into PTENRM array
+# d4: LEN/Unchanged
+# d5: 0/ICTR:LAMBDA
+# d6: ILOG/ILOG or k if ((k<=0)&(ILOG<k))
+# d7: k-factor/Unchanged
+# a0: ptr for original operand/final result
+# a1: x/ptr to PTENRM array
+# a2: x/x
+# fp0: float(ILOG)/Unchanged
+# fp1: x/10^ISCALE
+# fp2: x/x
+# F_SCR1:x/x
+# F_SCR2:Abs(X) with $3fff exponent/Unchanged
+# L_SCR1:x/x
+# L_SCR2:first word of X packed/Unchanged
+
+A7_str:
+ tst.l %d7 # test sign of k
+ bgt.b k_pos # if pos and > 0, skip this
+ cmp.l %d7,%d6 # test k - ILOG
+ blt.b k_pos # if ILOG >= k, skip this
+ mov.l %d7,%d6 # if ((k<0) & (ILOG < k)) ILOG = k
+k_pos:
+ mov.l %d6,%d0 # calc ILOG + 1 - LEN in d0
+ addq.l &1,%d0 # add the 1
+ sub.l %d4,%d0 # sub off LEN
+ swap %d5 # use upper word of d5 for LAMBDA
+ clr.w %d5 # set it zero initially
+ clr.w %d2 # set up d2 for very small case
+ tst.l %d0 # test sign of ISCALE
+ bge.b iscale # if pos, skip next inst
+ addq.w &1,%d5 # if neg, set LAMBDA true
+ cmp.l %d0,&0xffffecd4 # test iscale <= -4908
+ bgt.b no_inf # if false, skip rest
+ add.l &24,%d0 # add in 24 to iscale
+ mov.l &24,%d2 # put 24 in d2 for A9
+no_inf:
+ neg.l %d0 # and take abs of ISCALE
+iscale:
+ fmov.s FONE(%pc),%fp1 # init fp1 to 1
+ bfextu USER_FPCR(%a6){&26:&2},%d1 # get initial rmode bits
+ lsl.w &1,%d1 # put them in bits 2:1
+ add.w %d5,%d1 # add in LAMBDA
+ lsl.w &1,%d1 # put them in bits 3:1
+ tst.l L_SCR2(%a6) # test sign of original x
+ bge.b x_pos # if pos, don't set bit 0
+ addq.l &1,%d1 # if neg, set bit 0
+x_pos:
+ lea.l RBDTBL(%pc),%a2 # load rbdtbl base
+ mov.b (%a2,%d1),%d3 # load d3 with new rmode
+ lsl.l &4,%d3 # put bits in proper position
+ fmov.l %d3,%fpcr # load bits into fpu
+ lsr.l &4,%d3 # put bits in proper position
+ tst.b %d3 # decode new rmode for pten table
+ bne.b not_rn # if zero, it is RN
+ lea.l PTENRN(%pc),%a1 # load a1 with RN table base
+ bra.b rmode # exit decode
+not_rn:
+ lsr.b &1,%d3 # get lsb in carry
+ bcc.b not_rp2 # if carry clear, it is RM
+ lea.l PTENRP(%pc),%a1 # load a1 with RP table base
+ bra.b rmode # exit decode
+not_rp2:
+ lea.l PTENRM(%pc),%a1 # load a1 with RM table base
+rmode:
+ clr.l %d3 # clr table index
+e_loop2:
+ lsr.l &1,%d0 # shift next bit into carry
+ bcc.b e_next2 # if zero, skip the mul
+ fmul.x (%a1,%d3),%fp1 # mul by 10**(d3_bit_no)
+e_next2:
+ add.l &12,%d3 # inc d3 to next pwrten table entry
+ tst.l %d0 # test if ISCALE is zero
+ bne.b e_loop2 # if not, loop
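+
+# The scale selection above, as a hedged C sketch (pow10_r stands in
+# for the table-driven loop just executed):
+#
+#	if (k <= 0 && ilog <= k)
+#		ilog = k;		/* clamp ILOG */
+#	iscale = ilog + 1 - len;
+#	lambda = (iscale < 0);
+#	if (iscale <= -4908) {
+#		iscale += 24;		/* keep pwrten from overflowing; */
+#		d2 = 24;		/* A9 multiplies back by 10^24 */
+#	}
+#	scale = pow10_r(abs(iscale), rbdtbl_mode);	/* lands in fp1 */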
+
+# A8. Clr INEX; Force RZ.
+# The operation in A3 above may have set INEX2.
+# RZ mode is forced for the scaling operation to insure
+# only one rounding error. The grs bits are collected in
+# the INEX flag for use in A10.
+#
+# Register usage:
+# Input/Output
+
+ fmov.l &0,%fpsr # clr INEX
+ fmov.l &rz_mode*0x10,%fpcr # set RZ rounding mode
+
+# A9. Scale X -> Y.
+# The mantissa is scaled to the desired number of significant
+# digits. The excess digits are collected in INEX2. If scaling
+# by mul, check d2 for an excess power-of-ten value. If it is
+# non-zero, the iscale value would have caused the pwrten
+# calculation to overflow. Only a negative iscale can cause this,
+# so compensate by multiplying by 10^(d2), which is only allowed
+# to be 24, done as a multiply by 10^8 and 10^16, exact since
+# 10^24 is exact. If the input was denormalized, we must
+# create a busy stack frame with the mul command and the
+# two operands, and allow the fpu to complete the multiply.
+#
+# Register usage:
+# Input/Output
+# d0: FPCR with RZ mode/Unchanged
+# d2: 0 or 24/unchanged
+# d3: x/x
+# d4: LEN/Unchanged
+# d5: ICTR:LAMBDA
+# d6: ILOG/Unchanged
+# d7: k-factor/Unchanged
+# a0: ptr for original operand/final result
+# a1: ptr to PTENRM array/Unchanged
+# a2: x/x
+# fp0: float(ILOG)/X adjusted for SCALE (Y)
+# fp1: 10^ISCALE/Unchanged
+# fp2: x/x
+# F_SCR1:x/x
+# F_SCR2:Abs(X) with $3fff exponent/Unchanged
+# L_SCR1:x/x
+# L_SCR2:first word of X packed/Unchanged
+
+A9_str:
+ fmov.x (%a0),%fp0 # load X from memory
+ fabs.x %fp0 # use abs(X)
+ tst.w %d5 # LAMBDA is in lower word of d5
+ bne.b sc_mul # if neg (LAMBDA = 1), scale by mul
+ fdiv.x %fp1,%fp0 # calculate X / SCALE -> Y to fp0
+ bra.w A10_st # branch to A10
+
+sc_mul:
+ tst.b BINDEC_FLG(%a6) # check for denorm
+ beq.w A9_norm # if norm, continue with mul
+
+# for DENORM, we must calculate:
+# fp0 = input_op * 10^ISCALE * 10^24
+# since the input operand is a DENORM, we can't multiply it directly.
+# so, we do the multiplication of the exponents and mantissas separately.
+# in this way, we avoid underflow on intermediate stages of the
+# multiplication and guarantee a result without exception.
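+#
+# (A sketch of the exponent bookkeeping below, in C terms: the biased
+# exponents of the denorm, 10^ISCALE, 10^8 and 10^16 are summed with
+# the extra biases backed out; if the combined exponent would still
+# be denormal, we punt:)
+#
+#	e = e_in + e_scale - BIAS + e_10p8 - BIAS + e_10p16 - BIAS;
+#	if (e < 0)
+#		punt();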
+ fmovm.x &0x2,-(%sp) # save 10^ISCALE to stack
+
+ mov.w (%sp),%d3 # grab exponent
+ andi.w &0x7fff,%d3 # clear sign
+ ori.w &0x8000,(%a0) # make DENORM exp negative
+ add.w (%a0),%d3 # add DENORM exp to 10^ISCALE exp
+ subi.w &0x3fff,%d3 # subtract BIAS
+	add.w		36(%a1),%d3	# add exponent of 10^8
+	subi.w		&0x3fff,%d3	# subtract BIAS
+	add.w		48(%a1),%d3	# add exponent of 10^16
+ subi.w &0x3fff,%d3 # subtract BIAS
+
+	bmi.w		sc_mul_err	# if result is DENORM, punt!!!
+
+ andi.w &0x8000,(%sp) # keep sign
+ or.w %d3,(%sp) # insert new exponent
+ andi.w &0x7fff,(%a0) # clear sign bit on DENORM again
+ mov.l 0x8(%a0),-(%sp) # put input op mantissa on stk
+ mov.l 0x4(%a0),-(%sp)
+ mov.l &0x3fff0000,-(%sp) # force exp to zero
+ fmovm.x (%sp)+,&0x80 # load normalized DENORM into fp0
+ fmul.x (%sp)+,%fp0
+
+# fmul.x 36(%a1),%fp0 # multiply fp0 by 10^8
+# fmul.x 48(%a1),%fp0 # multiply fp0 by 10^16
+ mov.l 36+8(%a1),-(%sp) # get 10^8 mantissa
+ mov.l 36+4(%a1),-(%sp)
+ mov.l &0x3fff0000,-(%sp) # force exp to zero
+ mov.l 48+8(%a1),-(%sp) # get 10^16 mantissa
+ mov.l 48+4(%a1),-(%sp)
+	mov.l		&0x3fff0000,-(%sp) # force exp to zero
+ fmul.x (%sp)+,%fp0 # multiply fp0 by 10^8
+ fmul.x (%sp)+,%fp0 # multiply fp0 by 10^16
+ bra.b A10_st
+
+sc_mul_err:
+ bra.b sc_mul_err
+
+A9_norm:
+ tst.w %d2 # test for small exp case
+ beq.b A9_con # if zero, continue as normal
+ fmul.x 36(%a1),%fp0 # multiply fp0 by 10^8
+ fmul.x 48(%a1),%fp0 # multiply fp0 by 10^16
+A9_con:
+ fmul.x %fp1,%fp0 # calculate X * SCALE -> Y to fp0
+
+# A10. Or in INEX.
+# If INEX is set, round error occurred. This is compensated
+# for by 'or-ing' in the INEX2 flag to the lsb of Y.
+#
+# Register usage:
+# Input/Output
+# d0: FPCR with RZ mode/FPSR with INEX2 isolated
+# d2: x/x
+# d3: x/x
+# d4: LEN/Unchanged
+# d5: ICTR:LAMBDA
+# d6: ILOG/Unchanged
+# d7: k-factor/Unchanged
+# a0: ptr for original operand/final result
+# a1: ptr to PTENxx array/Unchanged
+# a2: x/ptr to FP_SCR1(a6)
+# fp0: Y/Y with lsb adjusted
+# fp1: 10^ISCALE/Unchanged
+# fp2: x/x
+
+A10_st:
+ fmov.l %fpsr,%d0 # get FPSR
+ fmov.x %fp0,FP_SCR1(%a6) # move Y to memory
+ lea.l FP_SCR1(%a6),%a2 # load a2 with ptr to FP_SCR1
+ btst &9,%d0 # check if INEX2 set
+ beq.b A11_st # if clear, skip rest
+ or.l &1,8(%a2) # or in 1 to lsb of mantissa
+ fmov.x FP_SCR1(%a6),%fp0 # write adjusted Y back to fpu
+
+
+# A11. Restore original FPCR; set size ext.
+# Perform FINT operation in the user's rounding mode. Keep
+# the size to extended. The sintdo entry point in the sint
+# routine expects the FPCR value to be in USER_FPCR for
+# mode and precision. The original FPCR is saved in L_SCR1.
+
+A11_st:
+ mov.l USER_FPCR(%a6),L_SCR1(%a6) # save it for later
+ and.l &0x00000030,USER_FPCR(%a6) # set size to ext,
+# ;block exceptions
+
+
+# A12. Calculate YINT = FINT(Y) according to user's rounding mode.
+# The FPSP routine sintd0 is used. The output is in fp0.
+#
+# Register usage:
+# Input/Output
+# d0: FPSR with AINEX cleared/FPCR with size set to ext
+# d2: x/x/scratch
+# d3: x/x
+# d4: LEN/Unchanged
+# d5: ICTR:LAMBDA/Unchanged
+# d6: ILOG/Unchanged
+# d7: k-factor/Unchanged
+# a0: ptr for original operand/src ptr for sintdo
+# a1: ptr to PTENxx array/Unchanged
+# a2: ptr to FP_SCR1(a6)/Unchanged
+# a6: temp pointer to FP_SCR1(a6) - orig value saved and restored
+# fp0: Y/YINT
+# fp1: 10^ISCALE/Unchanged
+# fp2: x/x
+# F_SCR1:x/x
+# F_SCR2:Y adjusted for inex/Y with original exponent
+# L_SCR1:x/original USER_FPCR
+# L_SCR2:first word of X packed/Unchanged
+
+A12_st:
+ movm.l &0xc0c0,-(%sp) # save regs used by sintd0 {%d0-%d1/%a0-%a1}
+ mov.l L_SCR1(%a6),-(%sp)
+ mov.l L_SCR2(%a6),-(%sp)
+
+ lea.l FP_SCR1(%a6),%a0 # a0 is ptr to FP_SCR1(a6)
+ fmov.x %fp0,(%a0) # move Y to memory at FP_SCR1(a6)
+ tst.l L_SCR2(%a6) # test sign of original operand
+ bge.b do_fint12 # if pos, use Y
+ or.l &0x80000000,(%a0) # if neg, use -Y
+do_fint12:
+ mov.l USER_FPSR(%a6),-(%sp)
+# bsr sintdo # sint routine returns int in fp0
+
+ fmov.l USER_FPCR(%a6),%fpcr
+ fmov.l &0x0,%fpsr # clear the AEXC bits!!!
+## mov.l USER_FPCR(%a6),%d0 # ext prec/keep rnd mode
+## andi.l &0x00000030,%d0
+## fmov.l %d0,%fpcr
+ fint.x FP_SCR1(%a6),%fp0 # do fint()
+ fmov.l %fpsr,%d0
+ or.w %d0,FPSR_EXCEPT(%a6)
+## fmov.l &0x0,%fpcr
+## fmov.l %fpsr,%d0 # don't keep ccodes
+## or.w %d0,FPSR_EXCEPT(%a6)
+
+ mov.b (%sp),USER_FPSR(%a6)
+ add.l &4,%sp
+
+ mov.l (%sp)+,L_SCR2(%a6)
+ mov.l (%sp)+,L_SCR1(%a6)
+ movm.l (%sp)+,&0x303 # restore regs used by sint {%d0-%d1/%a0-%a1}
+
+ mov.l L_SCR2(%a6),FP_SCR1(%a6) # restore original exponent
+ mov.l L_SCR1(%a6),USER_FPCR(%a6) # restore user's FPCR
+
+# A13. Check for LEN digits.
+# If the int operation results in more than LEN digits,
+# or less than LEN -1 digits, adjust ILOG and repeat from
+# A6. This test occurs only on the first pass. If the
+# result is exactly 10^LEN, decrement ILOG and divide
+# the mantissa by 10. The calculation of 10^LEN cannot
+# be inexact, since all powers of ten up to 10^27 are exact
+# in extended precision, so the use of a previous power-of-ten
+# table will introduce no error.
+#
+#
+# Register usage:
+# Input/Output
+# d0: FPCR with size set to ext/scratch final = 0
+# d2: x/x
+# d3: x/scratch final = x
+# d4: LEN/LEN adjusted
+# d5: ICTR:LAMBDA/LAMBDA:ICTR
+# d6: ILOG/ILOG adjusted
+# d7: k-factor/Unchanged
+# a0: pointer into memory for packed bcd string formation
+# a1: ptr to PTENxx array/Unchanged
+# a2: ptr to FP_SCR1(a6)/Unchanged
+# fp0: int portion of Y/abs(YINT) adjusted
+# fp1: 10^ISCALE/Unchanged
+# fp2: x/10^LEN
+# F_SCR1:x/x
+# F_SCR2:Y with original exponent/Unchanged
+# L_SCR1:original USER_FPCR/Unchanged
+# L_SCR2:first word of X packed/Unchanged
+
+A13_st:
+ swap %d5 # put ICTR in lower word of d5
+ tst.w %d5 # check if ICTR = 0
+ bne not_zr # if non-zero, go to second test
+#
+# Compute 10^(LEN-1)
+#
+ fmov.s FONE(%pc),%fp2 # init fp2 to 1.0
+ mov.l %d4,%d0 # put LEN in d0
+ subq.l &1,%d0 # d0 = LEN -1
+ clr.l %d3 # clr table index
+l_loop:
+ lsr.l &1,%d0 # shift next bit into carry
+ bcc.b l_next # if zero, skip the mul
+ fmul.x (%a1,%d3),%fp2 # mul by 10**(d3_bit_no)
+l_next:
+ add.l &12,%d3 # inc d3 to next pwrten table entry
+ tst.l %d0 # test if LEN is zero
+ bne.b l_loop # if not, loop
+#
+# 10^LEN-1 is computed for this test and A14. If the input was
+# denormalized, check only the case in which YINT > 10^LEN.
+#
+ tst.b BINDEC_FLG(%a6) # check if input was norm
+ beq.b A13_con # if norm, continue with checking
+ fabs.x %fp0 # take abs of YINT
+ bra test_2
+#
+# Compare abs(YINT) to 10^(LEN-1) and 10^LEN
+#
+A13_con:
+ fabs.x %fp0 # take abs of YINT
+ fcmp.x %fp0,%fp2 # compare abs(YINT) with 10^(LEN-1)
+ fbge.w test_2 # if greater, do next test
+ subq.l &1,%d6 # subtract 1 from ILOG
+ mov.w &1,%d5 # set ICTR
+ fmov.l &rm_mode*0x10,%fpcr # set rmode to RM
+ fmul.s FTEN(%pc),%fp2 # compute 10^LEN
+ bra.w A6_str # return to A6 and recompute YINT
+test_2:
+ fmul.s FTEN(%pc),%fp2 # compute 10^LEN
+ fcmp.x %fp0,%fp2 # compare abs(YINT) with 10^LEN
+ fblt.w A14_st # if less, all is ok, go to A14
+ fbgt.w fix_ex # if greater, fix and redo
+ fdiv.s FTEN(%pc),%fp0 # if equal, divide by 10
+ addq.l &1,%d6 # and inc ILOG
+ bra.b A14_st # and continue elsewhere
+fix_ex:
+ addq.l &1,%d6 # increment ILOG by 1
+ mov.w &1,%d5 # set ICTR
+ fmov.l &rm_mode*0x10,%fpcr # set rmode to RM
+ bra.w A6_str # return to A6 and recompute YINT
+#
+# Since ICTR <> 0, we have already been through one adjustment,
+# and shouldn't have another; this is to check if abs(YINT) = 10^LEN
+# 10^LEN is again computed using whatever table is in a1 since the
+# value calculated cannot be inexact.
+#
+not_zr:
+ fmov.s FONE(%pc),%fp2 # init fp2 to 1.0
+ mov.l %d4,%d0 # put LEN in d0
+ clr.l %d3 # clr table index
+z_loop:
+ lsr.l &1,%d0 # shift next bit into carry
+ bcc.b z_next # if zero, skip the mul
+ fmul.x (%a1,%d3),%fp2 # mul by 10**(d3_bit_no)
+z_next:
+ add.l &12,%d3 # inc d3 to next pwrten table entry
+ tst.l %d0 # test if LEN is zero
+ bne.b z_loop # if not, loop
+ fabs.x %fp0 # get abs(YINT)
+ fcmp.x %fp0,%fp2 # check if abs(YINT) = 10^LEN
+ fbneq.w A14_st # if not, skip this
+ fdiv.s FTEN(%pc),%fp0 # divide abs(YINT) by 10
+ addq.l &1,%d6 # and inc ILOG by 1
+ addq.l &1,%d4 # and inc LEN
+	fmul.s		FTEN(%pc),%fp2	# LEN was inc'd, so get 10^LEN
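+
+# The A13 adjustment logic, roughly, as a C sketch:
+#
+#	if (first_pass) {
+#		if (fabs(yint) < pow10(len - 1)) { ilog--; goto A6; }
+#		if (fabs(yint) > pow10(len))     { ilog++; goto A6; }
+#		if (fabs(yint) == pow10(len))    { yint /= 10; ilog++; }
+#	} else if (fabs(yint) == pow10(len)) {
+#		yint /= 10; ilog++; len++;	/* no further retries */
+#	}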
+
+# A14. Convert the mantissa to bcd.
+# The binstr routine is used to convert the LEN digit
+# mantissa to bcd in memory. The input to binstr is
+# to be a fraction; i.e. (mantissa)/10^LEN and adjusted
+# such that the decimal point is to the left of bit 63.
+# The bcd digits are stored in the correct position in
+# the final string area in memory.
+#
+#
+# Register usage:
+# Input/Output
+# d0: x/LEN call to binstr - final is 0
+# d1: x/0
+# d2: x/ms 32-bits of mant of abs(YINT)
+# d3: x/ls 32-bits of mant of abs(YINT)
+# d4: LEN/Unchanged
+# d5: ICTR:LAMBDA/LAMBDA:ICTR
+# d6: ILOG
+# d7: k-factor/Unchanged
+# a0: pointer into memory for packed bcd string formation
+# /ptr to first mantissa byte in result string
+# a1: ptr to PTENxx array/Unchanged
+# a2: ptr to FP_SCR1(a6)/Unchanged
+# fp0: int portion of Y/abs(YINT) adjusted
+# fp1: 10^ISCALE/Unchanged
+# fp2: 10^LEN/Unchanged
+# F_SCR1:x/Work area for final result
+# F_SCR2:Y with original exponent/Unchanged
+# L_SCR1:original USER_FPCR/Unchanged
+# L_SCR2:first word of X packed/Unchanged
+
+A14_st:
+ fmov.l &rz_mode*0x10,%fpcr # force rz for conversion
+ fdiv.x %fp2,%fp0 # divide abs(YINT) by 10^LEN
+ lea.l FP_SCR0(%a6),%a0
+ fmov.x %fp0,(%a0) # move abs(YINT)/10^LEN to memory
+ mov.l 4(%a0),%d2 # move 2nd word of FP_RES to d2
+ mov.l 8(%a0),%d3 # move 3rd word of FP_RES to d3
+ clr.l 4(%a0) # zero word 2 of FP_RES
+ clr.l 8(%a0) # zero word 3 of FP_RES
+ mov.l (%a0),%d0 # move exponent to d0
+ swap %d0 # put exponent in lower word
+ beq.b no_sft # if zero, don't shift
+ sub.l &0x3ffd,%d0 # sub bias less 2 to make fract
+ tst.l %d0 # check if > 1
+ bgt.b no_sft # if so, don't shift
+ neg.l %d0 # make exp positive
+m_loop:
+ lsr.l &1,%d2 # shift d2:d3 right, add 0s
+ roxr.l &1,%d3 # the number of places
+ dbf.w %d0,m_loop # given in d0
+no_sft:
+ tst.l %d2 # check for mantissa of zero
+ bne.b no_zr # if not, go on
+ tst.l %d3 # continue zero check
+ beq.b zer_m # if zero, go directly to binstr
+no_zr:
+ clr.l %d1 # put zero in d1 for addx
+ add.l &0x00000080,%d3 # inc at bit 7
+ addx.l %d1,%d2 # continue inc
+ and.l &0xffffff80,%d3 # strip off lsb not used by 882
+zer_m:
+ mov.l %d4,%d0 # put LEN in d0 for binstr call
+ addq.l &3,%a0 # a0 points to M16 byte in result
+ bsr binstr # call binstr to convert mant
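+
+# In C terms, A14 roughly does (a sketch; frac is a 64-bit integer
+# holding a 0.64 fixed-point fraction):
+#
+#	x = fabs(yint) / pow10(len);	/* < 1.0, rounded toward zero */
+#	frac = mantissa_bits(x);
+#	if (exp_of(x) < 0x3ffe)
+#		frac >>= 0x3ffe - exp_of(x);	/* point left of bit 63 */
+#	frac = (frac + 0x80) & ~0x7fULL;	/* round at bit 7, strip below */
+#	binstr(frac, len, &result[3]);		/* M16 byte of the string */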
+
+
+# A15. Convert the exponent to bcd.
+# As in A14 above, the exp is converted to bcd and the
+# digits are stored in the final string.
+#
+# Digits are stored in L_SCR1(a6) on return from BINDEC as:
+#
+# 32 16 15 0
+# -----------------------------------------
+# | 0 | e3 | e2 | e1 | e4 | X | X | X |
+# -----------------------------------------
+#
+# And are moved into their proper places in FP_SCR0. If digit e4
+# is non-zero, OPERR is signaled. In all cases, all 4 digits are
+# written as specified in the 881/882 manual for packed decimal.
+#
+# Register usage:
+# Input/Output
+# d0: x/LEN call to binstr - final is 0
+# d1: x/scratch (0);shift count for final exponent packing
+# d2: x/ms 32-bits of exp fraction/scratch
+# d3: x/ls 32-bits of exp fraction
+# d4: LEN/Unchanged
+# d5: ICTR:LAMBDA/LAMBDA:ICTR
+# d6: ILOG
+# d7: k-factor/Unchanged
+# a0: ptr to result string/ptr to L_SCR1(a6)
+# a1: ptr to PTENxx array/Unchanged
+# a2: ptr to FP_SCR1(a6)/Unchanged
+# fp0: abs(YINT) adjusted/float(ILOG)
+# fp1: 10^ISCALE/Unchanged
+# fp2: 10^LEN/Unchanged
+# F_SCR1:Work area for final result/BCD result
+# F_SCR2:Y with original exponent/ILOG/10^4
+# L_SCR1:original USER_FPCR/Exponent digits on return from binstr
+# L_SCR2:first word of X packed/Unchanged
+
+A15_st:
+ tst.b BINDEC_FLG(%a6) # check for denorm
+ beq.b not_denorm
+ ftest.x %fp0 # test for zero
+ fbeq.w den_zero # if zero, use k-factor or 4933
+ fmov.l %d6,%fp0 # float ILOG
+ fabs.x %fp0 # get abs of ILOG
+ bra.b convrt
+den_zero:
+ tst.l %d7 # check sign of the k-factor
+ blt.b use_ilog # if negative, use ILOG
+ fmov.s F4933(%pc),%fp0 # force exponent to 4933
+ bra.b convrt # do it
+use_ilog:
+ fmov.l %d6,%fp0 # float ILOG
+ fabs.x %fp0 # get abs of ILOG
+ bra.b convrt
+not_denorm:
+ ftest.x %fp0 # test for zero
+ fbneq.w not_zero # if zero, force exponent
+ fmov.s FONE(%pc),%fp0 # force exponent to 1
+ bra.b convrt # do it
+not_zero:
+ fmov.l %d6,%fp0 # float ILOG
+ fabs.x %fp0 # get abs of ILOG
+convrt:
+ fdiv.x 24(%a1),%fp0 # compute ILOG/10^4
+ fmov.x %fp0,FP_SCR1(%a6) # store fp0 in memory
+ mov.l 4(%a2),%d2 # move word 2 to d2
+ mov.l 8(%a2),%d3 # move word 3 to d3
+ mov.w (%a2),%d0 # move exp to d0
+ beq.b x_loop_fin # if zero, skip the shift
+ sub.w &0x3ffd,%d0 # subtract off bias
+ neg.w %d0 # make exp positive
+x_loop:
+ lsr.l &1,%d2 # shift d2:d3 right
+ roxr.l &1,%d3 # the number of places
+ dbf.w %d0,x_loop # given in d0
+x_loop_fin:
+ clr.l %d1 # put zero in d1 for addx
+	add.l		&0x00000080,%d3	# inc at bit 7
+ addx.l %d1,%d2 # continue inc
+ and.l &0xffffff80,%d3 # strip off lsb not used by 882
+ mov.l &4,%d0 # put 4 in d0 for binstr call
+ lea.l L_SCR1(%a6),%a0 # a0 is ptr to L_SCR1 for exp digits
+ bsr binstr # call binstr to convert exp
+ mov.l L_SCR1(%a6),%d0 # load L_SCR1 lword to d0
+ mov.l &12,%d1 # use d1 for shift count
+ lsr.l %d1,%d0 # shift d0 right by 12
+ bfins %d0,FP_SCR0(%a6){&4:&12} # put e3:e2:e1 in FP_SCR0
+ lsr.l %d1,%d0 # shift d0 right by 12
+ bfins %d0,FP_SCR0(%a6){&16:&4} # put e4 in FP_SCR0
+ tst.b %d0 # check if e4 is zero
+ beq.b A16_st # if zero, skip rest
+ or.l &opaop_mask,USER_FPSR(%a6) # set OPERR & AIOP in USER_FPSR
+
+
+# A16. Write sign bits to final string.
+# Sigma is bit 31 of initial value; RHO is bit 31 of d6 (ILOG).
+#
+# Register usage:
+# Input/Output
+# d0: x/scratch - final is x
+# d2: x/x
+# d3: x/x
+# d4: LEN/Unchanged
+# d5: ICTR:LAMBDA/LAMBDA:ICTR
+# d6: ILOG/ILOG adjusted
+# d7: k-factor/Unchanged
+# a0: ptr to L_SCR1(a6)/Unchanged
+# a1: ptr to PTENxx array/Unchanged
+# a2: ptr to FP_SCR1(a6)/Unchanged
+# fp0: float(ILOG)/Unchanged
+# fp1: 10^ISCALE/Unchanged
+# fp2: 10^LEN/Unchanged
+# F_SCR1:BCD result with correct signs
+# F_SCR2:ILOG/10^4
+# L_SCR1:Exponent digits on return from binstr
+# L_SCR2:first word of X packed/Unchanged
+
+A16_st:
+ clr.l %d0 # clr d0 for collection of signs
+ and.b &0x0f,FP_SCR0(%a6) # clear first nibble of FP_SCR0
+ tst.l L_SCR2(%a6) # check sign of original mantissa
+ bge.b mant_p # if pos, don't set SM
+ mov.l &2,%d0 # move 2 in to d0 for SM
+mant_p:
+ tst.l %d6 # check sign of ILOG
+ bge.b wr_sgn # if pos, don't set SE
+ addq.l &1,%d0 # set bit 0 in d0 for SE
+wr_sgn:
+ bfins %d0,FP_SCR0(%a6){&0:&2} # insert SM and SE into FP_SCR0
+
+# Clean up and restore all registers used.
+
+ fmov.l &0,%fpsr # clear possible inex2/ainex bits
+ fmovm.x (%sp)+,&0xe0 # {%fp0-%fp2}
+ movm.l (%sp)+,&0x4fc # {%d2-%d7/%a2}
+ rts
+
+ global PTENRN
+PTENRN:
+ long 0x40020000,0xA0000000,0x00000000 # 10 ^ 1
+ long 0x40050000,0xC8000000,0x00000000 # 10 ^ 2
+ long 0x400C0000,0x9C400000,0x00000000 # 10 ^ 4
+ long 0x40190000,0xBEBC2000,0x00000000 # 10 ^ 8
+ long 0x40340000,0x8E1BC9BF,0x04000000 # 10 ^ 16
+ long 0x40690000,0x9DC5ADA8,0x2B70B59E # 10 ^ 32
+ long 0x40D30000,0xC2781F49,0xFFCFA6D5 # 10 ^ 64
+ long 0x41A80000,0x93BA47C9,0x80E98CE0 # 10 ^ 128
+ long 0x43510000,0xAA7EEBFB,0x9DF9DE8E # 10 ^ 256
+ long 0x46A30000,0xE319A0AE,0xA60E91C7 # 10 ^ 512
+ long 0x4D480000,0xC9767586,0x81750C17 # 10 ^ 1024
+ long 0x5A920000,0x9E8B3B5D,0xC53D5DE5 # 10 ^ 2048
+ long 0x75250000,0xC4605202,0x8A20979B # 10 ^ 4096
+
+ global PTENRP
+PTENRP:
+ long 0x40020000,0xA0000000,0x00000000 # 10 ^ 1
+ long 0x40050000,0xC8000000,0x00000000 # 10 ^ 2
+ long 0x400C0000,0x9C400000,0x00000000 # 10 ^ 4
+ long 0x40190000,0xBEBC2000,0x00000000 # 10 ^ 8
+ long 0x40340000,0x8E1BC9BF,0x04000000 # 10 ^ 16
+ long 0x40690000,0x9DC5ADA8,0x2B70B59E # 10 ^ 32
+ long 0x40D30000,0xC2781F49,0xFFCFA6D6 # 10 ^ 64
+ long 0x41A80000,0x93BA47C9,0x80E98CE0 # 10 ^ 128
+ long 0x43510000,0xAA7EEBFB,0x9DF9DE8E # 10 ^ 256
+ long 0x46A30000,0xE319A0AE,0xA60E91C7 # 10 ^ 512
+ long 0x4D480000,0xC9767586,0x81750C18 # 10 ^ 1024
+ long 0x5A920000,0x9E8B3B5D,0xC53D5DE5 # 10 ^ 2048
+ long 0x75250000,0xC4605202,0x8A20979B # 10 ^ 4096
+
+ global PTENRM
+PTENRM:
+ long 0x40020000,0xA0000000,0x00000000 # 10 ^ 1
+ long 0x40050000,0xC8000000,0x00000000 # 10 ^ 2
+ long 0x400C0000,0x9C400000,0x00000000 # 10 ^ 4
+ long 0x40190000,0xBEBC2000,0x00000000 # 10 ^ 8
+ long 0x40340000,0x8E1BC9BF,0x04000000 # 10 ^ 16
+ long 0x40690000,0x9DC5ADA8,0x2B70B59D # 10 ^ 32
+ long 0x40D30000,0xC2781F49,0xFFCFA6D5 # 10 ^ 64
+ long 0x41A80000,0x93BA47C9,0x80E98CDF # 10 ^ 128
+ long 0x43510000,0xAA7EEBFB,0x9DF9DE8D # 10 ^ 256
+ long 0x46A30000,0xE319A0AE,0xA60E91C6 # 10 ^ 512
+ long 0x4D480000,0xC9767586,0x81750C17 # 10 ^ 1024
+ long 0x5A920000,0x9E8B3B5D,0xC53D5DE4 # 10 ^ 2048
+ long 0x75250000,0xC4605202,0x8A20979A # 10 ^ 4096
+
+#########################################################################
+# binstr(): Converts a 64-bit binary integer to bcd. #
+# #
+# INPUT *************************************************************** #
+# d2:d3 = 64-bit binary integer #
+# d0 = desired length (LEN) #
+# a0 = pointer to start in memory for bcd characters #
+# (This pointer must point to byte 4 of the first #
+# lword of the packed decimal memory string.) #
+# #
+# OUTPUT ************************************************************** #
+# a0 = pointer to LEN bcd digits representing the 64-bit integer. #
+# #
+# ALGORITHM *********************************************************** #
+# The 64-bit binary is assumed to have a decimal point before #
+# bit 63. The fraction is multiplied by 10 using a mul by 2 #
+# shift and a mul by 8 shift. The bits shifted out of the #
+# msb form a decimal digit. This process is iterated until #
+# LEN digits are formed. #
+# #
+# A1. Init d7 to 1. D7 is the byte digit counter, and if 1, the #
+# digit formed will be assumed the least significant. This is #
+# to force the first byte formed to have a 0 in the upper 4 bits. #
+# #
+# A2. Beginning of the loop: #
+# Copy the fraction in d2:d3 to d4:d5. #
+# #
+# A3. Multiply the fraction in d2:d3 by 8 using bit-field #
+# extracts and shifts. The three msbs from d2 will go into d1. #
+# #
+# A4. Multiply the fraction in d4:d5 by 2 using shifts. The msb #
+# will be collected by the carry. #
+# #
+# A5. Add using the carry the 64-bit quantities in d2:d3 and d4:d5 #
+# into d2:d3. D1 will contain the bcd digit formed. #
+# #
+# A6. Test d7. If zero, the digit formed is the ms digit. If non- #
+# zero, it is the ls digit. Put the digit in its place in the #
+# upper word of d0. If it is the ls digit, write the word #
+# from d0 to memory. #
+# #
+# A7. Decrement d6 (LEN counter) and repeat the loop until zero. #
+# #
+#########################################################################
+
+# Implementation Notes:
+#
+# The registers are used as follows:
+#
+# d0: LEN counter
+# d1: temp used to form the digit
+# d2: upper 32-bits of fraction for mul by 8
+# d3: lower 32-bits of fraction for mul by 8
+# d4: upper 32-bits of fraction for mul by 2
+# d5: lower 32-bits of fraction for mul by 2
+# d6: temp for bit-field extracts
+# d7: byte digit formation word;digit count {0,1}
+# a0: pointer into memory for packed bcd string formation
+#
+
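+# The per-digit step, sketched in C (hypothetical 64-bit fraction f;
+# __int128 captures the overflow bits that form the digit):
+#
+#	unsigned __int128 t = ((unsigned __int128)f << 3)	/* mul by 8 */
+#			    + ((unsigned __int128)f << 1);	/* mul by 2 */
+#	unsigned digit = (unsigned)(t >> 64);			/* 0..9 */
+#	f = (unsigned long long)t;		/* remaining fraction */
+#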
+ global binstr
+binstr:
+ movm.l &0xff00,-(%sp) # {%d0-%d7}
+
+#
+# A1: Init d7
+#
+ mov.l &1,%d7 # init d7 for second digit
+ subq.l &1,%d0 # for dbf d0 would have LEN+1 passes
+#
+# A2. Copy d2:d3 to d4:d5. Start loop.
+#
+loop:
+ mov.l %d2,%d4 # copy the fraction before muls
+ mov.l %d3,%d5 # to d4:d5
+#
+# A3. Multiply d2:d3 by 8; extract msbs into d1.
+#
+ bfextu %d2{&0:&3},%d1 # copy 3 msbs of d2 into d1
+ asl.l &3,%d2 # shift d2 left by 3 places
+ bfextu %d3{&0:&3},%d6 # copy 3 msbs of d3 into d6
+ asl.l &3,%d3 # shift d3 left by 3 places
+ or.l %d6,%d2 # or in msbs from d3 into d2
+#
+# A4. Multiply d4:d5 by 2; add carry out to d1.
+#
+ asl.l &1,%d5 # mul d5 by 2
+ roxl.l &1,%d4 # mul d4 by 2
+ swap %d6 # put 0 in d6 lower word
+ addx.w %d6,%d1 # add in extend from mul by 2
+#
+# A5. Add mul by 8 to mul by 2. D1 contains the digit formed.
+#
+ add.l %d5,%d3 # add lower 32 bits
+ nop # ERRATA FIX #13 (Rev. 1.2 6/6/90)
+ addx.l %d4,%d2 # add with extend upper 32 bits
+ nop # ERRATA FIX #13 (Rev. 1.2 6/6/90)
+ addx.w %d6,%d1 # add in extend from add to d1
+ swap %d6 # with d6 = 0; put 0 in upper word
+#
+# A6. Test d7 and branch.
+#
+ tst.w %d7 # if zero, store digit & to loop
+ beq.b first_d # if non-zero, form byte & write
+sec_d:
+ swap %d7 # bring first digit to word d7b
+ asl.w &4,%d7 # first digit in upper 4 bits d7b
+ add.w %d1,%d7 # add in ls digit to d7b
+ mov.b %d7,(%a0)+ # store d7b byte in memory
+ swap %d7 # put LEN counter in word d7a
+ clr.w %d7 # set d7a to signal no digits done
+ dbf.w %d0,loop # do loop some more!
+ bra.b end_bstr # finished, so exit
+first_d:
+ swap %d7 # put digit word in d7b
+ mov.w %d1,%d7 # put new digit in d7b
+ swap %d7 # put LEN counter in word d7a
+ addq.w &1,%d7 # set d7a to signal first digit done
+ dbf.w %d0,loop # do loop some more!
+ swap %d7 # put last digit in string
+ lsl.w &4,%d7 # move it to upper 4 bits
+ mov.b %d7,(%a0)+ # store it in memory string
+#
+# Clean up and return with the bcd string written to memory via a0.
+#
+end_bstr:
+ movm.l (%sp)+,&0xff # {%d0-%d7}
+ rts
+
+#########################################################################
+# XDEF **************************************************************** #
+# facc_in_b(): dmem_read_byte failed #
+# facc_in_w(): dmem_read_word failed #
+# facc_in_l(): dmem_read_long failed #
+# facc_in_d(): dmem_read of dbl prec failed #
+# facc_in_x(): dmem_read of ext prec failed #
+# #
+# facc_out_b(): dmem_write_byte failed #
+# facc_out_w(): dmem_write_word failed #
+# facc_out_l(): dmem_write_long failed #
+# facc_out_d(): dmem_write of dbl prec failed #
+# facc_out_x(): dmem_write of ext prec failed #
+# #
+# XREF **************************************************************** #
+# _real_access() - exit through access error handler #
+# #
+# INPUT *************************************************************** #
+# None #
+# #
+# OUTPUT ************************************************************** #
+# None #
+# #
+# ALGORITHM *********************************************************** #
+# Flow jumps here when an FP data fetch call gets an error #
+# result. This means the operating system wants an access error frame #
+# made out of the current exception stack frame. #
+# So, we first call restore() which makes sure that any updated #
+# -(an)+ register gets returned to its pre-exception value and then #
+# we change the stack to an access error stack frame. #
+# #
+#########################################################################
+
+facc_in_b:
+ movq.l &0x1,%d0 # one byte
+ bsr.w restore # fix An
+
+ mov.w &0x0121,EXC_VOFF(%a6) # set FSLW
+ bra.w facc_finish
+
+facc_in_w:
+ movq.l &0x2,%d0 # two bytes
+ bsr.w restore # fix An
+
+ mov.w &0x0141,EXC_VOFF(%a6) # set FSLW
+ bra.b facc_finish
+
+facc_in_l:
+ movq.l &0x4,%d0 # four bytes
+ bsr.w restore # fix An
+
+ mov.w &0x0101,EXC_VOFF(%a6) # set FSLW
+ bra.b facc_finish
+
+facc_in_d:
+ movq.l &0x8,%d0 # eight bytes
+ bsr.w restore # fix An
+
+ mov.w &0x0161,EXC_VOFF(%a6) # set FSLW
+ bra.b facc_finish
+
+facc_in_x:
+ movq.l &0xc,%d0 # twelve bytes
+ bsr.w restore # fix An
+
+ mov.w &0x0161,EXC_VOFF(%a6) # set FSLW
+ bra.b facc_finish
+
+################################################################
+
+facc_out_b:
+ movq.l &0x1,%d0 # one byte
+ bsr.w restore # restore An
+
+ mov.w &0x00a1,EXC_VOFF(%a6) # set FSLW
+ bra.b facc_finish
+
+facc_out_w:
+ movq.l &0x2,%d0 # two bytes
+ bsr.w restore # restore An
+
+ mov.w &0x00c1,EXC_VOFF(%a6) # set FSLW
+ bra.b facc_finish
+
+facc_out_l:
+ movq.l &0x4,%d0 # four bytes
+ bsr.w restore # restore An
+
+ mov.w &0x0081,EXC_VOFF(%a6) # set FSLW
+ bra.b facc_finish
+
+facc_out_d:
+ movq.l &0x8,%d0 # eight bytes
+ bsr.w restore # restore An
+
+ mov.w &0x00e1,EXC_VOFF(%a6) # set FSLW
+ bra.b facc_finish
+
+facc_out_x:
+ mov.l &0xc,%d0 # twelve bytes
+ bsr.w restore # restore An
+
+ mov.w &0x00e1,EXC_VOFF(%a6) # set FSLW
+
+# here's where we actually create the access error frame from the
+# current exception stack frame.
+facc_finish:
+ mov.l USER_FPIAR(%a6),EXC_PC(%a6) # store current PC
+
+ fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+
+ unlk %a6
+
+ mov.l (%sp),-(%sp) # store SR, hi(PC)
+ mov.l 0x8(%sp),0x4(%sp) # store lo(PC)
+ mov.l 0xc(%sp),0x8(%sp) # store EA
+ mov.l &0x00000001,0xc(%sp) # store FSLW
+ mov.w 0x6(%sp),0xc(%sp) # fix FSLW (size)
+ mov.w &0x4008,0x6(%sp) # store voff
+
+ btst &0x5,(%sp) # supervisor or user mode?
+ beq.b facc_out2 # user
+ bset &0x2,0xd(%sp) # set supervisor TM bit
+
+facc_out2:
+ bra.l _real_access
+
+##################################################################
+
+# if the effective addressing mode was predecrement or postincrement,
+# the emulation has already changed its value to the correct post-
+# instruction value. but since we're exiting to the access error
+# handler, An must be returned to its pre-instruction value.
+# we do that here.
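+#
+# i.e., roughly (C sketch; 'size' is the access size passed in d0):
+#
+#	if (mode == POSTINC)		/* (an)+ : undo the increment */
+#		an -= size;
+#	else if (mode == PREDEC)	/* -(an) : undo the decrement */
+#		an += size;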
+restore:
+ mov.b EXC_OPWORD+0x1(%a6),%d1
+ andi.b &0x38,%d1 # extract opmode
+ cmpi.b %d1,&0x18 # postinc?
+ beq.w rest_inc
+ cmpi.b %d1,&0x20 # predec?
+ beq.w rest_dec
+ rts
+
+rest_inc:
+ mov.b EXC_OPWORD+0x1(%a6),%d1
+ andi.w &0x0007,%d1 # fetch An
+
+ mov.w (tbl_rest_inc.b,%pc,%d1.w*2),%d1
+ jmp (tbl_rest_inc.b,%pc,%d1.w*1)
+
+tbl_rest_inc:
+ short ri_a0 - tbl_rest_inc
+ short ri_a1 - tbl_rest_inc
+ short ri_a2 - tbl_rest_inc
+ short ri_a3 - tbl_rest_inc
+ short ri_a4 - tbl_rest_inc
+ short ri_a5 - tbl_rest_inc
+ short ri_a6 - tbl_rest_inc
+ short ri_a7 - tbl_rest_inc
+
+ri_a0:
+ sub.l %d0,EXC_DREGS+0x8(%a6) # fix stacked a0
+ rts
+ri_a1:
+ sub.l %d0,EXC_DREGS+0xc(%a6) # fix stacked a1
+ rts
+ri_a2:
+ sub.l %d0,%a2 # fix a2
+ rts
+ri_a3:
+ sub.l %d0,%a3 # fix a3
+ rts
+ri_a4:
+ sub.l %d0,%a4 # fix a4
+ rts
+ri_a5:
+ sub.l %d0,%a5 # fix a5
+ rts
+ri_a6:
+ sub.l %d0,(%a6) # fix stacked a6
+ rts
+# if it's a fmove out instruction, we don't have to fix a7
+# because we hadn't changed it yet. if it's an opclass two
+# instruction (data moved in) and the exception was in supervisor
+# mode, then a7 also wasn't updated. if it was user mode, then
+# restore the correct a7 which is in the USP currently.
+ri_a7:
+ cmpi.b EXC_VOFF(%a6),&0x30 # move in or out?
+ bne.b ri_a7_done # out
+
+ btst &0x5,EXC_SR(%a6) # user or supervisor?
+ bne.b ri_a7_done # supervisor
+ movc %usp,%a0 # restore USP
+ sub.l %d0,%a0
+ movc %a0,%usp
+ri_a7_done:
+ rts
+
+# need to invert adjustment value if the <ea> was predec
+rest_dec:
+ neg.l %d0
+ bra.b rest_inc
diff --git a/arch/m68k/ifpsp060/src/ftest.S b/arch/m68k/ifpsp060/src/ftest.S
new file mode 100644
index 00000000000..2edcbae0fd5
--- /dev/null
+++ b/arch/m68k/ifpsp060/src/ftest.S
@@ -0,0 +1,1456 @@
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+MOTOROLA MICROPROCESSOR & MEMORY TECHNOLOGY GROUP
+M68000 Hi-Performance Microprocessor Division
+M68060 Software Package
+Production Release P1.00 -- October 10, 1994
+
+M68060 Software Package Copyright © 1993, 1994 Motorola Inc. All rights reserved.
+
+THE SOFTWARE is provided on an "AS IS" basis and without warranty.
+To the maximum extent permitted by applicable law,
+MOTOROLA DISCLAIMS ALL WARRANTIES WHETHER EXPRESS OR IMPLIED,
+INCLUDING IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
+and any warranty against infringement with regard to the SOFTWARE
+(INCLUDING ANY MODIFIED VERSIONS THEREOF) and any accompanying written materials.
+
+To the maximum extent permitted by applicable law,
+IN NO EVENT SHALL MOTOROLA BE LIABLE FOR ANY DAMAGES WHATSOEVER
+(INCLUDING WITHOUT LIMITATION, DAMAGES FOR LOSS OF BUSINESS PROFITS,
+BUSINESS INTERRUPTION, LOSS OF BUSINESS INFORMATION, OR OTHER PECUNIARY LOSS)
+ARISING OF THE USE OR INABILITY TO USE THE SOFTWARE.
+Motorola assumes no responsibility for the maintenance and support of the SOFTWARE.
+
+You are hereby granted a copyright license to use, modify, and distribute the SOFTWARE
+so long as this entire notice is retained without alteration in any modified and/or
+redistributed versions, and that such modified versions are clearly identified as such.
+No licenses are granted by implication, estoppel or otherwise under any patents
+or trademarks of Motorola, Inc.
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+#############################################
+set SREGS, -64
+set IREGS, -128
+set IFPREGS, -224
+set SFPREGS, -320
+set IFPCREGS, -332
+set SFPCREGS, -344
+set ICCR, -346
+set SCCR, -348
+set TESTCTR, -352
+set DATA, -384
+
+#############################################
+TESTTOP:
+ bra.l _060TESTS_
+ short 0x0000
+
+ bra.l _060TESTS_unimp
+ short 0x0000
+
+ bra.l _060TESTS_enable
+ short 0x0000
+
+start_str:
+ string "Testing 68060 FPSP started:\n"
+
+start_str_unimp:
+ string "Testing 68060 FPSP unimplemented instruction started:\n"
+
+start_str_enable:
+ string "Testing 68060 FPSP exception enabled started:\n"
+
+pass_str:
+ string "passed\n"
+
+fail_str:
+ string " failed\n"
+
+ align 0x4
+chk_test:
+ tst.l %d0
+ bne.b test_fail
+test_pass:
+ pea pass_str(%pc)
+ bsr.l _print_str
+ addq.l &0x4,%sp
+ rts
+test_fail:
+ mov.l %d1,-(%sp)
+ bsr.l _print_num
+ addq.l &0x4,%sp
+
+ pea fail_str(%pc)
+ bsr.l _print_str
+ addq.l &0x4,%sp
+ rts
+
+#############################################
+_060TESTS_:
+ link %a6,&-384
+
+ movm.l &0x3f3c,-(%sp)
+ fmovm.x &0xff,-(%sp)
+
+ pea start_str(%pc)
+ bsr.l _print_str
+ addq.l &0x4,%sp
+
+### effadd
+ clr.l TESTCTR(%a6)
+ pea effadd_str(%pc)
+ bsr.l _print_str
+ addq.l &0x4,%sp
+
+ bsr.l effadd_0
+
+ bsr.l chk_test
+
+### unsupp
+ clr.l TESTCTR(%a6)
+ pea unsupp_str(%pc)
+ bsr.l _print_str
+ addq.l &0x4,%sp
+
+ bsr.l unsupp_0
+
+ bsr.l chk_test
+
+### ovfl non-maskable
+ clr.l TESTCTR(%a6)
+ pea ovfl_nm_str(%pc)
+ bsr.l _print_str
+ bsr.l ovfl_nm_0
+
+ bsr.l chk_test
+
+### unfl non-maskable
+ clr.l TESTCTR(%a6)
+ pea unfl_nm_str(%pc)
+ bsr.l _print_str
+ bsr.l unfl_nm_0
+
+ bsr.l chk_test
+
+ movm.l (%sp)+,&0x3cfc
+ fmovm.x (%sp)+,&0xff
+
+ unlk %a6
+ rts
+
+_060TESTS_unimp:
+ link %a6,&-384
+
+ movm.l &0x3f3c,-(%sp)
+ fmovm.x &0xff,-(%sp)
+
+ pea start_str_unimp(%pc)
+ bsr.l _print_str
+ addq.l &0x4,%sp
+
+### unimp
+ clr.l TESTCTR(%a6)
+ pea unimp_str(%pc)
+ bsr.l _print_str
+ addq.l &0x4,%sp
+
+ bsr.l unimp_0
+
+ bsr.l chk_test
+
+ movm.l (%sp)+,&0x3cfc
+ fmovm.x (%sp)+,&0xff
+
+ unlk %a6
+ rts
+
+_060TESTS_enable:
+ link %a6,&-384
+
+ movm.l &0x3f3c,-(%sp)
+ fmovm.x &0xff,-(%sp)
+
+ pea start_str_enable(%pc)
+ bsr.l _print_str
+ addq.l &0x4,%sp
+
+### snan
+ clr.l TESTCTR(%a6)
+ pea snan_str(%pc)
+ bsr.l _print_str
+ bsr.l snan_0
+
+ bsr.l chk_test
+
+### operr
+ clr.l TESTCTR(%a6)
+ pea operr_str(%pc)
+ bsr.l _print_str
+ bsr.l operr_0
+
+ bsr.l chk_test
+
+### ovfl
+ clr.l TESTCTR(%a6)
+ pea ovfl_str(%pc)
+ bsr.l _print_str
+ bsr.l ovfl_0
+
+ bsr.l chk_test
+
+### unfl
+ clr.l TESTCTR(%a6)
+ pea unfl_str(%pc)
+ bsr.l _print_str
+ bsr.l unfl_0
+
+ bsr.l chk_test
+
+### dz
+ clr.l TESTCTR(%a6)
+ pea dz_str(%pc)
+ bsr.l _print_str
+ bsr.l dz_0
+
+ bsr.l chk_test
+
+### inexact
+ clr.l TESTCTR(%a6)
+ pea inex_str(%pc)
+ bsr.l _print_str
+ bsr.l inex_0
+
+ bsr.l chk_test
+
+ movm.l (%sp)+,&0x3cfc
+ fmovm.x (%sp)+,&0xff
+
+ unlk %a6
+ rts
+
+#############################################
+#############################################
+
+unimp_str:
+ string "\tUnimplemented FP instructions..."
+
+ align 0x4
+unimp_0:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+ fmovm.x DEF_FPREGS(%pc),&0xff
+ fmovm.l DEF_FPCREGS(%pc),%fpcr,%fpsr,%fpiar
+
+ mov.w &0x0000,ICCR(%a6)
+ movm.l &0x7fff,IREGS(%a6)
+ fmovm.x &0xff,IFPREGS(%a6)
+ fmovm.l %fpcr,%fpsr,%fpiar,IFPCREGS(%a6)
+
+ mov.l &0x40000000,DATA+0x0(%a6)
+ mov.l &0xc90fdaa2,DATA+0x4(%a6)
+ mov.l &0x2168c235,DATA+0x8(%a6)
+
+ mov.w &0x0000,%cc
+unimp_0_pc:
+ fsin.x DATA(%a6),%fp0
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0x7fff,SREGS(%a6)
+ fmovm.x &0xff,SFPREGS(%a6)
+ fmovm.l %fpcr,%fpsr,%fpiar,SFPCREGS(%a6)
+
+ mov.l &0xbfbf0000,IFPREGS+0x0(%a6)
+ mov.l &0x80000000,IFPREGS+0x4(%a6)
+ mov.l &0x00000000,IFPREGS+0x8(%a6)
+ mov.l &0x08000208,IFPCREGS+0x4(%a6)
+ lea unimp_0_pc(%pc),%a0
+ mov.l %a0,IFPCREGS+0x8(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ bsr.l chkfpregs
+ tst.b %d0
+ bne.l error
+
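+# Every subtest in this file repeats the shape of unimp_0 above. As a
+# hedged C outline of that pattern (helper names invented here, not part
+# of the package; 0 means pass):
+#
+#	int subtest(void (*op)(void))
+#	{
+#		load_defaults();      /* DEF_REGS/DEF_FPREGS/DEF_FPCREGS  */
+#		snapshot_expected();  /* fill ICCR/IREGS/IFPREGS/IFPCREGS */
+#		op();                 /* insn emulated by the 060FPSP     */
+#		snapshot_actual();    /* fill SCCR/SREGS/SFPREGS/SFPCREGS */
+#		patch_expected();     /* overwrite IFP* w/ known answers  */
+#		return chkregs() || chkfpregs();
+#	}
+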
+unimp_1:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+ fmovm.x DEF_FPREGS(%pc),&0xff
+ fmovm.l DEF_FPCREGS(%pc),%fpcr,%fpsr,%fpiar
+
+ mov.w &0x0000,ICCR(%a6)
+ movm.l &0x7fff,IREGS(%a6)
+ fmovm.x &0xff,IFPREGS(%a6)
+ fmovm.l %fpcr,%fpsr,%fpiar,IFPCREGS(%a6)
+
+ mov.l &0x3ffe0000,DATA+0x0(%a6)
+ mov.l &0xc90fdaa2,DATA+0x4(%a6)
+ mov.l &0x2168c235,DATA+0x8(%a6)
+
+ mov.w &0x0000,%cc
+unimp_1_pc:
+ ftan.x DATA(%a6),%fp0
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0x7fff,SREGS(%a6)
+ fmovm.x &0xff,SFPREGS(%a6)
+ fmovm.l %fpcr,%fpsr,%fpiar,SFPCREGS(%a6)
+
+ mov.l &0x3fff0000,IFPREGS+0x0(%a6)
+ mov.l &0x80000000,IFPREGS+0x4(%a6)
+ mov.l &0x00000000,IFPREGS+0x8(%a6)
+ mov.l &0x00000208,IFPCREGS+0x4(%a6)
+ lea unimp_1_pc(%pc),%a0
+ mov.l %a0,IFPCREGS+0x8(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ bsr.l chkfpregs
+ tst.b %d0
+ bne.l error
+
+# fmovecr
+unimp_2:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+ fmovm.x DEF_FPREGS(%pc),&0xff
+ fmovm.l DEF_FPCREGS(%pc),%fpcr,%fpsr,%fpiar
+
+ mov.w &0x0000,ICCR(%a6)
+ movm.l &0x7fff,IREGS(%a6)
+ fmovm.x &0xff,IFPREGS(%a6)
+ fmovm.l %fpcr,%fpsr,%fpiar,IFPCREGS(%a6)
+
+ mov.w &0x0000,%cc
+unimp_2_pc:
+ fmovcr.x &0x31,%fp0
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0x7fff,SREGS(%a6)
+ fmovm.x &0xff,SFPREGS(%a6)
+ fmovm.l %fpcr,%fpsr,%fpiar,SFPCREGS(%a6)
+
+ mov.l &0x40000000,IFPREGS+0x0(%a6)
+ mov.l &0x935d8ddd,IFPREGS+0x4(%a6)
+ mov.l &0xaaa8ac17,IFPREGS+0x8(%a6)
+ mov.l &0x00000208,IFPCREGS+0x4(%a6)
+ lea unimp_2_pc(%pc),%a0
+ mov.l %a0,IFPCREGS+0x8(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ bsr.l chkfpregs
+ tst.b %d0
+ bne.l error
+
+# fscc
+unimp_3:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+ fmovm.x DEF_FPREGS(%pc),&0xff
+ fmovm.l DEF_FPCREGS(%pc),%fpcr,%fpsr,%fpiar
+
+ fmov.l &0x0f000000,%fpsr
+ mov.l &0x00,%d7
+
+ mov.w &0x0000,ICCR(%a6)
+ movm.l &0x7fff,IREGS(%a6)
+ fmovm.x &0xff,IFPREGS(%a6)
+ fmovm.l %fpcr,%fpsr,%fpiar,IFPCREGS(%a6)
+
+ mov.w &0x0000,%cc
+unimp_3_pc:
+ fsgt %d7
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0x7fff,SREGS(%a6)
+ fmovm.x &0xff,SFPREGS(%a6)
+ fmovm.l %fpcr,%fpsr,%fpiar,SFPCREGS(%a6)
+ mov.l &0x0f008080,IFPCREGS+0x4(%a6)
+ lea unimp_3_pc(%pc),%a0
+ mov.l %a0,IFPCREGS+0x8(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ bsr.l chkfpregs
+ tst.b %d0
+ bne.l error
+
+# fdbcc
+unimp_4:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+ fmovm.x DEF_FPREGS(%pc),&0xff
+ fmovm.l DEF_FPCREGS(%pc),%fpcr,%fpsr,%fpiar
+
+ fmov.l &0x0f000000,%fpsr
+ mov.l &0x2,%d7
+
+ mov.w &0x0000,ICCR(%a6)
+ movm.l &0x7fff,IREGS(%a6)
+ fmovm.x &0xff,IFPREGS(%a6)
+ fmovm.l %fpcr,%fpsr,%fpiar,IFPCREGS(%a6)
+
+ mov.w &0x0000,%cc
+unimp_4_pc:
+ fdbgt.w %d7,unimp_4_pc
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0x7fff,SREGS(%a6)
+ fmovm.x &0xff,SFPREGS(%a6)
+ fmovm.l %fpcr,%fpsr,%fpiar,SFPCREGS(%a6)
+ mov.w &0xffff,IREGS+28+2(%a6)
+ mov.l &0x0f008080,IFPCREGS+0x4(%a6)
+ lea unimp_4_pc(%pc),%a0
+ mov.l %a0,IFPCREGS+0x8(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ bsr.l chkfpregs
+ tst.b %d0
+ bne.l error
+
+# ftrapcc
+unimp_5:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+ fmovm.x DEF_FPREGS(%pc),&0xff
+ fmovm.l DEF_FPCREGS(%pc),%fpcr,%fpsr,%fpiar
+
+ fmov.l &0x0f000000,%fpsr
+
+ mov.w &0x0000,ICCR(%a6)
+ movm.l &0x7fff,IREGS(%a6)
+ fmovm.x &0xff,IFPREGS(%a6)
+ fmovm.l %fpcr,%fpsr,%fpiar,IFPCREGS(%a6)
+
+ mov.w &0x0000,%cc
+unimp_5_pc:
+ ftpgt.l &0xabcdef01
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0x7fff,SREGS(%a6)
+ fmovm.x &0xff,SFPREGS(%a6)
+ fmovm.l %fpcr,%fpsr,%fpiar,SFPCREGS(%a6)
+ mov.l &0x0f008080,IFPCREGS+0x4(%a6)
+ lea unimp_5_pc(%pc),%a0
+ mov.l %a0,IFPCREGS+0x8(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ bsr.l chkfpregs
+ tst.b %d0
+ bne.l error
+
+ clr.l %d0
+ rts
+
+#############################################
+
+effadd_str:
+ string "\tUnimplemented <ea>..."
+
+ align 0x4
+effadd_0:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+ fmovm.x DEF_FPREGS(%pc),&0xff
+ fmovm.l DEF_FPCREGS(%pc),%fpcr,%fpsr,%fpiar
+
+ mov.w &0x0000,ICCR(%a6)
+ movm.l &0x7fff,IREGS(%a6)
+ fmovm.x &0xff,IFPREGS(%a6)
+ fmovm.l %fpcr,%fpsr,%fpiar,IFPCREGS(%a6)
+
+ fmov.b &0x2,%fp0
+
+ mov.w &0x0000,%cc
+effadd_0_pc:
+ fmul.x &0xc00000008000000000000000,%fp0
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0x7fff,SREGS(%a6)
+ fmovm.x &0xff,SFPREGS(%a6)
+ fmovm.l %fpcr,%fpsr,%fpiar,SFPCREGS(%a6)
+
+ mov.l &0xc0010000,IFPREGS+0x0(%a6)
+ mov.l &0x80000000,IFPREGS+0x4(%a6)
+ mov.l &0x00000000,IFPREGS+0x8(%a6)
+ mov.l &0x08000000,IFPCREGS+0x4(%a6)
+ lea effadd_0_pc(%pc),%a0
+ mov.l %a0,IFPCREGS+0x8(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ bsr.l chkfpregs
+ tst.b %d0
+ bne.l error
+
+effadd_1:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+ fmovm.x DEF_FPREGS(%pc),&0xff
+ fmovm.l DEF_FPCREGS(%pc),%fpcr,%fpsr,%fpiar
+
+ mov.w &0x0000,ICCR(%a6)
+ movm.l &0x7fff,IREGS(%a6)
+ fmovm.x &0xff,IFPREGS(%a6)
+ fmovm.l %fpcr,%fpsr,%fpiar,IFPCREGS(%a6)
+
+ mov.w &0x0000,%cc
+effadd_1_pc:
+ fabs.p &0xc12300012345678912345678,%fp0
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0x7fff,SREGS(%a6)
+ fmovm.x &0xff,SFPREGS(%a6)
+ fmovm.l %fpcr,%fpsr,%fpiar,SFPCREGS(%a6)
+
+ mov.l &0x3e660000,IFPREGS+0x0(%a6)
+ mov.l &0xd0ed23e8,IFPREGS+0x4(%a6)
+ mov.l &0xd14035bc,IFPREGS+0x8(%a6)
+ mov.l &0x00000108,IFPCREGS+0x4(%a6)
+ lea effadd_1_pc(%pc),%a0
+ mov.l %a0,IFPCREGS+0x8(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ bsr.l chkfpregs
+ tst.b %d0
+ bne.l error
+
+fmovml_0:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+ fmovm.x DEF_FPREGS(%pc),&0xff
+ fmovm.l DEF_FPCREGS(%pc),%fpcr,%fpsr,%fpiar
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0x7fff,IREGS(%a6)
+ fmovm.x &0xff,IFPREGS(%a6)
+ fmovm.l %fpcr,%fpsr,%fpiar,IFPCREGS(%a6)
+
+ fmovm.l &0xffffffffffffffff,%fpcr,%fpsr
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0x7fff,SREGS(%a6)
+ fmovm.x &0xff,SFPREGS(%a6)
+ fmovm.l %fpcr,%fpsr,%fpiar,SFPCREGS(%a6)
+ mov.l &0x0000fff0,IFPCREGS+0x0(%a6)
+ mov.l &0x0ffffff8,IFPCREGS+0x4(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ bsr.l chkfpregs
+ tst.b %d0
+ bne.l error
+
+fmovml_1:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+ fmovm.x DEF_FPREGS(%pc),&0xff
+ fmovm.l DEF_FPCREGS(%pc),%fpcr,%fpsr,%fpiar
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0x7fff,IREGS(%a6)
+ fmovm.x &0xff,IFPREGS(%a6)
+ fmovm.l %fpcr,%fpsr,%fpiar,IFPCREGS(%a6)
+
+ fmovm.l &0xffffffffffffffff,%fpcr,%fpiar
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0x7fff,SREGS(%a6)
+ fmovm.x &0xff,SFPREGS(%a6)
+ fmovm.l %fpcr,%fpsr,%fpiar,SFPCREGS(%a6)
+ mov.l &0x0000fff0,IFPCREGS+0x0(%a6)
+ mov.l &0xffffffff,IFPCREGS+0x8(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ bsr.l chkfpregs
+ tst.b %d0
+ bne.l error
+
+fmovml_2:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+ fmovm.x DEF_FPREGS(%pc),&0xff
+ fmovm.l DEF_FPCREGS(%pc),%fpcr,%fpsr,%fpiar
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0x7fff,IREGS(%a6)
+ fmovm.x &0xff,IFPREGS(%a6)
+ fmovm.l %fpcr,%fpsr,%fpiar,IFPCREGS(%a6)
+
+ fmovm.l &0xffffffffffffffff,%fpsr,%fpiar
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0x7fff,SREGS(%a6)
+ fmovm.x &0xff,SFPREGS(%a6)
+ fmovm.l %fpcr,%fpsr,%fpiar,SFPCREGS(%a6)
+ mov.l &0x0ffffff8,IFPCREGS+0x4(%a6)
+ mov.l &0xffffffff,IFPCREGS+0x8(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ bsr.l chkfpregs
+ tst.b %d0
+ bne.l error
+
+fmovml_3:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+ fmovm.x DEF_FPREGS(%pc),&0xff
+ fmovm.l DEF_FPCREGS(%pc),%fpcr,%fpsr,%fpiar
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0x7fff,IREGS(%a6)
+ fmovm.x &0xff,IFPREGS(%a6)
+ fmovm.l %fpcr,%fpsr,%fpiar,IFPCREGS(%a6)
+
+ fmovm.l &0xffffffffffffffffffffffff,%fpcr,%fpsr,%fpiar
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0x7fff,SREGS(%a6)
+ fmovm.x &0xff,SFPREGS(%a6)
+ fmovm.l %fpcr,%fpsr,%fpiar,SFPCREGS(%a6)
+ mov.l &0x0000fff0,IFPCREGS+0x0(%a6)
+ mov.l &0x0ffffff8,IFPCREGS+0x4(%a6)
+ mov.l &0xffffffff,IFPCREGS+0x8(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ bsr.l chkfpregs
+ tst.b %d0
+ bne.l error
+
+# fmovmx dynamic
+fmovmx_0:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+ fmovm.x DEF_FPREGS(%pc),&0xff
+ fmovm.l DEF_FPCREGS(%pc),%fpcr,%fpsr,%fpiar
+
+ fmov.b &0x1,%fp0
+ fmov.b &0x2,%fp1
+ fmov.b &0x3,%fp2
+ fmov.b &0x4,%fp3
+ fmov.b &0x5,%fp4
+ fmov.b &0x6,%fp5
+ fmov.b &0x7,%fp6
+ fmov.b &0x8,%fp7
+
+ fmov.l &0x0,%fpiar
+ mov.l &0xffffffaa,%d0
+
+ mov.w &0x0000,ICCR(%a6)
+ movm.l &0xffff,IREGS(%a6)
+
+ fmovm.l %fpcr,%fpsr,%fpiar,IFPCREGS(%a6)
+ fmovm.x &0xff,IFPREGS(%a6)
+
+ mov.w &0x0000,%cc
+
+ fmovm.x %d0,-(%sp)
+
+ mov.w %cc,SCCR(%a6)
+
+ fmovm.l %fpcr,%fpsr,%fpiar,SFPCREGS(%a6)
+
+ fmov.s &0x7f800000,%fp1
+ fmov.s &0x7f800000,%fp3
+ fmov.s &0x7f800000,%fp5
+ fmov.s &0x7f800000,%fp7
+
+ fmov.x (%sp)+,%fp1
+ fmov.x (%sp)+,%fp3
+ fmov.x (%sp)+,%fp5
+ fmov.x (%sp)+,%fp7
+
+ movm.l &0xffff,SREGS(%a6)
+ fmovm.x &0xff,SFPREGS(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ bsr.l chkfpregs
+ tst.b %d0
+ bne.l error
+
+fmovmx_1:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+ fmovm.x DEF_FPREGS(%pc),&0xff
+ fmovm.l DEF_FPCREGS(%pc),%fpcr,%fpsr,%fpiar
+
+ fmov.b &0x1,%fp0
+ fmov.b &0x2,%fp1
+ fmov.b &0x3,%fp2
+ fmov.b &0x4,%fp3
+ fmov.b &0x5,%fp4
+ fmov.b &0x6,%fp5
+ fmov.b &0x7,%fp6
+ fmov.b &0x8,%fp7
+
+ fmov.x %fp6,-(%sp)
+ fmov.x %fp4,-(%sp)
+ fmov.x %fp2,-(%sp)
+ fmov.x %fp0,-(%sp)
+
+ fmovm.x &0xff,IFPREGS(%a6)
+
+ fmov.s &0x7f800000,%fp6
+ fmov.s &0x7f800000,%fp4
+ fmov.s &0x7f800000,%fp2
+ fmov.s &0x7f800000,%fp0
+
+ fmov.l &0x0,%fpiar
+ fmov.l &0x0,%fpsr
+ mov.l &0xffffffaa,%d0
+
+ mov.w &0x0000,ICCR(%a6)
+ movm.l &0xffff,IREGS(%a6)
+
+ fmovm.l %fpcr,%fpsr,%fpiar,IFPCREGS(%a6)
+
+ mov.w &0x0000,%cc
+
+ fmovm.x (%sp)+,%d0
+
+ mov.w %cc,SCCR(%a6)
+
+ fmovm.l %fpcr,%fpsr,%fpiar,SFPCREGS(%a6)
+
+ movm.l &0xffff,SREGS(%a6)
+ fmovm.x &0xff,SFPREGS(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ bsr.l chkfpregs
+ tst.b %d0
+ bne.l error
+
+fmovmx_2:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+ fmovm.x DEF_FPREGS(%pc),&0xff
+ fmovm.l DEF_FPCREGS(%pc),%fpcr,%fpsr,%fpiar
+
+ fmov.b &0x1,%fp0
+ fmov.b &0x2,%fp1
+ fmov.b &0x3,%fp2
+ fmov.b &0x4,%fp3
+ fmov.b &0x5,%fp4
+ fmov.b &0x6,%fp5
+ fmov.b &0x7,%fp6
+ fmov.b &0x8,%fp7
+
+ fmov.l &0x0,%fpiar
+ mov.l &0xffffff00,%d0
+
+ mov.w &0x0000,ICCR(%a6)
+ movm.l &0xffff,IREGS(%a6)
+
+ fmovm.l %fpcr,%fpsr,%fpiar,IFPCREGS(%a6)
+ fmovm.x &0xff,IFPREGS(%a6)
+
+ mov.w &0x0000,%cc
+
+ fmovm.x %d0,-(%sp)
+
+ mov.w %cc,SCCR(%a6)
+
+ fmovm.l %fpcr,%fpsr,%fpiar,SFPCREGS(%a6)
+
+ movm.l &0xffff,SREGS(%a6)
+ fmovm.x &0xff,SFPREGS(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ bsr.l chkfpregs
+ tst.b %d0
+ bne.l error
+
+ clr.l %d0
+ rts
+
+###########################################################
+
+# This test will take a non-maskable overflow directly.
+ovfl_nm_str:
+ string "\tNon-maskable overflow..."
+
+ align 0x4
+ovfl_nm_0:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+ fmovm.x DEF_FPREGS(%pc),&0xff
+ fmovm.l DEF_FPCREGS(%pc),%fpcr,%fpsr,%fpiar
+
+ mov.w &0x0000,ICCR(%a6)
+ movm.l &0x7fff,IREGS(%a6)
+ fmovm.x &0xff,IFPREGS(%a6)
+ fmovm.l %fpcr,%fpsr,%fpiar,IFPCREGS(%a6)
+
+ fmov.b &0x2,%fp0
+ mov.l &0x7ffe0000,DATA+0x0(%a6)
+ mov.l &0x80000000,DATA+0x4(%a6)
+ mov.l &0x00000000,DATA+0x8(%a6)
+
+ mov.w &0x0000,%cc
+ovfl_nm_0_pc:
+ fmul.x DATA(%a6),%fp0
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0x7fff,SREGS(%a6)
+ fmovm.x &0xff,SFPREGS(%a6)
+ fmovm.l %fpcr,%fpsr,%fpiar,SFPCREGS(%a6)
+
+ mov.l &0x7fff0000,IFPREGS+0x0(%a6)
+ mov.l &0x00000000,IFPREGS+0x4(%a6)
+ mov.l &0x00000000,IFPREGS+0x8(%a6)
+ mov.l &0x02001048,IFPCREGS+0x4(%a6)
+ lea ovfl_nm_0_pc(%pc),%a0
+ mov.l %a0,IFPCREGS+0x8(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ bsr.l chkfpregs
+ tst.b %d0
+ bne.l error
+
+ clr.l %d0
+ rts
+
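+# The three DATA longwords in ovfl_nm_0 form one extended-precision
+# operand. A hedged sketch of that 96-bit layout (illustration only,
+# not a native C type):
+#
+#	struct x96 {
+#		uint16_t sign_exp;      /* sign bit + 15-bit biased exp    */
+#		uint16_t unused;
+#		uint32_t mant_hi;       /* explicit integer bit + fraction */
+#		uint32_t mant_lo;
+#	};
+#
+# So 0x7ffe0000/0x80000000/0x00000000 is 1.0 * 2^(0x7ffe - 0x3fff), the
+# largest representable power of two; multiplying it by 2 must overflow,
+# which is exactly what this test provokes.
+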
+###########################################################
+
+# This test will take an overflow directly.
+ovfl_str:
+ string "\tEnabled overflow..."
+
+ align 0x4
+ovfl_0:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+ fmovm.x DEF_FPREGS(%pc),&0xff
+ fmovm.l DEF_FPCREGS(%pc),%fpcr,%fpsr,%fpiar
+
+ mov.w &0x0000,ICCR(%a6)
+ movm.l &0x7fff,IREGS(%a6)
+ fmovm.x &0xff,IFPREGS(%a6)
+ fmov.l &0x00001000,%fpcr
+ fmovm.l %fpcr,%fpsr,%fpiar,IFPCREGS(%a6)
+
+ fmov.b &0x2,%fp0
+ mov.l &0x7ffe0000,DATA+0x0(%a6)
+ mov.l &0x80000000,DATA+0x4(%a6)
+ mov.l &0x00000000,DATA+0x8(%a6)
+
+ mov.w &0x0000,%cc
+ovfl_0_pc:
+ fmul.x DATA(%a6),%fp0
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0x7fff,SREGS(%a6)
+ fmovm.x &0xff,SFPREGS(%a6)
+ fmovm.l %fpcr,%fpsr,%fpiar,SFPCREGS(%a6)
+
+ mov.l &0x7fff0000,IFPREGS+0x0(%a6)
+ mov.l &0x00000000,IFPREGS+0x4(%a6)
+ mov.l &0x00000000,IFPREGS+0x8(%a6)
+ mov.l &0x02001048,IFPCREGS+0x4(%a6)
+ lea ovfl_0_pc(%pc),%a0
+ mov.l %a0,IFPCREGS+0x8(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ bsr.l chkfpregs
+ tst.b %d0
+ bne.l error
+
+ clr.l %d0
+ rts
+
+#####################################################################
+
+# This test will take an underflow directly.
+unfl_str:
+ string "\tEnabled underflow..."
+
+ align 0x4
+unfl_0:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+ fmovm.x DEF_FPREGS(%pc),&0xff
+ fmovm.l DEF_FPCREGS(%pc),%fpcr,%fpsr,%fpiar
+
+ mov.w &0x0000,ICCR(%a6)
+ movm.l &0x7fff,IREGS(%a6)
+ fmovm.x &0xff,IFPREGS(%a6)
+ fmov.l &0x00000800,%fpcr
+ fmovm.l %fpcr,%fpsr,%fpiar,IFPCREGS(%a6)
+
+ mov.l &0x00000000,DATA+0x0(%a6)
+ mov.l &0x80000000,DATA+0x4(%a6)
+ mov.l &0x00000000,DATA+0x8(%a6)
+ fmovm.x DATA(%a6),&0x80
+
+ mov.w &0x0000,%cc
+unfl_0_pc:
+ fdiv.b &0x2,%fp0
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0x7fff,SREGS(%a6)
+ fmovm.x &0xff,SFPREGS(%a6)
+ fmovm.l %fpcr,%fpsr,%fpiar,SFPCREGS(%a6)
+
+ mov.l &0x00000000,IFPREGS+0x0(%a6)
+ mov.l &0x40000000,IFPREGS+0x4(%a6)
+ mov.l &0x00000000,IFPREGS+0x8(%a6)
+ mov.l &0x00000800,IFPCREGS+0x4(%a6)
+ lea unfl_0_pc(%pc),%a0
+ mov.l %a0,IFPCREGS+0x8(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ bsr.l chkfpregs
+ tst.b %d0
+ bne.l error
+
+ clr.l %d0
+ rts
+
+#####################################################################
+
+# This test will take a non-maskable underflow directly.
+unfl_nm_str:
+ string "\tNon-maskable underflow..."
+
+ align 0x4
+unfl_nm_0:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+ fmovm.x DEF_FPREGS(%pc),&0xff
+ fmovm.l DEF_FPCREGS(%pc),%fpcr,%fpsr,%fpiar
+
+ mov.w &0x0000,ICCR(%a6)
+ movm.l &0x7fff,IREGS(%a6)
+ fmovm.x &0xff,IFPREGS(%a6)
+ fmovm.l %fpcr,%fpsr,%fpiar,IFPCREGS(%a6)
+
+ mov.l &0x00000000,DATA+0x0(%a6)
+ mov.l &0x80000000,DATA+0x4(%a6)
+ mov.l &0x00000000,DATA+0x8(%a6)
+ fmovm.x DATA(%a6),&0x80
+
+ mov.w &0x0000,%cc
+unfl_nm_0_pc:
+ fdiv.b &0x2,%fp0
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0x7fff,SREGS(%a6)
+ fmovm.x &0xff,SFPREGS(%a6)
+ fmovm.l %fpcr,%fpsr,%fpiar,SFPCREGS(%a6)
+
+ mov.l &0x00000000,IFPREGS+0x0(%a6)
+ mov.l &0x40000000,IFPREGS+0x4(%a6)
+ mov.l &0x00000000,IFPREGS+0x8(%a6)
+ mov.l &0x00000800,IFPCREGS+0x4(%a6)
+ lea unfl_nm_0_pc(%pc),%a0
+ mov.l %a0,IFPCREGS+0x8(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ bsr.l chkfpregs
+ tst.b %d0
+ bne.l error
+
+ clr.l %d0
+ rts
+
+#####################################################################
+
+inex_str:
+ string "\tEnabled inexact..."
+
+ align 0x4
+inex_0:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+ fmovm.x DEF_FPREGS(%pc),&0xff
+ fmovm.l DEF_FPCREGS(%pc),%fpcr,%fpsr,%fpiar
+
+ mov.w &0x0000,ICCR(%a6)
+ movm.l &0x7fff,IREGS(%a6)
+ fmovm.x &0xff,IFPREGS(%a6)
+ fmov.l &0x00000200,%fpcr # enable inexact
+ fmovm.l %fpcr,%fpsr,%fpiar,IFPCREGS(%a6)
+
+ mov.l &0x50000000,DATA+0x0(%a6)
+ mov.l &0x80000000,DATA+0x4(%a6)
+ mov.l &0x00000000,DATA+0x8(%a6)
+ fmovm.x DATA(%a6),&0x80
+
+ mov.w &0x0000,%cc
+inex_0_pc:
+ fadd.b &0x2,%fp0
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0x7fff,SREGS(%a6)
+ fmovm.x &0xff,SFPREGS(%a6)
+ fmovm.l %fpcr,%fpsr,%fpiar,SFPCREGS(%a6)
+
+ mov.l &0x50000000,IFPREGS+0x0(%a6)
+ mov.l &0x80000000,IFPREGS+0x4(%a6)
+ mov.l &0x00000000,IFPREGS+0x8(%a6)
+ mov.l &0x00000208,IFPCREGS+0x4(%a6)
+ lea inex_0_pc(%pc),%a0
+ mov.l %a0,IFPCREGS+0x8(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ bsr.l chkfpregs
+ tst.b %d0
+ bne.l error
+
+ clr.l %d0
+ rts
+
+#####################################################################
+
+snan_str:
+ string "\tEnabled SNAN..."
+
+ align 0x4
+snan_0:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+ fmovm.x DEF_FPREGS(%pc),&0xff
+ fmovm.l DEF_FPCREGS(%pc),%fpcr,%fpsr,%fpiar
+
+ mov.w &0x0000,ICCR(%a6)
+ movm.l &0x7fff,IREGS(%a6)
+ fmovm.x &0xff,IFPREGS(%a6)
+ fmov.l &0x00004000,%fpcr # enable SNAN
+ fmovm.l %fpcr,%fpsr,%fpiar,IFPCREGS(%a6)
+
+ mov.l &0xffff0000,DATA+0x0(%a6)
+ mov.l &0x00000000,DATA+0x4(%a6)
+ mov.l &0x00000001,DATA+0x8(%a6)
+ fmovm.x DATA(%a6),&0x80
+
+ mov.w &0x0000,%cc
+snan_0_pc:
+ fadd.b &0x2,%fp0
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0x7fff,SREGS(%a6)
+ fmovm.x &0xff,SFPREGS(%a6)
+ fmovm.l %fpcr,%fpsr,%fpiar,SFPCREGS(%a6)
+
+ mov.l &0xffff0000,IFPREGS+0x0(%a6)
+ mov.l &0x00000000,IFPREGS+0x4(%a6)
+ mov.l &0x00000001,IFPREGS+0x8(%a6)
+ mov.l &0x09004080,IFPCREGS+0x4(%a6)
+ lea snan_0_pc(%pc),%a0
+ mov.l %a0,IFPCREGS+0x8(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ bsr.l chkfpregs
+ tst.b %d0
+ bne.l error
+
+ clr.l %d0
+ rts
+
+#####################################################################
+
+operr_str:
+ string "\tEnabled OPERR..."
+
+ align 0x4
+operr_0:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+ fmovm.x DEF_FPREGS(%pc),&0xff
+ fmovm.l DEF_FPCREGS(%pc),%fpcr,%fpsr,%fpiar
+
+ mov.w &0x0000,ICCR(%a6)
+ movm.l &0x7fff,IREGS(%a6)
+ fmovm.x &0xff,IFPREGS(%a6)
+ fmov.l &0x00002000,%fpcr # enable OPERR
+ fmovm.l %fpcr,%fpsr,%fpiar,IFPCREGS(%a6)
+
+ mov.l &0xffff0000,DATA+0x0(%a6)
+ mov.l &0x00000000,DATA+0x4(%a6)
+ mov.l &0x00000000,DATA+0x8(%a6)
+ fmovm.x DATA(%a6),&0x80
+
+ mov.w &0x0000,%cc
+operr_0_pc:
+ fadd.s &0x7f800000,%fp0
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0x7fff,SREGS(%a6)
+ fmovm.x &0xff,SFPREGS(%a6)
+ fmovm.l %fpcr,%fpsr,%fpiar,SFPCREGS(%a6)
+
+ mov.l &0xffff0000,IFPREGS+0x0(%a6)
+ mov.l &0x00000000,IFPREGS+0x4(%a6)
+ mov.l &0x00000000,IFPREGS+0x8(%a6)
+ mov.l &0x01002080,IFPCREGS+0x4(%a6)
+ lea operr_0_pc(%pc),%a0
+ mov.l %a0,IFPCREGS+0x8(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ bsr.l chkfpregs
+ tst.b %d0
+ bne.l error
+
+ clr.l %d0
+ rts
+
+#####################################################################
+
+dz_str:
+ string "\tEnabled DZ..."
+
+ align 0x4
+dz_0:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+ fmovm.x DEF_FPREGS(%pc),&0xff
+ fmovm.l DEF_FPCREGS(%pc),%fpcr,%fpsr,%fpiar
+
+ mov.w &0x0000,ICCR(%a6)
+ movm.l &0x7fff,IREGS(%a6)
+ fmovm.x &0xff,IFPREGS(%a6)
+ fmov.l &0x00000400,%fpcr # enable DZ
+ fmovm.l %fpcr,%fpsr,%fpiar,IFPCREGS(%a6)
+
+ mov.l &0x40000000,DATA+0x0(%a6)
+ mov.l &0x80000000,DATA+0x4(%a6)
+ mov.l &0x00000000,DATA+0x8(%a6)
+ fmovm.x DATA(%a6),&0x80
+
+ mov.w &0x0000,%cc
+dz_0_pc:
+ fdiv.b &0x0,%fp0
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0x7fff,SREGS(%a6)
+ fmovm.x &0xff,SFPREGS(%a6)
+ fmovm.l %fpcr,%fpsr,%fpiar,SFPCREGS(%a6)
+
+ mov.l &0x40000000,IFPREGS+0x0(%a6)
+ mov.l &0x80000000,IFPREGS+0x4(%a6)
+ mov.l &0x00000000,IFPREGS+0x8(%a6)
+ mov.l &0x02000410,IFPCREGS+0x4(%a6)
+ lea dz_0_pc(%pc),%a0
+ mov.l %a0,IFPCREGS+0x8(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ bsr.l chkfpregs
+ tst.b %d0
+ bne.l error
+
+ clr.l %d0
+ rts
+
+#####################################################################
+
+unsupp_str:
+ string "\tUnimplemented data type/format..."
+
+# an unnormalized number
+ align 0x4
+unsupp_0:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+ fmovm.x DEF_FPREGS(%pc),&0xff
+ fmovm.l DEF_FPCREGS(%pc),%fpcr,%fpsr,%fpiar
+
+ mov.w &0x0000,ICCR(%a6)
+ movm.l &0x7fff,IREGS(%a6)
+ fmovm.x &0xff,IFPREGS(%a6)
+ fmovm.l %fpcr,%fpsr,%fpiar,IFPCREGS(%a6)
+
+ mov.l &0xc03f0000,DATA+0x0(%a6)
+ mov.l &0x00000000,DATA+0x4(%a6)
+ mov.l &0x00000001,DATA+0x8(%a6)
+ fmov.b &0x2,%fp0
+ mov.w &0x0000,%cc
+unsupp_0_pc:
+ fmul.x DATA(%a6),%fp0
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0x7fff,SREGS(%a6)
+ fmovm.x &0xff,SFPREGS(%a6)
+ fmovm.l %fpcr,%fpsr,%fpiar,SFPCREGS(%a6)
+
+ mov.l &0xc0010000,IFPREGS+0x0(%a6)
+ mov.l &0x80000000,IFPREGS+0x4(%a6)
+ mov.l &0x00000000,IFPREGS+0x8(%a6)
+ mov.l &0x08000000,IFPCREGS+0x4(%a6)
+ lea unsupp_0_pc(%pc),%a0
+ mov.l %a0,IFPCREGS+0x8(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ bsr.l chkfpregs
+ tst.b %d0
+ bne.l error
+
+# a denormalized number
+unsupp_1:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+ fmovm.x DEF_FPREGS(%pc),&0xff
+ fmovm.l DEF_FPCREGS(%pc),%fpcr,%fpsr,%fpiar
+
+ mov.w &0x0000,ICCR(%a6)
+ movm.l &0x7fff,IREGS(%a6)
+ fmovm.x &0xff,IFPREGS(%a6)
+ fmovm.l %fpcr,%fpsr,%fpiar,IFPCREGS(%a6)
+
+ mov.l &0x80000000,DATA+0x0(%a6)
+ mov.l &0x01000000,DATA+0x4(%a6)
+ mov.l &0x00000000,DATA+0x8(%a6)
+ fmov.l &0x7fffffff,%fp0
+
+ mov.w &0x0000,%cc
+unsupp_1_pc:
+ fmul.x DATA(%a6),%fp0
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0x7fff,SREGS(%a6)
+ fmovm.x &0xff,SFPREGS(%a6)
+ fmovm.l %fpcr,%fpsr,%fpiar,SFPCREGS(%a6)
+
+ mov.l &0x80170000,IFPREGS+0x0(%a6)
+ mov.l &0xfffffffe,IFPREGS+0x4(%a6)
+ mov.l &0x00000000,IFPREGS+0x8(%a6)
+ mov.l &0x08000000,IFPCREGS+0x4(%a6)
+ lea unsupp_1_pc(%pc),%a0
+ mov.l %a0,IFPCREGS+0x8(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ bsr.l chkfpregs
+ tst.b %d0
+ bne.l error
+
+# packed
+unsupp_2:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+ fmovm.x DEF_FPREGS(%pc),&0xff
+ fmovm.l DEF_FPCREGS(%pc),%fpcr,%fpsr,%fpiar
+
+ mov.w &0x0000,ICCR(%a6)
+ movm.l &0x7fff,IREGS(%a6)
+ fmovm.x &0xff,IFPREGS(%a6)
+ fmovm.l %fpcr,%fpsr,%fpiar,IFPCREGS(%a6)
+
+ mov.l &0xc1230001,DATA+0x0(%a6)
+ mov.l &0x23456789,DATA+0x4(%a6)
+ mov.l &0x12345678,DATA+0x8(%a6)
+
+ mov.w &0x0000,%cc
+unsupp_2_pc:
+ fabs.p DATA(%a6),%fp0
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0x7fff,SREGS(%a6)
+ fmovm.x &0xff,SFPREGS(%a6)
+ fmovm.l %fpcr,%fpsr,%fpiar,SFPCREGS(%a6)
+
+ mov.l &0x3e660000,IFPREGS+0x0(%a6)
+ mov.l &0xd0ed23e8,IFPREGS+0x4(%a6)
+ mov.l &0xd14035bc,IFPREGS+0x8(%a6)
+ mov.l &0x00000108,IFPCREGS+0x4(%a6)
+ lea unsupp_2_pc(%pc),%a0
+ mov.l %a0,IFPCREGS+0x8(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ bsr.l chkfpregs
+ tst.b %d0
+ bne.l error
+
+ clr.l %d0
+ rts
+
+###########################################################
+###########################################################
+
+chkregs:
+ lea IREGS(%a6),%a0
+ lea SREGS(%a6),%a1
+ mov.l &14,%d0
+chkregs_loop:
+ cmp.l (%a0)+,(%a1)+
+ bne.l chkregs_error
+ dbra.w %d0,chkregs_loop
+
+ mov.w ICCR(%a6),%d0
+ mov.w SCCR(%a6),%d1
+ cmp.w %d0,%d1
+ bne.l chkregs_error
+
+ clr.l %d0
+ rts
+
+chkregs_error:
+ movq.l &0x1,%d0
+ rts
+
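+# A hedged C equivalent of chkregs above (names invented): it compares
+# the 15 longwords d0-d7/a0-a6 captured by the &0x7fff movm masks, then
+# the two saved CCR words.
+#
+#	int chkregs(uint32_t iregs[15], uint32_t sregs[15],
+#		    uint16_t iccr, uint16_t sccr)
+#	{
+#		for (int i = 0; i < 15; i++)
+#			if (iregs[i] != sregs[i])
+#				return 1;       /* mismatch -> fail */
+#		return iccr != sccr;            /* 0 == pass        */
+#	}
+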
+error:
+ mov.l TESTCTR(%a6),%d1
+ movq.l &0x1,%d0
+ rts
+
+chkfpregs:
+ lea IFPREGS(%a6),%a0
+ lea SFPREGS(%a6),%a1
+ mov.l &23,%d0
+chkfpregs_loop:
+ cmp.l (%a0)+,(%a1)+
+ bne.l chkfpregs_error
+ dbra.w %d0,chkfpregs_loop
+
+ lea IFPCREGS(%a6),%a0
+ lea SFPCREGS(%a6),%a1
+ cmp.l (%a0)+,(%a1)+
+ bne.l chkfpregs_error
+ cmp.l (%a0)+,(%a1)+
+ bne.l chkfpregs_error
+ cmp.l (%a0)+,(%a1)+
+ bne.l chkfpregs_error
+
+ clr.l %d0
+ rts
+
+chkfpregs_error:
+ movq.l &0x1,%d0
+ rts
+
+DEF_REGS:
+ long 0xacacacac, 0xacacacac, 0xacacacac, 0xacacacac
+ long 0xacacacac, 0xacacacac, 0xacacacac, 0xacacacac
+
+ long 0xacacacac, 0xacacacac, 0xacacacac, 0xacacacac
+ long 0xacacacac, 0xacacacac, 0xacacacac, 0xacacacac
+
+DEF_FPREGS:
+ long 0x7fff0000, 0xffffffff, 0xffffffff
+ long 0x7fff0000, 0xffffffff, 0xffffffff
+ long 0x7fff0000, 0xffffffff, 0xffffffff
+ long 0x7fff0000, 0xffffffff, 0xffffffff
+ long 0x7fff0000, 0xffffffff, 0xffffffff
+ long 0x7fff0000, 0xffffffff, 0xffffffff
+ long 0x7fff0000, 0xffffffff, 0xffffffff
+ long 0x7fff0000, 0xffffffff, 0xffffffff
+
+DEF_FPCREGS:
+ long 0x00000000, 0x00000000, 0x00000000
+
+############################################################
+
+_print_str:
+ mov.l %d0,-(%sp)
+ mov.l (TESTTOP-0x80+0x0,%pc),%d0
+ pea (TESTTOP-0x80,%pc,%d0)
+ mov.l 0x4(%sp),%d0
+ rtd &0x4
+
+_print_num:
+ mov.l %d0,-(%sp)
+ mov.l (TESTTOP-0x80+0x4,%pc),%d0
+ pea (TESTTOP-0x80,%pc,%d0)
+ mov.l 0x4(%sp),%d0
+ rtd &0x4
+
+############################################################
diff --git a/arch/m68k/ifpsp060/src/ilsp.S b/arch/m68k/ifpsp060/src/ilsp.S
new file mode 100644
index 00000000000..afa7422cddb
--- /dev/null
+++ b/arch/m68k/ifpsp060/src/ilsp.S
@@ -0,0 +1,932 @@
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+MOTOROLA MICROPROCESSOR & MEMORY TECHNOLOGY GROUP
+M68000 Hi-Performance Microprocessor Division
+M68060 Software Package
+Production Release P1.00 -- October 10, 1994
+
+M68060 Software Package Copyright © 1993, 1994 Motorola Inc. All rights reserved.
+
+THE SOFTWARE is provided on an "AS IS" basis and without warranty.
+To the maximum extent permitted by applicable law,
+MOTOROLA DISCLAIMS ALL WARRANTIES WHETHER EXPRESS OR IMPLIED,
+INCLUDING IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
+and any warranty against infringement with regard to the SOFTWARE
+(INCLUDING ANY MODIFIED VERSIONS THEREOF) and any accompanying written materials.
+
+To the maximum extent permitted by applicable law,
+IN NO EVENT SHALL MOTOROLA BE LIABLE FOR ANY DAMAGES WHATSOEVER
+(INCLUDING WITHOUT LIMITATION, DAMAGES FOR LOSS OF BUSINESS PROFITS,
+BUSINESS INTERRUPTION, LOSS OF BUSINESS INFORMATION, OR OTHER PECUNIARY LOSS)
+ARISING OF THE USE OR INABILITY TO USE THE SOFTWARE.
+Motorola assumes no responsibility for the maintenance and support of the SOFTWARE.
+
+You are hereby granted a copyright license to use, modify, and distribute the SOFTWARE
+so long as this entire notice is retained without alteration in any modified and/or
+redistributed versions, and that such modified versions are clearly identified as such.
+No licenses are granted by implication, estoppel or otherwise under any patents
+or trademarks of Motorola, Inc.
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+# litop.s:
+# This file is appended to the top of the 060ILSP package
+# and contains the entry points into the package. The user, in
+# effect, branches to one of the branch table entries located here.
+#
+
+ bra.l _060LSP__idivs64_
+ short 0x0000
+ bra.l _060LSP__idivu64_
+ short 0x0000
+
+ bra.l _060LSP__imuls64_
+ short 0x0000
+ bra.l _060LSP__imulu64_
+ short 0x0000
+
+ bra.l _060LSP__cmp2_Ab_
+ short 0x0000
+ bra.l _060LSP__cmp2_Aw_
+ short 0x0000
+ bra.l _060LSP__cmp2_Al_
+ short 0x0000
+ bra.l _060LSP__cmp2_Db_
+ short 0x0000
+ bra.l _060LSP__cmp2_Dw_
+ short 0x0000
+ bra.l _060LSP__cmp2_Dl_
+ short 0x0000
+
+# leave room for possible future additions.
+ align 0x200
+
+#########################################################################
+# XDEF **************************************************************** #
+# _060LSP__idivu64_(): Emulate 64-bit unsigned div instruction. #
+# _060LSP__idivs64_(): Emulate 64-bit signed div instruction. #
+# #
+# This is the library version which is accessed as a subroutine #
+# and therefore does not work exactly like the 680X0 div{s,u}.l #
+# 64-bit divide instruction. #
+# #
+# XREF **************************************************************** #
+# None. #
+# #
+# INPUT *************************************************************** #
+# 0x4(sp) = divisor #
+# 0x8(sp) = hi(dividend) #
+# 0xc(sp) = lo(dividend) #
+# 0x10(sp) = pointer to location to place quotient/remainder #
+# #
+# OUTPUT ************************************************************** #
+# 0x10(sp) = points to location of remainder/quotient. #
+# remainder is in first longword, quotient is in 2nd. #
+# #
+# ALGORITHM *********************************************************** #
+# If the operands are signed, make them unsigned and save the #
+# sign info for later. Separate out special cases like divide-by-zero #
+# or 32-bit divides if possible. Else, use a special math algorithm #
+# to calculate the result. #
+# Restore sign info if signed instruction. Set the condition #
+# codes before performing the final "rts". If the divisor was equal to #
+# zero, then perform a divide-by-zero using a 16-bit implemented #
+# divide instruction. This way, the operating system can record that #
+# the event occurred even though it may not point to the correct place. #
+# #
+#########################################################################
+
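+# As a hedged C model of the interface documented above (names invented;
+# assumes <stdint.h>; the real code additionally sets the ccodes and,
+# for divs.l, re-applies the signs saved in POSNEG/NDIVISOR/NDIVIDEND):
+#
+#	int idivu64(uint32_t d, uint32_t hi, uint32_t lo, uint32_t out[2])
+#	{
+#		uint64_t u = ((uint64_t)hi << 32) | lo;
+#		if (d == 0)
+#			return -1;      /* asm forces a real divbyzero trap */
+#		if (u / d > 0xffffffffULL)
+#			return 1;       /* overflow: V set, dividend kept   */
+#		out[0] = (uint32_t)(u % d);     /* remainder first... */
+#		out[1] = (uint32_t)(u / d);     /* ...quotient second */
+#		return 0;
+#	}
+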
+set POSNEG, -1
+set NDIVISOR, -2
+set NDIVIDEND, -3
+set DDSECOND, -4
+set DDNORMAL, -8
+set DDQUOTIENT, -12
+set DIV64_CC, -16
+
+##########
+# divs.l #
+##########
+ global _060LSP__idivs64_
+_060LSP__idivs64_:
+# PROLOGUE BEGIN ########################################################
+ link.w %a6,&-16
+ movm.l &0x3f00,-(%sp) # save d2-d7
+# fmovm.l &0x0,-(%sp) # save no fpregs
+# PROLOGUE END ##########################################################
+
+ mov.w %cc,DIV64_CC(%a6)
+ st POSNEG(%a6) # signed operation
+ bra.b ldiv64_cont
+
+##########
+# divu.l #
+##########
+ global _060LSP__idivu64_
+_060LSP__idivu64_:
+# PROLOGUE BEGIN ########################################################
+ link.w %a6,&-16
+ movm.l &0x3f00,-(%sp) # save d2-d7
+# fmovm.l &0x0,-(%sp) # save no fpregs
+# PROLOGUE END ##########################################################
+
+ mov.w %cc,DIV64_CC(%a6)
+ sf POSNEG(%a6) # unsigned operation
+
+ldiv64_cont:
+ mov.l 0x8(%a6),%d7 # fetch divisor
+
+ beq.w ldiv64eq0 # divisor is = 0!!!
+
+ mov.l 0xc(%a6), %d5 # get dividend hi
+ mov.l 0x10(%a6), %d6 # get dividend lo
+
+# separate signed and unsigned divide
+ tst.b POSNEG(%a6) # signed or unsigned?
+ beq.b ldspecialcases # use positive divide
+
+# save the sign of the divisor
+# make divisor unsigned if it's negative
+ tst.l %d7 # chk sign of divisor
+ slt NDIVISOR(%a6) # save sign of divisor
+ bpl.b ldsgndividend
+ neg.l %d7 # complement negative divisor
+
+# save the sign of the dividend
+# make dividend unsigned if it's negative
+ldsgndividend:
+ tst.l %d5 # chk sign of hi(dividend)
+ slt NDIVIDEND(%a6) # save sign of dividend
+ bpl.b ldspecialcases
+
+ mov.w &0x0, %cc # clear 'X' cc bit
+ negx.l %d6 # complement signed dividend
+ negx.l %d5
+
+# extract some special cases:
+# - is (dividend == 0) ?
+# - is (hi(dividend) == 0 && (divisor <= lo(dividend))) ? (32-bit div)
+ldspecialcases:
+ tst.l %d5 # is (hi(dividend) == 0)
+ bne.b ldnormaldivide # no, so try it the long way
+
+ tst.l %d6 # is (lo(dividend) == 0), too
+ beq.w lddone # yes, so (dividend == 0)
+
+ cmp.l %d7,%d6 # is (divisor <= lo(dividend))
+ bls.b ld32bitdivide # yes, so use 32 bit divide
+
+ exg %d5,%d6 # q = 0, r = dividend
+ bra.w ldivfinish # can't divide, we're done.
+
+ld32bitdivide:
+ tdivu.l %d7, %d5:%d6 # it's only a 32/32 bit div!
+
+ bra.b ldivfinish
+
+ldnormaldivide:
+# last special case:
+# - is hi(dividend) >= divisor ? if yes, then overflow
+ cmp.l %d7,%d5
+ bls.b lddovf # answer won't fit in 32 bits
+
+# perform the divide algorithm:
+ bsr.l ldclassical # do int divide
+
+# separate into signed and unsigned finishes.
+ldivfinish:
+ tst.b POSNEG(%a6) # do divs, divu separately
+ beq.b lddone # divu has no processing!!!
+
+# it was a divs.l, so ccode setting is a little more complicated...
+ tst.b NDIVIDEND(%a6) # remainder has same sign
+ beq.b ldcc # as dividend.
+ neg.l %d5 # sgn(rem) = sgn(dividend)
+ldcc:
+ mov.b NDIVISOR(%a6), %d0
+ eor.b %d0, NDIVIDEND(%a6) # chk if quotient is negative
+ beq.b ldqpos # branch to quot positive
+
+# 0x80000000 is the most negative number representable in 32 bits.
+# the negative of 0x80000000 is 0x80000000 again.
+ cmpi.l %d6, &0x80000000 # will (-quot) fit in 32 bits?
+ bhi.b lddovf
+
+ neg.l %d6 # make (-quot) 2's comp
+
+ bra.b lddone
+
+ldqpos:
+ btst &0x1f, %d6 # will (+quot) fit in 32 bits?
+ bne.b lddovf
+
+lddone:
+# if the register numbers are the same, only the quotient gets saved.
+# so, if we always save the quotient second, we save ourselves a cmp&beq
+ andi.w &0x10,DIV64_CC(%a6)
+ mov.w DIV64_CC(%a6),%cc
+ tst.l %d6 # may set 'N' ccode bit
+
+# here, the remainder is in %d5 and the quotient is in %d6. save them
+# at the location passed on the stack (the pointer at 0x14(%a6)),
+# remainder first. use movm here so as not to disturb the condition codes.
+ldexit:
+ movm.l &0x0060,([0x14,%a6]) # save result
+
+# EPILOGUE BEGIN ########################################################
+# fmovm.l (%sp)+,&0x0 # restore no fpregs
+ movm.l (%sp)+,&0x00fc # restore d2-d7
+ unlk %a6
+# EPILOGUE END ##########################################################
+
+ rts
+
+# the result should be the unchanged dividend
+lddovf:
+ mov.l 0xc(%a6), %d5 # get dividend hi
+ mov.l 0x10(%a6), %d6 # get dividend lo
+
+ andi.w &0x1c,DIV64_CC(%a6)
+ ori.w &0x02,DIV64_CC(%a6) # set 'V' ccode bit
+ mov.w DIV64_CC(%a6),%cc
+
+ bra.b ldexit
+
+ldiv64eq0:
+ mov.l 0xc(%a6),([0x14,%a6])
+ mov.l 0x10(%a6),([0x14,%a6],0x4)
+
+ mov.w DIV64_CC(%a6),%cc
+
+# EPILOGUE BEGIN ########################################################
+# fmovm.l (%sp)+,&0x0 # restore no fpregs
+ movm.l (%sp)+,&0x00fc # restore d2-d7
+ unlk %a6
+# EPILOGUE END ##########################################################
+
+ divu.w &0x0,%d0 # force a divbyzero exception
+ rts
+
+###########################################################################
+#########################################################################
+# This routine uses the 'classical' Algorithm D from Donald Knuth's #
+# Art of Computer Programming, vol II, Seminumerical Algorithms. #
+# For this implementation b=2**16, and the target is U1U2U3U4/V1V2, #
+# where U,V are words of the quadword dividend and longword divisor, #
+# and U1, V1 are the most significant words. #
+# #
+# The most sig. longword of the 64 bit dividend must be in %d5, least #
+# in %d6. The divisor must be in the variable ddivisor, and the #
+# signed/unsigned flag ddusign must be set (0=unsigned,1=signed). #
+# The quotient is returned in %d6, remainder in %d5, unless the #
+# v (overflow) bit is set in the saved %ccr. If overflow, the dividend #
+# is unchanged. #
+#########################################################################
+ldclassical:
+# if the divisor msw is 0, use a simpler algorithm than the full-blown
+# one at lddknuth:
+
+ cmpi.l %d7, &0xffff
+ bhi.b lddknuth # go use D. Knuth algorithm
+
+# Since the divisor is only a word (and larger than the mslw of the dividend),
+# a simpler algorithm may be used:
+# In the general case, four quotient words would be created by
+# dividing the divisor word into each dividend word. In this case,
+# the first two quotient words must be zero, or overflow would occur.
+# Since we already checked this case above, we can treat the most significant
+# longword of the dividend as (0) remainder (see Knuth) and merely complete
+# the last two divisions to get a quotient longword and word remainder:
+
+ clr.l %d1
+ swap %d5 # same as r*b if previous step rqd
+ swap %d6 # get u3 to lsw position
+ mov.w %d6, %d5 # rb + u3
+
+ divu.w %d7, %d5
+
+ mov.w %d5, %d1 # first quotient word
+ swap %d6 # get u4
+ mov.w %d6, %d5 # rb + u4
+
+ divu.w %d7, %d5
+
+ swap %d1
+ mov.w %d5, %d1 # 2nd quotient 'digit'
+ clr.w %d5
+ swap %d5 # now remainder
+ mov.l %d1, %d6 # and quotient
+
+ rts
+
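+# A hedged C rendering of the word-divisor shortcut above, given
+# uint32_t hi, lo, d with d <= 0xffff and hi < d (so hi acts as the
+# incoming remainder):
+#
+#	uint32_t t, q, r = hi;
+#	t = (r << 16) | (lo >> 16);     /* r*b + u3             */
+#	q = (t / d) << 16;              /* first quotient word  */
+#	r = t % d;
+#	t = (r << 16) | (lo & 0xffff);  /* r*b + u4             */
+#	q |= t / d;                     /* second quotient word */
+#	r = t % d;                      /* final remainder      */
+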
+lddknuth:
+# In this algorithm, the divisor is treated as a 2 digit (word) number
+# which is divided into a 3 digit (word) dividend to get one quotient
+# digit (word). After subtraction, the dividend is shifted and the
+# process repeated. Before beginning, the divisor and quotient are
+# 'normalized' so that the process of estimating the quotient digit
+# will yield verifiably correct results.
+
+ clr.l DDNORMAL(%a6) # count of shifts for normalization
+ clr.b DDSECOND(%a6) # clear flag for quotient digits
+ clr.l %d1 # %d1 will hold trial quotient
+lddnchk:
+ btst &31, %d7 # must we normalize? first word of
+ bne.b lddnormalized # divisor (V1) must be >= 65536/2
+ addq.l &0x1, DDNORMAL(%a6) # count normalization shifts
+ lsl.l &0x1, %d7 # shift the divisor
+ lsl.l &0x1, %d6 # shift u4,u3 with overflow to u2
+ roxl.l &0x1, %d5 # shift u1,u2
+ bra.w lddnchk
+lddnormalized:
+
+# Now calculate an estimate of the quotient words (msw first, then lsw).
+# The comments use subscripts for the first quotient digit determination.
+ mov.l %d7, %d3 # divisor
+ mov.l %d5, %d2 # dividend mslw
+ swap %d2
+ swap %d3
+ cmp.w %d2, %d3 # V1 = U1 ?
+ bne.b lddqcalc1
+ mov.w &0xffff, %d1 # use max trial quotient word
+ bra.b lddadj0
+lddqcalc1:
+ mov.l %d5, %d1
+
+ divu.w %d3, %d1 # use quotient of mslw/msw
+
+ andi.l &0x0000ffff, %d1 # zero any remainder
+lddadj0:
+
+# now test the trial quotient and adjust. This step plus the
+# normalization assures (according to Knuth) that the trial
+# quotient will be at worst 1 too large.
+ mov.l %d6, -(%sp)
+ clr.w %d6 # word u3 left
+ swap %d6 # in lsw position
+lddadj1: mov.l %d7, %d3
+ mov.l %d1, %d2
+ mulu.w %d7, %d2 # V2q
+ swap %d3
+ mulu.w %d1, %d3 # V1q
+ mov.l %d5, %d4 # U1U2
+ sub.l %d3, %d4 # U1U2 - V1q
+
+ swap %d4
+
+ mov.w %d4,%d0
+ mov.w %d6,%d4 # insert lower word (U3)
+
+ tst.w %d0 # is upper word set?
+ bne.w lddadjd1
+
+# add.l %d6, %d4 # (U1U2 - V1q) + U3
+
+ cmp.l %d2, %d4
+ bls.b lddadjd1 # is V2q > (U1U2-V1q) + U3 ?
+ subq.l &0x1, %d1 # yes, decrement and recheck
+ bra.b lddadj1
+lddadjd1:
+# now test the word by multiplying it by the divisor (V1V2) and comparing
+# the 3 digit (word) result with the current dividend words
+ mov.l %d5, -(%sp) # save %d5 (%d6 already saved)
+ mov.l %d1, %d6
+ swap %d6 # shift answer to ms 3 words
+ mov.l %d7, %d5
+ bsr.l ldmm2
+ mov.l %d5, %d2 # now %d2,%d3 are trial*divisor
+ mov.l %d6, %d3
+ mov.l (%sp)+, %d5 # restore dividend
+ mov.l (%sp)+, %d6
+ sub.l %d3, %d6
+ subx.l %d2, %d5 # subtract double precision
+ bcc ldd2nd # no carry, do next quotient digit
+ subq.l &0x1, %d1 # q is one too large
+# need to add back divisor longword to current ms 3 digits of dividend
+# - according to Knuth, this is done only 2 out of 65536 times for random
+# divisor, dividend selection.
+ clr.l %d2
+ mov.l %d7, %d3
+ swap %d3
+ clr.w %d3 # %d3 now ls word of divisor
+ add.l %d3, %d6 # aligned with 3rd word of dividend
+ addx.l %d2, %d5
+ mov.l %d7, %d3
+ clr.w %d3 # %d3 now ms word of divisor
+ swap %d3 # aligned with 2nd word of dividend
+ add.l %d3, %d5
+ldd2nd:
+ tst.b DDSECOND(%a6) # both q words done?
+ bne.b lddremain
+# first quotient digit now correct. store digit and shift the
+# (subtracted) dividend
+ mov.w %d1, DDQUOTIENT(%a6)
+ clr.l %d1
+ swap %d5
+ swap %d6
+ mov.w %d6, %d5
+ clr.w %d6
+ st DDSECOND(%a6) # second digit
+ bra.w lddnormalized
+lddremain:
+# add 2nd word to quotient, get the remainder.
+ mov.w %d1, DDQUOTIENT+2(%a6)
+# shift down one word/digit to renormalize remainder.
+ mov.w %d5, %d6
+ swap %d6
+ swap %d5
+ mov.l DDNORMAL(%a6), %d7 # get norm shift count
+ beq.b lddrn
+ subq.l &0x1, %d7 # set for loop count
+lddnlp:
+ lsr.l &0x1, %d5 # shift into %d6
+ roxr.l &0x1, %d6
+ dbf %d7, lddnlp
+lddrn:
+ mov.l %d6, %d5 # remainder
+ mov.l DDQUOTIENT(%a6), %d6 # quotient
+
+ rts
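+
+# lddknuth above is Knuth's Algorithm D with base b = 2^16: normalize
+# until the divisor's top bit is set, estimate each quotient word from
+# the leading words, fix the at-most-one-too-large estimate, subtract,
+# and repeat. Behaviorally it computes (hedged sketch; the asm itself
+# never uses 64-bit hardware operations):
+#
+#	uint64_t u = ((uint64_t)hi << 32) | lo; /* hi < d guaranteed */
+#	uint32_t q = (uint32_t)(u / d);
+#	uint32_t r = (uint32_t)(u % d);
+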
+ldmm2:
+# factors for the 32X32->64 multiplication are in %d5 and %d6.
+# returns 64 bit result in %d5 (hi) %d6(lo).
+# destroys %d2,%d3,%d4.
+
+# multiply hi,lo words of each factor to get 4 intermediate products
+ mov.l %d6, %d2
+ mov.l %d6, %d3
+ mov.l %d5, %d4
+ swap %d3
+ swap %d4
+ mulu.w %d5, %d6 # %d6 <- lsw*lsw
+ mulu.w %d3, %d5 # %d5 <- msw-dest*lsw-source
+ mulu.w %d4, %d2 # %d2 <- msw-source*lsw-dest
+ mulu.w %d4, %d3 # %d3 <- msw*msw
+# now use swap and addx to consolidate to two longwords
+ clr.l %d4
+ swap %d6
+ add.w %d5, %d6 # add msw of l*l to lsw of m*l product
+ addx.w %d4, %d3 # add any carry to m*m product
+ add.w %d2, %d6 # add in lsw of other m*l product
+ addx.w %d4, %d3 # add any carry to m*m product
+ swap %d6 # %d6 is low 32 bits of final product
+ clr.w %d5
+ clr.w %d2 # lsw of two mixed products used,
+ swap %d5 # now use msws of longwords
+ swap %d2
+ add.l %d2, %d5
+ add.l %d3, %d5 # %d5 now ms 32 bits of final product
+ rts
+
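+# A hedged C equivalent of ldmm2 above: the same four 16x16 partial
+# products, consolidated with explicit carry handling.
+#
+#	void mul32x32(uint32_t a, uint32_t b, uint32_t *hi, uint32_t *lo)
+#	{
+#		uint32_t ll = (a & 0xffff) * (b & 0xffff);
+#		uint32_t lh = (a & 0xffff) * (b >> 16);
+#		uint32_t hl = (a >> 16) * (b & 0xffff);
+#		uint32_t hh = (a >> 16) * (b >> 16);
+#		uint32_t mid = (ll >> 16) + (lh & 0xffff) + (hl & 0xffff);
+#
+#		*lo = (mid << 16) | (ll & 0xffff);
+#		*hi = hh + (lh >> 16) + (hl >> 16) + (mid >> 16);
+#	}
+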
+#########################################################################
+# XDEF **************************************************************** #
+# _060LSP__imulu64_(): Emulate 64-bit unsigned mul instruction #
+# _060LSP__imuls64_(): Emulate 64-bit signed mul instruction. #
+# #
+# This is the library version which is accessed as a subroutine #
+# and therefore does not work exactly like the 680X0 mul{s,u}.l #
+# 64-bit multiply instruction. #
+# #
+# XREF **************************************************************** #
+# None #
+# #
+# INPUT *************************************************************** #
+# 0x4(sp) = multiplier #
+# 0x8(sp) = multiplicand #
+# 0xc(sp) = pointer to location to place 64-bit result #
+# #
+# OUTPUT ************************************************************** #
+# 0xc(sp) = points to location of 64-bit result #
+# #
+# ALGORITHM *********************************************************** #
+# Perform the multiply in pieces using 16x16->32 unsigned #
+# multiplies and "add" instructions. #
+# Set the condition codes as appropriate before performing an #
+# "rts". #
+# #
+#########################################################################
+
+set MUL64_CC, -4
+
+ global _060LSP__imulu64_
+_060LSP__imulu64_:
+
+# PROLOGUE BEGIN ########################################################
+ link.w %a6,&-4
+ movm.l &0x3800,-(%sp) # save d2-d4
+# fmovm.l &0x0,-(%sp) # save no fpregs
+# PROLOGUE END ##########################################################
+
+ mov.w %cc,MUL64_CC(%a6) # save incoming ccodes
+
+ mov.l 0x8(%a6),%d0 # store multiplier in d0
+ beq.w mulu64_zero # handle zero separately
+
+ mov.l 0xc(%a6),%d1 # get multiplicand in d1
+ beq.w mulu64_zero # handle zero separately
+
+#########################################################################
+# 63 32 0 #
+# ---------------------------- #
+# | hi(mplier) * hi(mplicand)| #
+# ---------------------------- #
+# ----------------------------- #
+# | hi(mplier) * lo(mplicand) | #
+# ----------------------------- #
+# ----------------------------- #
+# | lo(mplier) * hi(mplicand) | #
+# ----------------------------- #
+# | ----------------------------- #
+# --|-- | lo(mplier) * lo(mplicand) | #
+# | ----------------------------- #
+# ======================================================== #
+# -------------------------------------------------------- #
+# | hi(result) | lo(result) | #
+# -------------------------------------------------------- #
+#########################################################################
+mulu64_alg:
+# load temp registers with operands
+ mov.l %d0,%d2 # mr in d2
+ mov.l %d0,%d3 # mr in d3
+ mov.l %d1,%d4 # md in d4
+ swap %d3 # hi(mr) in lo d3
+ swap %d4 # hi(md) in lo d4
+
+# complete necessary multiplies:
+ mulu.w %d1,%d0 # [1] lo(mr) * lo(md)
+ mulu.w %d3,%d1 # [2] hi(mr) * lo(md)
+ mulu.w %d4,%d2 # [3] lo(mr) * hi(md)
+ mulu.w %d4,%d3 # [4] hi(mr) * hi(md)
+
+# add lo portions of [2],[3] to hi portion of [1].
+# add carries produced from these adds to [4].
+# lo([1]) is the final lo 16 bits of the result.
+ clr.l %d4 # load d4 w/ zero value
+ swap %d0 # hi([1]) <==> lo([1])
+ add.w %d1,%d0 # hi([1]) + lo([2])
+ addx.l %d4,%d3 # [4] + carry
+ add.w %d2,%d0 # hi([1]) + lo([3])
+ addx.l %d4,%d3 # [4] + carry
+ swap %d0 # lo([1]) <==> hi([1])
+
+# lo portions of [2],[3] have been added in to final result.
+# now, clear lo, put hi in lo reg, and add to [4]
+ clr.w %d1 # clear lo([2])
+ clr.w %d2 # clear hi([3])
+ swap %d1 # hi([2]) in lo d1
+ swap %d2 # hi([3]) in lo d2
+ add.l %d2,%d1 # [4] + hi([2])
+ add.l %d3,%d1 # [4] + hi([3])
+
+# now, grab the condition codes. the only one that can be set is 'N'.
+# 'N' CAN be set, even though the operation is unsigned, if bit 63 is set.
+ mov.w MUL64_CC(%a6),%d4
+ andi.b &0x10,%d4 # keep old 'X' bit
+ tst.l %d1 # may set 'N' bit
+ bpl.b mulu64_ddone
+ ori.b &0x8,%d4 # set 'N' bit
+mulu64_ddone:
+ mov.w %d4,%cc
+
+# here, hi(result) is in %d1 and lo(result) is in %d0. the exg below
+# swaps them so that the movm stores hi(result) first at the location
+# passed on the stack (the pointer at 0x10(%a6)).
+# use movm here so as not to disturb the condition codes.
+mulu64_end:
+ exg %d1,%d0
+ movm.l &0x0003,([0x10,%a6]) # save result
+
+# EPILOGUE BEGIN ########################################################
+# fmovm.l (%sp)+,&0x0 # restore no fpregs
+ movm.l (%sp)+,&0x001c # restore d2-d4
+ unlk %a6
+# EPILOGUE END ##########################################################
+
+ rts
+
+# one or both of the operands is zero so the result is also zero.
+# save the zero result to the register file and set the 'Z' ccode bit.
+mulu64_zero:
+ clr.l %d0
+ clr.l %d1
+
+ mov.w MUL64_CC(%a6),%d4
+ andi.b &0x10,%d4
+ ori.b &0x4,%d4
+ mov.w %d4,%cc # set 'Z' ccode bit
+
+ bra.b mulu64_end
+
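+# A hedged C model of the imulu64 interface (names invented; the result
+# is stored hi longword first, matching the exg/movm sequence above):
+#
+#	void imulu64(uint32_t mr, uint32_t md, uint32_t out[2])
+#	{
+#		uint64_t p = (uint64_t)mr * md;
+#		out[0] = (uint32_t)(p >> 32);   /* hi(result) */
+#		out[1] = (uint32_t)p;           /* lo(result) */
+#		/* ccodes: N tracks bit 63, Z set iff p == 0 */
+#	}
+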
+##########
+# muls.l #
+##########
+ global _060LSP__imuls64_
+_060LSP__imuls64_:
+
+# PROLOGUE BEGIN ########################################################
+ link.w %a6,&-4
+ movm.l &0x3c00,-(%sp) # save d2-d5
+# fmovm.l &0x0,-(%sp) # save no fpregs
+# PROLOGUE END ##########################################################
+
+ mov.w %cc,MUL64_CC(%a6) # save incoming ccodes
+
+ mov.l 0x8(%a6),%d0 # store multiplier in d0
+ beq.b mulu64_zero # handle zero separately
+
+ mov.l 0xc(%a6),%d1 # get multiplicand in d1
+ beq.b mulu64_zero # handle zero separately
+
+ clr.b %d5 # clear sign tag
+ tst.l %d0 # is multiplier negative?
+ bge.b muls64_chk_md_sgn # no
+ neg.l %d0 # make multiplier positive
+
+ ori.b &0x1,%d5 # save multiplier sgn
+
+# the result sign is the exclusive or of the operand sign bits.
+muls64_chk_md_sgn:
+ tst.l %d1 # is multiplicand negative?
+ bge.b muls64_alg # no
+ neg.l %d1 # make multiplicand positive
+
+ eori.b &0x1,%d5 # calculate correct sign
+
+#########################################################################
+# 63 32 0 #
+# ---------------------------- #
+# | hi(mplier) * hi(mplicand)| #
+# ---------------------------- #
+# ----------------------------- #
+# | hi(mplier) * lo(mplicand) | #
+# ----------------------------- #
+# ----------------------------- #
+# | lo(mplier) * hi(mplicand) | #
+# ----------------------------- #
+# | ----------------------------- #
+# --|-- | lo(mplier) * lo(mplicand) | #
+# | ----------------------------- #
+# ======================================================== #
+# -------------------------------------------------------- #
+# | hi(result) | lo(result) | #
+# -------------------------------------------------------- #
+#########################################################################
+muls64_alg:
+# load temp registers with operands
+ mov.l %d0,%d2 # mr in d2
+ mov.l %d0,%d3 # mr in d3
+ mov.l %d1,%d4 # md in d4
+ swap %d3 # hi(mr) in lo d3
+ swap %d4 # hi(md) in lo d4
+
+# complete necessary multiplies:
+ mulu.w %d1,%d0 # [1] lo(mr) * lo(md)
+ mulu.w %d3,%d1 # [2] hi(mr) * lo(md)
+ mulu.w %d4,%d2 # [3] lo(mr) * hi(md)
+ mulu.w %d4,%d3 # [4] hi(mr) * hi(md)
+
+# add lo portions of [2],[3] to hi portion of [1].
+# add carries produced from these adds to [4].
+# lo([1]) is the final lo 16 bits of the result.
+ clr.l %d4 # load d4 w/ zero value
+ swap %d0 # hi([1]) <==> lo([1])
+ add.w %d1,%d0 # hi([1]) + lo([2])
+ addx.l %d4,%d3 # [4] + carry
+ add.w %d2,%d0 # hi([1]) + lo([3])
+ addx.l %d4,%d3 # [4] + carry
+ swap %d0 # lo([1]) <==> hi([1])
+
+# lo portions of [2],[3] have been added in to final result.
+# now, clear lo, put hi in lo reg, and add to [4]
+ clr.w %d1 # clear lo([2])
+ clr.w %d2 # clear hi([3])
+ swap %d1 # hi([2]) in lo d1
+ swap %d2 # hi([3]) in lo d2
+ add.l %d2,%d1 # [4] + hi([2])
+ add.l %d3,%d1 # [4] + hi([3])
+
+ tst.b %d5 # should result be signed?
+ beq.b muls64_done # no
+
+# result should be a signed negative number.
+# compute 2's complement of the unsigned number:
+# -negate all bits and add 1
+muls64_neg:
+ not.l %d0 # negate lo(result) bits
+ not.l %d1 # negate hi(result) bits
+ addq.l &1,%d0 # add 1 to lo(result)
+ addx.l %d4,%d1 # add carry to hi(result)
+
+muls64_done:
+ mov.w MUL64_CC(%a6),%d4
+ andi.b &0x10,%d4 # keep old 'X' bit
+ tst.l %d1 # may set 'N' bit
+ bpl.b muls64_ddone
+ ori.b &0x8,%d4 # set 'N' bit
+muls64_ddone:
+ mov.w %d4,%cc
+
+# here, hi(result) is in %d1 and lo(result) is in %d0. the exg below
+# swaps them so that the movm stores hi(result) first at the location
+# passed on the stack (the pointer at 0x10(%a6)).
+# use movm here so as not to disturb the condition codes.
+muls64_end:
+ exg %d1,%d0
+	movm.l		&0x0003,([0x10,%a6])	# save result via 0x10(%a6)
+
+# EPILOGUE BEGIN ########################################################
+# fmovm.l (%sp)+,&0x0 # restore no fpregs
+ movm.l (%sp)+,&0x003c # restore d2-d5
+ unlk %a6
+# EPILOGUE END ##########################################################
+
+ rts
+
+# one or both of the operands is zero so the result is also zero.
+# save the zero result to the register file and set the 'Z' ccode bit.
+muls64_zero:
+ clr.l %d0
+ clr.l %d1
+
+ mov.w MUL64_CC(%a6),%d4
+ andi.b &0x10,%d4
+ ori.b &0x4,%d4
+ mov.w %d4,%cc # set 'Z' ccode bit
+
+ bra.b muls64_end
+
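+# The signed entry above reuses the unsigned datapath: take magnitudes,
+# multiply, then 2's-complement the 64-bit product if exactly one
+# operand was negative. A hedged C sketch:
+#
+#	void imuls64(int32_t mr, int32_t md, uint32_t out[2])
+#	{
+#		int neg = (mr < 0) ^ (md < 0);
+#		uint32_t x = (mr < 0) ? 0u - (uint32_t)mr : (uint32_t)mr;
+#		uint32_t y = (md < 0) ? 0u - (uint32_t)md : (uint32_t)md;
+#		uint64_t p = (uint64_t)x * y;
+#
+#		if (neg)
+#			p = ~p + 1;     /* as muls64_neg does, with carry */
+#		out[0] = (uint32_t)(p >> 32);
+#		out[1] = (uint32_t)p;
+#	}
+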
+#########################################################################
+# XDEF **************************************************************** #
+# _060LSP__cmp2_Ab_(): Emulate "cmp2.b An,<ea>". #
+# _060LSP__cmp2_Aw_(): Emulate "cmp2.w An,<ea>". #
+# _060LSP__cmp2_Al_(): Emulate "cmp2.l An,<ea>". #
+# _060LSP__cmp2_Db_(): Emulate "cmp2.b Dn,<ea>". #
+# _060LSP__cmp2_Dw_(): Emulate "cmp2.w Dn,<ea>". #
+# _060LSP__cmp2_Dl_(): Emulate "cmp2.l Dn,<ea>". #
+# #
+# This is the library version which is accessed as a subroutine #
+# and therefore does not work exactly like the 680X0 "cmp2" #
+# instruction. #
+# #
+# XREF **************************************************************** #
+# None #
+# #
+# INPUT *************************************************************** #
+# 0x4(sp) = Rn #
+# 0x8(sp) = pointer to boundary pair #
+# #
+# OUTPUT ************************************************************** #
+# cc = condition codes are set correctly #
+# #
+# ALGORITHM *********************************************************** #
+# In the interest of simplicity, all operands are converted to #
+# longword size whether the operation is byte, word, or long. The #
+# bounds are sign extended accordingly. If Rn is a data register, Rn is #
+# also sign extended. If Rn is an address register, it need not be sign #
+# extended since the full register is always used. #
+# The condition codes are set correctly before the final "rts". #
+# #
+#########################################################################
+
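+# The range check below rests on one unsigned trick: after biasing both
+# Rn and hi by lo, a single unsigned compare tests lo <= Rn <= hi for
+# either signedness. A hedged C sketch of l_cmp2_cmp's core (the ccode
+# bookkeeping is omitted):
+#
+#	int cmp2_in_range(int32_t rn, int32_t lo, int32_t hi)
+#	{
+#		uint32_t x = (uint32_t)rn - (uint32_t)lo; /* Z here: Rn == lo */
+#		uint32_t w = (uint32_t)hi - (uint32_t)lo;
+#		return x <= w;          /* equality here gives Z: Rn == hi */
+#	}
+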
+set CMP2_CC, -4
+
+ global _060LSP__cmp2_Ab_
+_060LSP__cmp2_Ab_:
+
+# PROLOGUE BEGIN ########################################################
+ link.w %a6,&-4
+ movm.l &0x3800,-(%sp) # save d2-d4
+# fmovm.l &0x0,-(%sp) # save no fpregs
+# PROLOGUE END ##########################################################
+
+ mov.w %cc,CMP2_CC(%a6)
+ mov.l 0x8(%a6), %d2 # get regval
+
+ mov.b ([0xc,%a6],0x0),%d0
+ mov.b ([0xc,%a6],0x1),%d1
+
+ extb.l %d0 # sign extend lo bnd
+ extb.l %d1 # sign extend hi bnd
+ bra.w l_cmp2_cmp # go do the compare emulation
+
+ global _060LSP__cmp2_Aw_
+_060LSP__cmp2_Aw_:
+
+# PROLOGUE BEGIN ########################################################
+ link.w %a6,&-4
+ movm.l &0x3800,-(%sp) # save d2-d4
+# fmovm.l &0x0,-(%sp) # save no fpregs
+# PROLOGUE END ##########################################################
+
+ mov.w %cc,CMP2_CC(%a6)
+ mov.l 0x8(%a6), %d2 # get regval
+
+ mov.w ([0xc,%a6],0x0),%d0
+ mov.w ([0xc,%a6],0x2),%d1
+
+ ext.l %d0 # sign extend lo bnd
+ ext.l %d1 # sign extend hi bnd
+ bra.w l_cmp2_cmp # go do the compare emulation
+
+ global _060LSP__cmp2_Al_
+_060LSP__cmp2_Al_:
+
+# PROLOGUE BEGIN ########################################################
+ link.w %a6,&-4
+ movm.l &0x3800,-(%sp) # save d2-d4
+# fmovm.l &0x0,-(%sp) # save no fpregs
+# PROLOGUE END ##########################################################
+
+ mov.w %cc,CMP2_CC(%a6)
+ mov.l 0x8(%a6), %d2 # get regval
+
+ mov.l ([0xc,%a6],0x0),%d0
+ mov.l ([0xc,%a6],0x4),%d1
+ bra.w l_cmp2_cmp # go do the compare emulation
+
+ global _060LSP__cmp2_Db_
+_060LSP__cmp2_Db_:
+
+# PROLOGUE BEGIN ########################################################
+ link.w %a6,&-4
+ movm.l &0x3800,-(%sp) # save d2-d4
+# fmovm.l &0x0,-(%sp) # save no fpregs
+# PROLOGUE END ##########################################################
+
+ mov.w %cc,CMP2_CC(%a6)
+ mov.l 0x8(%a6), %d2 # get regval
+
+ mov.b ([0xc,%a6],0x0),%d0
+ mov.b ([0xc,%a6],0x1),%d1
+
+ extb.l %d0 # sign extend lo bnd
+ extb.l %d1 # sign extend hi bnd
+
+# operation is a data register compare.
+# sign extend byte to long so we can do simple longword compares.
+ extb.l %d2 # sign extend data byte
+ bra.w l_cmp2_cmp # go do the compare emulation
+
+ global _060LSP__cmp2_Dw_
+_060LSP__cmp2_Dw_:
+
+# PROLOGUE BEGIN ########################################################
+ link.w %a6,&-4
+ movm.l &0x3800,-(%sp) # save d2-d4
+# fmovm.l &0x0,-(%sp) # save no fpregs
+# PROLOGUE END ##########################################################
+
+ mov.w %cc,CMP2_CC(%a6)
+ mov.l 0x8(%a6), %d2 # get regval
+
+ mov.w ([0xc,%a6],0x0),%d0
+ mov.w ([0xc,%a6],0x2),%d1
+
+ ext.l %d0 # sign extend lo bnd
+ ext.l %d1 # sign extend hi bnd
+
+# operation is a data register compare.
+# sign extend word to long so we can do simple longword compares.
+ ext.l %d2 # sign extend data word
+ bra.w l_cmp2_cmp # go emulate compare
+
+ global _060LSP__cmp2_Dl_
+_060LSP__cmp2_Dl_:
+
+# PROLOGUE BEGIN ########################################################
+ link.w %a6,&-4
+ movm.l &0x3800,-(%sp) # save d2-d4
+# fmovm.l &0x0,-(%sp) # save no fpregs
+# PROLOGUE END ##########################################################
+
+ mov.w %cc,CMP2_CC(%a6)
+ mov.l 0x8(%a6), %d2 # get regval
+
+ mov.l ([0xc,%a6],0x0),%d0
+ mov.l ([0xc,%a6],0x4),%d1
+
+#
+# To set the ccodes correctly:
+# (1) save 'Z' bit from (Rn - lo)
+# (2) save 'Z' and 'C' bits from ((hi - lo) - (Rn - lo))
+# (3) keep 'X', 'N', and 'V' from before instruction
+# (4) combine ccodes
+#
+l_cmp2_cmp:
+ sub.l %d0, %d2 # (Rn - lo)
+ mov.w %cc, %d3 # fetch resulting ccodes
+ andi.b &0x4, %d3 # keep 'Z' bit
+ sub.l %d0, %d1 # (hi - lo)
+	cmp.l		%d1,%d2			# ((hi - lo) - (Rn - lo))
+
+ mov.w %cc, %d4 # fetch resulting ccodes
+ or.b %d4, %d3 # combine w/ earlier ccodes
+	andi.b		&0x5, %d3		# keep 'Z' and 'C'
+
+ mov.w CMP2_CC(%a6), %d4 # fetch old ccodes
+ andi.b &0x1a, %d4 # keep 'X','N','V' bits
+ or.b %d3, %d4 # insert new ccodes
+ mov.w %d4,%cc # save new ccodes
+
+# EPILOGUE BEGIN ########################################################
+# fmovm.l (%sp)+,&0x0 # restore no fpregs
+ movm.l (%sp)+,&0x001c # restore d2-d4
+ unlk %a6
+# EPILOGUE END ##########################################################
+
+ rts
diff --git a/arch/m68k/ifpsp060/src/isp.S b/arch/m68k/ifpsp060/src/isp.S
new file mode 100644
index 00000000000..b269091d9df
--- /dev/null
+++ b/arch/m68k/ifpsp060/src/isp.S
@@ -0,0 +1,4299 @@
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+MOTOROLA MICROPROCESSOR & MEMORY TECHNOLOGY GROUP
+M68000 Hi-Performance Microprocessor Division
+M68060 Software Package
+Production Release P1.00 -- October 10, 1994
+
+M68060 Software Package Copyright © 1993, 1994 Motorola Inc. All rights reserved.
+
+THE SOFTWARE is provided on an "AS IS" basis and without warranty.
+To the maximum extent permitted by applicable law,
+MOTOROLA DISCLAIMS ALL WARRANTIES WHETHER EXPRESS OR IMPLIED,
+INCLUDING IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
+and any warranty against infringement with regard to the SOFTWARE
+(INCLUDING ANY MODIFIED VERSIONS THEREOF) and any accompanying written materials.
+
+To the maximum extent permitted by applicable law,
+IN NO EVENT SHALL MOTOROLA BE LIABLE FOR ANY DAMAGES WHATSOEVER
+(INCLUDING WITHOUT LIMITATION, DAMAGES FOR LOSS OF BUSINESS PROFITS,
+BUSINESS INTERRUPTION, LOSS OF BUSINESS INFORMATION, OR OTHER PECUNIARY LOSS)
+ARISING OF THE USE OR INABILITY TO USE THE SOFTWARE.
+Motorola assumes no responsibility for the maintenance and support of the SOFTWARE.
+
+You are hereby granted a copyright license to use, modify, and distribute the SOFTWARE
+so long as this entire notice is retained without alteration in any modified and/or
+redistributed versions, and that such modified versions are clearly identified as such.
+No licenses are granted by implication, estoppel or otherwise under any patents
+or trademarks of Motorola, Inc.
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+# ireal.s:
+# This file is appended to the top of the 060ISP package
+# and contains the entry points into the package. The user, in
+# effect, branches to one of the branch table entries located
+# after _060ISP_TABLE.
+# Also, subroutine stubs exist in this file (_isp_done for
+# example) that are referenced by the ISP package itself in order
+# to call a given routine. The stub routine actually performs the
+# callout. The ISP code does a "bsr" to the stub routine. This
+# extra layer of hierarchy adds a slight performance penalty but
+# it makes the ISP code easier to read and more maintainable.
+#
+
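+# As a rough C model of the callout scheme (illustrative only; the names
+# below are hypothetical): the operating system stores, in each _off_*
+# slot of the 0x80-byte area preceding _060ISP_TABLE, a 32-bit offset
+# relative to the base of that area, and the stubs below jump through it:
+#
+#	extern char table_base[];		/* _060ISP_TABLE - 0x80	*/
+#	typedef void (*callout_t)(void);
+#	callout_t get_callout(unsigned long off)
+#	{
+#		long rel = *(long *)(table_base + off);	/* OS-supplied	*/
+#		return (callout_t)(table_base + rel);	/* relative tgt	*/
+#	}
+#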
+set _off_chk, 0x00
+set _off_divbyzero, 0x04
+set _off_trace, 0x08
+set _off_access, 0x0c
+set _off_done, 0x10
+
+set _off_cas, 0x14
+set _off_cas2, 0x18
+set _off_lock, 0x1c
+set _off_unlock, 0x20
+
+set _off_imr, 0x40
+set _off_dmr, 0x44
+set _off_dmw, 0x48
+set _off_irw, 0x4c
+set _off_irl, 0x50
+set _off_drb, 0x54
+set _off_drw, 0x58
+set _off_drl, 0x5c
+set _off_dwb, 0x60
+set _off_dww, 0x64
+set _off_dwl, 0x68
+
+_060ISP_TABLE:
+
+# Here's the table of ENTRY POINTS for those linking the package.
+ bra.l _isp_unimp
+ short 0x0000
+
+ bra.l _isp_cas
+ short 0x0000
+
+ bra.l _isp_cas2
+ short 0x0000
+
+ bra.l _isp_cas_finish
+ short 0x0000
+
+ bra.l _isp_cas2_finish
+ short 0x0000
+
+ bra.l _isp_cas_inrange
+ short 0x0000
+
+ bra.l _isp_cas_terminate
+ short 0x0000
+
+ bra.l _isp_cas_restart
+ short 0x0000
+
+ space 64
+
+#############################################################
+
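+# Each stub below preserves %d0 while jumping through the callout table:
+# %d0 is saved on the stack, the stored table offset is loaded into %d0,
+# pea pushes the computed callout address, %d0 is then reloaded from its
+# saved copy at 4(%sp), and "rtd &0x4" pops the callout address into the
+# PC while discarding the saved %d0.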
+ global _real_chk
+_real_chk:
+ mov.l %d0,-(%sp)
+ mov.l (_060ISP_TABLE-0x80+_off_chk,%pc),%d0
+ pea.l (_060ISP_TABLE-0x80,%pc,%d0)
+ mov.l 0x4(%sp),%d0
+ rtd &0x4
+
+ global _real_divbyzero
+_real_divbyzero:
+ mov.l %d0,-(%sp)
+ mov.l (_060ISP_TABLE-0x80+_off_divbyzero,%pc),%d0
+ pea.l (_060ISP_TABLE-0x80,%pc,%d0)
+ mov.l 0x4(%sp),%d0
+ rtd &0x4
+
+ global _real_trace
+_real_trace:
+ mov.l %d0,-(%sp)
+ mov.l (_060ISP_TABLE-0x80+_off_trace,%pc),%d0
+ pea.l (_060ISP_TABLE-0x80,%pc,%d0)
+ mov.l 0x4(%sp),%d0
+ rtd &0x4
+
+ global _real_access
+_real_access:
+ mov.l %d0,-(%sp)
+ mov.l (_060ISP_TABLE-0x80+_off_access,%pc),%d0
+ pea.l (_060ISP_TABLE-0x80,%pc,%d0)
+ mov.l 0x4(%sp),%d0
+ rtd &0x4
+
+ global _isp_done
+_isp_done:
+ mov.l %d0,-(%sp)
+ mov.l (_060ISP_TABLE-0x80+_off_done,%pc),%d0
+ pea.l (_060ISP_TABLE-0x80,%pc,%d0)
+ mov.l 0x4(%sp),%d0
+ rtd &0x4
+
+#######################################
+
+ global _real_cas
+_real_cas:
+ mov.l %d0,-(%sp)
+ mov.l (_060ISP_TABLE-0x80+_off_cas,%pc),%d0
+ pea.l (_060ISP_TABLE-0x80,%pc,%d0)
+ mov.l 0x4(%sp),%d0
+ rtd &0x4
+
+ global _real_cas2
+_real_cas2:
+ mov.l %d0,-(%sp)
+ mov.l (_060ISP_TABLE-0x80+_off_cas2,%pc),%d0
+ pea.l (_060ISP_TABLE-0x80,%pc,%d0)
+ mov.l 0x4(%sp),%d0
+ rtd &0x4
+
+ global _real_lock_page
+_real_lock_page:
+ mov.l %d0,-(%sp)
+ mov.l (_060ISP_TABLE-0x80+_off_lock,%pc),%d0
+ pea.l (_060ISP_TABLE-0x80,%pc,%d0)
+ mov.l 0x4(%sp),%d0
+ rtd &0x4
+
+ global _real_unlock_page
+_real_unlock_page:
+ mov.l %d0,-(%sp)
+ mov.l (_060ISP_TABLE-0x80+_off_unlock,%pc),%d0
+ pea.l (_060ISP_TABLE-0x80,%pc,%d0)
+ mov.l 0x4(%sp),%d0
+ rtd &0x4
+
+#######################################
+
+ global _imem_read
+_imem_read:
+ mov.l %d0,-(%sp)
+ mov.l (_060ISP_TABLE-0x80+_off_imr,%pc),%d0
+ pea.l (_060ISP_TABLE-0x80,%pc,%d0)
+ mov.l 0x4(%sp),%d0
+ rtd &0x4
+
+ global _dmem_read
+_dmem_read:
+ mov.l %d0,-(%sp)
+ mov.l (_060ISP_TABLE-0x80+_off_dmr,%pc),%d0
+ pea.l (_060ISP_TABLE-0x80,%pc,%d0)
+ mov.l 0x4(%sp),%d0
+ rtd &0x4
+
+ global _dmem_write
+_dmem_write:
+ mov.l %d0,-(%sp)
+ mov.l (_060ISP_TABLE-0x80+_off_dmw,%pc),%d0
+ pea.l (_060ISP_TABLE-0x80,%pc,%d0)
+ mov.l 0x4(%sp),%d0
+ rtd &0x4
+
+ global _imem_read_word
+_imem_read_word:
+ mov.l %d0,-(%sp)
+ mov.l (_060ISP_TABLE-0x80+_off_irw,%pc),%d0
+ pea.l (_060ISP_TABLE-0x80,%pc,%d0)
+ mov.l 0x4(%sp),%d0
+ rtd &0x4
+
+ global _imem_read_long
+_imem_read_long:
+ mov.l %d0,-(%sp)
+ mov.l (_060ISP_TABLE-0x80+_off_irl,%pc),%d0
+ pea.l (_060ISP_TABLE-0x80,%pc,%d0)
+ mov.l 0x4(%sp),%d0
+ rtd &0x4
+
+ global _dmem_read_byte
+_dmem_read_byte:
+ mov.l %d0,-(%sp)
+ mov.l (_060ISP_TABLE-0x80+_off_drb,%pc),%d0
+ pea.l (_060ISP_TABLE-0x80,%pc,%d0)
+ mov.l 0x4(%sp),%d0
+ rtd &0x4
+
+ global _dmem_read_word
+_dmem_read_word:
+ mov.l %d0,-(%sp)
+ mov.l (_060ISP_TABLE-0x80+_off_drw,%pc),%d0
+ pea.l (_060ISP_TABLE-0x80,%pc,%d0)
+ mov.l 0x4(%sp),%d0
+ rtd &0x4
+
+ global _dmem_read_long
+_dmem_read_long:
+ mov.l %d0,-(%sp)
+ mov.l (_060ISP_TABLE-0x80+_off_drl,%pc),%d0
+ pea.l (_060ISP_TABLE-0x80,%pc,%d0)
+ mov.l 0x4(%sp),%d0
+ rtd &0x4
+
+ global _dmem_write_byte
+_dmem_write_byte:
+ mov.l %d0,-(%sp)
+ mov.l (_060ISP_TABLE-0x80+_off_dwb,%pc),%d0
+ pea.l (_060ISP_TABLE-0x80,%pc,%d0)
+ mov.l 0x4(%sp),%d0
+ rtd &0x4
+
+ global _dmem_write_word
+_dmem_write_word:
+ mov.l %d0,-(%sp)
+ mov.l (_060ISP_TABLE-0x80+_off_dww,%pc),%d0
+ pea.l (_060ISP_TABLE-0x80,%pc,%d0)
+ mov.l 0x4(%sp),%d0
+ rtd &0x4
+
+ global _dmem_write_long
+_dmem_write_long:
+ mov.l %d0,-(%sp)
+ mov.l (_060ISP_TABLE-0x80+_off_dwl,%pc),%d0
+ pea.l (_060ISP_TABLE-0x80,%pc,%d0)
+ mov.l 0x4(%sp),%d0
+ rtd &0x4
+
+#
+# This file contains a set of define statements for constants
+# in order to promote readability within the core code itself.
+#
+
+set LOCAL_SIZE, 96 # stack frame size(bytes)
+set LV, -LOCAL_SIZE # stack offset
+
+set EXC_ISR, 0x4 # stack status register
+set EXC_IPC, 0x6 # stack pc
+set EXC_IVOFF, 0xa # stacked vector offset
+
+set EXC_AREGS, LV+64 # offset of all address regs
+set EXC_DREGS, LV+32 # offset of all data regs
+
+set EXC_A7, EXC_AREGS+(7*4) # offset of a7
+set EXC_A6, EXC_AREGS+(6*4) # offset of a6
+set EXC_A5, EXC_AREGS+(5*4) # offset of a5
+set EXC_A4, EXC_AREGS+(4*4) # offset of a4
+set EXC_A3, EXC_AREGS+(3*4) # offset of a3
+set EXC_A2, EXC_AREGS+(2*4) # offset of a2
+set EXC_A1, EXC_AREGS+(1*4) # offset of a1
+set EXC_A0, EXC_AREGS+(0*4) # offset of a0
+set EXC_D7, EXC_DREGS+(7*4) # offset of d7
+set EXC_D6, EXC_DREGS+(6*4) # offset of d6
+set EXC_D5, EXC_DREGS+(5*4) # offset of d5
+set EXC_D4, EXC_DREGS+(4*4) # offset of d4
+set EXC_D3, EXC_DREGS+(3*4) # offset of d3
+set EXC_D2, EXC_DREGS+(2*4) # offset of d2
+set EXC_D1, EXC_DREGS+(1*4) # offset of d1
+set EXC_D0, EXC_DREGS+(0*4) # offset of d0
+
+set EXC_TEMP, LV+16 # offset of temp stack space
+
+set EXC_SAVVAL, LV+12 # offset of old areg value
+set EXC_SAVREG, LV+11 # offset of old areg index
+
+set SPCOND_FLG, LV+10 # offset of spc condition flg
+
+set EXC_CC, LV+8 # offset of cc register
+set EXC_EXTWPTR, LV+4 # offset of current PC
+set EXC_EXTWORD, LV+2 # offset of current ext opword
+set EXC_OPWORD, LV+0 # offset of current opword
+
+###########################
+# SPecial CONDition FLaGs #
+###########################
+set mia7_flg, 0x04 # (a7)+ flag
+set mda7_flg, 0x08 # -(a7) flag
+set ichk_flg, 0x10 # chk exception flag
+set idbyz_flg, 0x20 # divbyzero flag
+set restore_flg, 0x40 # restore -(an)/(an)+ flag
+set immed_flg, 0x80 # immediate data flag
+
+set mia7_bit, 0x2 # (a7)+ bit
+set mda7_bit, 0x3 # -(a7) bit
+set ichk_bit, 0x4 # chk exception bit
+set idbyz_bit, 0x5 # divbyzero bit
+set restore_bit, 0x6 # restore -(an)/(an)+ bit
+set immed_bit, 0x7 # immediate data bit
+
+#########
+# Misc. #
+#########
+set BYTE, 1 # len(byte) == 1 byte
+set WORD, 2 # len(word) == 2 bytes
+set LONG, 4 # len(longword) == 4 bytes
+
+#########################################################################
+# XDEF **************************************************************** #
+# _isp_unimp(): 060ISP entry point for Unimplemented Instruction #
+# #
+# This handler should be the first code executed upon taking the #
+# "Unimplemented Integer Instruction" exception in an operating #
+# system. #
+# #
+# XREF **************************************************************** #
+# _imem_read_{word,long}() - read instruction word/longword #
+# _mul64() - emulate 64-bit multiply #
+# _div64() - emulate 64-bit divide #
+# _moveperipheral() - emulate "movep" #
+# _compandset() - emulate misaligned "cas" #
+# _compandset2() - emulate "cas2" #
+# _chk2_cmp2() - emulate "cmp2" and "chk2" #
+# _isp_done() - "callout" for normal final exit #
+# _real_trace() - "callout" for Trace exception #
+# _real_chk() - "callout" for Chk exception #
+# _real_divbyzero() - "callout" for DZ exception #
+# _real_access() - "callout" for access error exception #
+# #
+# INPUT *************************************************************** #
+# - The system stack contains the Unimp Int Instr stack frame #
+# #
+# OUTPUT ************************************************************** #
+# If Trace exception: #
+# - The system stack changed to contain Trace exc stack frame #
+# If Chk exception: #
+# - The system stack changed to contain Chk exc stack frame #
+# If DZ exception: #
+# - The system stack changed to contain DZ exc stack frame #
+# If access error exception: #
+# - The system stack changed to contain access err exc stk frame #
+# Else: #
+# - Results saved as appropriate #
+# #
+# ALGORITHM *********************************************************** #
+# This handler fetches the first instruction longword from #
+# memory and decodes it to determine which of the unimplemented #
+# integer instructions caused this exception. This handler then calls #
+# one of _mul64(), _div64(), _moveperipheral(), _compandset(), #
+# _compandset2(), or _chk2_cmp2() as appropriate. #
+# Some of these instructions, by their nature, may produce other #
+# types of exceptions. "div" can produce a divide-by-zero exception, #
+# and "chk2" can cause a "Chk" exception. In both cases, the current #
+# exception stack frame must be converted to an exception stack frame #
+# of the correct exception type and an exit must be made through #
+# _real_divbyzero() or _real_chk() as appropriate. In addition, all #
+# instructions may be executing while Trace is enabled. If so, then #
+# a Trace exception stack frame must be created and an exit made #
+# through _real_trace(). #
+# Meanwhile, if any read or write to memory using the #
+# _mem_{read,write}() "callout"s returns a failing value, then an #
+# access error frame must be created and an exit made through #
+# _real_access(). #
+# If none of these occur, then a normal exit is made through #
+# _isp_done(). #
+# #
+# This handler, upon entry, saves almost all user-visible #
+# address and data registers to the stack. Although this may seem to #
+# cause excess memory traffic, it was found that due to having to #
+# access these register files for things like data retrieval and <ea> #
+# calculations, it was more efficient to have them on the stack where #
+# they could be accessed by indexing rather than to make subroutine #
+# calls to retrieve a register of a particular index. #
+# #
+#########################################################################
+
+ global _isp_unimp
+_isp_unimp:
+ link.w %a6,&-LOCAL_SIZE # create room for stack frame
+
+ movm.l &0x3fff,EXC_DREGS(%a6) # store d0-d7/a0-a5
+ mov.l (%a6),EXC_A6(%a6) # store a6
+
+ btst &0x5,EXC_ISR(%a6) # from s or u mode?
+ bne.b uieh_s # supervisor mode
+uieh_u:
+ mov.l %usp,%a0 # fetch user stack pointer
+ mov.l %a0,EXC_A7(%a6) # store a7
+ bra.b uieh_cont
+uieh_s:
+ lea 0xc(%a6),%a0
+ mov.l %a0,EXC_A7(%a6) # store corrected sp
+
+###############################################################################
+
+uieh_cont:
+ clr.b SPCOND_FLG(%a6) # clear "special case" flag
+
+ mov.w EXC_ISR(%a6),EXC_CC(%a6) # store cc copy on stack
+ mov.l EXC_IPC(%a6),EXC_EXTWPTR(%a6) # store extwptr on stack
+
+#
+# fetch the opword and first extension word pointed to by the stacked pc
+# and store them to the stack for now
+#
+ mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
+ addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
+ bsr.l _imem_read_long # fetch opword & extword
+	mov.l %d0,EXC_OPWORD(%a6) # store opword & extword on stack
+
+
+#########################################################################
+# muls.l 0100 1100 00 |<ea>| 0*** 1100 0000 0*** #
+# mulu.l 0100 1100 00 |<ea>| 0*** 0100 0000 0*** #
+# #
+# divs.l 0100 1100 01 |<ea>| 0*** 1100 0000 0*** #
+# divu.l 0100 1100 01 |<ea>| 0*** 0100 0000 0*** #
+# #
+# movep.w m2r 0000 ***1 00 001*** | <displacement> | #
+# movep.l m2r 0000 ***1 01 001*** | <displacement> | #
+# movep.w r2m 0000 ***1 10 001*** | <displacement> | #
+# movep.l r2m 0000 ***1 11 001*** | <displacement> | #
+# #
+# cas.w 0000 1100 11 |<ea>| 0000 000* **00 0*** #
+# cas.l 0000 1110 11 |<ea>| 0000 000* **00 0*** #
+# #
+# cas2.w 0000 1100 11 111100 **** 000* **00 0*** #
+# **** 000* **00 0*** #
+# cas2.l 0000 1110 11 111100 **** 000* **00 0*** #
+# **** 000* **00 0*** #
+# #
+# chk2.b 0000 0000 11 |<ea>| **** 1000 0000 0000 #
+# chk2.w 0000 0010 11 |<ea>| **** 1000 0000 0000 #
+# chk2.l 0000 0100 11 |<ea>| **** 1000 0000 0000 #
+# #
+# cmp2.b 0000 0000 11 |<ea>| **** 0000 0000 0000 #
+# cmp2.w 0000 0010 11 |<ea>| **** 0000 0000 0000 #
+# cmp2.l 0000 0100 11 |<ea>| **** 0000 0000 0000 #
+#########################################################################
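+
+# in rough C terms (a sketch, not part of the package), the decode that
+# follows amounts to the code below. bit numbers refer to the opword; the
+# btst's below test the same bits at +16 since the opword sits in the
+# upper word of %d0 after the longword fetch.
+#
+#	if (op & 0x4000) {			/* group1		*/
+#		if (op & 0x0040) div64();
+#		else             mul64();
+#	} else {				/* group2		*/
+#		if (op & 0x0100)              movep();
+#		else if (!(op & 0x0800))      chk2_cmp2();
+#		else if ((op & 0xff) == 0xfc) cas2();
+#		else                          cas();
+#	}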
+
+#
+# using bit 14 of the operation word, separate into 2 groups:
+# (group1) mul64, div64
+# (group2) movep, chk2, cmp2, cas2, cas
+#
+ btst &0x1e,%d0 # group1 or group2
+ beq.b uieh_group2 # go handle group2
+
+#
+# now, w/ group1, make mul64's decode the fastest since it will
+# most likely be used the most.
+#
+uieh_group1:
+ btst &0x16,%d0 # test for div64
+ bne.b uieh_div64 # go handle div64
+
+uieh_mul64:
+# mul64() may use ()+ addressing and may, therefore, alter a7
+
+ bsr.l _mul64 # _mul64()
+
+ btst &0x5,EXC_ISR(%a6) # supervisor mode?
+ beq.w uieh_done
+ btst &mia7_bit,SPCOND_FLG(%a6) # was a7 changed?
+ beq.w uieh_done # no
+ btst &0x7,EXC_ISR(%a6) # is trace enabled?
+ bne.w uieh_trace_a7 # yes
+ bra.w uieh_a7 # no
+
+uieh_div64:
+# div64() may use ()+ addressing and may, therefore, alter a7.
+# div64() may take a divide by zero exception.
+
+ bsr.l _div64 # _div64()
+
+# here, we sort out all of the special cases that may have happened.
+ btst &mia7_bit,SPCOND_FLG(%a6) # was a7 changed?
+ bne.b uieh_div64_a7 # yes
+uieh_div64_dbyz:
+ btst &idbyz_bit,SPCOND_FLG(%a6) # did divide-by-zero occur?
+ bne.w uieh_divbyzero # yes
+ bra.w uieh_done # no
+uieh_div64_a7:
+ btst &0x5,EXC_ISR(%a6) # supervisor mode?
+ beq.b uieh_div64_dbyz # no
+# here, a7 has been incremented by 4 bytes in supervisor mode. we still
+# may have the following 3 cases:
+# (i) (a7)+
+# (ii) (a7)+; trace
+# (iii) (a7)+; divide-by-zero
+#
+ btst &idbyz_bit,SPCOND_FLG(%a6) # did divide-by-zero occur?
+ bne.w uieh_divbyzero_a7 # yes
+ tst.b EXC_ISR(%a6) # no; is trace enabled?
+ bmi.w uieh_trace_a7 # yes
+ bra.w uieh_a7 # no
+
+#
+# now, w/ group2, make movep's decode the fastest since it will
+# most likely be used the most.
+#
+uieh_group2:
+	btst &0x18,%d0 # movep? (opword bit 8)
+	beq.b uieh_not_movep # no; chk2,cmp2,cas,cas2
+
+
+ bsr.l _moveperipheral # _movep()
+ bra.w uieh_done
+
+uieh_not_movep:
+ btst &0x1b,%d0 # test for chk2,cmp2
+ beq.b uieh_chk2cmp2 # go handle chk2,cmp2
+
+ swap %d0 # put opword in lo word
+ cmpi.b %d0,&0xfc # test for cas2
+ beq.b uieh_cas2 # go handle cas2
+
+uieh_cas:
+
+ bsr.l _compandset # _cas()
+
+# the cases of "cas Dc,Du,(a7)+" and "cas Dc,Du,-(a7)" used from supervisor
+# mode are simply not considered valid and therefore are not handled.
+
+ bra.w uieh_done
+
+uieh_cas2:
+
+ mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
+ addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
+ bsr.l _imem_read_word # read extension word
+
+ tst.l %d1 # ifetch error?
+ bne.w isp_iacc # yes
+
+ bsr.l _compandset2 # _cas2()
+ bra.w uieh_done
+
+uieh_chk2cmp2:
+# chk2 may take a chk exception
+
+ bsr.l _chk2_cmp2 # _chk2_cmp2()
+
+# here we check to see if a chk trap should be taken
+ cmpi.b SPCOND_FLG(%a6),&ichk_flg
+ bne.w uieh_done
+ bra.b uieh_chk_trap
+
+###########################################################################
+
+#
+# the required emulation has been completed. now, clean up the necessary stack
+# info and prepare for rte
+#
+uieh_done:
+ mov.b EXC_CC+1(%a6),EXC_ISR+1(%a6) # insert new ccodes
+
+# if exception occurred in user mode, then we have to restore a7 in case it
+# changed. we don't have to update a7 for supervisor mode because that case
+# doesn't flow through here
+ btst &0x5,EXC_ISR(%a6) # user or supervisor?
+ bne.b uieh_finish # supervisor
+
+ mov.l EXC_A7(%a6),%a0 # fetch user stack pointer
+ mov.l %a0,%usp # restore it
+
+uieh_finish:
+ movm.l EXC_DREGS(%a6),&0x3fff # restore d0-d7/a0-a5
+
+ btst &0x7,EXC_ISR(%a6) # is trace mode on?
+ bne.b uieh_trace # yes;go handle trace mode
+
+ mov.l EXC_EXTWPTR(%a6),EXC_IPC(%a6) # new pc on stack frame
+ mov.l EXC_A6(%a6),(%a6) # prepare new a6 for unlink
+ unlk %a6 # unlink stack frame
+ bra.l _isp_done
+
+#
+# The instruction that was just emulated was also being traced. The trace
+# trap for this instruction will be lost unless we jump to the trace handler.
+# So, here we create a Trace Exception format number two exception stack
+# frame from the Unimplemented Integer Instruction Exception stack frame
+# format number zero and jump to the user supplied hook "_real_trace()".
+#
+# UIEH FRAME TRACE FRAME
+# ***************** *****************
+# * 0x0 * 0x0f4 * * Current *
+# ***************** * PC *
+# * Current * *****************
+# * PC * * 0x2 * 0x024 *
+# ***************** *****************
+# * SR * * Next *
+# ***************** * PC *
+# ->* Old * *****************
+# from link -->* A6 * * SR *
+# ***************** *****************
+# /* A7 * * New * <-- for final unlink
+# / * * * A6 *
+# link frame < ***************** *****************
+# \ ~ ~ ~ ~
+# \***************** *****************
+#
+uieh_trace:
+ mov.l EXC_A6(%a6),-0x4(%a6)
+ mov.w EXC_ISR(%a6),0x0(%a6)
+ mov.l EXC_IPC(%a6),0x8(%a6)
+ mov.l EXC_EXTWPTR(%a6),0x2(%a6)
+ mov.w &0x2024,0x6(%a6)
+ sub.l &0x4,%a6
+ unlk %a6
+ bra.l _real_trace
+
+#
+# UIEH FRAME CHK FRAME
+# ***************** *****************
+# * 0x0 * 0x0f4 * * Current *
+# ***************** * PC *
+# * Current * *****************
+# * PC * * 0x2 * 0x018 *
+# ***************** *****************
+# * SR * * Next *
+# ***************** * PC *
+# (4 words) *****************
+# * SR *
+# *****************
+# (6 words)
+#
+# the chk2 instruction should take a chk trap. so, here we must create a
+# chk stack frame from an unimplemented integer instruction exception frame
+# and jump to the user supplied entry point "_real_chk()".
+#
+uieh_chk_trap:
+ mov.b EXC_CC+1(%a6),EXC_ISR+1(%a6) # insert new ccodes
+ movm.l EXC_DREGS(%a6),&0x3fff # restore d0-d7/a0-a5
+
+ mov.w EXC_ISR(%a6),(%a6) # put new SR on stack
+ mov.l EXC_IPC(%a6),0x8(%a6) # put "Current PC" on stack
+ mov.l EXC_EXTWPTR(%a6),0x2(%a6) # put "Next PC" on stack
+ mov.w &0x2018,0x6(%a6) # put Vector Offset on stack
+
+ mov.l EXC_A6(%a6),%a6 # restore a6
+ add.l &LOCAL_SIZE,%sp # clear stack frame
+
+ bra.l _real_chk
+
+#
+# UIEH FRAME DIVBYZERO FRAME
+# ***************** *****************
+# * 0x0 * 0x0f4 * * Current *
+# ***************** * PC *
+# * Current * *****************
+# * PC * * 0x2 * 0x014 *
+# ***************** *****************
+# * SR * * Next *
+# ***************** * PC *
+# (4 words) *****************
+# * SR *
+# *****************
+# (6 words)
+#
+# the divide instruction should take an integer divide by zero trap. so, here
+# we must create a divbyzero stack frame from an unimplemented integer
+# instruction exception frame and jump to the user supplied entry point
+# "_real_divbyzero()".
+#
+uieh_divbyzero:
+ mov.b EXC_CC+1(%a6),EXC_ISR+1(%a6) # insert new ccodes
+ movm.l EXC_DREGS(%a6),&0x3fff # restore d0-d7/a0-a5
+
+ mov.w EXC_ISR(%a6),(%a6) # put new SR on stack
+ mov.l EXC_IPC(%a6),0x8(%a6) # put "Current PC" on stack
+ mov.l EXC_EXTWPTR(%a6),0x2(%a6) # put "Next PC" on stack
+ mov.w &0x2014,0x6(%a6) # put Vector Offset on stack
+
+ mov.l EXC_A6(%a6),%a6 # restore a6
+ add.l &LOCAL_SIZE,%sp # clear stack frame
+
+ bra.l _real_divbyzero
+
+#
+# DIVBYZERO FRAME
+# *****************
+# * Current *
+# UIEH FRAME * PC *
+# ***************** *****************
+# * 0x0 * 0x0f4 * * 0x2 * 0x014 *
+# ***************** *****************
+# * Current * * Next *
+# * PC * * PC *
+# ***************** *****************
+# * SR * * SR *
+# ***************** *****************
+# (4 words) (6 words)
+#
+# the divide instruction should take an integer divide by zero trap. so, here
+# we must create a divbyzero stack frame from an unimplemented integer
+# instruction exception frame and jump to the user supplied entry point
+# "_real_divbyzero()".
+#
+# However, we must also deal with the fact that (a7)+ was used from supervisor
+# mode, thereby shifting the stack frame up 4 bytes.
+#
+uieh_divbyzero_a7:
+ mov.b EXC_CC+1(%a6),EXC_ISR+1(%a6) # insert new ccodes
+ movm.l EXC_DREGS(%a6),&0x3fff # restore d0-d7/a0-a5
+
+ mov.l EXC_IPC(%a6),0xc(%a6) # put "Current PC" on stack
+ mov.w &0x2014,0xa(%a6) # put Vector Offset on stack
+ mov.l EXC_EXTWPTR(%a6),0x6(%a6) # put "Next PC" on stack
+
+ mov.l EXC_A6(%a6),%a6 # restore a6
+ add.l &4+LOCAL_SIZE,%sp # clear stack frame
+
+ bra.l _real_divbyzero
+
+#
+# TRACE FRAME
+# *****************
+# * Current *
+# UIEH FRAME * PC *
+# ***************** *****************
+# * 0x0 * 0x0f4 * * 0x2 * 0x024 *
+# ***************** *****************
+# * Current * * Next *
+# * PC * * PC *
+# ***************** *****************
+# * SR * * SR *
+# ***************** *****************
+# (4 words) (6 words)
+#
+#
+# The instruction that was just emulated was also being traced. The trace
+# trap for this instruction will be lost unless we jump to the trace handler.
+# So, here we create a Trace Exception format number two exception stack
+# frame from the Unimplemented Integer Instruction Exception stack frame
+# format number zero and jump to the user supplied hook "_real_trace()".
+#
+# However, we must also deal with the fact that (a7)+ was used from supervisor
+# mode, thereby shifting the stack frame up 4 bytes.
+#
+uieh_trace_a7:
+ mov.b EXC_CC+1(%a6),EXC_ISR+1(%a6) # insert new ccodes
+ movm.l EXC_DREGS(%a6),&0x3fff # restore d0-d7/a0-a5
+
+ mov.l EXC_IPC(%a6),0xc(%a6) # put "Current PC" on stack
+ mov.w &0x2024,0xa(%a6) # put Vector Offset on stack
+ mov.l EXC_EXTWPTR(%a6),0x6(%a6) # put "Next PC" on stack
+
+ mov.l EXC_A6(%a6),%a6 # restore a6
+ add.l &4+LOCAL_SIZE,%sp # clear stack frame
+
+ bra.l _real_trace
+
+#
+# UIEH FRAME
+# *****************
+# * 0x0 * 0x0f4 *
+# UIEH FRAME *****************
+# ***************** * Next *
+# * 0x0 * 0x0f4 * * PC *
+# ***************** *****************
+# * Current * * SR *
+# * PC * *****************
+# ***************** (4 words)
+# * SR *
+# *****************
+# (4 words)
+uieh_a7:
+ mov.b EXC_CC+1(%a6),EXC_ISR+1(%a6) # insert new ccodes
+ movm.l EXC_DREGS(%a6),&0x3fff # restore d0-d7/a0-a5
+
+ mov.w &0x00f4,0xe(%a6) # put Vector Offset on stack
+ mov.l EXC_EXTWPTR(%a6),0xa(%a6) # put "Next PC" on stack
+ mov.w EXC_ISR(%a6),0x8(%a6) # put SR on stack
+
+ mov.l EXC_A6(%a6),%a6 # restore a6
+ add.l &8+LOCAL_SIZE,%sp # clear stack frame
+ bra.l _isp_done
+
+##########
+
+# this is the exit point if a data read or write fails.
+# a0 = failing address
+# d0 = fslw
+isp_dacc:
+ mov.l %a0,(%a6) # save address
+ mov.l %d0,-0x4(%a6) # save partial fslw
+
+ lea -64(%a6),%sp
+ movm.l (%sp)+,&0x7fff # restore d0-d7/a0-a6
+
+ mov.l 0xc(%sp),-(%sp) # move voff,hi(pc)
+ mov.l 0x4(%sp),0x10(%sp) # store fslw
+ mov.l 0xc(%sp),0x4(%sp) # store sr,lo(pc)
+ mov.l 0x8(%sp),0xc(%sp) # store address
+ mov.l (%sp)+,0x4(%sp) # store voff,hi(pc)
+ mov.w &0x4008,0x6(%sp) # store new voff
+
+ bra.b isp_acc_exit
+
+# this is the exit point if an instruction word read fails.
+# FSLW:
+# misaligned = true
+# read = true
+# size = word
+# instruction = true
+# software emulation error = true
+isp_iacc:
+ movm.l EXC_DREGS(%a6),&0x3fff # restore d0-d7/a0-a5
+ unlk %a6 # unlink frame
+ sub.w &0x8,%sp # make room for acc frame
+ mov.l 0x8(%sp),(%sp) # store sr,lo(pc)
+ mov.w 0xc(%sp),0x4(%sp) # store hi(pc)
+ mov.w &0x4008,0x6(%sp) # store new voff
+ mov.l 0x2(%sp),0x8(%sp) # store address (=pc)
+ mov.l &0x09428001,0xc(%sp) # store fslw
+
+isp_acc_exit:
+ btst &0x5,(%sp) # user or supervisor?
+ beq.b isp_acc_exit2 # user
+ bset &0x2,0xd(%sp) # set supervisor TM bit
+isp_acc_exit2:
+ bra.l _real_access
+
+# if the addressing mode was (an)+ or -(an), the address register must
+# be restored to its pre-exception value before entering _real_access.
+isp_restore:
+ cmpi.b SPCOND_FLG(%a6),&restore_flg # do we need a restore?
+ bne.b isp_restore_done # no
+ clr.l %d0
+ mov.b EXC_SAVREG(%a6),%d0 # regno to restore
+ mov.l EXC_SAVVAL(%a6),(EXC_AREGS,%a6,%d0.l*4) # restore value
+isp_restore_done:
+ rts
+
+#########################################################################
+# XDEF **************************************************************** #
+# _calc_ea(): routine to calculate effective address #
+# #
+# XREF **************************************************************** #
+# _imem_read_word() - read instruction word #
+# _imem_read_long() - read instruction longword #
+# _dmem_read_long() - read data longword (for memory indirect) #
+# isp_iacc() - handle instruction access error exception #
+# isp_dacc() - handle data access error exception #
+# #
+# INPUT *************************************************************** #
+# d0 = number of bytes related to effective address (w,l) #
+# #
+# OUTPUT ************************************************************** #
+# If exiting through isp_dacc... #
+# a0 = failing address #
+# d0 = FSLW #
+#	elsif exiting through isp_iacc...				#
+# none #
+# else #
+# a0 = effective address #
+# #
+# ALGORITHM *********************************************************** #
+# The effective address type is decoded from the opword residing #
+# on the stack. A jump table is used to vector to a routine for the #
+# appropriate mode. Since none of the emulated integer instructions #
+# uses byte-sized operands, only handle word and long operations. #
+# #
+# Dn,An - shouldn't enter here #
+# (An) - fetch An value from stack #
+# -(An) - fetch An value from stack; return decr value; #
+# place decr value on stack; store old value in case of #
+# future access error; if -(a7), set mda7_flg in #
+# SPCOND_FLG #
+# (An)+ - fetch An value from stack; return value; #
+# place incr value on stack; store old value in case of #
+# future access error; if (a7)+, set mia7_flg in #
+# SPCOND_FLG #
+# (d16,An) - fetch An value from stack; read d16 using #
+# _imem_read_word(); fetch may fail -> branch to #
+# isp_iacc() #
+# (xxx).w,(xxx).l - use _imem_read_{word,long}() to fetch #
+# address; fetch may fail #
+# #<data> - return address of immediate value; set immed_flg #
+# in SPCOND_FLG #
+# (d16,PC) - fetch stacked PC value; read d16 using #
+# _imem_read_word(); fetch may fail -> branch to #
+# isp_iacc() #
+# everything else - read needed displacements as appropriate w/ #
+# _imem_read_{word,long}(); read may fail; if memory #
+# indirect, read indirect address using #
+# _dmem_read_long() which may also fail #
+# #
+#########################################################################
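+# As a rough C model (illustrative only; the handler array is
+# hypothetical), the jump-table dispatch below amounts to:
+#
+#	typedef void *(*ea_fn)(void);
+#	extern ea_fn ea_handler[64];	/* one per {mode,reg} pair	*/
+#	void *calc_ea_model(unsigned opword)
+#	{
+#		return ea_handler[opword & 0x3f]();  /* low 6 bits: mode:reg */
+#	}
+#
+# except that the table holds 16-bit self-relative offsets rather than
+# absolute pointers, which keeps the package position-independent.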
+
+ global _calc_ea
+_calc_ea:
+ mov.l %d0,%a0 # move # bytes to a0
+
+# MODE and REG are taken from the EXC_OPWORD.
+ mov.w EXC_OPWORD(%a6),%d0 # fetch opcode word
+ mov.w %d0,%d1 # make a copy
+
+ andi.w &0x3f,%d0 # extract mode field
+ andi.l &0x7,%d1 # extract reg field
+
+# jump to the corresponding function for each {MODE,REG} pair.
+ mov.w (tbl_ea_mode.b,%pc,%d0.w*2), %d0 # fetch jmp distance
+ jmp (tbl_ea_mode.b,%pc,%d0.w*1) # jmp to correct ea mode
+
+ swbeg &64
+tbl_ea_mode:
+ short tbl_ea_mode - tbl_ea_mode
+ short tbl_ea_mode - tbl_ea_mode
+ short tbl_ea_mode - tbl_ea_mode
+ short tbl_ea_mode - tbl_ea_mode
+ short tbl_ea_mode - tbl_ea_mode
+ short tbl_ea_mode - tbl_ea_mode
+ short tbl_ea_mode - tbl_ea_mode
+ short tbl_ea_mode - tbl_ea_mode
+
+ short tbl_ea_mode - tbl_ea_mode
+ short tbl_ea_mode - tbl_ea_mode
+ short tbl_ea_mode - tbl_ea_mode
+ short tbl_ea_mode - tbl_ea_mode
+ short tbl_ea_mode - tbl_ea_mode
+ short tbl_ea_mode - tbl_ea_mode
+ short tbl_ea_mode - tbl_ea_mode
+ short tbl_ea_mode - tbl_ea_mode
+
+ short addr_ind_a0 - tbl_ea_mode
+ short addr_ind_a1 - tbl_ea_mode
+ short addr_ind_a2 - tbl_ea_mode
+ short addr_ind_a3 - tbl_ea_mode
+ short addr_ind_a4 - tbl_ea_mode
+ short addr_ind_a5 - tbl_ea_mode
+ short addr_ind_a6 - tbl_ea_mode
+ short addr_ind_a7 - tbl_ea_mode
+
+ short addr_ind_p_a0 - tbl_ea_mode
+ short addr_ind_p_a1 - tbl_ea_mode
+ short addr_ind_p_a2 - tbl_ea_mode
+ short addr_ind_p_a3 - tbl_ea_mode
+ short addr_ind_p_a4 - tbl_ea_mode
+ short addr_ind_p_a5 - tbl_ea_mode
+ short addr_ind_p_a6 - tbl_ea_mode
+ short addr_ind_p_a7 - tbl_ea_mode
+
+ short addr_ind_m_a0 - tbl_ea_mode
+ short addr_ind_m_a1 - tbl_ea_mode
+ short addr_ind_m_a2 - tbl_ea_mode
+ short addr_ind_m_a3 - tbl_ea_mode
+ short addr_ind_m_a4 - tbl_ea_mode
+ short addr_ind_m_a5 - tbl_ea_mode
+ short addr_ind_m_a6 - tbl_ea_mode
+ short addr_ind_m_a7 - tbl_ea_mode
+
+ short addr_ind_disp_a0 - tbl_ea_mode
+ short addr_ind_disp_a1 - tbl_ea_mode
+ short addr_ind_disp_a2 - tbl_ea_mode
+ short addr_ind_disp_a3 - tbl_ea_mode
+ short addr_ind_disp_a4 - tbl_ea_mode
+ short addr_ind_disp_a5 - tbl_ea_mode
+ short addr_ind_disp_a6 - tbl_ea_mode
+ short addr_ind_disp_a7 - tbl_ea_mode
+
+ short _addr_ind_ext - tbl_ea_mode
+ short _addr_ind_ext - tbl_ea_mode
+ short _addr_ind_ext - tbl_ea_mode
+ short _addr_ind_ext - tbl_ea_mode
+ short _addr_ind_ext - tbl_ea_mode
+ short _addr_ind_ext - tbl_ea_mode
+ short _addr_ind_ext - tbl_ea_mode
+ short _addr_ind_ext - tbl_ea_mode
+
+ short abs_short - tbl_ea_mode
+ short abs_long - tbl_ea_mode
+ short pc_ind - tbl_ea_mode
+ short pc_ind_ext - tbl_ea_mode
+ short immediate - tbl_ea_mode
+ short tbl_ea_mode - tbl_ea_mode
+ short tbl_ea_mode - tbl_ea_mode
+ short tbl_ea_mode - tbl_ea_mode
+
+###################################
+# Address register indirect: (An) #
+###################################
+addr_ind_a0:
+ mov.l EXC_A0(%a6),%a0 # Get current a0
+ rts
+
+addr_ind_a1:
+ mov.l EXC_A1(%a6),%a0 # Get current a1
+ rts
+
+addr_ind_a2:
+ mov.l EXC_A2(%a6),%a0 # Get current a2
+ rts
+
+addr_ind_a3:
+ mov.l EXC_A3(%a6),%a0 # Get current a3
+ rts
+
+addr_ind_a4:
+ mov.l EXC_A4(%a6),%a0 # Get current a4
+ rts
+
+addr_ind_a5:
+ mov.l EXC_A5(%a6),%a0 # Get current a5
+ rts
+
+addr_ind_a6:
+ mov.l EXC_A6(%a6),%a0 # Get current a6
+ rts
+
+addr_ind_a7:
+ mov.l EXC_A7(%a6),%a0 # Get current a7
+ rts
+
+#####################################################
+# Address register indirect w/ postincrement: (An)+ #
+#####################################################
+addr_ind_p_a0:
+ mov.l %a0,%d0 # copy no. bytes
+ mov.l EXC_A0(%a6),%a0 # load current value
+ add.l %a0,%d0 # increment
+ mov.l %d0,EXC_A0(%a6) # save incremented value
+
+ mov.l %a0,EXC_SAVVAL(%a6) # save in case of access error
+ mov.b &0x0,EXC_SAVREG(%a6) # save regno, too
+ mov.b &restore_flg,SPCOND_FLG(%a6) # set flag
+ rts
+
+addr_ind_p_a1:
+ mov.l %a0,%d0 # copy no. bytes
+ mov.l EXC_A1(%a6),%a0 # load current value
+ add.l %a0,%d0 # increment
+ mov.l %d0,EXC_A1(%a6) # save incremented value
+
+ mov.l %a0,EXC_SAVVAL(%a6) # save in case of access error
+ mov.b &0x1,EXC_SAVREG(%a6) # save regno, too
+ mov.b &restore_flg,SPCOND_FLG(%a6) # set flag
+ rts
+
+addr_ind_p_a2:
+ mov.l %a0,%d0 # copy no. bytes
+ mov.l EXC_A2(%a6),%a0 # load current value
+ add.l %a0,%d0 # increment
+ mov.l %d0,EXC_A2(%a6) # save incremented value
+
+ mov.l %a0,EXC_SAVVAL(%a6) # save in case of access error
+ mov.b &0x2,EXC_SAVREG(%a6) # save regno, too
+ mov.b &restore_flg,SPCOND_FLG(%a6) # set flag
+ rts
+
+addr_ind_p_a3:
+ mov.l %a0,%d0 # copy no. bytes
+ mov.l EXC_A3(%a6),%a0 # load current value
+ add.l %a0,%d0 # increment
+ mov.l %d0,EXC_A3(%a6) # save incremented value
+
+ mov.l %a0,EXC_SAVVAL(%a6) # save in case of access error
+ mov.b &0x3,EXC_SAVREG(%a6) # save regno, too
+ mov.b &restore_flg,SPCOND_FLG(%a6) # set flag
+ rts
+
+addr_ind_p_a4:
+ mov.l %a0,%d0 # copy no. bytes
+ mov.l EXC_A4(%a6),%a0 # load current value
+ add.l %a0,%d0 # increment
+ mov.l %d0,EXC_A4(%a6) # save incremented value
+
+ mov.l %a0,EXC_SAVVAL(%a6) # save in case of access error
+ mov.b &0x4,EXC_SAVREG(%a6) # save regno, too
+ mov.b &restore_flg,SPCOND_FLG(%a6) # set flag
+ rts
+
+addr_ind_p_a5:
+ mov.l %a0,%d0 # copy no. bytes
+ mov.l EXC_A5(%a6),%a0 # load current value
+ add.l %a0,%d0 # increment
+ mov.l %d0,EXC_A5(%a6) # save incremented value
+
+ mov.l %a0,EXC_SAVVAL(%a6) # save in case of access error
+ mov.b &0x5,EXC_SAVREG(%a6) # save regno, too
+ mov.b &restore_flg,SPCOND_FLG(%a6) # set flag
+ rts
+
+addr_ind_p_a6:
+ mov.l %a0,%d0 # copy no. bytes
+ mov.l EXC_A6(%a6),%a0 # load current value
+ add.l %a0,%d0 # increment
+ mov.l %d0,EXC_A6(%a6) # save incremented value
+
+ mov.l %a0,EXC_SAVVAL(%a6) # save in case of access error
+ mov.b &0x6,EXC_SAVREG(%a6) # save regno, too
+ mov.b &restore_flg,SPCOND_FLG(%a6) # set flag
+ rts
+
+addr_ind_p_a7:
+ mov.b &mia7_flg,SPCOND_FLG(%a6) # set "special case" flag
+
+ mov.l %a0,%d0 # copy no. bytes
+ mov.l EXC_A7(%a6),%a0 # load current value
+ add.l %a0,%d0 # increment
+ mov.l %d0,EXC_A7(%a6) # save incremented value
+ rts
+
+####################################################
+# Address register indirect w/ predecrement: -(An) #
+####################################################
+addr_ind_m_a0:
+ mov.l EXC_A0(%a6),%d0 # Get current a0
+ mov.l %d0,EXC_SAVVAL(%a6) # save in case of access error
+ sub.l %a0,%d0 # Decrement
+ mov.l %d0,EXC_A0(%a6) # Save decr value
+ mov.l %d0,%a0
+
+ mov.b &0x0,EXC_SAVREG(%a6) # save regno, too
+ mov.b &restore_flg,SPCOND_FLG(%a6) # set flag
+ rts
+
+addr_ind_m_a1:
+ mov.l EXC_A1(%a6),%d0 # Get current a1
+ mov.l %d0,EXC_SAVVAL(%a6) # save in case of access error
+ sub.l %a0,%d0 # Decrement
+ mov.l %d0,EXC_A1(%a6) # Save decr value
+ mov.l %d0,%a0
+
+ mov.b &0x1,EXC_SAVREG(%a6) # save regno, too
+ mov.b &restore_flg,SPCOND_FLG(%a6) # set flag
+ rts
+
+addr_ind_m_a2:
+ mov.l EXC_A2(%a6),%d0 # Get current a2
+ mov.l %d0,EXC_SAVVAL(%a6) # save in case of access error
+ sub.l %a0,%d0 # Decrement
+ mov.l %d0,EXC_A2(%a6) # Save decr value
+ mov.l %d0,%a0
+
+ mov.b &0x2,EXC_SAVREG(%a6) # save regno, too
+ mov.b &restore_flg,SPCOND_FLG(%a6) # set flag
+ rts
+
+addr_ind_m_a3:
+ mov.l EXC_A3(%a6),%d0 # Get current a3
+ mov.l %d0,EXC_SAVVAL(%a6) # save in case of access error
+ sub.l %a0,%d0 # Decrement
+ mov.l %d0,EXC_A3(%a6) # Save decr value
+ mov.l %d0,%a0
+
+ mov.b &0x3,EXC_SAVREG(%a6) # save regno, too
+ mov.b &restore_flg,SPCOND_FLG(%a6) # set flag
+ rts
+
+addr_ind_m_a4:
+ mov.l EXC_A4(%a6),%d0 # Get current a4
+ mov.l %d0,EXC_SAVVAL(%a6) # save in case of access error
+ sub.l %a0,%d0 # Decrement
+ mov.l %d0,EXC_A4(%a6) # Save decr value
+ mov.l %d0,%a0
+
+ mov.b &0x4,EXC_SAVREG(%a6) # save regno, too
+ mov.b &restore_flg,SPCOND_FLG(%a6) # set flag
+ rts
+
+addr_ind_m_a5:
+ mov.l EXC_A5(%a6),%d0 # Get current a5
+ mov.l %d0,EXC_SAVVAL(%a6) # save in case of access error
+ sub.l %a0,%d0 # Decrement
+ mov.l %d0,EXC_A5(%a6) # Save decr value
+ mov.l %d0,%a0
+
+ mov.b &0x5,EXC_SAVREG(%a6) # save regno, too
+ mov.b &restore_flg,SPCOND_FLG(%a6) # set flag
+ rts
+
+addr_ind_m_a6:
+ mov.l EXC_A6(%a6),%d0 # Get current a6
+ mov.l %d0,EXC_SAVVAL(%a6) # save in case of access error
+ sub.l %a0,%d0 # Decrement
+ mov.l %d0,EXC_A6(%a6) # Save decr value
+ mov.l %d0,%a0
+
+ mov.b &0x6,EXC_SAVREG(%a6) # save regno, too
+ mov.b &restore_flg,SPCOND_FLG(%a6) # set flag
+ rts
+
+addr_ind_m_a7:
+ mov.b &mda7_flg,SPCOND_FLG(%a6) # set "special case" flag
+
+ mov.l EXC_A7(%a6),%d0 # Get current a7
+ sub.l %a0,%d0 # Decrement
+ mov.l %d0,EXC_A7(%a6) # Save decr value
+ mov.l %d0,%a0
+ rts
+
+########################################################
+# Address register indirect w/ displacement: (d16, An) #
+########################################################
+addr_ind_disp_a0:
+ mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
+ addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
+ bsr.l _imem_read_word
+
+ tst.l %d1 # ifetch error?
+ bne.l isp_iacc # yes
+
+ mov.w %d0,%a0 # sign extend displacement
+ add.l EXC_A0(%a6),%a0 # a0 + d16
+ rts
+
+addr_ind_disp_a1:
+ mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
+ addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
+ bsr.l _imem_read_word
+
+ tst.l %d1 # ifetch error?
+ bne.l isp_iacc # yes
+
+ mov.w %d0,%a0 # sign extend displacement
+ add.l EXC_A1(%a6),%a0 # a1 + d16
+ rts
+
+addr_ind_disp_a2:
+ mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
+ addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
+ bsr.l _imem_read_word
+
+ tst.l %d1 # ifetch error?
+ bne.l isp_iacc # yes
+
+ mov.w %d0,%a0 # sign extend displacement
+ add.l EXC_A2(%a6),%a0 # a2 + d16
+ rts
+
+addr_ind_disp_a3:
+ mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
+ addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
+ bsr.l _imem_read_word
+
+ tst.l %d1 # ifetch error?
+ bne.l isp_iacc # yes
+
+ mov.w %d0,%a0 # sign extend displacement
+ add.l EXC_A3(%a6),%a0 # a3 + d16
+ rts
+
+addr_ind_disp_a4:
+ mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
+ addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
+ bsr.l _imem_read_word
+
+ tst.l %d1 # ifetch error?
+ bne.l isp_iacc # yes
+
+ mov.w %d0,%a0 # sign extend displacement
+ add.l EXC_A4(%a6),%a0 # a4 + d16
+ rts
+
+addr_ind_disp_a5:
+ mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
+ addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
+ bsr.l _imem_read_word
+
+ tst.l %d1 # ifetch error?
+ bne.l isp_iacc # yes
+
+ mov.w %d0,%a0 # sign extend displacement
+ add.l EXC_A5(%a6),%a0 # a5 + d16
+ rts
+
+addr_ind_disp_a6:
+ mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
+ addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
+ bsr.l _imem_read_word
+
+ tst.l %d1 # ifetch error?
+ bne.l isp_iacc # yes
+
+ mov.w %d0,%a0 # sign extend displacement
+ add.l EXC_A6(%a6),%a0 # a6 + d16
+ rts
+
+addr_ind_disp_a7:
+ mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
+ addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
+ bsr.l _imem_read_word
+
+ tst.l %d1 # ifetch error?
+ bne.l isp_iacc # yes
+
+ mov.w %d0,%a0 # sign extend displacement
+ add.l EXC_A7(%a6),%a0 # a7 + d16
+ rts
+
+########################################################################
+# Address register indirect w/ index(8-bit displacement): (d8, An, Xn) #
+# " " " w/ " (base displacement): (bd, An, Xn) #
+# Memory indirect postindexed: ([bd, An], Xn, od) #
+# Memory indirect preindexed: ([bd, An, Xn], od) #
+########################################################################
+_addr_ind_ext:
+ mov.l %d1,-(%sp)
+
+ mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
+ addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
+ bsr.l _imem_read_word # fetch extword in d0
+
+ tst.l %d1 # ifetch error?
+ bne.l isp_iacc # yes
+
+ mov.l (%sp)+,%d1
+
+ mov.l (EXC_AREGS,%a6,%d1.w*4),%a0 # put base in a0
+
+	btst &0x8,%d0 # brief or full extension word?
+	beq.b addr_ind_index_8bit # brief: 8-bit displacement
+
+ movm.l &0x3c00,-(%sp) # save d2-d5
+
+ mov.l %d0,%d5 # put extword in d5
+ mov.l %a0,%d3 # put base in d3
+
+ bra.l calc_mem_ind # calc memory indirect
+
+addr_ind_index_8bit:
+ mov.l %d2,-(%sp) # save old d2
+
+ mov.l %d0,%d1
+ rol.w &0x4,%d1
+ andi.w &0xf,%d1 # extract index regno
+
+ mov.l (EXC_DREGS,%a6,%d1.w*4),%d1 # fetch index reg value
+
+ btst &0xb,%d0 # is it word or long?
+ bne.b aii8_long
+ ext.l %d1 # sign extend word index
+aii8_long:
+ mov.l %d0,%d2
+ rol.w &0x7,%d2
+ andi.l &0x3,%d2 # extract scale value
+
+ lsl.l %d2,%d1 # shift index by scale
+
+ extb.l %d0 # sign extend displacement
+ add.l %d1,%d0 # index + disp
+ add.l %d0,%a0 # An + (index + disp)
+
+ mov.l (%sp)+,%d2 # restore old d2
+ rts
+
+######################
+# Immediate: #<data> #
+#########################################################################
+# word, long: <ea> of the data is the current extension word #
+# pointer value. new extension word pointer is simply the old #
+# plus the number of bytes in the data type(2 or 4). #
+#########################################################################
+immediate:
+ mov.b &immed_flg,SPCOND_FLG(%a6) # set immediate flag
+
+ mov.l EXC_EXTWPTR(%a6),%a0 # fetch extension word ptr
+ rts
+
+###########################
+# Absolute short: (XXX).W #
+###########################
+abs_short:
+ mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
+ addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
+ bsr.l _imem_read_word # fetch short address
+
+ tst.l %d1 # ifetch error?
+ bne.l isp_iacc # yes
+
+ mov.w %d0,%a0 # return <ea> in a0
+ rts
+
+##########################
+# Absolute long: (XXX).L #
+##########################
+abs_long:
+ mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
+ addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
+ bsr.l _imem_read_long # fetch long address
+
+ tst.l %d1 # ifetch error?
+ bne.l isp_iacc # yes
+
+ mov.l %d0,%a0 # return <ea> in a0
+ rts
+
+#######################################################
+# Program counter indirect w/ displacement: (d16, PC) #
+#######################################################
+pc_ind:
+ mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
+ addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
+ bsr.l _imem_read_word # fetch word displacement
+
+ tst.l %d1 # ifetch error?
+ bne.l isp_iacc # yes
+
+ mov.w %d0,%a0 # sign extend displacement
+
+ add.l EXC_EXTWPTR(%a6),%a0 # pc + d16
+
+# _imem_read_word() increased the extwptr by 2. need to adjust here.
+ subq.l &0x2,%a0 # adjust <ea>
+
+ rts
+
+##########################################################
+# PC indirect w/ index(8-bit displacement): (d8, PC, Xn) #
+# " " w/ " (base displacement): (bd, PC, Xn) #
+# PC memory indirect postindexed: ([bd, PC], Xn, od) #
+# PC memory indirect preindexed: ([bd, PC, Xn], od) #
+##########################################################
+pc_ind_ext:
+ mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
+ addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
+ bsr.l _imem_read_word # fetch ext word
+
+ tst.l %d1 # ifetch error?
+ bne.l isp_iacc # yes
+
+ mov.l EXC_EXTWPTR(%a6),%a0 # put base in a0
+ subq.l &0x2,%a0 # adjust base
+
+ btst &0x8,%d0 # is disp only 8 bits?
+ beq.b pc_ind_index_8bit # yes
+
+# the indexed addressing mode uses a base displacement of size
+# word or long
+ movm.l &0x3c00,-(%sp) # save d2-d5
+
+ mov.l %d0,%d5 # put extword in d5
+ mov.l %a0,%d3 # put base in d3
+
+ bra.l calc_mem_ind # calc memory indirect
+
+pc_ind_index_8bit:
+ mov.l %d2,-(%sp) # create a temp register
+
+ mov.l %d0,%d1 # make extword copy
+ rol.w &0x4,%d1 # rotate reg num into place
+ andi.w &0xf,%d1 # extract register number
+
+ mov.l (EXC_DREGS,%a6,%d1.w*4),%d1 # fetch index reg value
+
+ btst &0xb,%d0 # is index word or long?
+ bne.b pii8_long # long
+ ext.l %d1 # sign extend word index
+pii8_long:
+ mov.l %d0,%d2 # make extword copy
+ rol.w &0x7,%d2 # rotate scale value into place
+ andi.l &0x3,%d2 # extract scale value
+
+ lsl.l %d2,%d1 # shift index by scale
+
+ extb.l %d0 # sign extend displacement
+ add.l %d1,%d0 # index + disp
+ add.l %d0,%a0 # An + (index + disp)
+
+ mov.l (%sp)+,%d2 # restore temp register
+
+ rts
+
+# a5 = exc_extwptr (global to uieh)
+# a4 = exc_opword (global to uieh)
+# a3 = exc_dregs (global to uieh)
+
+# d2 = index (internal " " )
+# d3 = base (internal " " )
+# d4 = od (internal " " )
+# d5 = extword (internal " " )
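+#
+# for reference, the full-format extension word fields decoded below
+# (bit positions within the 16-bit extension word, per the M68000 family
+# programmer's reference manual):
+#	15	D/A	index register type (0 = Dn, 1 = An)
+#	14-12	REGISTER index register number
+#	11	W/L	index size (0 = sign-extended word, 1 = long)
+#	10-9	SCALE	index scale factor
+#	8	= 1	(full format)
+#	7	BS	base register suppress
+#	6	IS	index suppress
+#	5-4	BD SIZE	base displacement (1 = null, 2 = word, 3 = long)
+#	1-0		outer displacement (1 = null, 2 = word, 3 = long)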
+calc_mem_ind:
+ btst &0x6,%d5 # is the index suppressed?
+ beq.b calc_index
+ clr.l %d2 # yes, so index = 0
+ bra.b base_supp_ck
+calc_index:
+ bfextu %d5{&16:&4},%d2
+ mov.l (EXC_DREGS,%a6,%d2.w*4),%d2
+ btst &0xb,%d5 # is index word or long?
+ bne.b no_ext
+ ext.l %d2
+no_ext:
+ bfextu %d5{&21:&2},%d0
+ lsl.l %d0,%d2
+base_supp_ck:
+ btst &0x7,%d5 # is the bd suppressed?
+ beq.b no_base_sup
+ clr.l %d3
+no_base_sup:
+ bfextu %d5{&26:&2},%d0 # get bd size
+# beq.l _error # if (size == 0) it's reserved
+ cmpi.b %d0,&2
+ blt.b no_bd
+ beq.b get_word_bd
+
+ mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
+ addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
+ bsr.l _imem_read_long
+
+ tst.l %d1 # ifetch error?
+ bne.l isp_iacc # yes
+
+ bra.b chk_ind
+get_word_bd:
+ mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
+ addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
+ bsr.l _imem_read_word
+
+ tst.l %d1 # ifetch error?
+ bne.l isp_iacc # yes
+
+ ext.l %d0 # sign extend bd
+
+chk_ind:
+ add.l %d0,%d3 # base += bd
+no_bd:
+ bfextu %d5{&30:&2},%d0 # is od suppressed?
+ beq.w aii_bd
+ cmpi.b %d0,&0x2
+ blt.b null_od
+ beq.b word_od
+
+ mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
+ addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
+ bsr.l _imem_read_long
+
+ tst.l %d1 # ifetch error?
+ bne.l isp_iacc # yes
+
+ bra.b add_them
+
+word_od:
+ mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
+ addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
+ bsr.l _imem_read_word
+
+ tst.l %d1 # ifetch error?
+ bne.l isp_iacc # yes
+
+ ext.l %d0 # sign extend od
+ bra.b add_them
+
+null_od:
+ clr.l %d0
+add_them:
+ mov.l %d0,%d4
+ btst &0x2,%d5 # pre or post indexing?
+ beq.b pre_indexed
+
+ mov.l %d3,%a0
+ bsr.l _dmem_read_long
+
+ tst.l %d1 # dfetch error?
+ bne.b calc_ea_err # yes
+
+ add.l %d2,%d0 # <ea> += index
+ add.l %d4,%d0 # <ea> += od
+ bra.b done_ea
+
+pre_indexed:
+ add.l %d2,%d3 # preindexing
+ mov.l %d3,%a0
+ bsr.l _dmem_read_long
+
+	tst.l %d1 # dfetch error?
+ bne.b calc_ea_err # yes
+
+ add.l %d4,%d0 # ea += od
+ bra.b done_ea
+
+aii_bd:
+ add.l %d2,%d3 # ea = (base + bd) + index
+ mov.l %d3,%d0
+done_ea:
+ mov.l %d0,%a0
+
+ movm.l (%sp)+,&0x003c # restore d2-d5
+ rts
+
+# if dmem_read_long() returns a fail message in d1, the package
+# must create an access error frame. here, we pass a skeleton fslw
+# and the failing address to the routine that creates the new frame.
+# FSLW:
+# read = true
+# size = longword
+# TM = data
+# software emulation error = true
+calc_ea_err:
+ mov.l %d3,%a0 # pass failing address
+ mov.l &0x01010001,%d0 # pass fslw
+ bra.l isp_dacc
+
+#########################################################################
+# XDEF **************************************************************** #
+# _moveperipheral(): routine to emulate movep instruction #
+# #
+# XREF **************************************************************** #
+# _dmem_read_byte() - read byte from memory #
+# _dmem_write_byte() - write byte to memory #
+# isp_dacc() - handle data access error exception #
+# #
+# INPUT *************************************************************** #
+# none #
+# #
+# OUTPUT ************************************************************** #
+# If exiting through isp_dacc... #
+# a0 = failing address #
+# d0 = FSLW #
+# else #
+# none #
+# #
+# ALGORITHM *********************************************************** #
+# Decode the movep instruction words stored at EXC_OPWORD and #
+# either read or write the required bytes from/to memory. Use the #
+# _dmem_{read,write}_byte() routines. If one of the memory routines #
+# returns a failing value, we must pass the failing address and a FSLW #
+# to the _isp_dacc() routine. #
+# Since this instruction is used to access peripherals, make sure #
+# to only access the required bytes. #
+# #
+#########################################################################
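+
+# for reference, movep transfers alternate bytes only (high byte first);
+# a minimal C model of the register-to-memory long form emulated below
+# (illustrative, not part of the package):
+#
+#	void movep_l_r2m(unsigned long dx, volatile unsigned char *p)
+#	{
+#		p[0] = dx >> 24;	/* high byte first...		*/
+#		p[2] = dx >> 16;	/* ...at every other address	*/
+#		p[4] = dx >>  8;
+#		p[6] = dx;
+#	}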
+
+###########################
+# movep.(w,l) Dx,(d,Ay) #
+# movep.(w,l) (d,Ay),Dx #
+###########################
+ global _moveperipheral
+_moveperipheral:
+ mov.w EXC_OPWORD(%a6),%d1 # fetch the opcode word
+
+ mov.b %d1,%d0
+ and.w &0x7,%d0 # extract Ay from opcode word
+
+ mov.l (EXC_AREGS,%a6,%d0.w*4),%a0 # fetch ay
+
+ add.w EXC_EXTWORD(%a6),%a0 # add: an + sgn_ext(disp)
+
+ btst &0x7,%d1 # (reg 2 mem) or (mem 2 reg)
+ beq.w mem2reg
+
+# reg2mem: fetch dx, then write it to memory
+reg2mem:
+ mov.w %d1,%d0
+ rol.w &0x7,%d0
+ and.w &0x7,%d0 # extract Dx from opcode word
+
+ mov.l (EXC_DREGS,%a6,%d0.w*4), %d0 # fetch dx
+
+ btst &0x6,%d1 # word or long operation?
+ beq.b r2mwtrans
+
+# a0 = dst addr
+# d0 = Dx
+r2mltrans:
+ mov.l %d0,%d2 # store data
+ mov.l %a0,%a2 # store addr
+ rol.l &0x8,%d2
+ mov.l %d2,%d0
+
+ bsr.l _dmem_write_byte # os : write hi
+
+ tst.l %d1 # dfetch error?
+ bne.w movp_write_err # yes
+
+ add.w &0x2,%a2 # incr addr
+ mov.l %a2,%a0
+ rol.l &0x8,%d2
+ mov.l %d2,%d0
+
+	bsr.l _dmem_write_byte # os : write byte 2
+
+ tst.l %d1 # dfetch error?
+ bne.w movp_write_err # yes
+
+ add.w &0x2,%a2 # incr addr
+ mov.l %a2,%a0
+ rol.l &0x8,%d2
+ mov.l %d2,%d0
+
+	bsr.l _dmem_write_byte # os : write byte 3
+
+ tst.l %d1 # dfetch error?
+ bne.w movp_write_err # yes
+
+ add.w &0x2,%a2 # incr addr
+ mov.l %a2,%a0
+ rol.l &0x8,%d2
+ mov.l %d2,%d0
+
+ bsr.l _dmem_write_byte # os : write lo
+
+ tst.l %d1 # dfetch error?
+ bne.w movp_write_err # yes
+
+ rts
+
+# a0 = dst addr
+# d0 = Dx
+r2mwtrans:
+ mov.l %d0,%d2 # store data
+ mov.l %a0,%a2 # store addr
+ lsr.w &0x8,%d0
+
+ bsr.l _dmem_write_byte # os : write hi
+
+ tst.l %d1 # dfetch error?
+ bne.w movp_write_err # yes
+
+ add.w &0x2,%a2
+ mov.l %a2,%a0
+ mov.l %d2,%d0
+
+ bsr.l _dmem_write_byte # os : write lo
+
+ tst.l %d1 # dfetch error?
+ bne.w movp_write_err # yes
+
+ rts
+
+# mem2reg: read the bytes from memory, determine the dest register,
+# and then write the bytes into it.
+mem2reg:
+ btst &0x6,%d1 # word or long operation?
+ beq.b m2rwtrans
+
+# a0 = dst addr
+m2rltrans:
+ mov.l %a0,%a2 # store addr
+
+ bsr.l _dmem_read_byte # read first byte
+
+ tst.l %d1 # dfetch error?
+ bne.w movp_read_err # yes
+
+ mov.l %d0,%d2
+
+ add.w &0x2,%a2 # incr addr by 2 bytes
+ mov.l %a2,%a0
+
+ bsr.l _dmem_read_byte # read second byte
+
+ tst.l %d1 # dfetch error?
+ bne.w movp_read_err # yes
+
+ lsl.w &0x8,%d2
+ mov.b %d0,%d2 # append bytes
+
+ add.w &0x2,%a2 # incr addr by 2 bytes
+ mov.l %a2,%a0
+
+	bsr.l _dmem_read_byte # read third byte
+
+ tst.l %d1 # dfetch error?
+ bne.w movp_read_err # yes
+
+ lsl.l &0x8,%d2
+ mov.b %d0,%d2 # append bytes
+
+ add.w &0x2,%a2 # incr addr by 2 bytes
+ mov.l %a2,%a0
+
+	bsr.l _dmem_read_byte # read fourth byte
+
+ tst.l %d1 # dfetch error?
+ bne.w movp_read_err # yes
+
+ lsl.l &0x8,%d2
+ mov.b %d0,%d2 # append bytes
+
+ mov.b EXC_OPWORD(%a6),%d1
+ lsr.b &0x1,%d1
+ and.w &0x7,%d1 # extract Dx from opcode word
+
+ mov.l %d2,(EXC_DREGS,%a6,%d1.w*4) # store dx
+
+ rts
+
+# a0 = dst addr
+m2rwtrans:
+ mov.l %a0,%a2 # store addr
+
+ bsr.l _dmem_read_byte # read first byte
+
+ tst.l %d1 # dfetch error?
+ bne.w movp_read_err # yes
+
+ mov.l %d0,%d2
+
+ add.w &0x2,%a2 # incr addr by 2 bytes
+ mov.l %a2,%a0
+
+ bsr.l _dmem_read_byte # read second byte
+
+ tst.l %d1 # dfetch error?
+ bne.w movp_read_err # yes
+
+ lsl.w &0x8,%d2
+ mov.b %d0,%d2 # append bytes
+
+ mov.b EXC_OPWORD(%a6),%d1
+ lsr.b &0x1,%d1
+ and.w &0x7,%d1 # extract Dx from opcode word
+
+ mov.w %d2,(EXC_DREGS+2,%a6,%d1.w*4) # store dx
+
+ rts
+
+# if dmem_{read,write}_byte() returns a fail message in d1, the package
+# must create an access error frame. here, we pass a skeleton fslw
+# and the failing address to the routine that creates the new frame.
+# FSLW:
+# write = true
+# size = byte
+# TM = data
+# software emulation error = true
+movp_write_err:
+ mov.l %a2,%a0 # pass failing address
+ mov.l &0x00a10001,%d0 # pass fslw
+ bra.l isp_dacc
+
+# FSLW:
+# read = true
+# size = byte
+# TM = data
+# software emulation error = true
+movp_read_err:
+ mov.l %a2,%a0 # pass failing address
+ mov.l &0x01210001,%d0 # pass fslw
+ bra.l isp_dacc
+
+#########################################################################
+# XDEF **************************************************************** #
+# _chk2_cmp2(): routine to emulate chk2/cmp2 instructions #
+# #
+# XREF **************************************************************** #
+# _calc_ea(): calculate effective address #
+# _dmem_read_long(): read operands #
+# _dmem_read_word(): read operands #
+# isp_dacc(): handle data access error exception #
+# #
+# INPUT *************************************************************** #
+# none #
+# #
+# OUTPUT ************************************************************** #
+# If exiting through isp_dacc... #
+# a0 = failing address #
+# d0 = FSLW #
+# else #
+# none #
+# #
+# ALGORITHM *********************************************************** #
+# First, calculate the effective address, then fetch the byte, #
+# word, or longword sized operands. Then, in the interest of #
+# simplicity, all operands are converted to longword size whether the #
+# operation is byte, word, or long. The bounds are sign extended #
+#	accordingly. If Rn is a data register, Rn is also sign extended. If	#
+# Rn is an address register, it need not be sign extended since the #
+# full register is always used. #
+# The comparisons are made and the condition codes calculated. #
+# If the instruction is chk2 and the Rn value is out-of-bounds, set #
+# the ichk_flg in SPCOND_FLG. #
+# If the memory fetch returns a failing value, pass the failing #
+# address and FSLW to the isp_dacc() routine. #
+# #
+#########################################################################
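+# A rough C model of the bounds test (illustrative only): widening all
+# operands to 32 bits, Rn is out of bounds exactly when
+#
+#	(uint32_t)(rn - lo) > (uint32_t)(hi - lo)
+#
+# which is the condition that sets the carry bit in the compare sequence
+# below; chk2 traps when it holds, cmp2 merely records it in the CCR.
+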
+
+ global _chk2_cmp2
+_chk2_cmp2:
+
+# the size parameter passed doesn't matter since chk2 & cmp2 can use
+# neither predecrement, postincrement, nor immediate addressing.
+ bsr.l _calc_ea # calculate <ea>
+
+ mov.b EXC_EXTWORD(%a6), %d0 # fetch hi extension word
+ rol.b &0x4, %d0 # rotate reg bits into lo
+ and.w &0xf, %d0 # extract reg bits
+
+ mov.l (EXC_DREGS,%a6,%d0.w*4), %d2 # get regval
+
+ cmpi.b EXC_OPWORD(%a6), &0x2 # what size is operation?
+ blt.b chk2_cmp2_byte # size == byte
+ beq.b chk2_cmp2_word # size == word
+
+# the bounds are longword size. call routine to read the lower
+# bound into d0 and the higher bound into d1.
+chk2_cmp2_long:
+ mov.l %a0,%a2 # save copy of <ea>
+ bsr.l _dmem_read_long # fetch long lower bound
+
+ tst.l %d1 # dfetch error?
+ bne.w chk2_cmp2_err_l # yes
+
+ mov.l %d0,%d3 # save long lower bound
+ addq.l &0x4,%a2
+ mov.l %a2,%a0 # pass <ea> of long upper bound
+ bsr.l _dmem_read_long # fetch long upper bound
+
+ tst.l %d1 # dfetch error?
+ bne.w chk2_cmp2_err_l # yes
+
+ mov.l %d0,%d1 # long upper bound in d1
+ mov.l %d3,%d0 # long lower bound in d0
+ bra.w chk2_cmp2_compare # go do the compare emulation
+
+# the bounds are word size. fetch them in one subroutine call by
+# reading a longword. sign extend both. if it's a data operation,
+# sign extend Rn to long, also.
+chk2_cmp2_word:
+ mov.l %a0,%a2
+ bsr.l _dmem_read_long # fetch 2 word bounds
+
+ tst.l %d1 # dfetch error?
+ bne.w chk2_cmp2_err_l # yes
+
+ mov.w %d0, %d1 # place hi in %d1
+ swap %d0 # place lo in %d0
+
+ ext.l %d0 # sign extend lo bnd
+ ext.l %d1 # sign extend hi bnd
+
+ btst &0x7, EXC_EXTWORD(%a6) # address compare?
+ bne.w chk2_cmp2_compare # yes; don't sign extend
+
+# operation is a data register compare.
+# sign extend word to long so we can do simple longword compares.
+ ext.l %d2 # sign extend data word
+ bra.w chk2_cmp2_compare # go emulate compare
+
+# the bounds are byte size. fetch them in one subroutine call by
+# reading a word. sign extend both. if it's a data operation,
+# sign extend Rn to long, also.
+chk2_cmp2_byte:
+ mov.l %a0,%a2
+ bsr.l _dmem_read_word # fetch 2 byte bounds
+
+ tst.l %d1 # dfetch error?
+ bne.w chk2_cmp2_err_w # yes
+
+ mov.b %d0, %d1 # place hi in %d1
+ lsr.w &0x8, %d0 # place lo in %d0
+
+ extb.l %d0 # sign extend lo bnd
+ extb.l %d1 # sign extend hi bnd
+
+ btst &0x7, EXC_EXTWORD(%a6) # address compare?
+ bne.b chk2_cmp2_compare # yes; don't sign extend
+
+# operation is a data register compare.
+# sign extend byte to long so we can do simple longword compares.
+ extb.l %d2 # sign extend data byte
+
+#
+# To set the ccodes correctly:
+# (1) save 'Z' bit from (Rn - lo)
+# (2) save 'Z' and 'C' bits from ((hi - lo) - (Rn - lo))
+# (3) keep 'X', 'N', and 'V' from before instruction
+# (4) combine ccodes
+#
+chk2_cmp2_compare:
+ sub.l %d0, %d2 # (Rn - lo)
+ mov.w %cc, %d3 # fetch resulting ccodes
+ andi.b &0x4, %d3 # keep 'Z' bit
+ sub.l %d0, %d1 # (hi - lo)
+	cmp.l %d1,%d2 # ((hi - lo) - (Rn - lo))
+
+ mov.w %cc, %d4 # fetch resulting ccodes
+ or.b %d4, %d3 # combine w/ earlier ccodes
+	andi.b &0x5, %d3 # keep 'Z' and 'C'
+
+ mov.w EXC_CC(%a6), %d4 # fetch old ccodes
+ andi.b &0x1a, %d4 # keep 'X','N','V' bits
+ or.b %d3, %d4 # insert new ccodes
+ mov.w %d4, EXC_CC(%a6) # save new ccodes
+
+ btst &0x3, EXC_EXTWORD(%a6) # separate chk2,cmp2
+ bne.b chk2_finish # it's a chk2
+
+ rts
+
+# this code handles the only difference between chk2 and cmp2. chk2 would
+# have trapped out if the value was out of bounds. we check this by seeing
+# if the 'C' bit was set by the operation.
+chk2_finish:
+	btst &0x0, %d4 # is 'C' bit set?
+ bne.b chk2_trap # yes;chk2 should trap
+ rts
+chk2_trap:
+ mov.b &ichk_flg,SPCOND_FLG(%a6) # set "special case" flag
+ rts
+
+# if dmem_read_{long,word}() returns a fail message in d1, the package
+# must create an access error frame. here, we pass a skeleton fslw
+# and the failing address to the routine that creates the new frame.
+# FSLW:
+# read = true
+# size = longword
+# TM = data
+# software emulation error = true
+chk2_cmp2_err_l:
+ mov.l %a2,%a0 # pass failing address
+ mov.l &0x01010001,%d0 # pass fslw
+ bra.l isp_dacc
+
+# FSLW:
+# read = true
+# size = word
+# TM = data
+# software emulation error = true
+chk2_cmp2_err_w:
+ mov.l %a2,%a0 # pass failing address
+ mov.l &0x01410001,%d0 # pass fslw
+ bra.l isp_dacc
+
+#########################################################################
+# XDEF **************************************************************** #
+# _div64(): routine to emulate div{u,s}.l <ea>,Dr:Dq #
+# 64/32->32r:32q #
+# #
+# XREF **************************************************************** #
+# _calc_ea() - calculate effective address #
+# isp_iacc() - handle instruction access error exception #
+# isp_dacc() - handle data access error exception #
+# isp_restore() - restore An on access error w/ -() or ()+ #
+# #
+# INPUT *************************************************************** #
+# none #
+# #
+# OUTPUT ************************************************************** #
+# If exiting through isp_dacc... #
+# a0 = failing address #
+# d0 = FSLW #
+# else #
+# none #
+# #
+# ALGORITHM *********************************************************** #
+# First, decode the operand location. If it's in Dn, fetch from #
+# the stack. If it's in memory, use _calc_ea() to calculate the #
+# effective address. Use _dmem_read_long() to fetch at that address. #
+# Unless the operand is immediate data. Then use _imem_read_long(). #
+# Send failures to isp_dacc() or isp_iacc() as appropriate. #
+# If the operands are signed, make them unsigned and save the #
+# sign info for later. Separate out special cases like divide-by-zero #
+# or 32-bit divides if possible. Else, use a special math algorithm #
+# to calculate the result. #
+# Restore sign info if signed instruction. Set the condition #
+# codes. Set idbyz_flg in SPCOND_FLG if divisor was zero. Store the #
+# quotient and remainder in the appropriate data registers on the stack.#
+# #
+#########################################################################
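+
+# for reference, an illustrative C model of the architected
+# divu.l <ea>,Dr:Dq result that the code below emulates (names are
+# invented for the sketch; the signed variant additionally applies
+# the sign rules noted at divfinish):
+#
+#	#include <stdint.h>
+#	/* returns -1 on divide-by-zero, 1 on overflow, else 0 */
+#	int divu_l_model(uint32_t dvsr, uint32_t *dr, uint32_t *dq)
+#	{
+#		uint64_t dvnd = ((uint64_t)*dr << 32) | *dq;
+#		if (dvsr == 0)
+#			return -1;		/* trap; regs unchanged */
+#		if (dvnd / dvsr > 0xffffffffULL)
+#			return 1;		/* 'V' set; regs unchanged */
+#		*dq = (uint32_t)(dvnd / dvsr);	/* quotient  -> Dq */
+#		*dr = (uint32_t)(dvnd % dvsr);	/* remainder -> Dr */
+#		return 0;
+#	}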
+
+set NDIVISOR, EXC_TEMP+0x0
+set NDIVIDEND, EXC_TEMP+0x1
+set NDRSAVE, EXC_TEMP+0x2
+set NDQSAVE, EXC_TEMP+0x4
+set DDSECOND, EXC_TEMP+0x6
+set DDQUOTIENT, EXC_TEMP+0x8
+set DDNORMAL, EXC_TEMP+0xc
+
+ global _div64
+#############
+# div(u,s)l #
+#############
+_div64:
+ mov.b EXC_OPWORD+1(%a6), %d0
+ andi.b &0x38, %d0 # extract src mode
+
+ bne.w dcontrolmodel_s # %dn dest or control mode?
+
+ mov.b EXC_OPWORD+1(%a6), %d0 # extract Dn from opcode
+ andi.w &0x7, %d0
+ mov.l (EXC_DREGS,%a6,%d0.w*4), %d7 # fetch divisor from register
+
+dgotsrcl:
+ beq.w div64eq0 # divisor is = 0!!!
+
+ mov.b EXC_EXTWORD+1(%a6), %d0 # extract Dr from extword
+ mov.b EXC_EXTWORD(%a6), %d1 # extract Dq from extword
+ and.w &0x7, %d0
+ lsr.b &0x4, %d1
+ and.w &0x7, %d1
+ mov.w %d0, NDRSAVE(%a6) # save Dr for later
+ mov.w %d1, NDQSAVE(%a6) # save Dq for later
+
+# fetch %dr and %dq directly off stack since all regs are saved there
+ mov.l (EXC_DREGS,%a6,%d0.w*4), %d5 # get dividend hi
+ mov.l (EXC_DREGS,%a6,%d1.w*4), %d6 # get dividend lo
+
+# separate signed and unsigned divide
+ btst &0x3, EXC_EXTWORD(%a6) # signed or unsigned?
+ beq.b dspecialcases # use positive divide
+
+# save the sign of the divisor
+# make divisor unsigned if it's negative
+ tst.l %d7 # chk sign of divisor
+ slt NDIVISOR(%a6) # save sign of divisor
+ bpl.b dsgndividend
+ neg.l %d7 # complement negative divisor
+
+# save the sign of the dividend
+# make dividend unsigned if it's negative
+dsgndividend:
+ tst.l %d5 # chk sign of hi(dividend)
+ slt NDIVIDEND(%a6) # save sign of dividend
+ bpl.b dspecialcases
+
+ mov.w &0x0, %cc # clear 'X' cc bit
+ negx.l %d6 # complement signed dividend
+ negx.l %d5
+
+# extract some special cases:
+# - is (dividend == 0) ?
+# - is (hi(dividend) == 0 && (divisor <= lo(dividend))) ? (32-bit div)
+dspecialcases:
+ tst.l %d5 # is (hi(dividend) == 0)
+ bne.b dnormaldivide # no, so try it the long way
+
+ tst.l %d6 # is (lo(dividend) == 0), too
+ beq.w ddone # yes, so (dividend == 0)
+
+ cmp.l %d7,%d6 # is (divisor <= lo(dividend))
+ bls.b d32bitdivide # yes, so use 32 bit divide
+
+ exg %d5,%d6 # q = 0, r = dividend
+ bra.w divfinish # can't divide, we're done.
+
+d32bitdivide:
+ tdivu.l %d7, %d5:%d6 # it's only a 32/32 bit div!
+
+ bra.b divfinish
+
+dnormaldivide:
+# last special case:
+# - is hi(dividend) >= divisor ? if yes, then overflow
+ cmp.l %d7,%d5
+ bls.b ddovf # answer won't fit in 32 bits
+
+# perform the divide algorithm:
+ bsr.l dclassical # do int divide
+
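+# note: the signed clean-up below implements truncating division, so
+# sgn(remainder) = sgn(dividend) and sgn(quotient) = sgn(dividend)
+# XOR sgn(divisor). e.g., -105 / 10 -> quotient -10, remainder -5,
+# since (-10 * 10) + (-5) = -105.
+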
+# separate into signed and unsigned finishes.
+divfinish:
+ btst &0x3, EXC_EXTWORD(%a6) # do divs, divu separately
+ beq.b ddone # divu has no processing!!!
+
+# it was a divs.l, so ccode setting is a little more complicated...
+ tst.b NDIVIDEND(%a6) # remainder has same sign
+ beq.b dcc # as dividend.
+ neg.l %d5 # sgn(rem) = sgn(dividend)
+dcc:
+ mov.b NDIVISOR(%a6), %d0
+ eor.b %d0, NDIVIDEND(%a6) # chk if quotient is negative
+ beq.b dqpos # branch to quot positive
+
+# 0x80000000 is the largest number representable as a 32-bit negative
+# number. the negative of 0x80000000 is 0x80000000.
+ cmpi.l %d6, &0x80000000 # will (-quot) fit in 32 bits?
+ bhi.b ddovf
+
+ neg.l %d6 # make (-quot) 2's comp
+
+ bra.b ddone
+
+dqpos:
+ btst &0x1f, %d6 # will (+quot) fit in 32 bits?
+ bne.b ddovf
+
+ddone:
+# at this point, result is normal so ccodes are set based on result.
+ mov.w EXC_CC(%a6), %cc
+ tst.l %d6 # set %ccode bits
+ mov.w %cc, EXC_CC(%a6)
+
+ mov.w NDRSAVE(%a6), %d0 # get Dr off stack
+ mov.w NDQSAVE(%a6), %d1 # get Dq off stack
+
+# if the register numbers are the same, only the quotient gets saved.
+# so, if we always save the quotient second, we save ourselves a cmp&beq
+ mov.l %d5, (EXC_DREGS,%a6,%d0.w*4) # save remainder
+ mov.l %d6, (EXC_DREGS,%a6,%d1.w*4) # save quotient
+
+ rts
+
+ddovf:
+ bset &0x1, EXC_CC+1(%a6) # 'V' set on overflow
+ bclr &0x0, EXC_CC+1(%a6) # 'C' cleared on overflow
+
+ rts
+
+div64eq0:
+ andi.b &0x1e, EXC_CC+1(%a6) # clear 'C' bit on divbyzero
+ ori.b &idbyz_flg,SPCOND_FLG(%a6) # set "special case" flag
+ rts
+
+###########################################################################
+#########################################################################
+# This routine uses the 'classical' Algorithm D from Donald Knuth's #
+# Art of Computer Programming, vol II, Seminumerical Algorithms. #
+# For this implementation b=2**16, and the target is U1U2U3U4/V1V2, #
+# where U,V are words of the quadword dividend and longword divisor, #
+# and U1, V1 are the most significant words. #
+# #
+# The most sig. longword of the 64 bit dividend must be in %d5, least #
+# in %d6, and the divisor in %d7. The caller must have already	#
+# made both operands unsigned and screened out the overflow case	#
+# (hi(dividend) >= divisor), so the quotient fits in 32 bits.	#
+# The quotient is returned in %d6, the remainder in %d5.	#
+#########################################################################
+dclassical:
+# if the divisor msw is 0, use a simpler algorithm than the full-blown
+# one at ddknuth:
+
+ cmpi.l %d7, &0xffff
+ bhi.b ddknuth # go use D. Knuth algorithm
+
+# Since the divisor is only a word (and larger than the mslw of the dividend),
+# a simpler algorithm may be used:
+# In the general case, four quotient words would be created by
+# dividing the divisor word into each dividend word. In this case,
+# the first two quotient words must be zero, or overflow would occur.
+# Since we already checked this case above, we can treat the most significant
+# longword of the dividend as (0) remainder (see Knuth) and merely complete
+# the last two divisions to get a quotient longword and word remainder:
+
+ clr.l %d1
+ swap %d5 # same as r*b if previous step rqd
+ swap %d6 # get u3 to lsw position
+ mov.w %d6, %d5 # rb + u3
+
+ divu.w %d7, %d5
+
+ mov.w %d5, %d1 # first quotient word
+ swap %d6 # get u4
+ mov.w %d6, %d5 # rb + u4
+
+ divu.w %d7, %d5
+
+ swap %d1
+ mov.w %d5, %d1 # 2nd quotient 'digit'
+ clr.w %d5
+ swap %d5 # now remainder
+ mov.l %d1, %d6 # and quotient
+
+ rts
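+
+# illustrative C equivalent of the word-divisor path above (names are
+# invented; it assumes, as already checked by the callers, that
+# hi(dividend) < divisor and divisor <= 0xffff):
+#
+#	#include <stdint.h>
+#	uint32_t divw_model(uint32_t hi, uint32_t lo, uint32_t v,
+#			    uint32_t *rem)
+#	{
+#		uint32_t t1 = (hi << 16) | (lo >> 16);	  /* rb + u3 */
+#		uint32_t q1 = t1 / v, r1 = t1 % v;
+#		uint32_t t2 = (r1 << 16) | (lo & 0xffff); /* rb + u4 */
+#		*rem = t2 % v;
+#		return (q1 << 16) | (t2 / v);
+#	}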
+
+ddknuth:
+# In this algorithm, the divisor is treated as a 2 digit (word) number
+# which is divided into a 3 digit (word) dividend to get one quotient
+# digit (word). After subtraction, the dividend is shifted and the
+# process repeated. Before beginning, the divisor and quotient are
+# 'normalized' so that the process of estimating the quotient digit
+# will yield verifiably correct results.
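+#
+# a hedged C sketch of the same algorithm (illustrative only; names
+# are invented, and it assumes, as guaranteed by the callers, that
+# v > 0xffff and hi(u) < v so the quotient fits in 32 bits):
+#
+#	#include <stdint.h>
+#	uint32_t knuth_d_model(uint64_t u, uint32_t v, uint32_t *rem)
+#	{
+#		int s = 0;
+#		uint32_t v1, v2, q = 0;
+#		while (!(v & 0x80000000)) {	/* D1: normalize       */
+#			v <<= 1; u <<= 1; s++;
+#		}
+#		v1 = v >> 16; v2 = v & 0xffff;
+#		for (int pass = 0; pass < 2; pass++) {
+#			uint32_t u1u2 = u >> (32 - 16*pass);
+#			uint32_t u3 = (u >> (16 - 16*pass)) & 0xffff;
+#			uint32_t qh = ((u1u2 >> 16) == v1)
+#				    ? 0xffff : u1u2 / v1;	/* D3 */
+#			while ((uint64_t)v2 * qh >
+#			       (((uint64_t)(u1u2 - v1 * qh) << 16) | u3))
+#				qh--;		/* at worst 1 too large */
+#			uint64_t t = ((uint64_t)qh * v) << (16 - 16*pass);
+#			if (t > u) {		/* D6: rare add-back    */
+#				qh--;
+#				t -= (uint64_t)v << (16 - 16*pass);
+#			}
+#			u -= t;			/* D4: subtract qh * v  */
+#			q = (q << 16) | qh;	/* D5: store digit      */
+#		}
+#		*rem = (uint32_t)(u >> s);	/* D8: denormalize      */
+#		return q;
+#	}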
+
+ clr.l DDNORMAL(%a6) # count of shifts for normalization
+ clr.b DDSECOND(%a6) # clear flag for quotient digits
+ clr.l %d1 # %d1 will hold trial quotient
+ddnchk:
+ btst &31, %d7 # must we normalize? first word of
+ bne.b ddnormalized # divisor (V1) must be >= 65536/2
+ addq.l &0x1, DDNORMAL(%a6) # count normalization shifts
+ lsl.l &0x1, %d7 # shift the divisor
+ lsl.l &0x1, %d6 # shift u4,u3 with overflow to u2
+ roxl.l &0x1, %d5 # shift u1,u2
+ bra.w ddnchk
+ddnormalized:
+
+# Now calculate an estimate of the quotient words (msw first, then lsw).
+# The comments use subscripts for the first quotient digit determination.
+ mov.l %d7, %d3 # divisor
+ mov.l %d5, %d2 # dividend mslw
+ swap %d2
+ swap %d3
+ cmp.w %d2, %d3 # V1 = U1 ?
+ bne.b ddqcalc1
+ mov.w &0xffff, %d1 # use max trial quotient word
+ bra.b ddadj0
+ddqcalc1:
+ mov.l %d5, %d1
+
+ divu.w %d3, %d1 # use quotient of mslw/msw
+
+ andi.l &0x0000ffff, %d1 # zero any remainder
+ddadj0:
+
+# now test the trial quotient and adjust. This step plus the
+# normalization assures (according to Knuth) that the trial
+# quotient will be at worst 1 too large.
+ mov.l %d6, -(%sp)
+ clr.w %d6 # word u3 left
+ swap %d6 # in lsw position
+ddadj1: mov.l %d7, %d3
+ mov.l %d1, %d2
+ mulu.w %d7, %d2 # V2q
+ swap %d3
+ mulu.w %d1, %d3 # V1q
+ mov.l %d5, %d4 # U1U2
+ sub.l %d3, %d4 # U1U2 - V1q
+
+ swap %d4
+
+ mov.w %d4,%d0
+ mov.w %d6,%d4 # insert lower word (U3)
+
+ tst.w %d0 # is upper word set?
+ bne.w ddadjd1
+
+# add.l %d6, %d4 # (U1U2 - V1q) + U3
+
+ cmp.l %d2, %d4
+ bls.b ddadjd1 # is V2q > (U1U2-V1q) + U3 ?
+ subq.l &0x1, %d1 # yes, decrement and recheck
+ bra.b ddadj1
+ddadjd1:
+# now test the word by multiplying it by the divisor (V1V2) and comparing
+# the 3 digit (word) result with the current dividend words
+ mov.l %d5, -(%sp) # save %d5 (%d6 already saved)
+ mov.l %d1, %d6
+ swap %d6 # shift answer to ms 3 words
+ mov.l %d7, %d5
+ bsr.l dmm2
+ mov.l %d5, %d2 # now %d2,%d3 are trial*divisor
+ mov.l %d6, %d3
+ mov.l (%sp)+, %d5 # restore dividend
+ mov.l (%sp)+, %d6
+ sub.l %d3, %d6
+ subx.l %d2, %d5 # subtract double precision
+ bcc dd2nd # no carry, do next quotient digit
+ subq.l &0x1, %d1 # q is one too large
+# need to add back divisor longword to current ms 3 digits of dividend
+# - according to Knuth, this is done only 2 out of 65536 times for random
+# divisor, dividend selection.
+ clr.l %d2
+ mov.l %d7, %d3
+ swap %d3
+ clr.w %d3 # %d3 now ls word of divisor
+ add.l %d3, %d6 # aligned with 3rd word of dividend
+ addx.l %d2, %d5
+ mov.l %d7, %d3
+ clr.w %d3 # %d3 now ms word of divisor
+ swap %d3 # aligned with 2nd word of dividend
+ add.l %d3, %d5
+dd2nd:
+ tst.b DDSECOND(%a6) # both q words done?
+ bne.b ddremain
+# first quotient digit now correct. store digit and shift the
+# (subtracted) dividend
+ mov.w %d1, DDQUOTIENT(%a6)
+ clr.l %d1
+ swap %d5
+ swap %d6
+ mov.w %d6, %d5
+ clr.w %d6
+ st DDSECOND(%a6) # second digit
+ bra.w ddnormalized
+ddremain:
+# add 2nd word to quotient, get the remainder.
+ mov.w %d1, DDQUOTIENT+2(%a6)
+# shift down one word/digit to renormalize remainder.
+ mov.w %d5, %d6
+ swap %d6
+ swap %d5
+ mov.l DDNORMAL(%a6), %d7 # get norm shift count
+ beq.b ddrn
+ subq.l &0x1, %d7 # set for loop count
+ddnlp:
+ lsr.l &0x1, %d5 # shift into %d6
+ roxr.l &0x1, %d6
+ dbf %d7, ddnlp
+ddrn:
+ mov.l %d6, %d5 # remainder
+ mov.l DDQUOTIENT(%a6), %d6 # quotient
+
+ rts
+dmm2:
+# factors for the 32X32->64 multiplication are in %d5 and %d6.
+# returns 64 bit result in %d5 (hi) %d6(lo).
+# destroys %d2,%d3,%d4.
+
+# multiply hi,lo words of each factor to get 4 intermediate products
+ mov.l %d6, %d2
+ mov.l %d6, %d3
+ mov.l %d5, %d4
+ swap %d3
+ swap %d4
+ mulu.w %d5, %d6 # %d6 <- lsw*lsw
+ mulu.w %d3, %d5 # %d5 <- msw-dest*lsw-source
+ mulu.w %d4, %d2 # %d2 <- msw-source*lsw-dest
+ mulu.w %d4, %d3 # %d3 <- msw*msw
+# now use swap and addx to consolidate to two longwords
+ clr.l %d4
+ swap %d6
+ add.w %d5, %d6 # add msw of l*l to lsw of m*l product
+ addx.w %d4, %d3 # add any carry to m*m product
+ add.w %d2, %d6 # add in lsw of other m*l product
+ addx.w %d4, %d3 # add any carry to m*m product
+ swap %d6 # %d6 is low 32 bits of final product
+ clr.w %d5
+ clr.w %d2 # lsw of two mixed products used,
+ swap %d5 # now use msws of longwords
+ swap %d2
+ add.l %d2, %d5
+ add.l %d3, %d5 # %d5 now ms 32 bits of final product
+ rts
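+
+# illustrative C equivalent of dmm2 (the 68060 has no 32x32->64
+# multiply, so the product is assembled from four 16x16->32 pieces;
+# names are invented for the sketch):
+#
+#	#include <stdint.h>
+#	uint64_t dmm2_model(uint32_t a, uint32_t b)
+#	{
+#		uint32_t ll = (a & 0xffff) * (b & 0xffff);
+#		uint32_t lh = (a & 0xffff) * (b >> 16);
+#		uint32_t hl = (a >> 16) * (b & 0xffff);
+#		uint32_t hh = (a >> 16) * (b >> 16);
+#		uint64_t mid = (uint64_t)lh + hl + (ll >> 16);
+#		uint32_t lo = (ll & 0xffff) | ((uint32_t)mid << 16);
+#		uint32_t hi = hh + (uint32_t)(mid >> 16);
+#		return ((uint64_t)hi << 32) | lo;
+#	}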
+
+##########
+dcontrolmodel_s:
+ movq.l &LONG,%d0
+ bsr.l _calc_ea # calc <ea>
+
+ cmpi.b SPCOND_FLG(%a6),&immed_flg # immediate addressing mode?
+ beq.b dimmed # yes
+
+ mov.l %a0,%a2
+ bsr.l _dmem_read_long # fetch divisor from <ea>
+
+ tst.l %d1 # dfetch error?
+ bne.b div64_err # yes
+
+ mov.l %d0, %d7
+ bra.w dgotsrcl
+
+# we have to split out immediate data here because it must be read using
+# imem_read() instead of dmem_read(). this becomes especially important
+# if the fetch runs into some deadly fault.
+dimmed:
+ addq.l &0x4,EXC_EXTWPTR(%a6)
+ bsr.l _imem_read_long # read immediate value
+
+ tst.l %d1 # ifetch error?
+ bne.l isp_iacc # yes
+
+ mov.l %d0,%d7
+ bra.w dgotsrcl
+
+##########
+
+# if dmem_read_long() returns a fail message in d1, the package
+# must create an access error frame. here, we pass a skeleton fslw
+# and the failing address to the routine that creates the new frame.
+# also, we call isp_restore in case the effective addressing mode was
+# (an)+ or -(an) in which case the previous "an" value must be restored.
+# FSLW:
+# read = true
+# size = longword
+# TM = data
+# software emulation error = true
+div64_err:
+ bsr.l isp_restore # restore addr reg
+ mov.l %a2,%a0 # pass failing address
+ mov.l &0x01010001,%d0 # pass fslw
+ bra.l isp_dacc
+
+#########################################################################
+# XDEF **************************************************************** #
+# _mul64(): routine to emulate mul{u,s}.l <ea>,Dh:Dl 32x32->64 #
+# #
+# XREF **************************************************************** #
+# _calc_ea() - calculate effective address #
+# isp_iacc() - handle instruction access error exception #
+# isp_dacc() - handle data access error exception #
+# isp_restore() - restore An on access error w/ -() or ()+ #
+# #
+# INPUT *************************************************************** #
+# none #
+# #
+# OUTPUT ************************************************************** #
+# If exiting through isp_dacc... #
+# a0 = failing address #
+# d0 = FSLW #
+# else #
+# none #
+# #
+# ALGORITHM *********************************************************** #
+# First, decode the operand location. If it's in Dn, fetch from #
+# the stack. If it's in memory, use _calc_ea() to calculate the #
+# effective address. Use _dmem_read_long() to fetch at that address. #
+# Unless the operand is immediate data. Then use _imem_read_long(). #
+# Send failures to isp_dacc() or isp_iacc() as appropriate. #
+# If the operands are signed, make them unsigned and save the #
+# sign info for later. Perform the multiplication using 16x16->32 #
+# unsigned multiplies and "add" instructions. Store the high and low #
+# portions of the result in the appropriate data registers on the #
+# stack. Calculate the condition codes, also. #
+# #
+#########################################################################
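+
+# for reference, an illustrative C model of the signed strategy used
+# below: multiply magnitudes (built from 16x16 pieces as in the
+# dmm2_model sketch above), then negate the 64-bit product if exactly
+# one operand was negative (names are invented for the sketch):
+#
+#	#include <stdint.h>
+#	int64_t muls_l_model(int32_t a, int32_t b)
+#	{
+#		uint32_t ua = (a < 0) ? -(uint32_t)a : (uint32_t)a;
+#		uint32_t ub = (b < 0) ? -(uint32_t)b : (uint32_t)b;
+#		uint64_t p = (uint64_t)ua * ub;
+#		return ((a < 0) ^ (b < 0)) ? -(int64_t)p : (int64_t)p;
+#	}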
+
+#############
+# mul(u,s)l #
+#############
+ global _mul64
+_mul64:
+ mov.b EXC_OPWORD+1(%a6), %d0 # extract src {mode,reg}
+ cmpi.b %d0, &0x7 # is src mode Dn or other?
+ bgt.w mul64_memop # src is in memory
+
+# multiplier operand in the data register file.
+# must extract the register number and fetch the operand from the stack.
+mul64_regop:
+ andi.w &0x7, %d0 # extract Dn
+ mov.l (EXC_DREGS,%a6,%d0.w*4), %d3 # fetch multiplier
+
+# multiplier is in %d3. now, extract Dl and Dh fields and fetch the
+# multiplicand from the data register specified by Dl.
+mul64_multiplicand:
+ mov.w EXC_EXTWORD(%a6), %d2 # fetch ext word
+ clr.w %d1 # clear Dh reg
+ mov.b %d2, %d1 # grab Dh
+ rol.w &0x4, %d2 # align Dl byte
+ andi.w &0x7, %d2 # extract Dl
+
+ mov.l (EXC_DREGS,%a6,%d2.w*4), %d4 # get multiplicand
+
+# check for the case of "zero" result early
+ tst.l %d4 # test multiplicand
+ beq.w mul64_zero # handle zero separately
+ tst.l %d3 # test multiplier
+ beq.w mul64_zero # handle zero separately
+
+# multiplier is in %d3 and multiplicand is in %d4.
+# if the operation is to be signed, then the operands are converted
+# to unsigned and the result sign is saved for the end.
+ clr.b EXC_TEMP(%a6) # clear temp space
+ btst &0x3, EXC_EXTWORD(%a6) # signed or unsigned?
+ beq.b mul64_alg # unsigned; skip sgn calc
+
+ tst.l %d3 # is multiplier negative?
+ bge.b mul64_chk_md_sgn # no
+ neg.l %d3 # make multiplier positive
+ ori.b &0x1, EXC_TEMP(%a6) # save multiplier sgn
+
+# the result sign is the exclusive or of the operand sign bits.
+mul64_chk_md_sgn:
+ tst.l %d4 # is multiplicand negative?
+ bge.b mul64_alg # no
+ neg.l %d4 # make multiplicand positive
+ eori.b &0x1, EXC_TEMP(%a6) # calculate correct sign
+
+#########################################################################
+# 63 32 0 #
+# ---------------------------- #
+# | hi(mplier) * hi(mplicand)| #
+# ---------------------------- #
+# ----------------------------- #
+# | hi(mplier) * lo(mplicand) | #
+# ----------------------------- #
+# ----------------------------- #
+# | lo(mplier) * hi(mplicand) | #
+# ----------------------------- #
+# | ----------------------------- #
+# --|-- | lo(mplier) * lo(mplicand) | #
+# | ----------------------------- #
+# ======================================================== #
+# -------------------------------------------------------- #
+# | hi(result) | lo(result) | #
+# -------------------------------------------------------- #
+#########################################################################
+mul64_alg:
+# load temp registers with operands
+ mov.l %d3, %d5 # mr in %d5
+ mov.l %d3, %d6 # mr in %d6
+ mov.l %d4, %d7 # md in %d7
+ swap %d6 # hi(mr) in lo %d6
+ swap %d7 # hi(md) in lo %d7
+
+# complete necessary multiplies:
+ mulu.w %d4, %d3 # [1] lo(mr) * lo(md)
+ mulu.w %d6, %d4 # [2] hi(mr) * lo(md)
+ mulu.w %d7, %d5 # [3] lo(mr) * hi(md)
+ mulu.w %d7, %d6 # [4] hi(mr) * hi(md)
+
+# add lo portions of [2],[3] to hi portion of [1].
+# add carries produced from these adds to [4].
+# lo([1]) is the final lo 16 bits of the result.
+ clr.l %d7 # load %d7 w/ zero value
+ swap %d3 # hi([1]) <==> lo([1])
+ add.w %d4, %d3 # hi([1]) + lo([2])
+ addx.l %d7, %d6 # [4] + carry
+ add.w %d5, %d3 # hi([1]) + lo([3])
+ addx.l %d7, %d6 # [4] + carry
+ swap %d3 # lo([1]) <==> hi([1])
+
+# lo portions of [2],[3] have been added in to final result.
+# now, clear lo, put hi in lo reg, and add to [4]
+ clr.w %d4 # clear lo([2])
+ clr.w %d5 # clear hi([3])
+ swap %d4 # hi([2]) in lo %d4
+ swap %d5 # hi([3]) in lo %d5
+ add.l %d5, %d4 # [4] + hi([2])
+ add.l %d6, %d4 # [4] + hi([3])
+
+# unsigned result is now in {%d4,%d3}
+ tst.b EXC_TEMP(%a6) # should result be signed?
+ beq.b mul64_done # no
+
+# result should be a signed negative number.
+# compute 2's complement of the unsigned number:
+# -negate all bits and add 1
+mul64_neg:
+ not.l %d3 # negate lo(result) bits
+ not.l %d4 # negate hi(result) bits
+ addq.l &1, %d3 # add 1 to lo(result)
+ addx.l %d7, %d4 # add carry to hi(result)
+
+# the result is saved to the register file.
+# for '040 compatibility, if Dl == Dh then only the hi(result) is
+# saved. so, saving hi after lo accomplishes this without need to
+# check Dl,Dh equality.
+mul64_done:
+ mov.l %d3, (EXC_DREGS,%a6,%d2.w*4) # save lo(result)
+ mov.w &0x0, %cc
+ mov.l %d4, (EXC_DREGS,%a6,%d1.w*4) # save hi(result)
+
+# now, grab the condition codes. only one that can be set is 'N'.
+# 'N' CAN be set, even if the operation is unsigned, when bit 63 is set.
+ mov.w %cc, %d7 # fetch %ccr to see if 'N' set
+ andi.b &0x8, %d7 # extract 'N' bit
+
+mul64_ccode_set:
+ mov.b EXC_CC+1(%a6), %d6 # fetch previous %ccr
+ andi.b &0x10, %d6 # all but 'X' bit changes
+
+ or.b %d7, %d6 # group 'X' and 'N'
+ mov.b %d6, EXC_CC+1(%a6) # save new %ccr
+
+ rts
+
+# one or both of the operands is zero so the result is also zero.
+# save the zero result to the register file and set the 'Z' ccode bit.
+mul64_zero:
+ clr.l (EXC_DREGS,%a6,%d2.w*4) # save lo(result)
+ clr.l (EXC_DREGS,%a6,%d1.w*4) # save hi(result)
+
+ movq.l &0x4, %d7 # set 'Z' ccode bit
+ bra.b mul64_ccode_set # finish ccode set
+
+##########
+
+# multiplier operand is in memory at the effective address.
+# must calculate the <ea> and go fetch the 32-bit operand.
+mul64_memop:
+ movq.l &LONG, %d0 # pass # of bytes
+ bsr.l _calc_ea # calculate <ea>
+
+ cmpi.b SPCOND_FLG(%a6),&immed_flg # immediate addressing mode?
+ beq.b mul64_immed # yes
+
+ mov.l %a0,%a2
+ bsr.l _dmem_read_long # fetch src from addr (%a0)
+
+ tst.l %d1 # dfetch error?
+ bne.w mul64_err # yes
+
+ mov.l %d0, %d3 # store multiplier in %d3
+
+ bra.w mul64_multiplicand
+
+# we have to split out immediate data here because it must be read using
+# imem_read() instead of dmem_read(). this becomes especially important
+# if the fetch runs into some deadly fault.
+mul64_immed:
+ addq.l &0x4,EXC_EXTWPTR(%a6)
+ bsr.l _imem_read_long # read immediate value
+
+ tst.l %d1 # ifetch error?
+ bne.l isp_iacc # yes
+
+ mov.l %d0,%d3
+ bra.w mul64_multiplicand
+
+##########
+
+# if dmem_read_long() returns a fail message in d1, the package
+# must create an access error frame. here, we pass a skeleton fslw
+# and the failing address to the routine that creates the new frame.
+# also, we call isp_restore in case the effective addressing mode was
+# (an)+ or -(an) in which case the previous "an" value must be restored.
+# FSLW:
+# read = true
+# size = longword
+# TM = data
+# software emulation error = true
+mul64_err:
+ bsr.l isp_restore # restore addr reg
+ mov.l %a2,%a0 # pass failing address
+ mov.l &0x01010001,%d0 # pass fslw
+ bra.l isp_dacc
+
+#########################################################################
+# XDEF **************************************************************** #
+# _compandset2(): routine to emulate cas2() #
+# (internal to package) #
+# #
+# _isp_cas2_finish(): store ccodes, store compare regs #
+# (external to package) #
+# #
+# XREF **************************************************************** #
+# _real_lock_page() - "callout" to lock op's page from page-outs #
+# _cas_terminate2() - access error exit #
+# _real_cas2() - "callout" to core cas2 emulation code #
+# _real_unlock_page() - "callout" to unlock page #
+# #
+# INPUT *************************************************************** #
+# _compandset2(): #
+# d0 = instruction extension word #
+# #
+# _isp_cas2_finish(): #
+# see cas2 core emulation code #
+# #
+# OUTPUT ************************************************************** #
+# _compandset2(): #
+# see cas2 core emulation code #
+# #
+# _isp_cas2_finish(): #
+# None (register file or memory changed as appropriate) #
+# #
+# ALGORITHM *********************************************************** #
+# compandset2(): #
+# Decode the instruction and fetch the appropriate Update and #
+# Compare operands. Then call the "callout" _real_lock_page() for each #
+# memory operand address so that the operating system can keep these #
+# pages from being paged out. If either _real_lock_page() fails, exit #
+# through _cas_terminate2(). Don't forget to unlock the 1st locked page #
+# using _real_unlock_page() if the 2nd lock-page fails. #
+# Finally, branch to the core cas2 emulation code by calling the #
+# "callout" _real_cas2(). #
+# #
+# _isp_cas2_finish(): #
+# Re-perform the comparison so we can determine the condition #
+# codes which were too much trouble to keep around during the locked #
+# emulation. Then unlock each operand's page by calling the "callout" #
+# _real_unlock_page(). #
+# #
+#########################################################################
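+
+# illustrative C model of the cas2 semantics being emulated (the real
+# sequence must be atomic, which is what the locked-bus core code
+# provides; the word form is shown, the long form is identical with
+# 32-bit types, and names are invented for the sketch):
+#
+#	#include <stdint.h>
+#	/* returns nonzero if both compares succeeded ('Z' will be set) */
+#	int cas2w_model(uint16_t *a1, uint16_t *a2,
+#			uint16_t *dc1, uint16_t *dc2,
+#			uint16_t du1, uint16_t du2)
+#	{
+#		if (*a1 == *dc1 && *a2 == *dc2) {
+#			*a1 = du1; *a2 = du2;	/* update both dests */
+#			return 1;
+#		}
+#		*dc1 = *a1; *dc2 = *a2;	/* else load dests into Dc regs */
+#		return 0;
+#	}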
+
+set ADDR1, EXC_TEMP+0xc
+set ADDR2, EXC_TEMP+0x0
+set DC2, EXC_TEMP+0xa
+set DC1, EXC_TEMP+0x8
+
+ global _compandset2
+_compandset2:
+ mov.l %d0,EXC_TEMP+0x4(%a6) # store for possible restart
+ mov.l %d0,%d1 # extension word in d0
+
+ rol.w &0x4,%d0
+ andi.w &0xf,%d0 # extract Rn2
+ mov.l (EXC_DREGS,%a6,%d0.w*4),%a1 # fetch ADDR2
+ mov.l %a1,ADDR2(%a6)
+
+ mov.l %d1,%d0
+
+ lsr.w &0x6,%d1
+ andi.w &0x7,%d1 # extract Du2
+ mov.l (EXC_DREGS,%a6,%d1.w*4),%d5 # fetch Update2 Op
+
+ andi.w &0x7,%d0 # extract Dc2
+ mov.l (EXC_DREGS,%a6,%d0.w*4),%d3 # fetch Compare2 Op
+ mov.w %d0,DC2(%a6)
+
+ mov.w EXC_EXTWORD(%a6),%d0
+ mov.l %d0,%d1
+
+ rol.w &0x4,%d0
+ andi.w &0xf,%d0 # extract Rn1
+ mov.l (EXC_DREGS,%a6,%d0.w*4),%a0 # fetch ADDR1
+ mov.l %a0,ADDR1(%a6)
+
+ mov.l %d1,%d0
+
+ lsr.w &0x6,%d1
+ andi.w &0x7,%d1 # extract Du1
+ mov.l (EXC_DREGS,%a6,%d1.w*4),%d4 # fetch Update1 Op
+
+ andi.w &0x7,%d0 # extract Dc1
+ mov.l (EXC_DREGS,%a6,%d0.w*4),%d2 # fetch Compare1 Op
+ mov.w %d0,DC1(%a6)
+
+ btst &0x1,EXC_OPWORD(%a6) # word or long?
+ sne %d7
+
+ btst &0x5,EXC_ISR(%a6) # user or supervisor?
+ sne %d6
+
+ mov.l %a0,%a2
+ mov.l %a1,%a3
+
+ mov.l %d7,%d1 # pass size
+ mov.l %d6,%d0 # pass mode
+ bsr.l _real_lock_page # lock page
+ mov.l %a2,%a0
+ tst.l %d0 # error?
+ bne.l _cas_terminate2 # yes
+
+ mov.l %d7,%d1 # pass size
+ mov.l %d6,%d0 # pass mode
+ mov.l %a3,%a0 # pass addr
+ bsr.l _real_lock_page # lock page
+ mov.l %a3,%a0
+ tst.l %d0 # error?
+ bne.b cas_preterm # yes
+
+ mov.l %a2,%a0
+ mov.l %a3,%a1
+
+ bra.l _real_cas2
+
+# if the 2nd lock attempt fails, then we must still unlock the
+# first page(s).
+cas_preterm:
+ mov.l %d0,-(%sp) # save FSLW
+ mov.l %d7,%d1 # pass size
+ mov.l %d6,%d0 # pass mode
+ mov.l %a2,%a0 # pass ADDR1
+ bsr.l _real_unlock_page # unlock first page(s)
+ mov.l (%sp)+,%d0 # restore FSLW
+ mov.l %a3,%a0 # pass failing addr
+ bra.l _cas_terminate2
+
+#############################################################
+
+ global _isp_cas2_finish
+_isp_cas2_finish:
+ btst &0x1,EXC_OPWORD(%a6)
+ bne.b cas2_finish_l
+
+ mov.w EXC_CC(%a6),%cc # load old ccodes
+ cmp.w %d0,%d2
+ bne.b cas2_finish_w_save
+ cmp.w %d1,%d3
+cas2_finish_w_save:
+ mov.w %cc,EXC_CC(%a6) # save new ccodes
+
+ tst.b %d4 # update compare reg?
+ bne.b cas2_finish_w_done # no
+
+ mov.w DC2(%a6),%d3 # fetch Dc2
+ mov.w %d1,(2+EXC_DREGS,%a6,%d3.w*4) # store new Compare2 Op
+
+ mov.w DC1(%a6),%d2 # fetch Dc1
+ mov.w %d0,(2+EXC_DREGS,%a6,%d2.w*4) # store new Compare1 Op
+
+cas2_finish_w_done:
+ btst &0x5,EXC_ISR(%a6)
+ sne %d2
+ mov.l %d2,%d0 # pass mode
+ sf %d1 # pass size
+ mov.l ADDR1(%a6),%a0 # pass ADDR1
+ bsr.l _real_unlock_page # unlock page
+
+ mov.l %d2,%d0 # pass mode
+ sf %d1 # pass size
+ mov.l ADDR2(%a6),%a0 # pass ADDR2
+ bsr.l _real_unlock_page # unlock page
+ rts
+
+cas2_finish_l:
+ mov.w EXC_CC(%a6),%cc # load old ccodes
+ cmp.l %d0,%d2
+ bne.b cas2_finish_l_save
+ cmp.l %d1,%d3
+cas2_finish_l_save:
+ mov.w %cc,EXC_CC(%a6) # save new ccodes
+
+ tst.b %d4 # update compare reg?
+ bne.b cas2_finish_l_done # no
+
+ mov.w DC2(%a6),%d3 # fetch Dc2
+ mov.l %d1,(EXC_DREGS,%a6,%d3.w*4) # store new Compare2 Op
+
+ mov.w DC1(%a6),%d2 # fetch Dc1
+ mov.l %d0,(EXC_DREGS,%a6,%d2.w*4) # store new Compare1 Op
+
+cas2_finish_l_done:
+ btst &0x5,EXC_ISR(%a6)
+ sne %d2
+ mov.l %d2,%d0 # pass mode
+ st %d1 # pass size
+ mov.l ADDR1(%a6),%a0 # pass ADDR1
+ bsr.l _real_unlock_page # unlock page
+
+ mov.l %d2,%d0 # pass mode
+ st %d1 # pass size
+ mov.l ADDR2(%a6),%a0 # pass ADDR2
+ bsr.l _real_unlock_page # unlock page
+ rts
+
+########
+ global cr_cas2
+cr_cas2:
+ mov.l EXC_TEMP+0x4(%a6),%d0
+ bra.w _compandset2
+
+#########################################################################
+# XDEF **************************************************************** #
+# _compandset(): routine to emulate cas w/ misaligned <ea> #
+# (internal to package) #
+# _isp_cas_finish(): routine called when cas emulation completes #
+# (external and internal to package) #
+# _isp_cas_restart(): restart cas emulation after a fault #
+# (external to package) #
+# _isp_cas_terminate(): create access error stack frame on fault #
+# (external and internal to package) #
+# _isp_cas_inrange(): checks whether instr address is within range #
+# of the core cas/cas2 emulation code #
+# (external to package) #
+# #
+# XREF **************************************************************** #
+# _calc_ea(): calculate effective address #
+# #
+# INPUT *************************************************************** #
+# _compandset(): #
+# none #
+# _isp_cas_restart(): #
+# d6 = previous sfc/dfc #
+# _isp_cas_finish(): #
+# _isp_cas_terminate(): #
+# a0 = failing address #
+# d0 = FSLW #
+# d6 = previous sfc/dfc #
+# _isp_cas_inrange(): #
+# a0 = instruction address to be checked #
+# #
+# OUTPUT ************************************************************** #
+# _compandset(): #
+# none #
+# _isp_cas_restart(): #
+# a0 = effective address #
+# d7 = word or longword flag #
+# _isp_cas_finish(): #
+# a0 = effective address #
+# _isp_cas_terminate(): #
+# initial register set before emulation exception #
+# _isp_cas_inrange(): #
+# d0 = 0 => in range; -1 => out of range #
+# #
+# ALGORITHM *********************************************************** #
+# #
+# _compandset(): #
+# First, calculate the effective address. Then, decode the #
+# instruction word and fetch the "compare" (DC) and "update" (Du) #
+# operands. #
+# Next, call the external routine _real_lock_page() so that the #
+# operating system can keep this page from being paged out while we're #
+# in this routine. If this call fails, jump to _cas_terminate2(). #
+# The routine then branches to _real_cas(). This external routine #
+# that actually emulates cas can be supplied by the external os or #
+# made to point directly back into the 060ISP which has a routine for #
+# this purpose. #
+# #
+# _isp_cas_finish(): #
+# Either way, after emulation, the package is re-entered at #
+# _isp_cas_finish(). This routine re-compares the operands in order to #
+# set the condition codes. Finally, these routines will call #
+# _real_unlock_page() in order to unlock the pages that were previously #
+# locked. #
+# #
+# _isp_cas_restart(): #
+# This routine can be entered from an access error handler where #
+# the emulation sequence should be re-started from the beginning. #
+# #
+# _isp_cas_terminate(): #
+# This routine can be entered from an access error handler where #
+# an emulation operand access failed and the operating system would #
+# like an access error stack frame created instead of the current #
+# unimplemented integer instruction frame. #
+# Also, the package enters here if a call to _real_lock_page() #
+# fails. #
+# #
+# _isp_cas_inrange(): #
+# Checks to see whether the instruction address passed to it in #
+# a0 is within the software package cas/cas2 emulation routines. This #
+# can be helpful for an operating system to determine whether an access #
+# error during emulation was due to a cas/cas2 emulation access. #
+# #
+#########################################################################
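+
+# illustrative C model of the cas semantics being emulated; the
+# emulation exists because the '060 does not handle a misaligned <ea>
+# for cas in hardware, and the access must still behave atomically
+# (names are invented for the sketch):
+#
+#	#include <stdint.h>
+#	int cas_model(uint16_t *ea, uint16_t *dc, uint16_t du)
+#	{
+#		if (*ea == *dc) {
+#			*ea = du;	/* equal: store update operand */
+#			return 1;	/* 'Z' will be set */
+#		}
+#		*dc = *ea;		/* else: load <ea> into Dc */
+#		return 0;
+#	}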
+
+set DC, EXC_TEMP+0x8
+set ADDR, EXC_TEMP+0x4
+
+ global _compandset
+_compandset:
+ btst &0x1,EXC_OPWORD(%a6) # word or long operation?
+ bne.b compandsetl # long
+
+compandsetw:
+ movq.l &0x2,%d0 # size = 2 bytes
+ bsr.l _calc_ea # a0 = calculated <ea>
+ mov.l %a0,ADDR(%a6) # save <ea> for possible restart
+ sf %d7 # clear d7 for word size
+ bra.b compandsetfetch
+
+compandsetl:
+ movq.l &0x4,%d0 # size = 4 bytes
+ bsr.l _calc_ea # a0 = calculated <ea>
+ mov.l %a0,ADDR(%a6) # save <ea> for possible restart
+ st %d7 # set d7 for longword size
+
+compandsetfetch:
+ mov.w EXC_EXTWORD(%a6),%d0 # fetch cas extension word
+ mov.l %d0,%d1 # make a copy
+
+ lsr.w &0x6,%d0
+ andi.w &0x7,%d0 # extract Du
+ mov.l (EXC_DREGS,%a6,%d0.w*4),%d2 # get update operand
+
+ andi.w &0x7,%d1 # extract Dc
+ mov.l (EXC_DREGS,%a6,%d1.w*4),%d4 # get compare operand
+ mov.w %d1,DC(%a6) # save Dc
+
+ btst &0x5,EXC_ISR(%a6) # which mode for exception?
+ sne %d6 # set on supervisor mode
+
+ mov.l %a0,%a2 # save temporarily
+ mov.l %d7,%d1 # pass size
+ mov.l %d6,%d0 # pass mode
+ bsr.l _real_lock_page # lock page
+ tst.l %d0 # did error occur?
+ bne.w _cas_terminate2 # yes, clean up the mess
+ mov.l %a2,%a0 # pass addr in a0
+
+ bra.l _real_cas
+
+########
+ global _isp_cas_finish
+_isp_cas_finish:
+ btst &0x1,EXC_OPWORD(%a6)
+ bne.b cas_finish_l
+
+# just do the compare again since it's faster than saving the ccodes
+# from the locked routine...
+cas_finish_w:
+ mov.w EXC_CC(%a6),%cc # restore cc
+ cmp.w %d0,%d4 # do word compare
+ mov.w %cc,EXC_CC(%a6) # save cc
+
+ tst.b %d1 # update compare reg?
+ bne.b cas_finish_w_done # no
+
+ mov.w DC(%a6),%d3
+ mov.w %d0,(EXC_DREGS+2,%a6,%d3.w*4) # Dc = destination
+
+cas_finish_w_done:
+ mov.l ADDR(%a6),%a0 # pass addr
+ sf %d1 # pass size
+ btst &0x5,EXC_ISR(%a6)
+ sne %d0 # pass mode
+ bsr.l _real_unlock_page # unlock page
+ rts
+
+# just do the compare again since it's faster than saving the ccodes
+# from the locked routine...
+cas_finish_l:
+ mov.w EXC_CC(%a6),%cc # restore cc
+ cmp.l %d0,%d4 # do longword compare
+ mov.w %cc,EXC_CC(%a6) # save cc
+
+ tst.b %d1 # update compare reg?
+ bne.b cas_finish_l_done # no
+
+ mov.w DC(%a6),%d3
+ mov.l %d0,(EXC_DREGS,%a6,%d3.w*4) # Dc = destination
+
+cas_finish_l_done:
+ mov.l ADDR(%a6),%a0 # pass addr
+ st %d1 # pass size
+ btst &0x5,EXC_ISR(%a6)
+ sne %d0 # pass mode
+ bsr.l _real_unlock_page # unlock page
+ rts
+
+########
+
+ global _isp_cas_restart
+_isp_cas_restart:
+ mov.l %d6,%sfc # restore previous sfc
+ mov.l %d6,%dfc # restore previous dfc
+
+ cmpi.b EXC_OPWORD+1(%a6),&0xfc # cas or cas2?
+ beq.l cr_cas2 # cas2
+cr_cas:
+ mov.l ADDR(%a6),%a0 # load <ea>
+ btst &0x1,EXC_OPWORD(%a6) # word or long operation?
+ sne %d7 # set d7 accordingly
+ bra.w compandsetfetch
+
+########
+
+# At this stage, it would be nice if d0 held the FSLW.
+ global _isp_cas_terminate
+_isp_cas_terminate:
+ mov.l %d6,%sfc # restore previous sfc
+ mov.l %d6,%dfc # restore previous dfc
+
+ global _cas_terminate2
+_cas_terminate2:
+ mov.l %a0,%a2 # copy failing addr to a2
+
+ mov.l %d0,-(%sp)
+ bsr.l isp_restore # restore An (if ()+ or -())
+ mov.l (%sp)+,%d0
+
+ addq.l &0x4,%sp # remove sub return addr
+ subq.l &0x8,%sp # make room for bigger stack
+ subq.l &0x8,%a6 # shift frame ptr down, too
+ mov.l &26,%d1 # want to move 27 longwords
+ lea 0x8(%sp),%a0 # get address of old stack
+ lea 0x0(%sp),%a1 # get address of new stack
+cas_term_cont:
+ mov.l (%a0)+,(%a1)+ # move a longword
+ dbra.w %d1,cas_term_cont # keep going
+
+ mov.w &0x4008,EXC_IVOFF(%a6) # put new stk fmt, voff
+ mov.l %a2,EXC_IVOFF+0x2(%a6) # put faulting addr on stack
+ mov.l %d0,EXC_IVOFF+0x6(%a6) # put FSLW on stack
+ movm.l EXC_DREGS(%a6),&0x3fff # restore user regs
+ unlk %a6 # unlink stack frame
+ bra.l _real_access
+
+########
+
+ global _isp_cas_inrange
+_isp_cas_inrange:
+ clr.l %d0 # clear return result
+ lea _CASHI(%pc),%a1 # load end of CAS core code
+ cmp.l %a1,%a0 # is PC in range?
+ blt.b cin_no # no
+ lea _CASLO(%pc),%a1 # load begin of CAS core code
+ cmp.l %a0,%a1 # is PC in range?
+ blt.b cin_no # no
+ rts # yes; return d0 = 0
+cin_no:
+ mov.l &-0x1,%d0 # out of range; return d0 = -1
+ rts
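+
+# equivalent C for the range check above (illustrative; an operating
+# system's access error handler would pass the faulting PC in a0):
+#
+#	extern char _CASLO[], _CASHI[];
+#	int isp_cas_inrange_model(const char *pc)
+#	{
+#		return (pc >= _CASLO && pc <= _CASHI) ? 0 : -1;
+#	}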
+
+#################################################################
+#################################################################
+#################################################################
+# This is the start of the cas and cas2 "core" emulation code. #
+# This is the section that may need to be replaced by the host #
+# OS if it is too operating system-specific. #
+# Please refer to the package documentation to see how to #
+# "replace" this section, if necessary. #
+#################################################################
+#################################################################
+#################################################################
+
+# ###### ## ###### ####
+# # # # # # #
+# # ###### ###### #
+# # # # # #
+# ###### # # ###### ######
+
+#########################################################################
+# XDEF **************************************************************** #
+# _isp_cas2(): "core" emulation code for the cas2 instruction #
+# #
+# XREF **************************************************************** #
+# _isp_cas2_finish() - only exit point for this emulation code; #
+# do clean-up; calculate ccodes; store #
+# Compare Ops if appropriate. #
+# #
+# INPUT *************************************************************** #
+# *see chart below* #
+# #
+# OUTPUT ************************************************************** #
+# *see chart below* #
+# #
+# ALGORITHM *********************************************************** #
+# (1) Make several copies of the effective address. #
+# (2) Save current SR; Then mask off all maskable interrupts. #
+# (3) Save current SFC/DFC (ASSUMED TO BE EQUAL!!!); Then set #
+# according to whether exception occurred in user or #
+# supervisor mode. #
+# (4) Use "plpaw" instruction to pre-load ATC with effective #
+# address page(s). THIS SHOULD NOT FAULT!!! The relevant #
+# page(s) should have already been made resident prior to #
+# entering this routine. #
+# (5) Push the operand lines from the cache w/ "cpushl". #
+# In the 68040, this was done within the locked region. In #
+# the 68060, it is done outside of the locked region. #
+# (6) Use "plpar" instruction to do a re-load of ATC entries for #
+# ADDR1 since ADDR2 entries may have pushed ADDR1 out of the #
+# ATC. #
+# (7) Pre-fetch the core emulation instructions by executing #
+# one branch within each physical line (16 bytes) of the code #
+# before actually executing the code. #
+# (8) Load the BUSCR w/ the bus lock value. #
+# (9) Fetch the source operands using "moves". #
+# (10)Do the compares. If both equal, go to step (13). #
+# (11)Unequal. No update occurs. But, we do write the DST1 op #
+# back to itself (as w/ the '040) so we can gracefully unlock #
+# the bus (and assert LOCKE*) using BUSCR and the final move. #
+# (12)Exit. #
+# (13)Write update operand to the DST locations. Use BUSCR to #
+# assert LOCKE* for the final write operation. #
+# (14)Exit. #
+# #
+# The algorithm is actually implemented slightly differently #
+# depending on the size of the operation and the misalignment of the #
+# operands. A misaligned operand must be written in aligned chunks or #
+# else the BUSCR register control gets confused. #
+# #
+#########################################################################
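+
+# note on the "bra.b ~+16" chains in the code below: the core is laid
+# out in 16-byte instruction lines (the "align 0x10" directives). on
+# entry, control runs down the *_ENTER branches, taking exactly one
+# branch within each line; this is step (7) above, and it pulls every
+# line of the sequence into the instruction cache. the final filler
+# line then branches back to the real start label, so the locked
+# sequence should execute without missing in the instruction cache
+# while LOCK*/LOCKE* are asserted.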
+
+#################################################################
+# THIS IS THE STATE OF THE INTEGER REGISTER FILE UPON #
+# ENTERING _isp_cas2(). #
+# #
+# D0 = xxxxxxxx #
+# D1 = xxxxxxxx #
+# D2 = cmp operand 1 #
+# D3 = cmp operand 2 #
+# D4 = update oper 1 #
+# D5 = update oper 2 #
+# D6 = 'xxxxxxff if supervisor mode; 'xxxxxx00 if user mode #
+# D7 = 'xxxxxxff if longword operation; 'xxxxxx00 if word #
+# A0 = ADDR1 #
+# A1 = ADDR2 #
+# A2 = xxxxxxxx #
+# A3 = xxxxxxxx #
+# A4 = xxxxxxxx #
+# A5 = xxxxxxxx #
+# A6 = frame pointer #
+# A7 = stack pointer #
+#################################################################
+
+# align 0x1000
+# beginning label used by _isp_cas_inrange()
+ global _CASLO
+_CASLO:
+
+ global _isp_cas2
+_isp_cas2:
+ tst.b %d6 # user or supervisor mode?
+ bne.b cas2_supervisor # supervisor
+cas2_user:
+ movq.l &0x1,%d0 # load user data fc
+ bra.b cas2_cont
+cas2_supervisor:
+ movq.l &0x5,%d0 # load supervisor data fc
+cas2_cont:
+ tst.b %d7 # word or longword?
+ beq.w cas2w # word
+
+####
+cas2l:
+ mov.l %a0,%a2 # copy ADDR1
+ mov.l %a1,%a3 # copy ADDR2
+ mov.l %a0,%a4 # copy ADDR1
+ mov.l %a1,%a5 # copy ADDR2
+
+ addq.l &0x3,%a4 # ADDR1+3
+ addq.l &0x3,%a5 # ADDR2+3
+ mov.l %a2,%d1 # ADDR1
+
+# mask interrupts levels 0-6. save old mask value.
+ mov.w %sr,%d7 # save current SR
+ ori.w &0x0700,%sr # inhibit interrupts
+
+# load the SFC and DFC with the appropriate mode.
+ movc %sfc,%d6 # save old SFC/DFC
+ movc %d0,%sfc # store new SFC
+ movc %d0,%dfc # store new DFC
+
+# pre-load the operand ATC. no page faults should occur here because
+# _real_lock_page() should have taken care of this.
+ plpaw (%a2) # load atc for ADDR1
+ plpaw (%a4) # load atc for ADDR1+3
+ plpaw (%a3) # load atc for ADDR2
+ plpaw (%a5) # load atc for ADDR2+3
+
+# push the operand lines from the cache if they exist.
+ cpushl %dc,(%a2) # push line for ADDR1
+ cpushl %dc,(%a4) # push line for ADDR1+3
+ cpushl %dc,(%a3) # push line for ADDR2
+ cpushl %dc,(%a5) # push line for ADDR2+3
+
+ mov.l %d1,%a2 # ADDR1
+ addq.l &0x3,%d1
+ mov.l %d1,%a4 # ADDR1+3
+# if ADDR1 was already ATC resident before the above "plpaw"s were
+# executed, was the next entry scheduled for replacement, and ADDR2
+# maps to the same set, then the "plpaw" for ADDR2 can push the ADDR1
+# entries from the ATC. so, we do a second set of "plpa"s.
+ plpar (%a2) # load atc for ADDR1
+ plpar (%a4) # load atc for ADDR1+3
+
+# load the BUSCR values.
+ mov.l &0x80000000,%a2 # assert LOCK* buscr value
+ mov.l &0xa0000000,%a3 # assert LOCKE* buscr value
+ mov.l &0x00000000,%a4 # buscr unlock value
+
+# there are three possible alignment cases for longword cas2. they
+# are separated because the final write which asserts LOCKE* must
+# be aligned.
+ mov.l %a0,%d0 # is ADDR1 misaligned?
+ andi.b &0x3,%d0
+ beq.b CAS2L_ENTER # no
+ cmpi.b %d0,&0x2
+ beq.w CAS2L2_ENTER # yes; word misaligned
+ bra.w CAS2L3_ENTER # yes; byte misaligned
+
+#
+# D0 = dst operand 1 <-
+# D1 = dst operand 2 <-
+# D2 = cmp operand 1
+# D3 = cmp operand 2
+# D4 = update oper 1
+# D5 = update oper 2
+# D6 = old SFC/DFC
+# D7 = old SR
+# A0 = ADDR1
+# A1 = ADDR2
+# A2 = bus LOCK* value
+# A3 = bus LOCKE* value
+# A4 = bus unlock value
+# A5 = xxxxxxxx
+#
+ align 0x10
+CAS2L_START:
+ movc %a2,%buscr # assert LOCK*
+ movs.l (%a1),%d1 # fetch Dest2[31:0]
+ movs.l (%a0),%d0 # fetch Dest1[31:0]
+ bra.b CAS2L_CONT
+CAS2L_ENTER:
+ bra.b ~+16
+
+CAS2L_CONT:
+ cmp.l %d0,%d2 # Dest1 - Compare1
+ bne.b CAS2L_NOUPDATE
+ cmp.l %d1,%d3 # Dest2 - Compare2
+ bne.b CAS2L_NOUPDATE
+ movs.l %d5,(%a1) # Update2[31:0] -> DEST2
+ bra.b CAS2L_UPDATE
+ bra.b ~+16
+
+CAS2L_UPDATE:
+ movc %a3,%buscr # assert LOCKE*
+ movs.l %d4,(%a0) # Update1[31:0] -> DEST1
+ movc %a4,%buscr # unlock the bus
+ bra.b cas2l_update_done
+ bra.b ~+16
+
+CAS2L_NOUPDATE:
+ movc %a3,%buscr # assert LOCKE*
+ movs.l %d0,(%a0) # Dest1[31:0] -> DEST1
+ movc %a4,%buscr # unlock the bus
+ bra.b cas2l_noupdate_done
+ bra.b ~+16
+
+CAS2L_FILLER:
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ bra.b CAS2L_START
+
+####
+
+#################################################################
+# THIS MUST BE THE STATE OF THE INTEGER REGISTER FILE UPON #
+# CALLING _isp_cas2_finish(). #
+# #
+# D0 = destination[31:0] operand 1 #
+# D1 = destination[31:0] operand 2 #
+# D2 = cmp[31:0] operand 1 #
+# D3 = cmp[31:0] operand 2 #
+# D4 = 'xxxxxx11 -> no reg update; 'xxxxxx00 -> update required #
+# D5 = xxxxxxxx #
+# D6 = xxxxxxxx #
+# D7 = xxxxxxxx #
+# A0 = xxxxxxxx #
+# A1 = xxxxxxxx #
+# A2 = xxxxxxxx #
+# A3 = xxxxxxxx #
+# A4 = xxxxxxxx #
+# A5 = xxxxxxxx #
+# A6 = frame pointer #
+# A7 = stack pointer #
+#################################################################
+
+cas2l_noupdate_done:
+
+# restore previous SFC/DFC value.
+ movc %d6,%sfc # restore old SFC
+ movc %d6,%dfc # restore old DFC
+
+# restore previous interrupt mask level.
+ mov.w %d7,%sr # restore old SR
+
+ sf %d4 # indicate no update was done
+ bra.l _isp_cas2_finish
+
+cas2l_update_done:
+
+# restore previous SFC/DFC value.
+ movc %d6,%sfc # restore old SFC
+ movc %d6,%dfc # restore old DFC
+
+# restore previous interrupt mask level.
+ mov.w %d7,%sr # restore old SR
+
+ st %d4 # indicate update was done
+ bra.l _isp_cas2_finish
+####
+
+ align 0x10
+CAS2L2_START:
+ movc %a2,%buscr # assert LOCK*
+ movs.l (%a1),%d1 # fetch Dest2[31:0]
+ movs.l (%a0),%d0 # fetch Dest1[31:0]
+ bra.b CAS2L2_CONT
+CAS2L2_ENTER:
+ bra.b ~+16
+
+CAS2L2_CONT:
+ cmp.l %d0,%d2 # Dest1 - Compare1
+ bne.b CAS2L2_NOUPDATE
+ cmp.l %d1,%d3 # Dest2 - Compare2
+ bne.b CAS2L2_NOUPDATE
+ movs.l %d5,(%a1) # Update2[31:0] -> Dest2
+ bra.b CAS2L2_UPDATE
+ bra.b ~+16
+
+CAS2L2_UPDATE:
+ swap %d4 # get Update1[31:16]
+ movs.w %d4,(%a0)+ # Update1[31:16] -> DEST1
+ movc %a3,%buscr # assert LOCKE*
+ swap %d4 # get Update1[15:0]
+ bra.b CAS2L2_UPDATE2
+ bra.b ~+16
+
+CAS2L2_UPDATE2:
+ movs.w %d4,(%a0) # Update1[15:0] -> DEST1+0x2
+ movc %a4,%buscr # unlock the bus
+ bra.w cas2l_update_done
+ nop
+ bra.b ~+16
+
+CAS2L2_NOUPDATE:
+ swap %d0 # get Dest1[31:16]
+ movs.w %d0,(%a0)+ # Dest1[31:16] -> DEST1
+ movc %a3,%buscr # assert LOCKE*
+ swap %d0 # get Dest1[15:0]
+ bra.b CAS2L2_NOUPDATE2
+ bra.b ~+16
+
+CAS2L2_NOUPDATE2:
+ movs.w %d0,(%a0) # Dest1[15:0] -> DEST1+0x2
+ movc %a4,%buscr # unlock the bus
+ bra.w cas2l_noupdate_done
+ nop
+ bra.b ~+16
+
+CAS2L2_FILLER:
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ bra.b CAS2L2_START
+
+#################################
+
+ align 0x10
+CAS2L3_START:
+ movc %a2,%buscr # assert LOCK*
+ movs.l (%a1),%d1 # fetch Dest2[31:0]
+ movs.l (%a0),%d0 # fetch Dest1[31:0]
+ bra.b CAS2L3_CONT
+CAS2L3_ENTER:
+ bra.b ~+16
+
+CAS2L3_CONT:
+ cmp.l %d0,%d2 # Dest1 - Compare1
+ bne.b CAS2L3_NOUPDATE
+ cmp.l %d1,%d3 # Dest2 - Compare2
+ bne.b CAS2L3_NOUPDATE
+ movs.l %d5,(%a1) # Update2[31:0] -> DEST2
+ bra.b CAS2L3_UPDATE
+ bra.b ~+16
+
+CAS2L3_UPDATE:
+ rol.l &0x8,%d4 # get Update1[31:24]
+ movs.b %d4,(%a0)+ # Update1[31:24] -> DEST1
+ swap %d4 # get Update1[23:8]
+ movs.w %d4,(%a0)+ # Update1[23:8] -> DEST1+0x1
+ bra.b CAS2L3_UPDATE2
+ bra.b ~+16
+
+CAS2L3_UPDATE2:
+ rol.l &0x8,%d4 # get Update1[7:0]
+ movc %a3,%buscr # assert LOCKE*
+ movs.b %d4,(%a0) # Update1[7:0] -> DEST1+0x3
+ bra.b CAS2L3_UPDATE3
+ nop
+ bra.b ~+16
+
+CAS2L3_UPDATE3:
+ movc %a4,%buscr # unlock the bus
+ bra.w cas2l_update_done
+ nop
+ nop
+ nop
+ bra.b ~+16
+
+CAS2L3_NOUPDATE:
+ rol.l &0x8,%d0 # get Dest1[31:24]
+ movs.b %d0,(%a0)+ # Dest1[31:24] -> DEST1
+ swap %d0 # get Dest1[23:8]
+ movs.w %d0,(%a0)+ # Dest1[23:8] -> DEST1+0x1
+ bra.b CAS2L3_NOUPDATE2
+ bra.b ~+16
+
+CAS2L3_NOUPDATE2:
+ rol.l &0x8,%d0 # get Dest1[7:0]
+ movc %a3,%buscr # assert LOCKE*
+ movs.b %d0,(%a0) # Update1[7:0] -> DEST1+0x3
+ bra.b CAS2L3_NOUPDATE3
+ nop
+ bra.b ~+16
+
+CAS2L3_NOUPDATE3:
+ movc %a4,%buscr # unlock the bus
+ bra.w cas2l_noupdate_done
+ nop
+ nop
+ nop
+ bra.b ~+14
+
+CAS2L3_FILLER:
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ bra.w CAS2L3_START
+
+#############################################################
+#############################################################
+
+cas2w:
+ mov.l %a0,%a2 # copy ADDR1
+ mov.l %a1,%a3 # copy ADDR2
+ mov.l %a0,%a4 # copy ADDR1
+ mov.l %a1,%a5 # copy ADDR2
+
+ addq.l &0x1,%a4 # ADDR1+1
+ addq.l &0x1,%a5 # ADDR2+1
+ mov.l %a2,%d1 # ADDR1
+
+# mask interrupt levels 0-6. save old mask value.
+ mov.w %sr,%d7 # save current SR
+ ori.w &0x0700,%sr # inhibit interrupts
+
+# load the SFC and DFC with the appropriate mode.
+ movc %sfc,%d6 # save old SFC/DFC
+ movc %d0,%sfc # store new SFC
+ movc %d0,%dfc # store new DFC
+
+# pre-load the operand ATC. no page faults should occur because
+# _real_lock_page() should have taken care of this.
+ plpaw (%a2) # load atc for ADDR1
+ plpaw (%a4) # load atc for ADDR1+1
+ plpaw (%a3) # load atc for ADDR2
+ plpaw (%a5) # load atc for ADDR2+1
+
+# push the operand cache lines from the cache if they exist.
+ cpushl %dc,(%a2) # push line for ADDR1
+ cpushl %dc,(%a4) # push line for ADDR1+1
+ cpushl %dc,(%a3) # push line for ADDR2
+ cpushl %dc,(%a5) # push line for ADDR2+1
+
+ mov.l %d1,%a2 # ADDR1
+ addq.l &0x3,%d1
+ mov.l %d1,%a4 # ADDR1+3
+# if ADDR1 was already ATC resident before the above "plpaw"s were
+# executed, was the next entry scheduled for replacement, and ADDR2
+# maps to the same set, then the "plpaw" for ADDR2 can push the ADDR1
+# entries from the ATC. so, we do a second set of "plpa"s.
+ plpar (%a2) # load atc for ADDR1
+ plpar (%a4) # load atc for ADDR1+3
+
+# load the BUSCR values.
+ mov.l &0x80000000,%a2 # assert LOCK* buscr value
+ mov.l &0xa0000000,%a3 # assert LOCKE* buscr value
+ mov.l &0x00000000,%a4 # buscr unlock value
+
+# there are two possible alignment cases for word cas2. they
+# are separated because the final write which asserts LOCKE* must
+# be aligned.
+ mov.l %a0,%d0 # is ADDR1 misaligned?
+ btst &0x0,%d0
+ bne.w CAS2W2_ENTER # yes
+ bra.b CAS2W_ENTER # no
+
+#
+# D0 = dst operand 1 <-
+# D1 = dst operand 2 <-
+# D2 = cmp operand 1
+# D3 = cmp operand 2
+# D4 = update oper 1
+# D5 = update oper 2
+# D6 = old SFC/DFC
+# D7 = old SR
+# A0 = ADDR1
+# A1 = ADDR2
+# A2 = bus LOCK* value
+# A3 = bus LOCKE* value
+# A4 = bus unlock value
+# A5 = xxxxxxxx
+#
+ align 0x10
+CAS2W_START:
+ movc %a2,%buscr # assert LOCK*
+ movs.w (%a1),%d1 # fetch Dest2[15:0]
+ movs.w (%a0),%d0 # fetch Dest1[15:0]
+ bra.b CAS2W_CONT2
+CAS2W_ENTER:
+ bra.b ~+16
+
+CAS2W_CONT2:
+ cmp.w %d0,%d2 # Dest1 - Compare1
+ bne.b CAS2W_NOUPDATE
+ cmp.w %d1,%d3 # Dest2 - Compare2
+ bne.b CAS2W_NOUPDATE
+ movs.w %d5,(%a1) # Update2[15:0] -> DEST2
+ bra.b CAS2W_UPDATE
+ bra.b ~+16
+
+CAS2W_UPDATE:
+ movc %a3,%buscr # assert LOCKE*
+ movs.w %d4,(%a0) # Update1[15:0] -> DEST1
+ movc %a4,%buscr # unlock the bus
+ bra.b cas2w_update_done
+ bra.b ~+16
+
+CAS2W_NOUPDATE:
+ movc %a3,%buscr # assert LOCKE*
+ movs.w %d0,(%a0) # Dest1[15:0] -> DEST1
+ movc %a4,%buscr # unlock the bus
+ bra.b cas2w_noupdate_done
+ bra.b ~+16
+
+CAS2W_FILLER:
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ bra.b CAS2W_START
+
+####
+
+#################################################################
+# THIS MUST BE THE STATE OF THE INTEGER REGISTER FILE UPON #
+# CALLING _isp_cas2_finish(). #
+# #
+# D0 = destination[15:0] operand 1 #
+# D1 = destination[15:0] operand 2 #
+# D2 = cmp[15:0] operand 1 #
+# D3 = cmp[15:0] operand 2 #
+# D4 = 'xxxxxx11 -> no reg update; 'xxxxxx00 -> update required #
+# D5 = xxxxxxxx #
+# D6 = xxxxxxxx #
+# D7 = xxxxxxxx #
+# A0 = xxxxxxxx #
+# A1 = xxxxxxxx #
+# A2 = xxxxxxxx #
+# A3 = xxxxxxxx #
+# A4 = xxxxxxxx #
+# A5 = xxxxxxxx #
+# A6 = frame pointer #
+# A7 = stack pointer #
+#################################################################
+
+cas2w_noupdate_done:
+
+# restore previous SFC/DFC value.
+ movc %d6,%sfc # restore old SFC
+ movc %d6,%dfc # restore old DFC
+
+# restore previous interrupt mask level.
+ mov.w %d7,%sr # restore old SR
+
+ sf %d4 # indicate no update was done
+ bra.l _isp_cas2_finish
+
+cas2w_update_done:
+
+# restore previous SFC/DFC value.
+ movc %d6,%sfc # restore old SFC
+ movc %d6,%dfc # restore old DFC
+
+# restore previous interrupt mask level.
+ mov.w %d7,%sr # restore old SR
+
+ st %d4 # indicate update was done
+ bra.l _isp_cas2_finish
+####
+
+ align 0x10
+CAS2W2_START:
+ movc %a2,%buscr # assert LOCK*
+ movs.w (%a1),%d1 # fetch Dest2[15:0]
+ movs.w (%a0),%d0 # fetch Dest1[15:0]
+ bra.b CAS2W2_CONT2
+CAS2W2_ENTER:
+ bra.b ~+16
+
+CAS2W2_CONT2:
+ cmp.w %d0,%d2 # Dest1 - Compare1
+ bne.b CAS2W2_NOUPDATE
+ cmp.w %d1,%d3 # Dest2 - Compare2
+ bne.b CAS2W2_NOUPDATE
+ movs.w %d5,(%a1) # Update2[15:0] -> DEST2
+ bra.b CAS2W2_UPDATE
+ bra.b ~+16
+
+CAS2W2_UPDATE:
+ ror.l &0x8,%d4 # get Update1[15:8]
+ movs.b %d4,(%a0)+ # Update1[15:8] -> DEST1
+ movc %a3,%buscr # assert LOCKE*
+ rol.l &0x8,%d4 # get Update1[7:0]
+ bra.b CAS2W2_UPDATE2
+ bra.b ~+16
+
+CAS2W2_UPDATE2:
+ movs.b %d4,(%a0) # Update1[7:0] -> DEST1+0x1
+ movc %a4,%buscr # unlock the bus
+ bra.w cas2w_update_done
+ nop
+ bra.b ~+16
+
+CAS2W2_NOUPDATE:
+ ror.l &0x8,%d0 # get Dest1[15:8]
+ movs.b %d0,(%a0)+ # Dest1[15:8] -> DEST1
+ movc %a3,%buscr # assert LOCKE*
+ rol.l &0x8,%d0 # get Dest1[7:0]
+ bra.b CAS2W2_NOUPDATE2
+ bra.b ~+16
+
+CAS2W2_NOUPDATE2:
+ movs.b %d0,(%a0) # Dest1[7:0] -> DEST1+0x1
+ movc %a4,%buscr # unlock the bus
+ bra.w cas2w_noupdate_done
+ nop
+ bra.b ~+16
+
+CAS2W2_FILLER:
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ bra.b CAS2W2_START
+
+# ###### ## ######
+# # # # #
+# # ###### ######
+# # # # #
+# ###### # # ######
+
+#########################################################################
+# XDEF **************************************************************** #
+# _isp_cas(): "core" emulation code for the cas instruction #
+# #
+# XREF **************************************************************** #
+# _isp_cas_finish() - only exit point for this emulation code; #
+# do clean-up #
+# #
+# INPUT *************************************************************** #
+# *see entry chart below* #
+# #
+# OUTPUT ************************************************************** #
+# *see exit chart below* #
+# #
+# ALGORITHM *********************************************************** #
+# (1) Make several copies of the effective address. #
+# (2) Save current SR; Then mask off all maskable interrupts. #
+# (3) Save current DFC/SFC (ASSUMED TO BE EQUAL!!!); Then set #
+# SFC/DFC according to whether exception occurred in user or #
+# supervisor mode. #
+# (4) Use "plpaw" instruction to pre-load ATC with efective #
+# address page(s). THIS SHOULD NOT FAULT!!! The relevant #
+# page(s) should have been made resident prior to entering #
+# this routine. #
+# (5) Push the operand lines from the cache w/ "cpushl". #
+# In the 68040, this was done within the locked region. In #
+# the 68060, it is done outside of the locked region. #
+# (6) Pre-fetch the core emulation instructions by executing one #
+# branch within each physical line (16 bytes) of the code #
+# before actually executing the code. #
+# (7) Load the BUSCR with the bus lock value. #
+# (8) Fetch the source operand. #
+# (9) Do the compare. If equal, go to step (12). #
+# (10)Unequal. No update occurs. But, we do write the DST op back #
+# to itself (as w/ the '040) so we can gracefully unlock #
+# the bus (and assert LOCKE*) using BUSCR and the final move. #
+# (11)Exit. #
+# (12)Write update operand to the DST location. Use BUSCR to #
+# assert LOCKE* for the final write operation. #
+# (13)Exit. #
+# #
+# The algorithm is actually implemented slightly differently #
+# depending on the size of the operation and the misalignment of the #
+# operand. A misaligned operand must be written in aligned chunks or #
+# else the BUSCR register control gets confused. #
+# #
+#########################################################################
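+
+# hedged C sketch of the chunking idea used for misaligned operands
+# below (illustrative only; the name is invented): a misaligned word
+# write is performed as two aligned byte writes so that the final,
+# LOCKE*-asserted bus cycle is itself an aligned access:
+#
+#	#include <stdint.h>
+#	void store_word_misaligned(uint8_t *p, uint16_t v)
+#	{
+#		p[0] = (uint8_t)(v >> 8);	/* high byte first    */
+#		p[1] = (uint8_t)v;		/* final aligned byte */
+#	}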
+
+#########################################################
+# THIS IS THE STATE OF THE INTEGER REGISTER FILE UPON #
+# ENTERING _isp_cas(). #
+# #
+# D0 = xxxxxxxx #
+# D1 = xxxxxxxx #
+# D2 = update operand #
+# D3 = xxxxxxxx #
+# D4 = compare operand #
+# D5 = xxxxxxxx #
+# D6 = supervisor ('xxxxxxff) or user mode ('xxxxxx00) #
+# D7 = longword ('xxxxxxff) or word size ('xxxxxx00) #
+# A0 = ADDR #
+# A1 = xxxxxxxx #
+# A2 = xxxxxxxx #
+# A3 = xxxxxxxx #
+# A4 = xxxxxxxx #
+# A5 = xxxxxxxx #
+# A6 = frame pointer #
+# A7 = stack pointer #
+#########################################################
+
+ global _isp_cas
+_isp_cas:
+ tst.b %d6 # user or supervisor mode?
+ bne.b cas_super # supervisor
+cas_user:
+ movq.l &0x1,%d0 # load user data fc
+ bra.b cas_cont
+cas_super:
+ movq.l &0x5,%d0 # load supervisor data fc
+
+cas_cont:
+ tst.b %d7 # word or longword?
+ bne.w casl # longword
+
+####
+casw:
+ mov.l %a0,%a1 # make copy for plpaw1
+ mov.l %a0,%a2 # make copy for plpaw2
+ addq.l &0x1,%a2 # plpaw2 points to end of word
+
+ mov.l %d2,%d3 # d3 = update[7:0]
+ lsr.w &0x8,%d2 # d2 = update[15:8]
+
+# mask interrupt levels 0-6. save old mask value.
+ mov.w %sr,%d7 # save current SR
+ ori.w &0x0700,%sr # inhibit interrupts
+
+# load the SFC and DFC with the appropriate mode.
+ movc %sfc,%d6 # save old SFC/DFC
+ movc %d0,%sfc # load new sfc
+ movc %d0,%dfc # load new dfc
+
+# pre-load the operand ATC. no page faults should occur here because
+# _real_lock_page() should have taken care of this.
+ plpaw (%a1) # load atc for ADDR
+ plpaw (%a2) # load atc for ADDR+1
+
+# push the operand lines from the cache if they exist.
+ cpushl %dc,(%a1) # push dirty data
+ cpushl %dc,(%a2) # push dirty data
+
+# load the BUSCR values.
+ mov.l &0x80000000,%a1 # assert LOCK* buscr value
+ mov.l &0xa0000000,%a2 # assert LOCKE* buscr value
+ mov.l &0x00000000,%a3 # buscr unlock value
+
+# pre-load the instruction cache for the following algorithm.
+# this will minimize the number of cycles that LOCK* will be asserted.
+ bra.b CASW_ENTER # start pre-loading icache
+
+#
+# D0 = dst operand <-
+# D1 = update[15:8] operand
+# D2 = update[7:0] operand
+# D3 = xxxxxxxx
+# D4 = compare[15:0] operand
+# D5 = xxxxxxxx
+# D6 = old SFC/DFC
+# D7 = old SR
+# A0 = ADDR
+# A1 = bus LOCK* value
+# A2 = bus LOCKE* value
+# A3 = bus unlock value
+# A4 = xxxxxxxx
+# A5 = xxxxxxxx
+#
+ align 0x10
+CASW_START:
+ movc %a1,%buscr # assert LOCK*
+ movs.w (%a0),%d0 # fetch Dest[15:0]
+ cmp.w %d0,%d4 # Dest - Compare
+ bne.b CASW_NOUPDATE
+ bra.b CASW_UPDATE
+CASW_ENTER:
+ bra.b ~+16
+
+CASW_UPDATE:
+ movs.b %d2,(%a0)+ # Update[15:8] -> DEST
+ movc %a2,%buscr # assert LOCKE*
+ movs.b %d3,(%a0) # Update[7:0] -> DEST+0x1
+ bra.b CASW_UPDATE2
+ bra.b ~+16
+
+CASW_UPDATE2:
+ movc %a3,%buscr # unlock the bus
+ bra.b casw_update_done
+ nop
+ nop
+ nop
+ nop
+ bra.b ~+16
+
+CASW_NOUPDATE:
+ ror.l &0x8,%d0 # get Dest[15:8]
+ movs.b %d0,(%a0)+ # Dest[15:8] -> DEST
+ movc %a2,%buscr # assert LOCKE*
+ rol.l &0x8,%d0 # get Dest[7:0]
+ bra.b CASW_NOUPDATE2
+ bra.b ~+16
+
+CASW_NOUPDATE2:
+ movs.b %d0,(%a0) # Dest[7:0] -> DEST+0x1
+ movc %a3,%buscr # unlock the bus
+ bra.b casw_noupdate_done
+ nop
+ nop
+ bra.b ~+16
+
+CASW_FILLER:
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ bra.b CASW_START
+
+#################################################################
+# THIS MUST BE THE STATE OF THE INTEGER REGISTER FILE UPON #
+# CALLING _isp_cas_finish(). #
+# #
+# D0 = destination[15:0] operand #
+# D1 = 'xxxxxx11 -> no reg update; 'xxxxxx00 -> update required #
+# D2 = xxxxxxxx #
+# D3 = xxxxxxxx #
+# D4 = compare[15:0] operand #
+# D5 = xxxxxxxx #
+# D6 = xxxxxxxx #
+# D7 = xxxxxxxx #
+# A0 = xxxxxxxx #
+# A1 = xxxxxxxx #
+# A2 = xxxxxxxx #
+# A3 = xxxxxxxx #
+# A4 = xxxxxxxx #
+# A5 = xxxxxxxx #
+# A6 = frame pointer #
+# A7 = stack pointer #
+#################################################################
+
+casw_noupdate_done:
+
+# restore previous SFC/DFC value.
+ movc %d6,%sfc # restore old SFC
+ movc %d6,%dfc # restore old DFC
+
+# restore previous interrupt mask level.
+ mov.w %d7,%sr # restore old SR
+
+ sf %d1 # indicate no update was done
+ bra.l _isp_cas_finish
+
+casw_update_done:
+
+# restore previous SFC/DFC value.
+ movc %d6,%sfc # restore old SFC
+ movc %d6,%dfc # restore old DFC
+
+# restore previous interrupt mask level.
+ mov.w %d7,%sr # restore old SR
+
+ st %d1 # indicate update was done
+ bra.l _isp_cas_finish
+
+################
+
+# there are two possible mis-aligned cases for longword cas. they
+# are separated because the final write which asserts LOCKE* must
+# be an aligned write.
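+#
+# NOTE: a C sketch of the two aligned-chunk write patterns used below;
+# both store the same big-endian bytes, and only the grouping of the
+# bus accesses differs (names illustrative only):
+#
+#	#include <stdint.h>
+#
+#	/* casl: even address -> two aligned word writes */
+#	static void write_as_words(uint8_t *p, uint32_t v)
+#	{
+#		p[0] = v >> 24; p[1] = v >> 16;	/* update[31:16] -> DEST     */
+#		p[2] = v >>  8; p[3] = v;	/* update[15:0]  -> DEST+0x2 */
+#	}
+#
+#	/* casl2: odd address -> byte, word, byte writes */
+#	static void write_byte_word_byte(uint8_t *p, uint32_t v)
+#	{
+#		p[0] = v >> 24;			/* update[31:24] -> DEST     */
+#		p[1] = v >> 16; p[2] = v >> 8;	/* update[23:8]  -> DEST+0x1 */
+#		p[3] = v;			/* update[7:0]   -> DEST+0x3 */
+#	}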
+casl:
+ mov.l %a0,%a1 # make copy for plpaw1
+ mov.l %a0,%a2 # make copy for plpaw2
+ addq.l &0x3,%a2 # plpaw2 points to end of longword
+
+ mov.l %a0,%d1 # byte or word misaligned?
+ btst &0x0,%d1
+ bne.w casl2 # byte misaligned
+
+ mov.l %d2,%d3 # d3 = update[15:0]
+ swap %d2 # d2 = update[31:16]
+
+# mask interrupt levels 0-6. save old mask value.
+ mov.w %sr,%d7 # save current SR
+ ori.w &0x0700,%sr # inhibit interrupts
+
+# load the SFC and DFC with the appropriate mode.
+ movc %sfc,%d6 # save old SFC/DFC
+ movc %d0,%sfc # load new sfc
+ movc %d0,%dfc # load new dfc
+
+# pre-load the operand ATC. no page faults should occur here because
+# _real_lock_page() should have taken care of this.
+ plpaw (%a1) # load atc for ADDR
+ plpaw (%a2) # load atc for ADDR+3
+
+# push the operand lines from the cache if they exist.
+ cpushl %dc,(%a1) # push dirty data
+ cpushl %dc,(%a2) # push dirty data
+
+# load the BUSCR values.
+ mov.l &0x80000000,%a1 # assert LOCK* buscr value
+ mov.l &0xa0000000,%a2 # assert LOCKE* buscr value
+ mov.l &0x00000000,%a3 # buscr unlock value
+
+ bra.b CASL_ENTER # start pre-loading icache
+
+#
+# D0 = dst operand <-
+# D1 = xxxxxxxx
+# D2 = update[31:16] operand
+# D3 = update[15:0] operand
+# D4 = compare[31:0] operand
+# D5 = xxxxxxxx
+# D6 = old SFC/DFC
+# D7 = old SR
+# A0 = ADDR
+# A1 = bus LOCK* value
+# A2 = bus LOCKE* value
+# A3 = bus unlock value
+# A4 = xxxxxxxx
+# A5 = xxxxxxxx
+#
+ align 0x10
+CASL_START:
+ movc %a1,%buscr # assert LOCK*
+ movs.l (%a0),%d0 # fetch Dest[31:0]
+ cmp.l %d0,%d4 # Dest - Compare
+ bne.b CASL_NOUPDATE
+ bra.b CASL_UPDATE
+CASL_ENTER:
+ bra.b ~+16
+
+CASL_UPDATE:
+ movs.w %d2,(%a0)+ # Update[31:16] -> DEST
+ movc %a2,%buscr # assert LOCKE*
+ movs.w %d3,(%a0) # Update[15:0] -> DEST+0x2
+ bra.b CASL_UPDATE2
+ bra.b ~+16
+
+CASL_UPDATE2:
+ movc %a3,%buscr # unlock the bus
+ bra.b casl_update_done
+ nop
+ nop
+ nop
+ nop
+ bra.b ~+16
+
+CASL_NOUPDATE:
+ swap %d0 # get Dest[31:16]
+ movs.w %d0,(%a0)+ # Dest[31:16] -> DEST
+ swap %d0 # get Dest[15:0]
+ movc %a2,%buscr # assert LOCKE*
+ bra.b CASL_NOUPDATE2
+ bra.b ~+16
+
+CASL_NOUPDATE2:
+ movs.w %d0,(%a0) # Dest[15:0] -> DEST+0x2
+ movc %a3,%buscr # unlock the bus
+ bra.b casl_noupdate_done
+ nop
+ nop
+ bra.b ~+16
+
+CASL_FILLER:
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ bra.b CASL_START
+
+#################################################################
+# THIS MUST BE THE STATE OF THE INTEGER REGISTER FILE UPON #
+# CALLING _isp_cas_finish(). #
+# #
+# D0 = destination[31:0] operand #
+# D1 = 'xxxxxx11 -> no reg update; 'xxxxxx00 -> update required #
+# D2 = xxxxxxxx #
+# D3 = xxxxxxxx #
+# D4 = compare[31:0] operand #
+# D5 = xxxxxxxx #
+# D6 = xxxxxxxx #
+# D7 = xxxxxxxx #
+# A0 = xxxxxxxx #
+# A1 = xxxxxxxx #
+# A2 = xxxxxxxx #
+# A3 = xxxxxxxx #
+# A4 = xxxxxxxx #
+# A5 = xxxxxxxx #
+# A6 = frame pointer #
+# A7 = stack pointer #
+#################################################################
+
+casl_noupdate_done:
+
+# restore previous SFC/DFC value.
+ movc %d6,%sfc # restore old SFC
+ movc %d6,%dfc # restore old DFC
+
+# restore previous interrupt mask level.
+ mov.w %d7,%sr # restore old SR
+
+ sf %d1 # indicate no update was done
+ bra.l _isp_cas_finish
+
+casl_update_done:
+
+# restore previous SFC/DFC value.
+ movc %d6,%sfc # restore old SFC
+ movc %d6,%dfc # restore old DFC
+
+# restore previous interrupt mask level.
+ mov.w %d7,%sr # restore old SR
+
+ st %d1 # indicate update was done
+ bra.l _isp_cas_finish
+
+#######################################
+casl2:
+ mov.l %d2,%d5 # d5 = Update[7:0]
+ lsr.l &0x8,%d2
+ mov.l %d2,%d3 # d3 = Update[23:8]
+ swap %d2 # d2 = Update[31:24]
+
+# mask interrupt levels 0-6. save old mask value.
+ mov.w %sr,%d7 # save current SR
+ ori.w &0x0700,%sr # inhibit interrupts
+
+# load the SFC and DFC with the appropriate mode.
+ movc %sfc,%d6 # save old SFC/DFC
+ movc %d0,%sfc # load new sfc
+ movc %d0,%dfc # load new dfc
+
+# pre-load the operand ATC. no page faults should occur here because
+# _real_lock_page() should have taken care of this already.
+ plpaw (%a1) # load atc for ADDR
+ plpaw (%a2) # load atc for ADDR+3
+
+# push the operand lines from the cache if they exist.
+ cpushl %dc,(%a1) # push dirty data
+ cpushl %dc,(%a2) # push dirty data
+
+# load the BUSCR values.
+ mov.l &0x80000000,%a1 # assert LOCK* buscr value
+ mov.l &0xa0000000,%a2 # assert LOCKE* buscr value
+ mov.l &0x00000000,%a3 # buscr unlock value
+
+# pre-load the instruction cache for the following algorithm.
+# this will minimize the number of cycles that LOCK* will be asserted.
+ bra.b CASL2_ENTER # start pre-loading icache
+
+#
+# D0 = dst operand <-
+# D1 = xxxxxxxx
+# D2 = update[31:24] operand
+# D3 = update[23:8] operand
+# D4 = compare[31:0] operand
+# D5 = update[7:0] operand
+# D6 = old SFC/DFC
+# D7 = old SR
+# A0 = ADDR
+# A1 = bus LOCK* value
+# A2 = bus LOCKE* value
+# A3 = bus unlock value
+# A4 = xxxxxxxx
+# A5 = xxxxxxxx
+#
+ align 0x10
+CASL2_START:
+ movc %a1,%buscr # assert LOCK*
+ movs.l (%a0),%d0 # fetch Dest[31:0]
+ cmp.l %d0,%d4 # Dest - Compare
+ bne.b CASL2_NOUPDATE
+ bra.b CASL2_UPDATE
+CASL2_ENTER:
+ bra.b ~+16
+
+CASL2_UPDATE:
+ movs.b %d2,(%a0)+ # Update[31:24] -> DEST
+ movs.w %d3,(%a0)+ # Update[23:8] -> DEST+0x1
+ movc %a2,%buscr # assert LOCKE*
+ bra.b CASL2_UPDATE2
+ bra.b ~+16
+
+CASL2_UPDATE2:
+ movs.b %d5,(%a0) # Update[7:0] -> DEST+0x3
+ movc %a3,%buscr # unlock the bus
+ bra.w casl_update_done
+ nop
+ bra.b ~+16
+
+CASL2_NOUPDATE:
+ rol.l &0x8,%d0 # get Dest[31:24]
+ movs.b %d0,(%a0)+ # Dest[31:24] -> DEST
+ swap %d0 # get Dest[23:8]
+ movs.w %d0,(%a0)+ # Dest[23:8] -> DEST+0x1
+ bra.b CASL2_NOUPDATE2
+ bra.b ~+16
+
+CASL2_NOUPDATE2:
+ rol.l &0x8,%d0 # get Dest[7:0]
+ movc %a2,%buscr # assert LOCKE*
+ movs.b %d0,(%a0) # Dest[7:0] -> DEST+0x3
+ bra.b CASL2_NOUPDATE3
+ nop
+ bra.b ~+16
+
+CASL2_NOUPDATE3:
+ movc %a3,%buscr # unlock the bus
+ bra.w casl_noupdate_done
+ nop
+ nop
+ nop
+ bra.b ~+16
+
+CASL2_FILLER:
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ bra.b CASL2_START
+
+####
+####
+# end label used by _isp_cas_inrange()
+ global _CASHI
+_CASHI:
diff --git a/arch/m68k/ifpsp060/src/itest.S b/arch/m68k/ifpsp060/src/itest.S
new file mode 100644
index 00000000000..ba4a30cbcbe
--- /dev/null
+++ b/arch/m68k/ifpsp060/src/itest.S
@@ -0,0 +1,6386 @@
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+MOTOROLA MICROPROCESSOR & MEMORY TECHNOLOGY GROUP
+M68000 Hi-Performance Microprocessor Division
+M68060 Software Package
+Production Release P1.00 -- October 10, 1994
+
+M68060 Software Package Copyright © 1993, 1994 Motorola Inc. All rights reserved.
+
+THE SOFTWARE is provided on an "AS IS" basis and without warranty.
+To the maximum extent permitted by applicable law,
+MOTOROLA DISCLAIMS ALL WARRANTIES WHETHER EXPRESS OR IMPLIED,
+INCLUDING IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
+and any warranty against infringement with regard to the SOFTWARE
+(INCLUDING ANY MODIFIED VERSIONS THEREOF) and any accompanying written materials.
+
+To the maximum extent permitted by applicable law,
+IN NO EVENT SHALL MOTOROLA BE LIABLE FOR ANY DAMAGES WHATSOEVER
+(INCLUDING WITHOUT LIMITATION, DAMAGES FOR LOSS OF BUSINESS PROFITS,
+BUSINESS INTERRUPTION, LOSS OF BUSINESS INFORMATION, OR OTHER PECUNIARY LOSS)
+ARISING OF THE USE OR INABILITY TO USE THE SOFTWARE.
+Motorola assumes no responsibility for the maintenance and support of the SOFTWARE.
+
+You are hereby granted a copyright license to use, modify, and distribute the SOFTWARE
+so long as this entire notice is retained without alteration in any modified and/or
+redistributed versions, and that such modified versions are clearly identified as such.
+No licenses are granted by implication, estoppel or otherwise under any patents
+or trademarks of Motorola, Inc.
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+#############################################
+set SREGS, -64
+set IREGS, -128
+set SCCR, -130
+set ICCR, -132
+set TESTCTR, -136
+set EAMEM, -140
+set EASTORE, -144
+set DATA, -160
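+
+# NOTE: a C sketch of the %a6-relative frame these offsets describe
+# (160 bytes allocated by "link %a6,&-160" below); the struct and field
+# names are illustrative only:
+#
+#	#include <stdint.h>
+#
+#	struct frame_sketch {		/* offset from %a6:        */
+#		uint8_t  data[16];	/* DATA    = -160          */
+#		uint32_t eastore;	/* EASTORE = -144          */
+#		uint32_t eamem;		/* EAMEM   = -140          */
+#		uint32_t testctr;	/* TESTCTR = -136          */
+#		uint16_t iccr;		/* ICCR    = -132          */
+#		uint16_t sccr;		/* SCCR    = -130          */
+#		uint32_t iregs[16];	/* IREGS   = -128 (d0-a7)  */
+#		uint32_t sregs[16];	/* SREGS   = -64  (d0-a7)  */
+#	};				/* ends at the frame pointer */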
+
+#############################################
+TESTTOP:
+ bra.l _060TESTS_
+
+start_str:
+ string "Testing 68060 ISP started:\n"
+
+pass_str:
+ string "passed\n"
+fail_str:
+ string " failed\n"
+
+ align 0x4
+chk_test:
+ tst.l %d0
+ bne.b test_fail
+test_pass:
+ pea pass_str(%pc)
+ bsr.l _print_str
+ addq.l &0x4,%sp
+ rts
+test_fail:
+ mov.l %d1,-(%sp)
+ bsr.l _print_num
+ addq.l &0x4,%sp
+
+ pea fail_str(%pc)
+ bsr.l _print_str
+ addq.l &0x4,%sp
+ rts
+
+#############################################
+_060TESTS_:
+ link %a6,&-160
+
+ movm.l &0x3f3c,-(%sp)
+
+ pea start_str(%pc)
+ bsr.l _print_str
+ addq.l &0x4,%sp
+
+### mul
+ clr.l TESTCTR(%a6)
+ pea mulul_str(%pc)
+ bsr.l _print_str
+ addq.l &0x4,%sp
+
+ bsr.l mulul_0
+
+ bsr.l chk_test
+
+### div
+ clr.l TESTCTR(%a6)
+ pea divul_str(%pc)
+ bsr.l _print_str
+ addq.l &0x4,%sp
+
+ bsr.l divul_0
+
+ bsr.l chk_test
+
+### cmp2
+ clr.l TESTCTR(%a6)
+ pea cmp2_str(%pc)
+ bsr.l _print_str
+ addq.l &0x4,%sp
+
+ bsr.l cmp2_1
+
+ bsr.l chk_test
+
+### movp
+ clr.l TESTCTR(%a6)
+ pea movp_str(%pc)
+ bsr.l _print_str
+ addq.l &0x4,%sp
+
+ bsr.l movp_0
+
+ bsr.l chk_test
+
+### ea
+ clr.l TESTCTR(%a6)
+ pea ea_str(%pc)
+ bsr.l _print_str
+ addq.l &0x4,%sp
+
+ mov.l &0x2,EAMEM(%a6)
+ bsr.l ea_0
+
+ bsr.l chk_test
+
+### cas
+ clr.l TESTCTR(%a6)
+ pea cas_str(%pc)
+ bsr.l _print_str
+ addq.l &0x4,%sp
+
+ bsr.l cas0
+
+ bsr.l chk_test
+
+### cas2
+ clr.l TESTCTR(%a6)
+ pea cas2_str(%pc)
+ bsr.l _print_str
+ addq.l &0x4,%sp
+
+ bsr.l cas20
+
+ bsr.l chk_test
+
+###
+ movm.l (%sp)+,&0x3cfc
+
+ unlk %a6
+ rts
+
+#############################################
+#############################################
+
+mulul_str:
+ string "\t64-bit multiply..."
+
+ align 0x4
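+
+# NOTE: a C sketch of the 64-bit multiply form exercised below;
+# "mulu.l %d1,%d2:%d3" leaves the high longword of %d1*%d3 in %d2 and
+# the low longword in %d3 (muls.l is the signed variant). Names are
+# illustrative only:
+#
+#	#include <stdint.h>
+#
+#	static void mulu_l_sketch(uint32_t src, uint32_t *dh, uint32_t *dl)
+#	{
+#		uint64_t prod = (uint64_t)src * (uint64_t)*dl;
+#		*dh = (uint32_t)(prod >> 32);	/* high 32 bits -> %d2 */
+#		*dl = (uint32_t)prod;		/* low 32 bits  -> %d3 */
+#	}
+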
+mulul_0:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ clr.l %d1
+ mov.l &0x99999999,%d2
+ mov.l &0x88888888,%d3
+
+ mov.w &0x0004,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0x7fff,IREGS(%a6)
+
+ mulu.l %d1,%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0x7fff,SREGS(%a6)
+ clr.l IREGS+0x8(%a6)
+ clr.l IREGS+0xc(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+mulul_1:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ mov.l &0x77777777,%d1
+ mov.l &0x99999999,%d2
+ mov.l &0x00000000,%d3
+
+ mov.w &0x0004,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0x7fff,IREGS(%a6)
+
+ mulu.l %d1,%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0x7fff,SREGS(%a6)
+ clr.l IREGS+0x8(%a6)
+ clr.l IREGS+0xc(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+mulul_2:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ mov.l &0x00000010,%d1
+ mov.l &0x66666666,%d2
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0x7fff,IREGS(%a6)
+
+ mulu.l %d1,%d2:%d2
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0x7fff,SREGS(%a6)
+ mov.l &0x00000006,IREGS+0x8(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+mulul_3:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ mov.l &0x55555555,%d1
+ mov.l &0x00000000,%d2
+ mov.l &0x00000003,%d3
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0x7fff,IREGS(%a6)
+
+ mulu.l %d1,%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0x7fff,SREGS(%a6)
+ mov.l &0x00000000,IREGS+0x8(%a6)
+ mov.l &0xffffffff,IREGS+0xc(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+mulul_4:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ mov.l &0x40000000,%d1
+ mov.l &0x00000000,%d2
+ mov.l &0x00000004,%d3
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0x7fff,IREGS(%a6)
+
+ mulu.l %d1,%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0x7fff,SREGS(%a6)
+ mov.l &0x00000001,IREGS+0x8(%a6)
+ mov.l &0x00000000,IREGS+0xc(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+mulul_5:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ mov.l &0xffffffff,%d1
+ mov.l &0x00000000,%d2
+ mov.l &0xffffffff,%d3
+
+ mov.w &0x0008,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0x7fff,IREGS(%a6)
+
+ mulu.l %d1,%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0x7fff,SREGS(%a6)
+ mov.l &0xfffffffe,IREGS+0x8(%a6)
+ mov.l &0x00000001,IREGS+0xc(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+mulul_6:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ mov.l &0x80000000,%d1
+ mov.l &0x00000000,%d2
+ mov.l &0xffffffff,%d3
+
+	mov.w		&0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0x7fff,IREGS(%a6)
+
+ muls.l %d1,%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0x7fff,SREGS(%a6)
+ mov.l &0x00000000,IREGS+0x8(%a6)
+ mov.l &0x80000000,IREGS+0xc(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+mulul_7:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ mov.l &0x80000000,%d1
+ mov.l &0x00000000,%d2
+ mov.l &0x00000001,%d3
+
+ mov.w &0x0008,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0x7fff,IREGS(%a6)
+
+ muls.l %d1,%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0x7fff,SREGS(%a6)
+ mov.l &0xffffffff,IREGS+0x8(%a6)
+ mov.l &0x80000000,IREGS+0xc(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+mulul_8:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ mov.l &0x00000001,%d1
+ mov.l &0x00000000,%d2
+ mov.l &0x80000000,%d3
+
+ mov.w &0x0008,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0x7fff,IREGS(%a6)
+
+ muls.l %d1,%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0x7fff,SREGS(%a6)
+ mov.l &0xffffffff,IREGS+0x8(%a6)
+ mov.l &0x80000000,IREGS+0xc(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ mov.l TESTCTR(%a6),%d1
+ clr.l %d0
+ rts
+
+#############################################
+
+movp_str:
+ string "\tmovep..."
+
+ align 0x4
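+
+# NOTE: movep transfers a register to or from every other byte of
+# memory, most significant byte first; a C sketch of the word form
+# verified below (names illustrative only):
+#
+#	#include <stdint.h>
+#
+#	static void movep_w_to_mem(uint16_t d, uint8_t *p)
+#	{
+#		p[0] = d >> 8;	/* bits [15:8] -> offset 0x0 */
+#		p[2] = d;	/* bits [7:0]  -> offset 0x2 */
+#	}
+#
+#	static uint16_t movep_w_from_mem(const uint8_t *p)
+#	{
+#		return ((uint16_t)p[0] << 8) | p[2];
+#	}
+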
+###############################
+# movep.w %d0,(0x0,%a0) #
+###############################
+movp_0:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ lea DATA(%a6),%a0
+ mov.w &0xaaaa,%d0
+ clr.b 0x0(%a0)
+ clr.b 0x2(%a0)
+
+ mov.w &0x001f,ICCR(%a6)
+ mov.w &0x001f,%cc
+ movm.l &0x7fff,IREGS(%a6)
+
+ movp.w %d0,(0x0,%a0)
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0x7fff,SREGS(%a6)
+
+ mov.b 0x2(%a0),%d1
+ lsl.w &0x8,%d1
+ mov.b 0x0(%a0),%d1
+
+ cmp.w %d0,%d1
+ bne.l error
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+###############################
+# movep.w %d0,(0x0,%a0) #
+###############################
+movp_1:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ lea DATA+0x4(%a6),%a0
+ mov.w &0xaaaa,%d0
+ clr.l -0x4(%a0)
+ clr.l (%a0)
+ clr.l 0x4(%a0)
+
+ mov.w &0x001f,ICCR(%a6)
+ mov.w &0x001f,%cc
+ movm.l &0x7fff,IREGS(%a6)
+
+ movp.w %d0,(0x0,%a0)
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0x7fff,SREGS(%a6)
+
+ tst.l -0x4(%a0)
+ bne.l error
+ tst.l 0x4(%a0)
+ bne.l error
+ cmpi.l (%a0),&0xaa00aa00
+ bne.l error
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+#####################################################
+# movep.w %d0,(0x0,%a0) #
+# - this test has %cc initially equal to zero #
+#####################################################
+movp_2:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ lea DATA(%a6),%a0
+ mov.w &0xaaaa,%d0
+ clr.b 0x0(%a0)
+ clr.b 0x2(%a0)
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0x7fff,IREGS(%a6)
+
+ movp.w %d0,(0x0,%a0)
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0x7fff,SREGS(%a6)
+
+ mov.b 0x2(%a0),%d1
+ lsl.w &0x8,%d1
+ mov.b 0x0(%a0),%d1
+
+ cmp.w %d0,%d1
+ bne.l error
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+###############################
+# movep.w (0x0,%a0),%d0 #
+###############################
+movp_3:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ lea DATA(%a6),%a0
+ mov.b &0xaa,0x0(%a0)
+ mov.b &0xaa,0x2(%a0)
+
+ mov.w &0x001f,ICCR(%a6)
+ mov.w &0x001f,%cc
+ movm.l &0x7fff,IREGS(%a6)
+
+ movp.w (0x0,%a0),%d0
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0x7fff,SREGS(%a6)
+ mov.w &0xaaaa,IREGS+0x2(%a6)
+
+ mov.w &0xaaaa,%d1
+
+ cmp.w %d0,%d1
+ bne.l error
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+###############################
+# movep.l %d0,(0x0,%a0) #
+###############################
+movp_4:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ lea DATA(%a6),%a0
+ mov.l &0xaaaaaaaa,%d0
+ clr.b 0x0(%a0)
+ clr.b 0x2(%a0)
+ clr.b 0x4(%a0)
+ clr.b 0x6(%a0)
+
+ mov.w &0x001f,ICCR(%a6)
+ mov.w &0x001f,%cc
+ movm.l &0x7fff,IREGS(%a6)
+
+ movp.l %d0,(0x0,%a0)
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0x7fff,SREGS(%a6)
+
+ mov.b 0x6(%a0),%d1
+ lsl.l &0x8,%d1
+ mov.b 0x4(%a0),%d1
+ lsl.l &0x8,%d1
+ mov.b 0x2(%a0),%d1
+ lsl.l &0x8,%d1
+ mov.b 0x0(%a0),%d1
+
+ cmp.l %d0,%d1
+ bne.l error
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+###############################
+# movep.l %d0,(0x0,%a0) #
+###############################
+movp_5:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ lea DATA+0x4(%a6),%a0
+ mov.l &0xaaaaaaaa,%d0
+ clr.l -0x4(%a0)
+ clr.l (%a0)
+ clr.l 0x4(%a0)
+ clr.l 0x8(%a0)
+
+ mov.w &0x001f,ICCR(%a6)
+ mov.w &0x001f,%cc
+ movm.l &0x7fff,IREGS(%a6)
+
+ movp.l %d0,(0x0,%a0)
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0x7fff,SREGS(%a6)
+
+ tst.l -0x4(%a0)
+ bne.l error
+ tst.l 0x8(%a0)
+ bne.l error
+ cmpi.l (%a0),&0xaa00aa00
+ bne.l error
+ cmpi.l 0x4(%a0),&0xaa00aa00
+ bne.l error
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+###############################
+# movep.l (0x0,%a0),%d0 #
+###############################
+movp_6:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ lea DATA(%a6),%a0
+ mov.b &0xaa,0x0(%a0)
+ mov.b &0xaa,0x2(%a0)
+ mov.b &0xaa,0x4(%a0)
+ mov.b &0xaa,0x6(%a0)
+
+ mov.w &0x001f,ICCR(%a6)
+ mov.w &0x001f,%cc
+ movm.l &0x7fff,IREGS(%a6)
+
+ movp.l (0x0,%a0),%d0
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0x7fff,SREGS(%a6)
+ mov.l &0xaaaaaaaa,IREGS(%a6)
+
+ mov.l &0xaaaaaaaa,%d1
+
+ cmp.l %d0,%d1
+ bne.l error
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+###############################
+# movep.w %d7,(0x0,%a0) #
+###############################
+movp_7:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ lea DATA(%a6),%a0
+ mov.w &0xaaaa,%d7
+ clr.b 0x0(%a0)
+ clr.b 0x2(%a0)
+
+ mov.w &0x001f,ICCR(%a6)
+ mov.w &0x001f,%cc
+ movm.l &0x7fff,IREGS(%a6)
+
+ movp.w %d7,(0x0,%a0)
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0x7fff,SREGS(%a6)
+
+ mov.b 0x2(%a0),%d1
+ lsl.w &0x8,%d1
+ mov.b 0x0(%a0),%d1
+
+ cmp.w %d7,%d1
+ bne.l error
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+###############################
+# movep.w (0x0,%a0),%d7 #
+###############################
+movp_8:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ lea DATA(%a6),%a0
+ mov.b &0xaa,0x0(%a0)
+ mov.b &0xaa,0x2(%a0)
+
+ mov.w &0x001f,ICCR(%a6)
+ mov.w &0x001f,%cc
+ movm.l &0x7fff,IREGS(%a6)
+
+ movp.w (0x0,%a0),%d7
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0x7fff,SREGS(%a6)
+	mov.w		&0xaaaa,IREGS+0x1c+0x2(%a6)
+
+ mov.w &0xaaaa,%d1
+
+ cmp.w %d7,%d1
+ bne.l error
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+###############################
+# movep.w %d0,(0x0,%a0) #
+###############################
+movp_9:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ lea DATA(%a6),%a0
+ mov.w &0xaaaa,%d0
+ clr.b 0x0(%a0)
+ clr.b 0x2(%a0)
+
+ mov.w &0x001f,ICCR(%a6)
+ mov.w &0x001f,%cc
+ movm.l &0x7fff,IREGS(%a6)
+
+ movp.w %d0,(0x0,%a0)
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0x7fff,SREGS(%a6)
+
+ mov.b 0x2(%a0),%d1
+ lsl.w &0x8,%d1
+ mov.b 0x0(%a0),%d1
+
+ cmp.w %d0,%d1
+ bne.l error
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+###############################
+# movep.w %d0,(0x8,%a0) #
+###############################
+movp_10:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ lea DATA(%a6),%a0
+ mov.w &0xaaaa,%d0
+ clr.b 0x0+0x8(%a0)
+ clr.b 0x2+0x8(%a0)
+
+ mov.w &0x001f,ICCR(%a6)
+ mov.w &0x1f,%cc
+ movm.l &0x7fff,IREGS(%a6)
+
+ movp.w %d0,(0x8,%a0)
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0x7fff,SREGS(%a6)
+
+ mov.b 0x2+0x8(%a0),%d1
+ lsl.w &0x8,%d1
+ mov.b 0x0+0x8(%a0),%d1
+
+ cmp.w %d0,%d1
+ bne.l error
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+###############################
+# movep.w (0x8,%a0),%d0 #
+###############################
+movp_11:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ lea DATA(%a6),%a0
+ mov.b &0xaa,0x0+0x8(%a0)
+ mov.b &0xaa,0x2+0x8(%a0)
+
+ mov.w &0x001f,ICCR(%a6)
+ mov.w &0x1f,%cc
+ movm.l &0x7fff,IREGS(%a6)
+
+ movp.w (0x8,%a0),%d0
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0x7fff,SREGS(%a6)
+ mov.w &0xaaaa,IREGS+0x2(%a6)
+
+ mov.w &0xaaaa,%d1
+
+ cmp.w %d0,%d1
+ bne.l error
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+###############################
+# movep.l %d0,(0x8,%a0) #
+###############################
+movp_12:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ lea DATA(%a6),%a0
+ mov.l &0xaaaaaaaa,%d0
+ clr.b 0x0+0x8(%a0)
+ clr.b 0x2+0x8(%a0)
+ clr.b 0x4+0x8(%a0)
+ clr.b 0x6+0x8(%a0)
+
+ mov.w &0x001f,ICCR(%a6)
+ mov.w &0x1f,%cc
+ movm.l &0x7fff,IREGS(%a6)
+
+ movp.l %d0,(0x8,%a0)
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0x7fff,SREGS(%a6)
+
+ mov.b 0x6+0x8(%a0),%d1
+ lsl.l &0x8,%d1
+ mov.b 0x4+0x8(%a0),%d1
+ lsl.l &0x8,%d1
+ mov.b 0x2+0x8(%a0),%d1
+ lsl.l &0x8,%d1
+ mov.b 0x0+0x8(%a0),%d1
+
+ cmp.l %d0,%d1
+ bne.l error
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+###############################
+# movep.l (0x8,%a0),%d0 #
+###############################
+movp_13:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ lea DATA(%a6),%a0
+ mov.b &0xaa,0x0+0x8(%a0)
+ mov.b &0xaa,0x2+0x8(%a0)
+ mov.b &0xaa,0x4+0x8(%a0)
+ mov.b &0xaa,0x6+0x8(%a0)
+
+ mov.w &0x001f,ICCR(%a6)
+ mov.w &0x1f,%cc
+ movm.l &0x7fff,IREGS(%a6)
+
+ movp.l (0x8,%a0),%d0
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0x7fff,SREGS(%a6)
+ mov.l &0xaaaaaaaa,IREGS(%a6)
+
+ mov.l &0xaaaaaaaa,%d1
+
+ cmp.l %d0,%d1
+ bne.l error
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+################################
+# movep.w %d0,(-0x8,%a0) #
+################################
+movp_14:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ lea DATA+0x8(%a6),%a0
+ mov.w &0xaaaa,%d0
+ clr.b 0x0-0x8(%a0)
+ clr.b 0x2-0x8(%a0)
+
+ mov.w &0x001f,ICCR(%a6)
+ mov.w &0x1f,%cc
+ movm.l &0x7fff,IREGS(%a6)
+
+ movp.w %d0,(-0x8,%a0)
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0x7fff,SREGS(%a6)
+
+ mov.b 0x2-0x8(%a0),%d1
+ lsl.w &0x8,%d1
+ mov.b 0x0-0x8(%a0),%d1
+
+ cmp.w %d0,%d1
+ bne.l error
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+################################
+# movep.w (-0x8,%a0),%d0 #
+################################
+movp_15:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ lea DATA+0x8(%a6),%a0
+ mov.b &0xaa,0x0-0x8(%a0)
+ mov.b &0xaa,0x2-0x8(%a0)
+
+ mov.w &0x001f,ICCR(%a6)
+ mov.w &0x1f,%cc
+ movm.l &0x7fff,IREGS(%a6)
+
+ movp.w (-0x8,%a0),%d0
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0x7fff,SREGS(%a6)
+ mov.w &0xaaaa,IREGS+0x2(%a6)
+
+ mov.w &0xaaaa,%d1
+
+ cmp.w %d0,%d1
+ bne.l error
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+################################
+# movep.l %d0,(-0x8,%a0) #
+################################
+movp_16:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ lea DATA+0x8(%a6),%a0
+ mov.l &0xaaaaaaaa,%d0
+ clr.b 0x0-0x8(%a0)
+ clr.b 0x2-0x8(%a0)
+ clr.b 0x4-0x8(%a0)
+	clr.b		0x6-0x8(%a0)
+
+ mov.w &0x001f,ICCR(%a6)
+ mov.w &0x1f,%cc
+ movm.l &0x7fff,IREGS(%a6)
+
+ movp.l %d0,(-0x8,%a0)
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0x7fff,SREGS(%a6)
+
+ mov.b 0x6-0x8(%a0),%d1
+ lsl.l &0x8,%d1
+ mov.b 0x4-0x8(%a0),%d1
+ lsl.l &0x8,%d1
+ mov.b 0x2-0x8(%a0),%d1
+ lsl.l &0x8,%d1
+ mov.b 0x0-0x8(%a0),%d1
+
+ cmp.l %d0,%d1
+ bne.l error
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+################################
+# movep.l (-0x8,%a0),%d0 #
+################################
+movp_17:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ lea DATA+0x8(%a6),%a0
+ mov.b &0xaa,0x0-0x8(%a0)
+ mov.b &0xaa,0x2-0x8(%a0)
+ mov.b &0xaa,0x4-0x8(%a0)
+	mov.b		&0xaa,0x6-0x8(%a0)
+
+ mov.w &0x001f,ICCR(%a6)
+ mov.w &0x1f,%cc
+ movm.l &0x7fff,IREGS(%a6)
+
+ movp.l (-0x8,%a0),%d0
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0x7fff,SREGS(%a6)
+ mov.l &0xaaaaaaaa,IREGS(%a6)
+
+ mov.l &0xaaaaaaaa,%d1
+
+ cmp.l %d0,%d1
+ bne.l error
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ mov.l TESTCTR(%a6),%d1
+ clr.l %d0
+ rts
+
+###########################################################
+
+divul_str:
+ string "\t64-bit divide..."
+
+ align 0x4
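+
+# NOTE: a C sketch of the 64-bit divide form exercised below;
+# "divu.l %d1,%d2:%d3" divides the 64-bit value %d2:%d3 by %d1,
+# leaving the quotient in %d3 and the remainder in %d2. A zero divisor
+# traps, and a quotient wider than 32 bits sets V and leaves the
+# registers unchanged. Names are illustrative only:
+#
+#	#include <stdint.h>
+#
+#	static int divu_l_sketch(uint32_t src, uint32_t *dr, uint32_t *dq)
+#	{
+#		uint64_t dividend = ((uint64_t)*dr << 32) | *dq;
+#		if (src == 0)
+#			return -1;		/* zero-divide trap */
+#		if (dividend / src > 0xffffffffULL)
+#			return -2;		/* overflow: V set  */
+#		*dq = (uint32_t)(dividend / src);	/* quotient  -> %d3 */
+#		*dr = (uint32_t)(dividend % src);	/* remainder -> %d2 */
+#		return 0;
+#	}
+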
+divul_0:
+ addq.l &0x1,TESTCTR(%a6)
+
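+# (left commented out in the source: %d1 would be cleared to zero here,
+# so the divu.l below would take a zero-divide trap)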
+# movm.l DEF_REGS(%pc),&0x3fff
+
+# clr.l %d1
+# mov.l &0x99999999,%d2
+# mov.l &0x88888888,%d3
+
+# mov.w &0x001e,ICCR(%a6)
+# mov.w &0x001f,%cc
+# movm.l &0x7fff,IREGS(%a6)
+
+# divu.l %d1,%d2:%d3
+
+# mov.w %cc,SCCR(%a6)
+# movm.l &0x7fff,SREGS(%a6)
+
+# bsr.l chkregs
+# tst.b %d0
+# bne.l error
+
+divul_1:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ mov.l &0x00000001,%d1
+ mov.l &0x00000000,%d2
+ mov.l &0x00000000,%d3
+
+ mov.w &0x0014,ICCR(%a6)
+ mov.w &0x001f,%cc
+ movm.l &0x7fff,IREGS(%a6)
+
+ divu.l %d1,%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0x7fff,SREGS(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+divul_2:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ mov.l &0x44444444,%d1
+ mov.l &0x00000000,%d2
+ mov.l &0x55555555,%d3
+
+ mov.w &0x0010,ICCR(%a6)
+ mov.w &0x001f,%cc
+ movm.l &0x7fff,IREGS(%a6)
+
+ divu.l %d1,%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0x7fff,SREGS(%a6)
+ mov.l &0x11111111,IREGS+0x8(%a6)
+ mov.l &0x00000001,IREGS+0xc(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+divul_3:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ mov.l &0x55555555,%d1
+ mov.l &0x00000000,%d2
+ mov.l &0x44444444,%d3
+
+ mov.w &0x0014,ICCR(%a6)
+ mov.w &0x001f,%cc
+ movm.l &0x7fff,IREGS(%a6)
+
+ divu.l %d1,%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0x7fff,SREGS(%a6)
+ mov.l &0x44444444,IREGS+0x8(%a6)
+ mov.l &0x00000000,IREGS+0xc(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+divul_4:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ mov.l &0x11111111,%d1
+ mov.l &0x44444444,%d2
+ mov.l &0x44444444,%d3
+
+ mov.w &0x001e,ICCR(%a6)
+ mov.w &0x001d,%cc
+ movm.l &0x7fff,IREGS(%a6)
+
+ divu.l %d1,%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0x7fff,SREGS(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+divul_5:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ mov.l &0xfffffffe,%d1
+ mov.l &0x00000001,%d2
+ mov.l &0x00000002,%d3
+
+ mov.w &0x001e,ICCR(%a6)
+ mov.w &0x001d,%cc
+ movm.l &0x7fff,IREGS(%a6)
+
+ divs.l %d1,%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0x7fff,SREGS(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+divul_6:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ mov.l &0xfffffffe,%d1
+ mov.l &0x00000001,%d2
+ mov.l &0x00000000,%d3
+
+ mov.w &0x0018,ICCR(%a6)
+ mov.w &0x001d,%cc
+ movm.l &0x7fff,IREGS(%a6)
+
+ divs.l %d1,%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0x7fff,SREGS(%a6)
+ mov.l &0x00000000,IREGS+0x8(%a6)
+ mov.l &0x80000000,IREGS+0xc(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+divul_7:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ mov.l &0x00000002,%d1
+ mov.l &0x00000001,%d2
+ mov.l &0x00000000,%d3
+
+ mov.w &0x001e,ICCR(%a6)
+ mov.w &0x001d,%cc
+ movm.l &0x7fff,IREGS(%a6)
+
+ divs.l %d1,%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0x7fff,SREGS(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+divul_8:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ mov.l &0xffffffff,%d1
+ mov.l &0xfffffffe,%d2
+ mov.l &0xffffffff,%d3
+
+ mov.w &0x0008,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0x7fff,IREGS(%a6)
+
+ divu.l %d1,%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0x7fff,SREGS(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+divul_9:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ mov.l &0xffffffff,%d1
+ mov.l &0xfffffffe,%d2
+ mov.l &0xffffffff,%d3
+
+ mov.w &0x0008,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0x7fff,IREGS(%a6)
+
+ divu.l &0xffffffff,%d2:%d2
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0x7fff,SREGS(%a6)
+ mov.l &0xffffffff,IREGS+0x8(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+divul_10:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ mov.l &0x0000ffff,%d1
+ mov.l &0x00000001,%d2
+ mov.l &0x55555555,%d3
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0x7fff,IREGS(%a6)
+
+ divu.l %d1,%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0x7fff,SREGS(%a6)
+ mov.l &0x0000aaab,IREGS+0x8(%a6)
+ mov.l &0x00015556,IREGS+0xc(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ mov.l TESTCTR(%a6),%d1
+ clr.l %d0
+ rts
+
+###########################################################
+
+cas_str:
+ string "\tcas..."
+
+ align 0x4
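+
+# NOTE: a C sketch of the cas semantics checked below;
+# "cas.w %d1,%d2,(%a0)" compares (%a0) with %d1 and, if equal (Z set),
+# writes %d2 to (%a0); otherwise %d1 is reloaded from (%a0). Names are
+# illustrative only:
+#
+#	#include <stdint.h>
+#	#include <stdbool.h>
+#
+#	static bool cas_w(uint16_t *ea, uint16_t *dc, uint16_t du)
+#	{
+#		if (*ea == *dc) {
+#			*ea = du;	/* success: memory updated  */
+#			return true;	/* Z set                    */
+#		}
+#		*dc = *ea;		/* failure: compare operand */
+#		return false;		/* reloaded from memory     */
+#	}
+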
+cas0:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ lea DATA+0x1(%a6),%a0
+
+ mov.w &0xaaaa,(%a0)
+
+ mov.w &0xaaaa,%d1
+ mov.w &0xbbbb,%d2
+
+ mov.w &0x0014,ICCR(%a6)
+ mov.w &0x0010,%cc
+ movm.l &0x7fff,IREGS(%a6)
+
+ cas.w %d1,%d2,(%a0) # Dc,Du,<ea>
+
+ mov.w %cc,SCCR(%a6)
+ mov.w (%a0),%d3
+ mov.w &0xbbbb,IREGS+0xc+0x2(%a6)
+ movm.l &0x7fff,SREGS(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+cas1:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ lea DATA+0x1(%a6),%a0
+
+ mov.w &0xeeee,(%a0)
+
+	mov.w		&0xaaaa,%d1
+	mov.w		&0xbbbb,%d2
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0x7fff,IREGS(%a6)
+
+ cas.w %d1,%d2,(%a0) # Dc,Du,<ea>
+
+ mov.w %cc,SCCR(%a6)
+ mov.w (%a0),%d3
+ mov.w &0xeeee,IREGS+0x4+0x2(%a6)
+ mov.w &0xeeee,IREGS+0xc+0x2(%a6)
+ movm.l &0x7fff,SREGS(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+cas2:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ lea DATA+0x2(%a6),%a0
+
+ mov.l &0xaaaaaaaa,(%a0)
+
+ mov.l &0xaaaaaaaa,%d1
+ mov.l &0xbbbbbbbb,%d2
+
+ mov.w &0x0004,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0x7fff,IREGS(%a6)
+
+ cas.l %d1,%d2,(%a0) # Dc,Du,<ea>
+
+ mov.w %cc,SCCR(%a6)
+ mov.l (%a0),%d3
+ mov.l &0xbbbbbbbb,IREGS+0xc(%a6)
+ movm.l &0x7fff,SREGS(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+cas3:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ lea DATA+0x2(%a6),%a0
+
+ mov.l &0xeeeeeeee,(%a0)
+
+ mov.l &0xaaaaaaaa,%d1
+ mov.l &0xbbbbbbbb,%d2
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0x7fff,IREGS(%a6)
+
+ cas.l %d1,%d2,(%a0) # Dc,Du,<ea>
+
+ mov.w %cc,SCCR(%a6)
+ mov.l (%a0),%d3
+ mov.l &0xeeeeeeee,IREGS+0x4(%a6)
+ mov.l &0xeeeeeeee,IREGS+0xc(%a6)
+ movm.l &0x7fff,SREGS(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+cas4:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ lea DATA+0x1(%a6),%a0
+
+ mov.l &0xaaaaaaaa,(%a0)
+
+ mov.l &0xaaaaaaaa,%d1
+ mov.l &0xbbbbbbbb,%d2
+
+ mov.w &0x0004,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0x7fff,IREGS(%a6)
+
+ cas.l %d1,%d2,(%a0) # Dc,Du,<ea>
+
+ mov.w %cc,SCCR(%a6)
+ mov.l (%a0),%d3
+ mov.l &0xbbbbbbbb,IREGS+0xc(%a6)
+ movm.l &0x7fff,SREGS(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+cas5:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ lea DATA+0x1(%a6),%a0
+
+ mov.l &0x7fffffff,(%a0)
+
+ mov.l &0x80000000,%d1
+ mov.l &0xbbbbbbbb,%d2
+
+ mov.w &0x001b,ICCR(%a6)
+ mov.w &0x0010,%cc
+ movm.l &0x7fff,IREGS(%a6)
+
+ cas.l %d1,%d2,(%a0) # Dc,Du,<ea>
+
+ mov.w %cc,SCCR(%a6)
+ mov.l (%a0),%d3
+ mov.l &0x7fffffff,IREGS+0x4(%a6)
+ mov.l &0x7fffffff,IREGS+0xc(%a6)
+ movm.l &0x7fff,SREGS(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ mov.l TESTCTR(%a6),%d1
+ clr.l %d0
+ rts
+
+###########################################################
+
+cas2_str:
+ string "\tcas2..."
+
+ align 0x4
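+
+# NOTE: a C sketch of the dual compare-and-swap checked below; cas2
+# updates both memory operands only if both compares succeed, and on
+# failure reloads both compare registers from memory. Names are
+# illustrative only:
+#
+#	#include <stdint.h>
+#	#include <stdbool.h>
+#
+#	static bool cas2_l(uint32_t *ea1, uint32_t *ea2,
+#	                   uint32_t *dc1, uint32_t *dc2,
+#	                   uint32_t du1, uint32_t du2)
+#	{
+#		if (*ea1 == *dc1 && *ea2 == *dc2) {
+#			*ea1 = du1;	/* both updates or neither */
+#			*ea2 = du2;
+#			return true;	/* Z set */
+#		}
+#		*dc1 = *ea1;		/* reload both compare operands */
+#		*dc2 = *ea2;
+#		return false;
+#	}
+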
+cas20:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ lea DATA+0x0(%a6),%a0
+ lea DATA+0x4(%a6),%a1
+
+ mov.l &0xaaaaaaaa,(%a0)
+ mov.l &0xbbbbbbbb,(%a1)
+
+ mov.l &0xaaaaaaaa,%d1
+ mov.l &0xbbbbbbbb,%d2
+ mov.l &0xcccccccc,%d3
+ mov.l &0xdddddddd,%d4
+
+ mov.w &0x0014,ICCR(%a6)
+ mov.w &0x0010,%cc
+ movm.l &0x7fff,IREGS(%a6)
+
+ cas2.l %d1:%d2,%d3:%d4,(%a0):(%a1) # Dc1:Dc2,Du1:Du2,(Rn1):(Rn2)
+
+ mov.w %cc,SCCR(%a6)
+ mov.l (%a0),%d5
+ mov.l (%a1),%d6
+ mov.l &0xcccccccc,IREGS+0x14(%a6)
+ mov.l &0xdddddddd,IREGS+0x18(%a6)
+ movm.l &0x7fff,SREGS(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+cas21:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ lea DATA+0x1(%a6),%a0
+ lea DATA+0x5(%a6),%a1
+
+ mov.l &0xaaaaaaaa,(%a0)
+ mov.l &0xbbbbbbbb,(%a1)
+
+ mov.l &0xaaaaaaaa,%d1
+ mov.l &0xbbbbbbbb,%d2
+ mov.l &0xcccccccc,%d3
+ mov.l &0xdddddddd,%d4
+
+ mov.w &0x0014,ICCR(%a6)
+ mov.w &0x0010,%cc
+ movm.l &0x7fff,IREGS(%a6)
+
+ cas2.l %d1:%d2,%d3:%d4,(%a0):(%a1) # Dc1:Dc2,Du1:Du2,(Rn1):(Rn2)
+
+ mov.w %cc,SCCR(%a6)
+ mov.l (%a0),%d5
+ mov.l (%a1),%d6
+ mov.l &0xcccccccc,IREGS+0x14(%a6)
+ mov.l &0xdddddddd,IREGS+0x18(%a6)
+ movm.l &0x7fff,SREGS(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+cas22:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ lea DATA+0x2(%a6),%a0
+ lea DATA+0x6(%a6),%a1
+
+ mov.l &0xaaaaaaaa,(%a0)
+ mov.l &0xbbbbbbbb,(%a1)
+
+ mov.l &0xaaaaaaaa,%d1
+ mov.l &0xbbbbbbbb,%d2
+ mov.l &0xcccccccc,%d3
+ mov.l &0xdddddddd,%d4
+
+ mov.w &0x0014,ICCR(%a6)
+ mov.w &0x0010,%cc
+ movm.l &0x7fff,IREGS(%a6)
+
+ cas2.l %d1:%d2,%d3:%d4,(%a0):(%a1) # Dc1:Dc2,Du1:Du2,(Rn1):(Rn2)
+
+ mov.w %cc,SCCR(%a6)
+ mov.l (%a0),%d5
+ mov.l (%a1),%d6
+ mov.l &0xcccccccc,IREGS+0x14(%a6)
+ mov.l &0xdddddddd,IREGS+0x18(%a6)
+ movm.l &0x7fff,SREGS(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+cas23:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ lea DATA+0x0(%a6),%a0
+ lea DATA+0x4(%a6),%a1
+
+ mov.l &0xeeeeeeee,(%a0)
+ mov.l &0xbbbbbbbb,(%a1)
+
+ mov.l &0xaaaaaaaa,%d1
+ mov.l &0xbbbbbbbb,%d2
+ mov.l &0xcccccccc,%d3
+ mov.l &0xdddddddd,%d4
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0x7fff,IREGS(%a6)
+
+ cas2.l %d1:%d2,%d3:%d4,(%a0):(%a1) # Dc1:Dc2,Du1:Du2,(Rn1):(Rn2)
+
+ mov.w %cc,SCCR(%a6)
+ mov.l (%a0),%d5
+ mov.l (%a1),%d6
+ mov.l &0xeeeeeeee,IREGS+0x4(%a6)
+ mov.l &0xbbbbbbbb,IREGS+0x8(%a6)
+ mov.l &0xeeeeeeee,IREGS+0x14(%a6)
+ mov.l &0xbbbbbbbb,IREGS+0x18(%a6)
+ movm.l &0x7fff,SREGS(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+cas24:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ lea DATA+0x1(%a6),%a0
+ lea DATA+0x5(%a6),%a1
+
+ mov.l &0xeeeeeeee,(%a0)
+ mov.l &0xbbbbbbbb,(%a1)
+
+ mov.l &0xaaaaaaaa,%d1
+ mov.l &0xbbbbbbbb,%d2
+ mov.l &0xcccccccc,%d3
+ mov.l &0xdddddddd,%d4
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0x7fff,IREGS(%a6)
+
+ cas2.l %d1:%d2,%d3:%d4,(%a0):(%a1) # Dc1:Dc2,Du1:Du2,(Rn1):(Rn2)
+
+ mov.w %cc,SCCR(%a6)
+ mov.l (%a0),%d5
+ mov.l (%a1),%d6
+ mov.l &0xeeeeeeee,IREGS+0x4(%a6)
+ mov.l &0xbbbbbbbb,IREGS+0x8(%a6)
+ mov.l &0xeeeeeeee,IREGS+0x14(%a6)
+ mov.l &0xbbbbbbbb,IREGS+0x18(%a6)
+ movm.l &0x7fff,SREGS(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+cas25:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ lea DATA+0x2(%a6),%a0
+ lea DATA+0x6(%a6),%a1
+
+ mov.l &0xeeeeeeee,(%a0)
+ mov.l &0xbbbbbbbb,(%a1)
+
+ mov.l &0xaaaaaaaa,%d1
+ mov.l &0xbbbbbbbb,%d2
+ mov.l &0xcccccccc,%d3
+ mov.l &0xdddddddd,%d4
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0x7fff,IREGS(%a6)
+
+ cas2.l %d1:%d2,%d3:%d4,(%a0):(%a1) # Dc1:Dc2,Du1:Du2,(Rn1):(Rn2)
+
+ mov.w %cc,SCCR(%a6)
+ mov.l (%a0),%d5
+ mov.l (%a1),%d6
+ mov.l &0xeeeeeeee,IREGS+0x4(%a6)
+ mov.l &0xbbbbbbbb,IREGS+0x8(%a6)
+ mov.l &0xeeeeeeee,IREGS+0x14(%a6)
+ mov.l &0xbbbbbbbb,IREGS+0x18(%a6)
+ movm.l &0x7fff,SREGS(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+cas26:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ lea DATA+0x0(%a6),%a0
+ lea DATA+0x4(%a6),%a1
+
+ mov.l &0xaaaaaaaa,(%a0)
+ mov.l &0xeeeeeeee,(%a1)
+
+ mov.l &0xaaaaaaaa,%d1
+ mov.l &0xbbbbbbbb,%d2
+ mov.l &0xcccccccc,%d3
+ mov.l &0xdddddddd,%d4
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0x7fff,IREGS(%a6)
+
+ cas2.l %d1:%d2,%d3:%d4,(%a0):(%a1) # Dc1:Dc2,Du1:Du2,(Rn1):(Rn2)
+
+ mov.w %cc,SCCR(%a6)
+ mov.l (%a0),%d5
+ mov.l (%a1),%d6
+ mov.l &0xaaaaaaaa,IREGS+0x4(%a6)
+ mov.l &0xeeeeeeee,IREGS+0x8(%a6)
+ mov.l &0xaaaaaaaa,IREGS+0x14(%a6)
+ mov.l &0xeeeeeeee,IREGS+0x18(%a6)
+ movm.l &0x7fff,SREGS(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+cas27:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ lea DATA+0x1(%a6),%a0
+ lea DATA+0x5(%a6),%a1
+
+ mov.l &0xaaaaaaaa,(%a0)
+ mov.l &0xeeeeeeee,(%a1)
+
+ mov.l &0xaaaaaaaa,%d1
+ mov.l &0xbbbbbbbb,%d2
+ mov.l &0xcccccccc,%d3
+ mov.l &0xdddddddd,%d4
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0x7fff,IREGS(%a6)
+
+ cas2.l %d1:%d2,%d3:%d4,(%a0):(%a1) # Dc1:Dc2,Du1:Du2,(Rn1):(Rn2)
+
+ mov.w %cc,SCCR(%a6)
+ mov.l (%a0),%d5
+ mov.l (%a1),%d6
+ mov.l &0xaaaaaaaa,IREGS+0x4(%a6)
+ mov.l &0xeeeeeeee,IREGS+0x8(%a6)
+ mov.l &0xaaaaaaaa,IREGS+0x14(%a6)
+ mov.l &0xeeeeeeee,IREGS+0x18(%a6)
+ movm.l &0x7fff,SREGS(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+cas28:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ lea DATA+0x2(%a6),%a0
+ lea DATA+0x6(%a6),%a1
+
+ mov.l &0xaaaaaaaa,(%a0)
+ mov.l &0x7fffffff,(%a1)
+
+ mov.l &0xaaaaaaaa,%d1
+ mov.l &0x80000000,%d2
+ mov.l &0xcccccccc,%d3
+ mov.l &0xdddddddd,%d4
+
+ mov.w &0x000b,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0x7fff,IREGS(%a6)
+
+ cas2.l %d1:%d2,%d3:%d4,(%a0):(%a1) # Dc1:Dc2,Du1:Du2,(Rn1):(Rn2)
+
+ mov.w %cc,SCCR(%a6)
+ mov.l (%a0),%d5
+ mov.l (%a1),%d6
+ mov.l &0xaaaaaaaa,IREGS+0x4(%a6)
+ mov.l &0x7fffffff,IREGS+0x8(%a6)
+ mov.l &0xaaaaaaaa,IREGS+0x14(%a6)
+ mov.l &0x7fffffff,IREGS+0x18(%a6)
+ movm.l &0x7fff,SREGS(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+##################################
+cas29:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ lea DATA+0x0(%a6),%a0
+ lea DATA+0x4(%a6),%a1
+
+ mov.w &0xaaaa,(%a0)
+ mov.w &0xbbbb,(%a1)
+
+ mov.w &0xaaaa,%d1
+ mov.w &0xbbbb,%d2
+ mov.w &0xcccc,%d3
+ mov.w &0xdddd,%d4
+
+ mov.w &0x0014,ICCR(%a6)
+ mov.w &0x0010,%cc
+ movm.l &0x7fff,IREGS(%a6)
+
+ cas2.w %d1:%d2,%d3:%d4,(%a0):(%a1) # Dc1:Dc2,Du1:Du2,(Rn1):(Rn2)
+
+ mov.w %cc,SCCR(%a6)
+ mov.w (%a0),%d5
+ mov.w (%a1),%d6
+ mov.w &0xcccc,IREGS+0x14+0x2(%a6)
+ mov.w &0xdddd,IREGS+0x18+0x2(%a6)
+ movm.l &0x7fff,SREGS(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+cas210:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ lea DATA+0x1(%a6),%a0
+ lea DATA+0x5(%a6),%a1
+
+ mov.w &0xaaaa,(%a0)
+ mov.w &0xbbbb,(%a1)
+
+ mov.w &0xaaaa,%d1
+ mov.w &0xbbbb,%d2
+ mov.w &0xcccc,%d3
+ mov.w &0xdddd,%d4
+
+ mov.w &0x0004,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0x7fff,IREGS(%a6)
+
+ cas2.w %d1:%d2,%d3:%d4,(%a0):(%a1) # Dc1:Dc2,Du1:Du2,(Rn1):(Rn2)
+
+ mov.w %cc,SCCR(%a6)
+ mov.w (%a0),%d5
+ mov.w (%a1),%d6
+ mov.w &0xcccc,IREGS+0x14+0x2(%a6)
+ mov.w &0xdddd,IREGS+0x18+0x2(%a6)
+ movm.l &0x7fff,SREGS(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+cas211:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ lea DATA+0x0(%a6),%a0
+ lea DATA+0x4(%a6),%a1
+
+ mov.w &0xeeee,(%a0)
+ mov.w &0xbbbb,(%a1)
+
+ mov.w &0xaaaa,%d1
+ mov.w &0xbbbb,%d2
+ mov.w &0xcccc,%d3
+ mov.w &0xdddd,%d4
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0x7fff,IREGS(%a6)
+
+ cas2.w %d1:%d2,%d3:%d4,(%a0):(%a1) # Dc1:Dc2,Du1:Du2,(Rn1):(Rn2)
+
+ mov.w %cc,SCCR(%a6)
+ mov.w (%a0),%d5
+ mov.w (%a1),%d6
+ mov.w &0xeeee,IREGS+0x4+0x2(%a6)
+ mov.w &0xbbbb,IREGS+0x8+0x2(%a6)
+ mov.w &0xeeee,IREGS+0x14+0x2(%a6)
+ mov.w &0xbbbb,IREGS+0x18+0x2(%a6)
+ movm.l &0x7fff,SREGS(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+cas212:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ lea DATA+0x1(%a6),%a0
+ lea DATA+0x5(%a6),%a1
+
+ mov.w &0xeeee,(%a0)
+ mov.w &0xbbbb,(%a1)
+
+ mov.w &0xaaaa,%d1
+ mov.w &0xbbbb,%d2
+ mov.w &0xcccc,%d3
+ mov.w &0xdddd,%d4
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0x7fff,IREGS(%a6)
+
+ cas2.w %d1:%d2,%d3:%d4,(%a0):(%a1) # Dc1:Dc2,Du1:Du2,(Rn1):(Rn2)
+
+ mov.w %cc,SCCR(%a6)
+ mov.w (%a0),%d5
+ mov.w (%a1),%d6
+ mov.w &0xeeee,IREGS+0x4+0x2(%a6)
+ mov.w &0xbbbb,IREGS+0x8+0x2(%a6)
+ mov.w &0xeeee,IREGS+0x14+0x2(%a6)
+ mov.w &0xbbbb,IREGS+0x18+0x2(%a6)
+ movm.l &0x7fff,SREGS(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+cas213:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ lea DATA+0x0(%a6),%a0
+ lea DATA+0x4(%a6),%a1
+
+ mov.w &0xaaaa,(%a0)
+ mov.w &0xeeee,(%a1)
+
+ mov.w &0xaaaa,%d1
+ mov.w &0xbbbb,%d2
+ mov.w &0xcccc,%d3
+ mov.w &0xdddd,%d4
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0x7fff,IREGS(%a6)
+
+ cas2.w %d1:%d2,%d3:%d4,(%a0):(%a1) # Dc1:Dc2,Du1:Du2,(Rn1):(Rn2)
+
+ mov.w %cc,SCCR(%a6)
+ mov.w (%a0),%d5
+ mov.w (%a1),%d6
+ mov.w &0xaaaa,IREGS+0x4+0x2(%a6)
+ mov.w &0xeeee,IREGS+0x8+0x2(%a6)
+ mov.w &0xaaaa,IREGS+0x14+0x2(%a6)
+ mov.w &0xeeee,IREGS+0x18+0x2(%a6)
+ movm.l &0x7fff,SREGS(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+cas214:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ lea DATA+0x1(%a6),%a0
+ lea DATA+0x5(%a6),%a1
+
+ mov.w &0xaaaa,(%a0)
+ mov.w &0x7fff,(%a1)
+
+ mov.w &0xaaaa,%d1
+ mov.w &0x8000,%d2
+ mov.w &0xcccc,%d3
+ mov.w &0xdddd,%d4
+
+ mov.w &0x001b,ICCR(%a6)
+ mov.w &0x0010,%cc
+ movm.l &0x7fff,IREGS(%a6)
+
+ cas2.w %d1:%d2,%d3:%d4,(%a0):(%a1) # Dc1:Dc2,Du1:Du2,(Rn1):(Rn2)
+
+ mov.w %cc,SCCR(%a6)
+ mov.w (%a0),%d5
+ mov.w (%a1),%d6
+ mov.w &0xaaaa,IREGS+0x4+0x2(%a6)
+ mov.w &0x7fff,IREGS+0x8+0x2(%a6)
+ mov.w &0xaaaa,IREGS+0x14+0x2(%a6)
+ mov.w &0x7fff,IREGS+0x18+0x2(%a6)
+ movm.l &0x7fff,SREGS(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ mov.l TESTCTR(%a6),%d1
+ clr.l %d0
+ rts
+
+###########################################################
+
+cmp2_str:
+ string "\tcmp2,chk2..."
+
+ align 0x4
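+
+# NOTE: a C sketch of the bounds check performed by cmp2/chk2 (byte
+# form). The bounds pair lives at the effective address, lower bound
+# first; C is set when the value is outside the bounds, Z when it
+# equals either bound, and chk2 additionally traps when out of bounds.
+# The sketch infers signed vs. unsigned bounds from their ordering,
+# which is consistent with the test vectors below; names illustrative:
+#
+#	#include <stdint.h>
+#
+#	/* returns the C flag; *z gets the Z flag */
+#	static int cmp2_b(uint8_t v, const uint8_t *b, int *z)
+#	{
+#		*z = (v == b[0] || v == b[1]);
+#		if (b[0] <= b[1])			/* unsigned bounds */
+#			return (v < b[0] || v > b[1]);
+#		return ((int8_t)v < (int8_t)b[0] ||	/* signed bounds   */
+#			(int8_t)v > (int8_t)b[1]);
+#	}
+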
+# unsigned - small,small
+cmp2_1:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ mov.w &0x2040,DATA(%a6)
+ mov.l &0x11111120,%d1
+
+ mov.w &0x0004,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0x7fff,IREGS(%a6)
+
+ cmp2.b %d1,DATA(%a6)
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0x7fff,SREGS(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+cmp2_2:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ mov.w &0x2040,DATA(%a6)
+ mov.l &0x00000040,%a1
+
+ mov.w &0x0004,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0x7fff,IREGS(%a6)
+
+ cmp2.b %a1,DATA(%a6)
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0x7fff,SREGS(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+cmp2_3:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ mov.w &0x2040,DATA(%a6)
+ mov.l &0x11111130,%d1
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0x7fff,IREGS(%a6)
+
+ chk2.b DATA(%a6),%d1
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0x7fff,SREGS(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+cmp2_4:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ mov.w &0x2040,DATA(%a6)
+ mov.l &0x00000010,%a1
+
+ mov.w &0x0001,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0x7fff,IREGS(%a6)
+
+ cmp2.b %a1,DATA(%a6)
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0x7fff,SREGS(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+cmp2_5:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ mov.w &0x2040,DATA(%a6)
+ mov.l &0x11111150,%d1
+
+ mov.w &0x0001,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0x7fff,IREGS(%a6)
+
+ cmp2.b %d1,DATA(%a6)
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0x7fff,SREGS(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+cmp2_6:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ mov.w &0x2040,DATA(%a6)
+ mov.l &0x00000090,%a1
+
+ mov.w &0x0001,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0x7fff,IREGS(%a6)
+
+ cmp2.b %a1,DATA(%a6)
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0x7fff,SREGS(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+# unsigned - small,large
+cmp2_7:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ mov.l &0x2000a000,DATA(%a6)
+ mov.l &0x11112000,%d1
+
+ mov.w &0x0004,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0x7fff,IREGS(%a6)
+
+ cmp2.w %d1,DATA(%a6)
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0x7fff,SREGS(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+cmp2_8:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ mov.l &0x2000a000,DATA(%a6)
+ mov.l &0xffffa000,%a1
+
+ mov.w &0x0004,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0x7fff,IREGS(%a6)
+
+ cmp2.w %a1,DATA(%a6)
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0x7fff,SREGS(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+cmp2_9:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ mov.l &0x2000a000,DATA(%a6)
+ mov.l &0x11113000,%d1
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0x7fff,IREGS(%a6)
+
+ chk2.w DATA(%a6),%d1
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0x7fff,SREGS(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+cmp2_10:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ mov.l &0x2000a000,DATA(%a6)
+ mov.l &0xffff9000,%a1
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0x7fff,IREGS(%a6)
+
+ cmp2.w %a1,DATA(%a6)
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0x7fff,SREGS(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+cmp2_11:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ mov.l &0x2000a000,DATA(%a6)
+ mov.l &0x11111000,%d1
+
+ mov.w &0x0001,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0x7fff,IREGS(%a6)
+
+ cmp2.w %d1,DATA(%a6)
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0x7fff,SREGS(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+cmp2_12:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ mov.l &0x2000a000,DATA(%a6)
+ mov.l &0xffffb000,%a1
+
+ mov.w &0x0001,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0x7fff,IREGS(%a6)
+
+ cmp2.w %a1,DATA(%a6)
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0x7fff,SREGS(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+# unsigned - large,large
+cmp2_13:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ mov.l &0xa0000000,DATA(%a6)
+ mov.l &0xc0000000,DATA+0x4(%a6)
+ mov.l &0xa0000000,%d1
+
+ mov.w &0x000c,ICCR(%a6)
+ mov.w &0x0008,%cc
+ movm.l &0x7fff,IREGS(%a6)
+
+ cmp2.l %d1,DATA(%a6)
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0x7fff,SREGS(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+cmp2_14:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ mov.l &0xa0000000,DATA(%a6)
+ mov.l &0xc0000000,DATA+0x4(%a6)
+ mov.l &0xc0000000,%a1
+
+ mov.w &0x000c,ICCR(%a6)
+ mov.w &0x0008,%cc
+ movm.l &0x7fff,IREGS(%a6)
+
+ cmp2.l %a1,DATA(%a6)
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0x7fff,SREGS(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+cmp2_15:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ mov.l &0xa0000000,DATA(%a6)
+ mov.l &0xc0000000,DATA+0x4(%a6)
+ mov.l &0xb0000000,%d1
+
+ mov.w &0x0008,ICCR(%a6)
+ mov.w &0x0008,%cc
+ movm.l &0x7fff,IREGS(%a6)
+
+ chk2.l DATA(%a6),%d1
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0x7fff,SREGS(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+cmp2_16:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ mov.l &0xa0000000,DATA(%a6)
+ mov.l &0xc0000000,DATA+0x4(%a6)
+ mov.l &0x10000000,%a1
+
+ mov.w &0x0009,ICCR(%a6)
+ mov.w &0x0008,%cc
+ movm.l &0x7fff,IREGS(%a6)
+
+ cmp2.l %a1,DATA(%a6)
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0x7fff,SREGS(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+cmp2_17:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ mov.l &0xa0000000,DATA(%a6)
+ mov.l &0xc0000000,DATA+0x4(%a6)
+ mov.l &0x90000000,%d1
+
+ mov.w &0x0009,ICCR(%a6)
+ mov.w &0x0008,%cc
+ movm.l &0x7fff,IREGS(%a6)
+
+ cmp2.l %d1,DATA(%a6)
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0x7fff,SREGS(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+cmp2_18:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ mov.l &0xa0000000,DATA(%a6)
+ mov.l &0xc0000000,DATA+0x4(%a6)
+ mov.l &0xd0000000,%a1
+
+ mov.w &0x0009,ICCR(%a6)
+ mov.w &0x0008,%cc
+ movm.l &0x7fff,IREGS(%a6)
+
+ cmp2.l %a1,DATA(%a6)
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0x7fff,SREGS(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+# signed - negative,positive
+cmp2_19:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ mov.w &0xa040,DATA(%a6)
+ mov.l &0x111111a0,%d1
+
+ mov.w &0x0004,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0x7fff,IREGS(%a6)
+
+ cmp2.b %d1,DATA(%a6)
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0x7fff,SREGS(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+cmp2_20:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ mov.w &0xa040,DATA(%a6)
+ mov.l &0x00000040,%a1
+
+ mov.w &0x0004,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0x7fff,IREGS(%a6)
+
+ chk2.b DATA(%a6),%a1
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0x7fff,SREGS(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+cmp2_21:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ mov.w &0xa040,DATA(%a6)
+ mov.l &0x111111b0,%d1
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0x7fff,IREGS(%a6)
+
+ cmp2.b %d1,DATA(%a6)
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0x7fff,SREGS(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+cmp2_22:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ mov.w &0xa040,DATA(%a6)
+ mov.l &0x00000010,%a1
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0x7fff,IREGS(%a6)
+
+ cmp2.b %a1,DATA(%a6)
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0x7fff,SREGS(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+cmp2_23:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ mov.w &0xa040,DATA(%a6)
+ mov.l &0x11111190,%d1
+
+ mov.w &0x0001,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0x7fff,IREGS(%a6)
+
+ cmp2.b %d1,DATA(%a6)
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0x7fff,SREGS(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+cmp2_24:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ mov.w &0xa040,DATA(%a6)
+ mov.l &0x00000050,%a1
+
+ mov.w &0x0001,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0x7fff,IREGS(%a6)
+
+ cmp2.b %a1,DATA(%a6)
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0x7fff,SREGS(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+# signed - negative,negative
+cmp2_25:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ mov.w &0xa0c0,DATA(%a6)
+ mov.l &0x111111a0,%d1
+
+ mov.w &0x0004,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0x7fff,IREGS(%a6)
+
+ cmp2.b %d1,DATA(%a6)
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0x7fff,SREGS(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+cmp2_26:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ mov.w &0xa0c0,DATA(%a6)
+ mov.l &0xffffffc0,%a1
+
+ mov.w &0x0004,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0x7fff,IREGS(%a6)
+
+ cmp2.b %a1,DATA(%a6)
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0x7fff,SREGS(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+cmp2_27:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ mov.w &0xa0c0,DATA(%a6)
+ mov.l &0x111111b0,%d1
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0x7fff,IREGS(%a6)
+
+ chk2.b DATA(%a6),%d1
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0x7fff,SREGS(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+cmp2_28:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ mov.w &0xa0c0,DATA(%a6)
+ mov.l &0x11111190,%a1
+
+ mov.w &0x0001,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0x7fff,IREGS(%a6)
+
+ cmp2.b %a1,DATA(%a6)
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0x7fff,SREGS(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+cmp2_29:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ mov.w &0xa0c0,DATA(%a6)
+ mov.l &0x111111d0,%d1
+
+ mov.w &0x0001,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0x7fff,IREGS(%a6)
+
+ cmp2.b %d1,DATA(%a6)
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0x7fff,SREGS(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+cmp2_30:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ mov.w &0xa0c0,DATA(%a6)
+ mov.l &0x00000050,%a1
+
+ mov.w &0x001b,ICCR(%a6)
+ mov.w &0x001f,%cc
+ movm.l &0x7fff,IREGS(%a6)
+
+ cmp2.b %a1,DATA(%a6)
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0x7fff,SREGS(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ mov.l TESTCTR(%a6),%d1
+ clr.l %d0
+ rts
+
+###########################################################
+
+ea_str:
+ string "\tEffective addresses..."
+
+ align 0x4
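+
+# NOTE: the tests below run the same mulu.l through different effective
+# addressing modes; the interesting part is the side effect on the
+# address register. A C sketch of the post-increment and pre-decrement
+# modes (names illustrative only):
+#
+#	#include <stdint.h>
+#
+#	static uint64_t mul_postinc(uint32_t **ap, uint32_t m)	/* (An)+ */
+#	{
+#		return (uint64_t)m * *(*ap)++;	/* An advances by 4 after */
+#	}
+#
+#	static uint64_t mul_predec(uint32_t **ap, uint32_t m)	/* -(An) */
+#	{
+#		return (uint64_t)m * *--(*ap);	/* An backs up by 4 first */
+#	}
+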
+ea_0:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM(%a6),%a0
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a6)
+
+ mulu.l (%a0),%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0xffff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ea_1:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM(%a6),%a0
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a6)
+
+ mulu.l (%a0)+,%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0xffff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+ lea EAMEM+0x4(%a6),%a0
+ mov.l %a0,IREGS+0x20(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ea_2:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM+0x4(%a6),%a0
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a6)
+
+ mulu.l -(%a0),%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0xffff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+ lea EAMEM(%a6),%a0
+ mov.l %a0,IREGS+0x20(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ea_3:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM+0x1000(%a6),%a0
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a6)
+
+ mulu.l (-0x1000,%a0),%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0xffff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ea_4:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM-0x1000(%a6),%a0
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a6)
+
+ mulu.l (0x1000,%a0),%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0xffff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ea_5:
+ addq.l &0x1,TESTCTR(%a6)
+
+# movm.l DEF_REGS(%pc),&0x3fff
+
+# clr.l %d2
+# mov.l &0x00000002,%d3
+
+# mov.w &0x0000,ICCR(%a6)
+# mov.w &0x0000,%cc
+# movm.l &0xffff,IREGS(%a6)
+
+# mulu.l EAMEM.w,%d2:%d3
+
+# mov.w %cc,SCCR(%a6)
+# movm.l &0xffff,SREGS(%a6)
+# mov.l &0x00000004,IREGS+0xc(%a6)
+
+# bsr.l chkregs
+# tst.b %d0
+# bne.l error
+
+ea_6:
+ addq.l &0x1,TESTCTR(%a6)
+
+# movm.l DEF_REGS(%pc),&0x3fff
+
+# clr.l %d2
+# mov.l &0x00000002,%d3
+
+# mov.w &0x0000,ICCR(%a6)
+# mov.w &0x0000,%cc
+# movm.l &0xffff,IREGS(%a6)
+
+# mulu.l EAMEM.l,%d2:%d3
+
+# mov.w %cc,SCCR(%a6)
+# movm.l &0xffff,SREGS(%a6)
+# mov.l &0x00000004,IREGS+0xc(%a6)
+
+# bsr.l chkregs
+# tst.b %d0
+# bne.l error
+
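+# ea_7: immediate source operand.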
+ea_7:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ clr.l %d2
+ mov.l &0x00000002,%d3
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a6)
+
+ mulu.l &0x00000002,%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0xffff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
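+# ea_8: (d16,PC), with the source longword embedded between the
+# branch and its target.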
+ea_8:
+ addq.l &0x1,TESTCTR(%a6)
+
+ bra.b ea_8_next
+ea_8_mem:
+ long 0x00000002
+ea_8_next:
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ clr.l %d2
+ mov.l &0x00000002,%d3
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a6)
+
+ mulu.l (ea_8_mem.w,%pc),%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0xffff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
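+# ea_9-ea_15: -(An) for each of %a1-%a7. ea_14 parks the frame
+# pointer in %a1 so %a6 can be decremented; ea_15 saves %a7 in %a0.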
+ea_9:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM+0x4(%a6),%a1
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a6)
+
+ mulu.l -(%a1),%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0xffff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+ lea EAMEM(%a6),%a0
+ mov.l %a0,IREGS+0x24(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ea_10:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM+0x4(%a6),%a2
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a6)
+
+ mulu.l -(%a2),%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0xffff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+ lea EAMEM(%a6),%a0
+ mov.l %a0,IREGS+0x28(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ea_11:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM+0x4(%a6),%a3
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a6)
+
+ mulu.l -(%a3),%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0xffff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+ lea EAMEM(%a6),%a0
+ mov.l %a0,IREGS+0x2c(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ea_12:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM+0x4(%a6),%a4
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a6)
+
+ mulu.l -(%a4),%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0xffff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+ lea EAMEM(%a6),%a0
+ mov.l %a0,IREGS+0x30(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ea_13:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM+0x4(%a6),%a5
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a6)
+
+ mulu.l -(%a5),%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0xffff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+ lea EAMEM(%a6),%a0
+ mov.l %a0,IREGS+0x34(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ea_14:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ mov.l %a6,%a1
+
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM+0x4(%a1),%a6
+
+ mov.w &0x0000,ICCR(%a1)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a1)
+
+ mulu.l -(%a6),%d2:%d3
+
+ mov.w %cc,SCCR(%a1)
+ movm.l &0xffff,SREGS(%a1)
+ mov.l &0x00000004,IREGS+0xc(%a1)
+ lea EAMEM(%a1),%a0
+ mov.l %a0,IREGS+0x38(%a1)
+
+ mov.l %a1,%a6
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ea_15:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ mov.l %a7,%a0
+ lea EAMEM+0x4(%a6),%a7
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a6)
+
+ mulu.l -(%a7),%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0xffff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+ lea EAMEM(%a6),%a1
+ mov.l %a1,IREGS+0x3c(%a6)
+
+ mov.l %a0,%a7
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
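+# ea_16-ea_32: (d8,An,Xn.SIZE*SCALE) indexed modes, covering scales
+# 1/2/4/8 with word and long data-register indexes, address-register
+# indexes (ea_24/ea_25), and each An as the base (ea_26-ea_32).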
+ea_16:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM(%a6),%a0
+ mov.l &-0x10,%d4
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a6)
+
+ mulu.l (0x10.b,%a0,%d4.w*1),%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0xffff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ea_17:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM(%a6),%a0
+ mov.l &-0x8,%d4
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a6)
+
+ mulu.l (0x10.b,%a0,%d4.w*2),%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0xffff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ea_18:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM(%a6),%a0
+ mov.l &-0x4,%d4
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a6)
+
+ mulu.l (0x10.b,%a0,%d4.w*4),%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0xffff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ea_19:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM(%a6),%a0
+ mov.l &-0x2,%d4
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a6)
+
+ mulu.l (0x10.b,%a0,%d4.w*8),%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0xffff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ea_20:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM(%a6),%a0
+ mov.l &-0x10,%d4
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a6)
+
+ mulu.l (0x10.b,%a0,%d4.l*1),%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0xffff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ea_21:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM(%a6),%a0
+ mov.l &-0x8,%d4
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a6)
+
+ mulu.l (0x10.b,%a0,%d4.l*2),%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0xffff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ea_22:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM(%a6),%a0
+ mov.l &-0x4,%d4
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a6)
+
+ mulu.l (0x10.b,%a0,%d4.l*4),%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0xffff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ea_23:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM(%a6),%a0
+ mov.l &-0x2,%d4
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a6)
+
+ mulu.l (0x10.b,%a0,%d4.l*8),%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0xffff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ea_24:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM(%a6),%a0
+ mov.l &-0x2,%a4
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a6)
+
+ mulu.l (0x10.b,%a0,%a4.l*8),%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0xffff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ea_25:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM(%a6),%a0
+ mov.l &0x2,%a4
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a6)
+
+ mulu.l (-0x10.b,%a0,%a4.l*8),%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0xffff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ea_26:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM(%a6),%a1
+ mov.l &-0x10,%d4
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a6)
+
+ mulu.l (0x10.b,%a1,%d4.w*1),%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0xffff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ea_27:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM(%a6),%a2
+ mov.l &-0x10,%d4
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a6)
+
+ mulu.l (0x10.b,%a2,%d4.w*1),%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0xffff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ea_28:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM(%a6),%a3
+ mov.l &-0x10,%d4
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a6)
+
+ mulu.l (0x10.b,%a3,%d4.w*1),%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0xffff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ea_29:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM(%a6),%a4
+ mov.l &-0x10,%d4
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a6)
+
+ mulu.l (0x10.b,%a4,%d4.w*1),%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0xffff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ea_30:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM(%a6),%a5
+ mov.l &-0x10,%d4
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a6)
+
+ mulu.l (0x10.b,%a5,%d4.w*1),%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0xffff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ea_31:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ mov.l %a6,%a1
+
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM(%a1),%a6
+ mov.l &-0x10,%d4
+
+ mov.w &0x0000,ICCR(%a1)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a1)
+
+ mulu.l (0x10.b,%a6,%d4.w*1),%d2:%d3
+
+ mov.w %cc,SCCR(%a1)
+ movm.l &0xffff,SREGS(%a1)
+ mov.l &0x00000004,IREGS+0xc(%a1)
+
+ mov.l %a1,%a6
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ea_32:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ mov.l %a7,%a0
+ lea EAMEM(%a6),%a7
+ mov.l &-0x10,%d4
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a6)
+
+ mulu.l (0x10.b,%a7,%d4.w*1),%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0xffff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+
+ mov.l %a0,%a7
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
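+# ea_33-ea_39: (An) repeated for %a1-%a7, with the usual %a6/%a7
+# save/restore juggling.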
+ea_33:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM(%a6),%a1
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a6)
+
+ mulu.l (%a1),%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0xffff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ea_34:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM(%a6),%a2
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a6)
+
+ mulu.l (%a2),%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0xffff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ea_35:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM(%a6),%a3
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a6)
+
+ mulu.l (%a3),%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0xffff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ea_36:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM(%a6),%a4
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a6)
+
+ mulu.l (%a4),%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0xffff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ea_37:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM(%a6),%a5
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a6)
+
+ mulu.l (%a5),%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0xffff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ea_38:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ mov.l %a6,%a1
+
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM(%a1),%a6
+
+ mov.w &0x0000,ICCR(%a1)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a1)
+
+ mulu.l (%a6),%d2:%d3
+
+ mov.w %cc,SCCR(%a1)
+ movm.l &0xffff,SREGS(%a1)
+ mov.l &0x00000004,IREGS+0xc(%a1)
+
+ mov.l %a1,%a6
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ea_39:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ mov.l %a7,%a0
+ lea EAMEM(%a6),%a7
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a6)
+
+ mulu.l (%a7),%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0xffff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+
+ mov.l %a0,%a7
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
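+# ea_40-ea_46: (An)+ for %a1-%a7; the advanced register is patched
+# into the expected image (IREGS+0x24 through IREGS+0x3c).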
+ea_40:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM(%a6),%a1
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a6)
+
+ mulu.l (%a1)+,%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0xffff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+ lea EAMEM+0x4(%a6),%a0
+ mov.l %a0,IREGS+0x24(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ea_41:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM(%a6),%a2
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a6)
+
+ mulu.l (%a2)+,%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0xffff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+ lea EAMEM+0x4(%a6),%a0
+ mov.l %a0,IREGS+0x28(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ea_42:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM(%a6),%a3
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a6)
+
+ mulu.l (%a3)+,%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0xffff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+ lea EAMEM+0x4(%a6),%a0
+ mov.l %a0,IREGS+0x2c(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ea_43:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM(%a6),%a4
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a6)
+
+ mulu.l (%a4)+,%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0xffff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+ lea EAMEM+0x4(%a6),%a0
+ mov.l %a0,IREGS+0x30(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ea_44:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM(%a6),%a5
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a6)
+
+ mulu.l (%a5)+,%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0xffff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+ lea EAMEM+0x4(%a6),%a0
+ mov.l %a0,IREGS+0x34(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ea_45:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ mov.l %a6,%a1
+
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM(%a1),%a6
+
+ mov.w &0x0000,ICCR(%a1)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a1)
+
+ mulu.l (%a6)+,%d2:%d3
+
+ mov.w %cc,SCCR(%a1)
+ movm.l &0xffff,SREGS(%a1)
+ mov.l &0x00000004,IREGS+0xc(%a1)
+ lea EAMEM+0x4(%a1),%a0
+ mov.l %a0,IREGS+0x38(%a1)
+
+ mov.l %a1,%a6
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ea_46:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ mov.l %a7,%a0
+ lea EAMEM(%a6),%a7
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a6)
+
+ mulu.l (%a7)+,%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0xffff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+ lea EAMEM+0x4(%a6),%a1
+ mov.l %a1,IREGS+0x3c(%a6)
+
+ mov.l %a0,%a7
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
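+# ea_47-ea_55: (d16,An) for each address register with +/-0x1000
+# displacements, then (d16,PC) with inline data (ea_55).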
+ea_47:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM-0x1000(%a6),%a1
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a6)
+
+ mulu.l (0x1000,%a1),%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0xffff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ea_48:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM-0x1000(%a6),%a2
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a6)
+
+ mulu.l (0x1000,%a2),%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0xffff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ea_49:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM-0x1000(%a6),%a3
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a6)
+
+ mulu.l (0x1000,%a3),%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0xffff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ea_50:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM-0x1000(%a6),%a4
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a6)
+
+ mulu.l (0x1000,%a4),%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0xffff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ea_51:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM-0x1000(%a6),%a5
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a6)
+
+ mulu.l (0x1000,%a5),%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0xffff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ea_52:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ mov.l %a6,%a1
+
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM-0x1000(%a1),%a6
+
+ mov.w &0x0000,ICCR(%a1)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a1)
+
+ mulu.l (0x1000,%a6),%d2:%d3
+
+ mov.w %cc,SCCR(%a1)
+ movm.l &0xffff,SREGS(%a1)
+ mov.l &0x00000004,IREGS+0xc(%a1)
+
+ mov.l %a1,%a6
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ea_53:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ mov.l %a7,%a0
+ lea EAMEM-0x1000(%a6),%a7
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a6)
+
+ mulu.l (0x1000,%a7),%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0xffff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+
+ mov.l %a0,%a7
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ea_54:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM+0x1000(%a6),%a0
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a6)
+
+ mulu.l (-0x1000,%a0),%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0xffff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ea_55:
+ addq.l &0x1,TESTCTR(%a6)
+
+ bra.b ea_55_next
+
+ea_55_data:
+ long 0x00000002
+ea_55_next:
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ clr.l %d2
+ mov.l &0x00000002,%d3
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a6)
+
+ mulu.l (ea_55_data.w,%pc),%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0xffff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
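+# ea_56-ea_67: base-displacement indexed modes, (bd.w/bd.l,An,Xn*SCALE),
+# including a suppressed index (%za4, ea_65) and a suppressed base
+# (%za3, ea_66).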
+ea_56:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM(%a6),%a3
+ mov.l &-0x10,%d4
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a6)
+
+ mulu.l (0x10.w,%a3,%d4.w*1),%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0xffff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ea_57:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM(%a6),%a3
+ mov.l &-0x8,%d4
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a6)
+
+ mulu.l (0x10.w,%a3,%d4.w*2),%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0xffff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ea_58:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM(%a6),%a3
+ mov.l &-0x4,%d4
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a6)
+
+ mulu.l (0x10.w,%a3,%d4.w*4),%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0xffff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ea_59:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM(%a6),%a3
+ mov.l &-0x2,%d4
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a6)
+
+ mulu.l (0x10.w,%a3,%d4.w*8),%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0xffff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ea_60:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM(%a6),%a3
+ mov.l &-0x10,%d4
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a6)
+
+ mulu.l (0x10.w,%a3,%d4.l*1),%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0xffff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ea_61:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM(%a6),%a3
+ mov.l &-0x8,%d4
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a6)
+
+ mulu.l (0x10.w,%a3,%d4.l*2),%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0xffff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ea_62:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM(%a6),%a3
+ mov.l &-0x4,%d4
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a6)
+
+ mulu.l (0x10.w,%a3,%d4.l*4),%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0xffff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ea_63:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM(%a6),%a3
+ mov.l &-0x2,%d4
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a6)
+
+ mulu.l (0x10.w,%a3,%d4.l*8),%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0xffff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ea_64:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM(%a6),%a3
+ mov.l &0x2,%a4
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a6)
+
+ mulu.l (-0x10.w,%a3,%a4.l*8),%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0xffff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ea_65:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM(%a6),%a3
+ mov.l &0x2,%a4
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a6)
+
+ mulu.l (0x00.w,%a3,%za4.l*8),%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0xffff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ea_66:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM(%a6),%a3
+ mov.l %a3,%a4
+ add.l &0x10,%a4
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a6)
+
+ mulu.l (-0x10.w,%za3,%a4.l*1),%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0xffff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ea_67:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM(%a6),%a3
+ mov.l &0x2,%a4
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a6)
+
+ mulu.l (-0x10.l,%a3,%a4.l*8),%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0xffff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
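+# ea_68-ea_88: PC-relative indexed modes, (bd,PC,Xn*SCALE), each with
+# its source longword embedded next to the test.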
+ea_68:
+ addq.l &0x1,TESTCTR(%a6)
+
+ bra.b ea_68_next
+ea_68_mem:
+ long 0x00000002
+ea_68_next:
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM(%a6),%a3
+ mov.l &-0x10,%d4
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a6)
+
+ mulu.l (ea_68_mem+0x10.w,%pc,%d4.w*1),%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0xffff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ea_69:
+ addq.l &0x1,TESTCTR(%a6)
+
+ bra.b ea_69_next
+ea_69_mem:
+ long 0x00000002
+ea_69_next:
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM(%a6),%a0
+ mov.l &-0x8,%d4
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0x7fff,IREGS(%a6)
+
+ mulu.l (ea_69_mem+0x10.w,%pc,%d4.w*2),%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0x7fff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ea_70:
+ addq.l &0x1,TESTCTR(%a6)
+
+ bra.b ea_70_next
+ea_70_mem:
+ long 0x00000002
+ea_70_next:
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM(%a6),%a0
+ mov.l &-0x4,%d4
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0x7fff,IREGS(%a6)
+
+ mulu.l (ea_70_mem+0x10.w,%pc,%d4.w*4),%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0x7fff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ea_71:
+ addq.l &0x1,TESTCTR(%a6)
+
+ bra.b ea_71_next
+ea_71_mem:
+ long 0x00000002
+ea_71_next:
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM(%a6),%a0
+ mov.l &-0x2,%d4
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0x7fff,IREGS(%a6)
+
+ mulu.l (ea_71_mem+0x10.w,%pc,%d4.w*8),%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0x7fff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ea_72:
+ addq.l &0x1,TESTCTR(%a6)
+
+ bra.b ea_72_next
+ea_72_mem:
+ long 0x00000002
+ea_72_next:
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM(%a6),%a0
+ mov.l &-0x10,%d4
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0x7fff,IREGS(%a6)
+
+ mulu.l (ea_72_mem+0x10.w,%pc,%d4.l*1),%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0x7fff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ea_73:
+ addq.l &0x1,TESTCTR(%a6)
+
+ bra.b ea_73_next
+ea_73_mem:
+ long 0x00000002
+ea_73_next:
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM(%a6),%a0
+ mov.l &-0x8,%d4
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0x7fff,IREGS(%a6)
+
+ mulu.l (ea_73_mem+0x10.w,%pc,%d4.l*2),%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0x7fff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ea_74:
+ addq.l &0x1,TESTCTR(%a6)
+
+ bra.b ea_74_next
+ea_74_mem:
+ long 0x00000002
+ea_74_next:
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM(%a6),%a0
+ mov.l &-0x4,%d4
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0x7fff,IREGS(%a6)
+
+ mulu.l (ea_74_mem+0x10.w,%pc,%d4.l*4),%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0x7fff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ea_75:
+ addq.l &0x1,TESTCTR(%a6)
+
+ bra.b ea_75_next
+ea_75_mem:
+ long 0x00000002
+ea_75_next:
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM(%a6),%a0
+ mov.l &-0x2,%d4
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0x7fff,IREGS(%a6)
+
+ mulu.l (ea_75_mem+0x10.w,%pc,%d4.l*8),%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0x7fff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ea_76:
+ addq.l &0x1,TESTCTR(%a6)
+
+ bra.b ea_76_next
+ea_76_mem:
+ long 0x00000002
+ea_76_next:
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM(%a6),%a3
+ mov.l &-0x2,%a4
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a6)
+
+ mulu.l (ea_76_mem+0x10.w,%pc,%a4.l*8),%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0xffff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ea_77:
+ addq.l &0x1,TESTCTR(%a6)
+
+ bra.b ea_77_next
+ea_77_mem:
+ long 0x00000002
+ea_77_next:
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM(%a6),%a3
+ mov.l &0x2,%a4
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a6)
+
+ mulu.l (ea_77_mem+0x00.w,%pc,%za4.l*8),%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0xffff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ea_78:
+ addq.l &0x1,TESTCTR(%a6)
+
+# movm.l DEF_REGS(%pc),&0x3fff
+
+# clr.l %d2
+# mov.l &0x00000002,%d3
+# lea EAMEM,%a3
+# mov.l %a3,%a4
+# add.l &0x10,%a4
+
+# mov.w &0x0000,ICCR(%a6)
+# mov.w &0x0000,%cc
+# movm.l &0xffff,IREGS(%a6)
+
+# mulu.l (EAMEM-0x10.w,%zpc,%a4.l*1),%d2:%d3
+
+# mov.w %cc,SCCR(%a6)
+# movm.l &0xffff,SREGS(%a6)
+# mov.l &0x00000004,IREGS+0xc(%a6)
+
+# bsr.l chkregs
+# tst.b %d0
+# bne.l error
+
+ea_79:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM,%a3
+ mov.l &0x2,%a4
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a6)
+
+ mulu.l (ea_79_mem-0x10.l,%pc,%a4.l*8),%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0xffff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+
+ bra.b ea_79_next
+ea_79_mem:
+ long 0x00000002
+ea_79_next:
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ea_80:
+ addq.l &0x1,TESTCTR(%a6)
+
+ bra.b ea_80_next
+ea_80_mem:
+ long 0x00000002
+ea_80_next:
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM(%a6),%a1
+ mov.l &-0x10,%d4
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a6)
+
+ mulu.l (ea_80_mem+0x10.b,%pc,%d4.w*1),%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0xffff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ea_81:
+ addq.l &0x1,TESTCTR(%a6)
+
+ bra.b ea_81_next
+ea_81_mem:
+ long 0x00000002
+ea_81_next:
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM(%a6),%a0
+ mov.l &-0x8,%d4
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a6)
+
+ mulu.l (ea_81_mem+0x10.b,%pc,%d4.w*2),%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0xffff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ea_82:
+ addq.l &0x1,TESTCTR(%a6)
+
+ bra.b ea_82_next
+ea_82_mem:
+ long 0x00000002
+ea_82_next:
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM(%a6),%a0
+ mov.l &-0x4,%d4
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a6)
+
+ mulu.l (ea_82_mem+0x10.b,%pc,%d4.w*4),%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0xffff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ea_83:
+ addq.l &0x1,TESTCTR(%a6)
+
+ bra.b ea_83_next
+ea_83_mem:
+ long 0x00000002
+ea_83_next:
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM(%a6),%a0
+ mov.l &-0x2,%d4
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a6)
+
+ mulu.l (ea_83_mem+0x10.b,%pc,%d4.w*8),%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0xffff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ea_84:
+ addq.l &0x1,TESTCTR(%a6)
+
+ bra.b ea_84_next
+ea_84_mem:
+ long 0x00000002
+ea_84_next:
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM(%a6),%a0
+ mov.l &-0x10,%d4
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a6)
+
+ mulu.l (ea_84_mem+0x10.b,%pc,%d4.l*1),%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0xffff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ea_85:
+ addq.l &0x1,TESTCTR(%a6)
+
+ bra.b ea_85_next
+ea_85_mem:
+ long 0x00000002
+ea_85_next:
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM(%a6),%a0
+ mov.l &-0x8,%d4
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a6)
+
+ mulu.l (ea_85_mem+0x10.b,%pc,%d4.l*2),%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0xffff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ea_86:
+ addq.l &0x1,TESTCTR(%a6)
+
+ bra.b ea_86_next
+ea_86_mem:
+ long 0x00000002
+ea_86_next:
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM(%a6),%a0
+ mov.l &-0x4,%d4
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a6)
+
+ mulu.l (ea_86_mem+0x10.b,%pc,%d4.l*4),%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0xffff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ea_87:
+ addq.l &0x1,TESTCTR(%a6)
+
+ bra.b ea_87_next
+ea_87_mem:
+ long 0x00000002
+ea_87_next:
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM(%a6),%a0
+ mov.l &-0x2,%d4
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a6)
+
+ mulu.l (ea_87_mem+0x10.b,%pc,%d4.l*8),%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0xffff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ea_88:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM(%a6),%a0
+ mov.l &-0x2,%d4
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a6)
+
+ mulu.l (ea_88_mem+0x10.b,%pc,%d4.l*8),%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0xffff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+
+ bra.b ea_88_next
+ea_88_mem:
+ long 0x00000002
+ea_88_next:
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
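+# ea_89-ea_103: memory indirect preindexed, ([bd,An,Xn*SCALE],od).
+# EASTORE holds a pointer to EAMEM-0x1000 and the outer displacement
+# lands back on EAMEM. ea_102/ea_103 move the frame pointer to %a1
+# so %a6 can serve as the index register.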
+ea_89:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM-0x1000(%a6),%a3
+ lea EASTORE(%a6),%a4
+ mov.l %a3,(%a4)
+ mov.l &-0x10,%d4
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a6)
+
+ mulu.l ([0x10.w,%a4,%d4.w*1],0x1000.w),%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0xffff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ea_90:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM-0x1000(%a6),%a3
+ lea EASTORE(%a6),%a4
+ mov.l %a3,(%a4)
+ mov.l &-0x8,%d4
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a6)
+
+ mulu.l ([0x10.w,%a4,%d4.w*2],0x1000.w),%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0xffff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ea_91:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM-0x1000(%a6),%a3
+ lea EASTORE(%a6),%a4
+ mov.l %a3,(%a4)
+ mov.l &-0x4,%d4
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a6)
+
+ mulu.l ([0x10.w,%a4,%d4.w*4],0x1000.w),%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0xffff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ea_92:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM-0x1000(%a6),%a3
+ lea EASTORE(%a6),%a4
+ mov.l %a3,(%a4)
+ mov.l &-0x2,%d4
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a6)
+
+ mulu.l ([0x10.w,%a4,%d4.w*8],0x1000.w),%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0xffff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ea_93:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM-0x1000(%a6),%a3
+ lea EASTORE(%a6),%a4
+ mov.l %a3,(%a4)
+ mov.l &-0x10,%d4
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a6)
+
+ mulu.l ([0x10.w,%a4,%d4.l*1],0x1000.w),%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0xffff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ea_94:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM-0x1000(%a6),%a3
+ lea EASTORE(%a6),%a4
+ mov.l %a3,(%a4)
+ mov.l &-0x8,%d4
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a6)
+
+ mulu.l ([0x10.w,%a4,%d4.l*2],0x1000.w),%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0xffff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ea_95:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM-0x1000(%a6),%a3
+ lea EASTORE(%a6),%a4
+ mov.l %a3,(%a4)
+ mov.l &-0x4,%d4
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a6)
+
+ mulu.l ([0x10.w,%a4,%d4.l*4],0x1000.w),%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0xffff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ea_96:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM-0x1000(%a6),%a3
+ lea EASTORE(%a6),%a4
+ mov.l %a3,(%a4)
+ mov.l &-0x2,%d4
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a6)
+
+ mulu.l ([0x10.w,%a4,%d4.l*8],0x1000.w),%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0xffff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ea_97:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM-0x1000(%a6),%a3
+ lea EASTORE(%a6),%a4
+ mov.l %a3,(%a4)
+ mov.l &-0x2,%d4
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a6)
+
+ mulu.l ([0x10.l,%a4,%d4.l*8],0x1000.l),%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0xffff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ea_98:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM-0x1000(%a6),%a3
+ lea EASTORE(%a6),%a4
+ mov.l %a3,(%a4)
+ mov.l &-0x2,%d4
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a6)
+
+ mulu.l ([0x00.l,%a4,%zd4.l*8],0x1000.l),%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0xffff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ea_99:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM-0x1000(%a6),%a3
+ lea EASTORE(%a6),%a4
+ mov.l %a3,(%a4)
+ mov.l &-0x2,%d4
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a6)
+
+ mulu.l ([%a4,%zd4.l*8],0x1000.l),%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0xffff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ea_100:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM-0x1000(%a6),%a3
+ lea EASTORE(%a6),%a4
+ mov.l %a3,(%a4)
+ mov.l &-0x10,%d4
+ add.l %a4,%d4
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a6)
+
+ mulu.l ([0x10.l,%za4,%d4.l*1],0x1000.l),%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0xffff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ea_101:
+ addq.l &0x1,TESTCTR(%a6)
+
+# movm.l DEF_REGS(%pc),&0x3fff
+
+# clr.l %d2
+# mov.l &0x00000002,%d3
+# lea EAMEM(%a6),%a3
+# lea EASTORE(%a6),%a4
+# mov.l %a3,(%a4)
+# mov.l &-0x10,%d4
+
+# mov.w &0x0000,ICCR(%a6)
+# mov.w &0x0000,%cc
+# movm.l &0xffff,IREGS(%a6)
+
+# mulu.l ([EASTORE.l,%za4,%zd4.l*1]),%d2:%d3
+
+# mov.w %cc,SCCR(%a6)
+# movm.l &0xffff,SREGS(%a6)
+# mov.l &0x00000004,IREGS+0xc(%a6)
+
+# bsr.l chkregs
+# tst.b %d0
+# bne.l error
+
+ea_102:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ mov.l %a6,%a1
+
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM+0x1000(%a1),%a3
+ lea EASTORE(%a1),%a4
+ mov.l %a3,(%a4)
+ mov.l &-0x2,%a6
+
+ mov.w &0x0000,ICCR(%a1)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a1)
+
+ mulu.l ([0x10.w,%a4,%a6.l*8],-0x1000.w),%d2:%d3
+
+ mov.w %cc,SCCR(%a1)
+ movm.l &0xffff,SREGS(%a1)
+ mov.l &0x00000004,IREGS+0xc(%a1)
+
+ mov.l %a1,%a6
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ea_103:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ mov.l %a6,%a1
+
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM+0x1000(%a1),%a3
+ lea EASTORE(%a1),%a4
+ mov.l %a3,(%a4)
+ mov.l &0x2,%a6
+
+ mov.w &0x0000,ICCR(%a1)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a1)
+
+ mulu.l ([-0x10.w,%a4,%a6.l*8],-0x1000.w),%d2:%d3
+
+ mov.w %cc,SCCR(%a1)
+ movm.l &0xffff,SREGS(%a1)
+ mov.l &0x00000004,IREGS+0xc(%a1)
+
+ mov.l %a1,%a6
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
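+# ea_104-ea_116: memory indirect postindexed, ([bd,An],Xn*SCALE,od).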
+ea_104:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM-0x20(%a6),%a3
+ lea EASTORE(%a6),%a4
+ mov.l %a3,(%a4)
+ sub.l &0x10,%a4
+ mov.l &0x10,%d4
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a6)
+
+ mulu.l ([0x10.w,%a4],%d4.w*1,0x10.w),%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0xffff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ea_105:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM-0x20(%a6),%a3
+ lea EASTORE(%a6),%a4
+ mov.l %a3,(%a4)
+ sub.l &0x10,%a4
+ mov.l &0x8,%d4
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a6)
+
+ mulu.l ([0x10.w,%a4],%d4.w*2,0x10.w),%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0xffff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ea_106:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM-0x20(%a6),%a3
+ lea EASTORE(%a6),%a4
+ mov.l %a3,(%a4)
+ sub.l &0x10,%a4
+ mov.l &0x4,%d4
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a6)
+
+ mulu.l ([0x10.w,%a4],%d4.w*4,0x10.w),%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0xffff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ea_107:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM-0x20(%a6),%a3
+ lea EASTORE(%a6),%a4
+ mov.l %a3,(%a4)
+ sub.l &0x10,%a4
+ mov.l &0x2,%d4
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a6)
+
+ mulu.l ([0x10.w,%a4],%d4.w*8,0x10.w),%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0xffff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ea_108:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM-0x20(%a6),%a3
+ lea EASTORE(%a6),%a4
+ mov.l %a3,(%a4)
+ sub.l &0x10,%a4
+ mov.l &0x10,%d4
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a6)
+
+ mulu.l ([0x10.w,%a4],%d4.l*1,0x10.w),%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0xffff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ea_109:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM-0x20(%a6),%a3
+ lea EASTORE(%a6),%a4
+ mov.l %a3,(%a4)
+ sub.l &0x10,%a4
+ mov.l &0x8,%d4
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a6)
+
+	mulu.l	([0x10.w,%a4],%d4.l*2,0x10.w),%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0xffff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ea_110:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM-0x20(%a6),%a3
+ lea EASTORE(%a6),%a4
+ mov.l %a3,(%a4)
+ sub.l &0x10,%a4
+ mov.l &0x4,%d4
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a6)
+
+ mulu.l ([0x10.w,%a4],%d4.l*4,0x10.w),%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0xffff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ea_111:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM-0x20(%a6),%a3
+ lea EASTORE(%a6),%a4
+ mov.l %a3,(%a4)
+ sub.l &0x10,%a4
+ mov.l &0x2,%d4
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a6)
+
+ mulu.l ([0x10.w,%a4],%d4.l*8,0x10.w),%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0xffff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ea_112:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM-0x20(%a6),%a3
+ lea EASTORE(%a6),%a4
+ mov.l %a3,(%a4)
+ sub.l &0x10,%a4
+ mov.l &0x2,%d4
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a6)
+
+ mulu.l ([0x10.l,%a4],%d4.l*8,0x10.l),%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0xffff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ea_113:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM-0x20(%a6),%a3
+ lea EASTORE(%a6),%a4
+ mov.l %a3,(%a4)
+ mov.l &0x2,%d4
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a6)
+
+ mulu.l ([0x00.l,%a4],%zd4.l*8,0x20.l),%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0xffff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ea_114:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ mov.l %a7,%a0
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM-0x20(%a6),%a3
+ lea EASTORE(%a6),%a7
+ mov.l %a3,(%a7)
+ mov.l &0x20,%d4
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a6)
+
+ mulu.l ([%a7],%d4.l*1),%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0xffff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+
+ mov.l %a0,%a7
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ea_115:
+ addq.l &0x1,TESTCTR(%a6)
+
+# movm.l DEF_REGS(%pc),&0x3fff
+
+# clr.l %d2
+# mov.l &0x00000002,%d3
+# lea EAMEM-0x20(%pc),%a3
+# lea EASTORE(%pc),%a4
+# mov.l %a3,(%a4)
+# mov.l &0x2,%d4
+
+# mov.w &0x0000,ICCR(%a6)
+# mov.w &0x0000,%cc
+# movm.l &0xffff,IREGS(%a6)
+
+# mulu.l ([EASTORE.l,%za4],%zd4.l*8,0x20.l),%d2:%d3
+
+# mov.w %cc,SCCR(%a6)
+# movm.l &0xffff,SREGS(%a6)
+# mov.l &0x00000004,IREGS+0xc(%a6)
+
+# bsr.l chkregs
+# tst.b %d0
+# bne.l error
+
+ea_116:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ mov.l %a6,%a1
+
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM(%a1),%a3
+ lea EASTORE(%a1),%a6
+ mov.l %a3,(%a6)
+ add.l &0x10,%a6
+ mov.l &-0x2,%a5
+
+ mov.w &0x0000,ICCR(%a1)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a1)
+
+ mulu.l ([-0x10.w,%a6],%a5.l*8,0x10.l),%d2:%d3
+
+ mov.w %cc,SCCR(%a1)
+ movm.l &0xffff,SREGS(%a1)
+ mov.l &0x00000004,IREGS+0xc(%a1)
+
+ mov.l %a1,%a6
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ mov.l TESTCTR(%a6),%d1
+ clr.l %d0
+ rts
+
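+# ea_117-ea_143: PC-relative memory indirect, ([bd,PC,Xn],od) and
+# friends. Note the routine returns just above, so these cases run
+# only if entered directly.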
+ea_117:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM-0x1000(%pc),%a3
+ lea EASTORE(%pc),%a4
+ mov.l %a3,(%a4)
+ mov.l &-0x10,%d4
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a6)
+
+ mulu.l ([EASTORE+0x10.w,%pc,%d4.w*1],0x1000.w),%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0xffff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ea_118:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM-0x1000(%pc),%a3
+ lea EASTORE(%pc),%a4
+ mov.l %a3,(%a4)
+ mov.l &-0x8,%d4
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a6)
+
+ mulu.l ([EASTORE+0x10.w,%pc,%d4.w*2],0x1000.w),%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0xffff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ea_119:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM-0x1000(%pc),%a3
+ lea EASTORE(%pc),%a4
+ mov.l %a3,(%a4)
+ mov.l &-0x4,%d4
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a6)
+
+ mulu.l ([EASTORE+0x10.w,%pc,%d4.w*4],0x1000.w),%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0xffff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ea_120:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM-0x1000(%pc),%a3
+ lea EASTORE(%pc),%a4
+ mov.l %a3,(%a4)
+ mov.l &-0x2,%d4
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a6)
+
+ mulu.l ([EASTORE+0x10.w,%pc,%d4.w*8],0x1000.w),%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0xffff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ea_121:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM-0x1000(%pc),%a3
+ lea EASTORE(%pc),%a4
+ mov.l %a3,(%a4)
+ mov.l &-0x10,%d4
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a6)
+
+ mulu.l ([EASTORE+0x10.w,%pc,%d4.l*1],0x1000.w),%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0xffff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ea_122:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM-0x1000(%pc),%a3
+ lea EASTORE(%pc),%a4
+ mov.l %a3,(%a4)
+ mov.l &-0x8,%d4
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a6)
+
+ mulu.l ([EASTORE+0x10.w,%pc,%d4.l*2],0x1000.w),%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0xffff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ea_123:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM-0x1000(%pc),%a3
+ lea EASTORE(%pc),%a4
+ mov.l %a3,(%a4)
+ mov.l &-0x4,%d4
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a6)
+
+ mulu.l ([EASTORE+0x10.w,%pc,%d4.l*4],0x1000.w),%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0xffff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ea_124:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM-0x1000(%pc),%a3
+ lea EASTORE(%pc),%a4
+ mov.l %a3,(%a4)
+ mov.l &-0x2,%d4
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a6)
+
+ mulu.l ([EASTORE+0x10.w,%pc,%d4.l*8],0x1000.w),%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0xffff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ea_125:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM-0x1000(%pc),%a3
+ lea EASTORE(%pc),%a4
+ mov.l %a3,(%a4)
+ mov.l &-0x2,%d4
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+	movm.l	&0xffff,IREGS(%a6)
+
+	mulu.l	([EASTORE+0x10.l,%pc,%d4.l*8],0x1000.l),%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0xffff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ea_126:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM-0x1000(%pc),%a3
+ lea EASTORE(%pc),%a4
+ mov.l %a3,(%a4)
+ mov.l &-0x2,%d4
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a6)
+
+ mulu.l ([EASTORE+0x00.l,%pc,%zd4.l*8],0x1000.l),%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0xffff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ea_127:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM-0x1000(%pc),%a3
+ lea EASTORE(%pc),%a4
+ mov.l %a3,(%a4)
+ mov.l %a4,%d4
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a6)
+
+ mulu.l ([%zpc,%d4.l*1],0x1000.l),%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0xffff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ea_128:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM-0x1000(%pc),%a3
+ lea EASTORE(%pc),%a4
+ mov.l %a3,(%a4)
+ mov.l &-0x10,%d4
+ add.l %a4,%d4
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a6)
+
+ mulu.l ([0x10.l,%zpc,%d4.l*1],0x1000.l),%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0xffff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ea_129:
+	addq.l	&0x1,TESTCTR(%a6)
+
+	movm.l	DEF_REGS(%pc),&0x3fff
+
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM(%pc),%a3
+ lea EASTORE(%pc),%a4
+ mov.l %a3,(%a4)
+ mov.l &-0x10,%d4
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a6)
+
+ mulu.l ([EASTORE.l,%zpc,%zd4.l*1]),%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0xffff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ea_130:
+	addq.l	&0x1,TESTCTR(%a6)
+
+	movm.l	DEF_REGS(%pc),&0x3fff
+
+# park the frame pointer in %a1 while %a6 serves as the index register
+	mov.l	%a6,%a1
+
+	clr.l	%d2
+	mov.l	&0x00000002,%d3
+	lea	EAMEM+0x1000(%pc),%a3
+	lea	EASTORE(%pc),%a4
+	mov.l	%a3,(%a4)
+	mov.l	&0x2,%a6
+
+	mov.w	&0x0000,ICCR(%a1)
+	mov.w	&0x0000,%cc
+	movm.l	&0xffff,IREGS(%a1)
+
+	mulu.l	([EASTORE-0x10.w,%pc,%a6.l*8],-0x1000.w),%d2:%d3
+
+	mov.w	%cc,SCCR(%a1)
+	movm.l	&0xffff,SREGS(%a1)
+	mov.l	&0x00000004,IREGS+0xc(%a1)
+
+	mov.l	%a1,%a6
+
+	bsr.l	chkregs
+	tst.b	%d0
+	bne.l	error
+
+ea_131:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ mov.l %a7,%a0
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM+0x1000(%pc),%a3
+ lea EASTORE(%pc),%a4
+ mov.l %a3,(%a4)
+ mov.l &0x2,%a7
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a6)
+
+ mulu.l ([EASTORE-0x10.w,%pc,%a7.l*8],-0x1000.w),%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0xffff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+
+ mov.l %a0,%a7
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ea_132:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM-0x20(%pc),%a3
+ lea EASTORE(%pc),%a4
+ mov.l %a3,(%a4)
+ sub.l &0x10,%a4
+ mov.l &0x10,%d4
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a6)
+
+ mulu.l ([EASTORE.w,%pc],%d4.w*1,0x10.w),%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0xffff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ea_133:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM-0x20(%pc),%a3
+ lea EASTORE(%pc),%a4
+ mov.l %a3,(%a4)
+ sub.l &0x10,%a4
+ mov.l &0x8,%d4
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a6)
+
+ mulu.l ([EASTORE.w,%pc],%d4.w*2,0x10.w),%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0xffff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ea_134:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM-0x20(%pc),%a3
+ lea EASTORE(%pc),%a4
+ mov.l %a3,(%a4)
+ sub.l &0x10,%a4
+ mov.l &0x4,%d4
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a6)
+
+ mulu.l ([EASTORE.w,%pc],%d4.w*4,0x10.w),%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0xffff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ea_135:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM-0x20(%pc),%a3
+ lea EASTORE(%pc),%a4
+ mov.l %a3,(%a4)
+ sub.l &0x10,%a4
+ mov.l &0x2,%d4
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a6)
+
+ mulu.l ([EASTORE.w,%pc],%d4.w*8,0x10.w),%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0xffff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ea_136:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM-0x20(%pc),%a3
+ lea EASTORE(%pc),%a4
+ mov.l %a3,(%a4)
+ sub.l &0x10,%a4
+ mov.l &0x10,%d4
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a6)
+
+ mulu.l ([EASTORE.w,%pc],%d4.l*1,0x10.w),%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0xffff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ea_137:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM-0x20(%pc),%a3
+ lea EASTORE(%pc),%a4
+ mov.l %a3,(%a4)
+ sub.l &0x10,%a4
+ mov.l &0x8,%d4
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a6)
+
+	mulu.l	([EASTORE.w,%pc],%d4.l*2,0x10.w),%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0xffff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ea_138:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM-0x20(%pc),%a3
+ lea EASTORE(%pc),%a4
+ mov.l %a3,(%a4)
+ sub.l &0x10,%a4
+ mov.l &0x4,%d4
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a6)
+
+ mulu.l ([EASTORE.w,%pc],%d4.l*4,0x10.w),%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0xffff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ea_139:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM-0x20(%pc),%a3
+ lea EASTORE(%pc),%a4
+ mov.l %a3,(%a4)
+ sub.l &0x10,%a4
+ mov.l &0x2,%d4
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a6)
+
+ mulu.l ([EASTORE.w,%pc],%d4.l*8,0x10.w),%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0xffff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ea_140:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM-0x20(%pc),%a3
+ lea EASTORE(%pc),%a4
+ mov.l %a3,(%a4)
+ sub.l &0x10,%a4
+ mov.l &0x2,%d4
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a6)
+
+ mulu.l ([EASTORE.l,%pc],%d4.l*8,0x10.l),%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0xffff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ea_141:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM-0x20(%pc),%a3
+ lea EASTORE(%pc),%a4
+ mov.l %a3,(%a4)
+ mov.l &0x2,%d4
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a6)
+
+ mulu.l ([EASTORE.l,%pc],%zd4.l*8,0x20.l),%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0xffff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ea_142:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM-0x20(%pc),%a3
+ lea EASTORE(%pc),%a4
+ mov.l %a3,(%a4)
+ mov.l &0x4,%d4
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a6)
+
+ mulu.l ([EASTORE.l,%zpc],%d4.l*8),%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0xffff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ea_143:
+ addq.l &0x1,TESTCTR(%a6)
+
+ movm.l DEF_REGS(%pc),&0x3fff
+
+ mov.l %a7,%a0
+ clr.l %d2
+ mov.l &0x00000002,%d3
+ lea EAMEM(%pc),%a3
+ lea EASTORE(%pc),%a6
+ mov.l %a3,(%a6)
+ add.l &0x10,%a6
+ mov.l &-0x2,%a7
+
+ mov.w &0x0000,ICCR(%a6)
+ mov.w &0x0000,%cc
+ movm.l &0xffff,IREGS(%a6)
+
+ mulu.l ([EASTORE.w,%pc],%a7.l*8,0x10.l),%d2:%d3
+
+ mov.w %cc,SCCR(%a6)
+ movm.l &0xffff,SREGS(%a6)
+ mov.l &0x00000004,IREGS+0xc(%a6)
+
+ mov.l %a0,%a7
+ bsr.l chkregs
+ tst.b %d0
+ bne.l error
+
+ clr.l %d0
+ rts
+
+###########################################################
+###########################################################
+chkregs:
+ lea IREGS(%a6),%a0
+ lea SREGS(%a6),%a1
+ mov.l &14,%d0
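+# (note: &14 seeds the dbra loop for 15 compares, which presumably covers
+# d0-d7/a0-a6 and deliberately skips the saved copy of a7)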
+chkregs_loop:
+ cmp.l (%a0)+,(%a1)+
+ bne.l chkregs_error
+ dbra.w %d0,chkregs_loop
+
+ mov.w ICCR(%a6),%d0
+ mov.w SCCR(%a6),%d1
+ cmp.w %d0,%d1
+ bne.l chkregs_error
+
+ clr.l %d0
+ rts
+
+chkregs_error:
+ movq.l &0x1,%d0
+ rts
+
+error:
+ mov.l TESTCTR(%a6),%d1
+ movq.l &0x1,%d0
+ rts
+
+DEF_REGS:
+ long 0xacacacac, 0xacacacac, 0xacacacac, 0xacacacac
+ long 0xacacacac, 0xacacacac, 0xacacacac, 0xacacacac
+
+ long 0xacacacac, 0xacacacac, 0xacacacac, 0xacacacac
+ long 0xacacacac, 0xacacacac, 0xacacacac, 0xacacacac
+
+############################################################
+
+_print_str:
+ mov.l %d0,-(%sp)
+ mov.l (TESTTOP-0x80+0x0,%pc),%d0
+ pea (TESTTOP-0x80,%pc,%d0)
+ mov.l 0x4(%sp),%d0
+ rtd &0x4
+
+_print_num:
+ mov.l %d0,-(%sp)
+ mov.l (TESTTOP-0x80+0x4,%pc),%d0
+ pea (TESTTOP-0x80,%pc,%d0)
+ mov.l 0x4(%sp),%d0
+ rtd &0x4
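+# (both print stubs fetch a self-relative callout offset from the 0x80-byte
+# area just below TESTTOP and enter the handler via the pea/rtd pair; d0 is
+# restored from its saved stack copy before rtd discards that copy)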
+
+############################################################
diff --git a/arch/m68k/ifpsp060/src/pfpsp.S b/arch/m68k/ifpsp060/src/pfpsp.S
new file mode 100644
index 00000000000..0c997c436be
--- /dev/null
+++ b/arch/m68k/ifpsp060/src/pfpsp.S
@@ -0,0 +1,14745 @@
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+MOTOROLA MICROPROCESSOR & MEMORY TECHNOLOGY GROUP
+M68000 Hi-Performance Microprocessor Division
+M68060 Software Package
+Production Release P1.00 -- October 10, 1994
+
+M68060 Software Package Copyright © 1993, 1994 Motorola Inc. All rights reserved.
+
+THE SOFTWARE is provided on an "AS IS" basis and without warranty.
+To the maximum extent permitted by applicable law,
+MOTOROLA DISCLAIMS ALL WARRANTIES WHETHER EXPRESS OR IMPLIED,
+INCLUDING IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
+and any warranty against infringement with regard to the SOFTWARE
+(INCLUDING ANY MODIFIED VERSIONS THEREOF) and any accompanying written materials.
+
+To the maximum extent permitted by applicable law,
+IN NO EVENT SHALL MOTOROLA BE LIABLE FOR ANY DAMAGES WHATSOEVER
+(INCLUDING WITHOUT LIMITATION, DAMAGES FOR LOSS OF BUSINESS PROFITS,
+BUSINESS INTERRUPTION, LOSS OF BUSINESS INFORMATION, OR OTHER PECUNIARY LOSS)
+ARISING OF THE USE OR INABILITY TO USE THE SOFTWARE.
+Motorola assumes no responsibility for the maintenance and support of the SOFTWARE.
+
+You are hereby granted a copyright license to use, modify, and distribute the SOFTWARE
+so long as this entire notice is retained without alteration in any modified and/or
+redistributed versions, and that such modified versions are clearly identified as such.
+No licenses are granted by implication, estoppel or otherwise under any patents
+or trademarks of Motorola, Inc.
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+# freal.s:
+# This file is appended to the top of the 060FPSP package
+# and contains the entry points into the package. The user, in
+# effect, branches to one of the branch table entries located
+# after _060FPSP_TABLE.
+# Also, subroutine stubs exist in this file (_fpsp_done for
+# example) that are referenced by the FPSP package itself in order
+# to call a given routine. The stub routine actually performs the
+# callout. The FPSP code does a "bsr" to the stub routine. This
+# extra layer of hierarchy adds a slight performance penalty but
+# it makes the FPSP code easier to read and more maintainable.
+#
+
+set _off_bsun, 0x00
+set _off_snan, 0x04
+set _off_operr, 0x08
+set _off_ovfl, 0x0c
+set _off_unfl, 0x10
+set _off_dz, 0x14
+set _off_inex, 0x18
+set _off_fline, 0x1c
+set _off_fpu_dis, 0x20
+set _off_trap, 0x24
+set _off_trace, 0x28
+set _off_access, 0x2c
+set _off_done, 0x30
+
+set _off_imr, 0x40
+set _off_dmr, 0x44
+set _off_dmw, 0x48
+set _off_irw, 0x4c
+set _off_irl, 0x50
+set _off_drb, 0x54
+set _off_drw, 0x58
+set _off_drl, 0x5c
+set _off_dwb, 0x60
+set _off_dww, 0x64
+set _off_dwl, 0x68
+
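+# a minimal sketch (not part of the package) of the 0x80-byte callout area
+# an operating system is expected to place immediately below _060FPSP_TABLE;
+# every handler name below is hypothetical:
+#
+#	os_callouts:					# == _060FPSP_TABLE-0x80
+#		long	my_bsun_hdlr - os_callouts	# _off_bsun (0x00)
+#		long	my_snan_hdlr - os_callouts	# _off_snan (0x04)
+#		...					# one entry per _off_*
+#		long	my_dmem_write_l - os_callouts	# _off_dwl (0x68)
+#
+# each entry is a self-relative longword, which is what lets the stubs
+# further down resolve it with a single pc-relative pea.
+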
+_060FPSP_TABLE:
+
+###############################################################
+
+# Here's the table of ENTRY POINTS for those linking the package.
+ bra.l _fpsp_snan
+ short 0x0000
+ bra.l _fpsp_operr
+ short 0x0000
+ bra.l _fpsp_ovfl
+ short 0x0000
+ bra.l _fpsp_unfl
+ short 0x0000
+ bra.l _fpsp_dz
+ short 0x0000
+ bra.l _fpsp_inex
+ short 0x0000
+ bra.l _fpsp_fline
+ short 0x0000
+ bra.l _fpsp_unsupp
+ short 0x0000
+ bra.l _fpsp_effadd
+ short 0x0000
+
+ space 56
+
+###############################################################
+ global _fpsp_done
+_fpsp_done:
+ mov.l %d0,-(%sp)
+ mov.l (_060FPSP_TABLE-0x80+_off_done,%pc),%d0
+ pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
+ mov.l 0x4(%sp),%d0
+ rtd &0x4
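+# (how this and every following stub reaches the kernel: save d0, fetch the
+# self-relative callout offset pc-relative from _060FPSP_TABLE-0x80, pea the
+# resolved handler address, restore d0 from its saved copy, then rtd &0x4
+# "returns" into the handler while popping the saved-d0 slot -- a
+# register-transparent jump)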
+
+ global _real_ovfl
+_real_ovfl:
+ mov.l %d0,-(%sp)
+ mov.l (_060FPSP_TABLE-0x80+_off_ovfl,%pc),%d0
+ pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
+ mov.l 0x4(%sp),%d0
+ rtd &0x4
+
+ global _real_unfl
+_real_unfl:
+ mov.l %d0,-(%sp)
+ mov.l (_060FPSP_TABLE-0x80+_off_unfl,%pc),%d0
+ pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
+ mov.l 0x4(%sp),%d0
+ rtd &0x4
+
+ global _real_inex
+_real_inex:
+ mov.l %d0,-(%sp)
+ mov.l (_060FPSP_TABLE-0x80+_off_inex,%pc),%d0
+ pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
+ mov.l 0x4(%sp),%d0
+ rtd &0x4
+
+ global _real_bsun
+_real_bsun:
+ mov.l %d0,-(%sp)
+ mov.l (_060FPSP_TABLE-0x80+_off_bsun,%pc),%d0
+ pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
+ mov.l 0x4(%sp),%d0
+ rtd &0x4
+
+ global _real_operr
+_real_operr:
+ mov.l %d0,-(%sp)
+ mov.l (_060FPSP_TABLE-0x80+_off_operr,%pc),%d0
+ pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
+ mov.l 0x4(%sp),%d0
+ rtd &0x4
+
+ global _real_snan
+_real_snan:
+ mov.l %d0,-(%sp)
+ mov.l (_060FPSP_TABLE-0x80+_off_snan,%pc),%d0
+ pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
+ mov.l 0x4(%sp),%d0
+ rtd &0x4
+
+ global _real_dz
+_real_dz:
+ mov.l %d0,-(%sp)
+ mov.l (_060FPSP_TABLE-0x80+_off_dz,%pc),%d0
+ pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
+ mov.l 0x4(%sp),%d0
+ rtd &0x4
+
+ global _real_fline
+_real_fline:
+ mov.l %d0,-(%sp)
+ mov.l (_060FPSP_TABLE-0x80+_off_fline,%pc),%d0
+ pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
+ mov.l 0x4(%sp),%d0
+ rtd &0x4
+
+ global _real_fpu_disabled
+_real_fpu_disabled:
+ mov.l %d0,-(%sp)
+ mov.l (_060FPSP_TABLE-0x80+_off_fpu_dis,%pc),%d0
+ pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
+ mov.l 0x4(%sp),%d0
+ rtd &0x4
+
+ global _real_trap
+_real_trap:
+ mov.l %d0,-(%sp)
+ mov.l (_060FPSP_TABLE-0x80+_off_trap,%pc),%d0
+ pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
+ mov.l 0x4(%sp),%d0
+ rtd &0x4
+
+ global _real_trace
+_real_trace:
+ mov.l %d0,-(%sp)
+ mov.l (_060FPSP_TABLE-0x80+_off_trace,%pc),%d0
+ pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
+ mov.l 0x4(%sp),%d0
+ rtd &0x4
+
+ global _real_access
+_real_access:
+ mov.l %d0,-(%sp)
+ mov.l (_060FPSP_TABLE-0x80+_off_access,%pc),%d0
+ pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
+ mov.l 0x4(%sp),%d0
+ rtd &0x4
+
+#######################################
+
+ global _imem_read
+_imem_read:
+ mov.l %d0,-(%sp)
+ mov.l (_060FPSP_TABLE-0x80+_off_imr,%pc),%d0
+ pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
+ mov.l 0x4(%sp),%d0
+ rtd &0x4
+
+ global _dmem_read
+_dmem_read:
+ mov.l %d0,-(%sp)
+ mov.l (_060FPSP_TABLE-0x80+_off_dmr,%pc),%d0
+ pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
+ mov.l 0x4(%sp),%d0
+ rtd &0x4
+
+ global _dmem_write
+_dmem_write:
+ mov.l %d0,-(%sp)
+ mov.l (_060FPSP_TABLE-0x80+_off_dmw,%pc),%d0
+ pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
+ mov.l 0x4(%sp),%d0
+ rtd &0x4
+
+ global _imem_read_word
+_imem_read_word:
+ mov.l %d0,-(%sp)
+ mov.l (_060FPSP_TABLE-0x80+_off_irw,%pc),%d0
+ pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
+ mov.l 0x4(%sp),%d0
+ rtd &0x4
+
+ global _imem_read_long
+_imem_read_long:
+ mov.l %d0,-(%sp)
+ mov.l (_060FPSP_TABLE-0x80+_off_irl,%pc),%d0
+ pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
+ mov.l 0x4(%sp),%d0
+ rtd &0x4
+
+ global _dmem_read_byte
+_dmem_read_byte:
+ mov.l %d0,-(%sp)
+ mov.l (_060FPSP_TABLE-0x80+_off_drb,%pc),%d0
+ pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
+ mov.l 0x4(%sp),%d0
+ rtd &0x4
+
+ global _dmem_read_word
+_dmem_read_word:
+ mov.l %d0,-(%sp)
+ mov.l (_060FPSP_TABLE-0x80+_off_drw,%pc),%d0
+ pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
+ mov.l 0x4(%sp),%d0
+ rtd &0x4
+
+ global _dmem_read_long
+_dmem_read_long:
+ mov.l %d0,-(%sp)
+ mov.l (_060FPSP_TABLE-0x80+_off_drl,%pc),%d0
+ pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
+ mov.l 0x4(%sp),%d0
+ rtd &0x4
+
+ global _dmem_write_byte
+_dmem_write_byte:
+ mov.l %d0,-(%sp)
+ mov.l (_060FPSP_TABLE-0x80+_off_dwb,%pc),%d0
+ pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
+ mov.l 0x4(%sp),%d0
+ rtd &0x4
+
+ global _dmem_write_word
+_dmem_write_word:
+ mov.l %d0,-(%sp)
+ mov.l (_060FPSP_TABLE-0x80+_off_dww,%pc),%d0
+ pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
+ mov.l 0x4(%sp),%d0
+ rtd &0x4
+
+ global _dmem_write_long
+_dmem_write_long:
+ mov.l %d0,-(%sp)
+ mov.l (_060FPSP_TABLE-0x80+_off_dwl,%pc),%d0
+ pea.l (_060FPSP_TABLE-0x80,%pc,%d0)
+ mov.l 0x4(%sp),%d0
+ rtd &0x4
+
+#
+# This file contains a set of define statements for constants
+# in order to promote readability within the corecode itself.
+#
+
+set LOCAL_SIZE, 192 # stack frame size(bytes)
+set LV, -LOCAL_SIZE # stack offset
+
+set EXC_SR, 0x4 # stack status register
+set EXC_PC, 0x6 # stack pc
+set EXC_VOFF, 0xa # stacked vector offset
+set EXC_EA, 0xc # stacked <ea>
+
+set EXC_FP, 0x0 # frame pointer
+
+set EXC_AREGS, -68 # offset of all address regs
+set EXC_DREGS, -100 # offset of all data regs
+set EXC_FPREGS, -36 # offset of all fp regs
+
+set EXC_A7, EXC_AREGS+(7*4) # offset of saved a7
+set OLD_A7, EXC_AREGS+(6*4) # extra copy of saved a7
+set EXC_A6, EXC_AREGS+(6*4) # offset of saved a6
+set EXC_A5, EXC_AREGS+(5*4)
+set EXC_A4, EXC_AREGS+(4*4)
+set EXC_A3, EXC_AREGS+(3*4)
+set EXC_A2, EXC_AREGS+(2*4)
+set EXC_A1, EXC_AREGS+(1*4)
+set EXC_A0, EXC_AREGS+(0*4)
+set EXC_D7, EXC_DREGS+(7*4)
+set EXC_D6, EXC_DREGS+(6*4)
+set EXC_D5, EXC_DREGS+(5*4)
+set EXC_D4, EXC_DREGS+(4*4)
+set EXC_D3, EXC_DREGS+(3*4)
+set EXC_D2, EXC_DREGS+(2*4)
+set EXC_D1, EXC_DREGS+(1*4)
+set EXC_D0, EXC_DREGS+(0*4)
+
+set EXC_FP0, EXC_FPREGS+(0*12) # offset of saved fp0
+set EXC_FP1, EXC_FPREGS+(1*12) # offset of saved fp1
+set EXC_FP2, EXC_FPREGS+(2*12) # offset of saved fp2 (not used)
+
+set FP_SCR1, LV+80 # fp scratch 1
+set FP_SCR1_EX, FP_SCR1+0
+set FP_SCR1_SGN, FP_SCR1+2
+set FP_SCR1_HI, FP_SCR1+4
+set FP_SCR1_LO, FP_SCR1+8
+
+set FP_SCR0, LV+68 # fp scratch 0
+set FP_SCR0_EX, FP_SCR0+0
+set FP_SCR0_SGN, FP_SCR0+2
+set FP_SCR0_HI, FP_SCR0+4
+set FP_SCR0_LO, FP_SCR0+8
+
+set FP_DST, LV+56 # fp destination operand
+set FP_DST_EX, FP_DST+0
+set FP_DST_SGN, FP_DST+2
+set FP_DST_HI, FP_DST+4
+set FP_DST_LO, FP_DST+8
+
+set FP_SRC, LV+44 # fp source operand
+set FP_SRC_EX, FP_SRC+0
+set FP_SRC_SGN, FP_SRC+2
+set FP_SRC_HI, FP_SRC+4
+set FP_SRC_LO, FP_SRC+8
+
+set USER_FPIAR, LV+40 # FP instr address register
+
+set USER_FPSR, LV+36 # FP status register
+set FPSR_CC, USER_FPSR+0 # FPSR condition codes
+set FPSR_QBYTE, USER_FPSR+1 # FPSR quotient byte
+set FPSR_EXCEPT, USER_FPSR+2 # FPSR exception status byte
+set FPSR_AEXCEPT, USER_FPSR+3 # FPSR accrued exception byte
+
+set USER_FPCR, LV+32 # FP control register
+set FPCR_ENABLE, USER_FPCR+2 # FPCR exception enable
+set FPCR_MODE, USER_FPCR+3 # FPCR rounding mode control
+
+set L_SCR3, LV+28 # integer scratch 3
+set L_SCR2, LV+24 # integer scratch 2
+set L_SCR1, LV+20 # integer scratch 1
+
+set STORE_FLG, LV+19 # flag: operand store (ie. not fcmp/ftst)
+
+set EXC_TEMP2, LV+24 # temporary space
+set EXC_TEMP, LV+16 # temporary space
+
+set DTAG, LV+15 # destination operand type
+set STAG, LV+14 # source operand type
+
+set SPCOND_FLG, LV+10 # flag: special case (see below)
+
+set EXC_CC, LV+8 # saved condition codes
+set EXC_EXTWPTR, LV+4 # saved current PC (active)
+set EXC_EXTWORD, LV+2 # saved extension word
+set EXC_CMDREG, LV+2 # saved extension word
+set EXC_OPWORD, LV+0 # saved operation word
+
+################################
+
+# Helpful macros
+
+set FTEMP, 0 # offsets within an
+set FTEMP_EX, 0 # extended precision
+set FTEMP_SGN, 2 # value saved in memory.
+set FTEMP_HI, 4
+set FTEMP_LO, 8
+set FTEMP_GRS, 12
+
+set LOCAL, 0 # offsets within an
+set LOCAL_EX, 0 # extended precision
+set LOCAL_SGN, 2 # value saved in memory.
+set LOCAL_HI, 4
+set LOCAL_LO, 8
+set LOCAL_GRS, 12
+
+set DST, 0 # offsets within an
+set DST_EX, 0 # extended precision
+set DST_HI, 4 # value saved in memory.
+set DST_LO, 8
+
+set SRC, 0 # offsets within an
+set SRC_EX, 0 # extended precision
+set SRC_HI, 4 # value saved in memory.
+set SRC_LO, 8
+
+set SGL_LO, 0x3f81 # min sgl prec exponent
+set SGL_HI, 0x407e # max sgl prec exponent
+set DBL_LO, 0x3c01 # min dbl prec exponent
+set DBL_HI, 0x43fe # max dbl prec exponent
+set EXT_LO, 0x0 # min ext prec exponent
+set EXT_HI, 0x7ffe # max ext prec exponent
+
+set EXT_BIAS, 0x3fff # extended precision bias
+set SGL_BIAS, 0x007f # single precision bias
+set DBL_BIAS, 0x03ff # double precision bias
+
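+# worked example of how the biases get used: re-biasing a single precision
+# denorm into extended format lands on exponent EXT_BIAS-SGL_BIAS+1 =
+# 0x3fff-0x007f+1 = 0x3f81, precisely the constant fix_skewed_ops adds after
+# normalizing (and 0x3c01 = EXT_BIAS-DBL_BIAS+1 for double).
+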
+set NORM, 0x00 # operand type for STAG/DTAG
+set ZERO, 0x01 # operand type for STAG/DTAG
+set INF, 0x02 # operand type for STAG/DTAG
+set QNAN, 0x03 # operand type for STAG/DTAG
+set DENORM, 0x04 # operand type for STAG/DTAG
+set SNAN, 0x05 # operand type for STAG/DTAG
+set UNNORM, 0x06 # operand type for STAG/DTAG
+
+##################
+# FPSR/FPCR bits #
+##################
+set neg_bit, 0x3 # negative result
+set z_bit, 0x2 # zero result
+set inf_bit, 0x1 # infinite result
+set nan_bit, 0x0 # NAN result
+
+set q_sn_bit, 0x7 # sign bit of quotient byte
+
+set bsun_bit, 7 # branch on unordered
+set snan_bit, 6 # signalling NAN
+set operr_bit, 5 # operand error
+set ovfl_bit, 4 # overflow
+set unfl_bit, 3 # underflow
+set dz_bit, 2 # divide by zero
+set inex2_bit, 1 # inexact result 2
+set inex1_bit, 0 # inexact result 1
+
+set aiop_bit, 7 # accrued inexact operation bit
+set aovfl_bit, 6 # accrued overflow bit
+set aunfl_bit, 5 # accrued underflow bit
+set adz_bit, 4 # accrued dz bit
+set ainex_bit, 3 # accrued inexact bit
+
+#############################
+# FPSR individual bit masks #
+#############################
+set neg_mask, 0x08000000 # negative bit mask (lw)
+set inf_mask, 0x02000000 # infinity bit mask (lw)
+set z_mask, 0x04000000 # zero bit mask (lw)
+set nan_mask, 0x01000000 # nan bit mask (lw)
+
+set neg_bmask, 0x08 # negative bit mask (byte)
+set inf_bmask, 0x02 # infinity bit mask (byte)
+set z_bmask, 0x04 # zero bit mask (byte)
+set nan_bmask, 0x01 # nan bit mask (byte)
+
+set bsun_mask, 0x00008000 # bsun exception mask
+set snan_mask, 0x00004000 # snan exception mask
+set operr_mask, 0x00002000 # operr exception mask
+set ovfl_mask, 0x00001000 # overflow exception mask
+set unfl_mask, 0x00000800 # underflow exception mask
+set dz_mask, 0x00000400 # dz exception mask
+set inex2_mask, 0x00000200 # inex2 exception mask
+set inex1_mask, 0x00000100 # inex1 exception mask
+
+set aiop_mask, 0x00000080 # accrued illegal operation
+set aovfl_mask, 0x00000040 # accrued overflow
+set aunfl_mask, 0x00000020 # accrued underflow
+set adz_mask, 0x00000010 # accrued divide by zero
+set ainex_mask, 0x00000008 # accrued inexact
+
+######################################
+# FPSR combinations used in the FPSP #
+######################################
+set dzinf_mask, inf_mask+dz_mask+adz_mask
+set opnan_mask, nan_mask+operr_mask+aiop_mask
+set nzi_mask, 0x01ffffff # clears N, Z, and I
+set unfinx_mask, unfl_mask+inex2_mask+aunfl_mask+ainex_mask
+set unf2inx_mask, unfl_mask+inex2_mask+ainex_mask
+set ovfinx_mask, ovfl_mask+inex2_mask+aovfl_mask+ainex_mask
+set inx1a_mask, inex1_mask+ainex_mask
+set inx2a_mask, inex2_mask+ainex_mask
+set snaniop_mask, nan_mask+snan_mask+aiop_mask
+set snaniop2_mask, snan_mask+aiop_mask
+set naniop_mask, nan_mask+aiop_mask
+set neginf_mask, neg_mask+inf_mask
+set infaiop_mask, inf_mask+aiop_mask
+set negz_mask, neg_mask+z_mask
+set opaop_mask, operr_mask+aiop_mask
+set unfl_inx_mask, unfl_mask+aunfl_mask+ainex_mask
+set ovfl_inx_mask, ovfl_mask+aovfl_mask+ainex_mask
+
+#########
+# misc. #
+#########
+set rnd_stky_bit, 29 # stky bit pos in longword
+
+set sign_bit, 0x7 # sign bit
+set signan_bit, 0x6 # signalling nan bit
+
+set sgl_thresh, 0x3f81 # minimum sgl exponent
+set dbl_thresh, 0x3c01 # minimum dbl exponent
+
+set x_mode, 0x0 # extended precision
+set s_mode, 0x4 # single precision
+set d_mode, 0x8 # double precision
+
+set rn_mode, 0x0 # round-to-nearest
+set rz_mode, 0x1 # round-to-zero
+set rm_mode, 0x2 # round-to-minus-infinity
+set rp_mode, 0x3 # round-to-plus-infinity
+
+set mantissalen, 64 # length of mantissa in bits
+
+set BYTE, 1 # len(byte) == 1 byte
+set WORD, 2 # len(word) == 2 bytes
+set LONG, 4 # len(longword) == 4 bytes
+
+set BSUN_VEC, 0xc0 # bsun vector offset
+set INEX_VEC, 0xc4 # inexact vector offset
+set DZ_VEC, 0xc8 # dz vector offset
+set UNFL_VEC, 0xcc # unfl vector offset
+set OPERR_VEC, 0xd0 # operr vector offset
+set OVFL_VEC, 0xd4 # ovfl vector offset
+set SNAN_VEC, 0xd8 # snan vector offset
+
+###########################
+# SPecial CONDition FLaGs #
+###########################
+set ftrapcc_flg, 0x01 # flag bit: ftrapcc exception
+set fbsun_flg, 0x02 # flag bit: bsun exception
+set mia7_flg, 0x04 # flag bit: (a7)+ <ea>
+set mda7_flg, 0x08 # flag bit: -(a7) <ea>
+set fmovm_flg, 0x40 # flag bit: fmovm instruction
+set immed_flg, 0x80 # flag bit: &<data> <ea>
+
+set ftrapcc_bit, 0x0
+set fbsun_bit, 0x1
+set mia7_bit, 0x2
+set mda7_bit, 0x3
+set immed_bit, 0x7
+
+##################################
+# TRANSCENDENTAL "LAST-OP" FLAGS #
+##################################
+set FMUL_OP, 0x0 # fmul instr performed last
+set FDIV_OP, 0x1 # fdiv performed last
+set FADD_OP, 0x2 # fadd performed last
+set FMOV_OP, 0x3 # fmov performed last
+
+#############
+# CONSTANTS #
+#############
+T1: long 0x40C62D38,0xD3D64634 # 16381 LOG2 LEAD
+T2: long 0x3D6F90AE,0xB1E75CC7 # 16381 LOG2 TRAIL
+
+PI: long 0x40000000,0xC90FDAA2,0x2168C235,0x00000000
+PIBY2: long 0x3FFF0000,0xC90FDAA2,0x2168C235,0x00000000
+
+TWOBYPI:
+ long 0x3FE45F30,0x6DC9C883
+
+#########################################################################
+# XDEF **************************************************************** #
+# _fpsp_ovfl(): 060FPSP entry point for FP Overflow exception. #
+# #
+# This handler should be the first code executed upon taking the #
+# FP Overflow exception in an operating system. #
+# #
+# XREF **************************************************************** #
+# _imem_read_long() - read instruction longword #
+# fix_skewed_ops() - adjust src operand in fsave frame #
+# set_tag_x() - determine optype of src/dst operands #
+# store_fpreg() - store opclass 0 or 2 result to FP regfile #
+# unnorm_fix() - change UNNORM operands to NORM or ZERO #
+# load_fpn2() - load dst operand from FP regfile #
+# fout() - emulate an opclass 3 instruction #
+#	tbl_unsupp - addr of table of emulation routines for opclass 0,2	#
+# _fpsp_done() - "callout" for 060FPSP exit (all work done!) #
+# _real_ovfl() - "callout" for Overflow exception enabled code #
+# _real_inex() - "callout" for Inexact exception enabled code #
+# _real_trace() - "callout" for Trace exception code #
+# #
+# INPUT *************************************************************** #
+# - The system stack contains the FP Ovfl exception stack frame #
+# - The fsave frame contains the source operand #
+# #
+# OUTPUT ************************************************************** #
+# Overflow Exception enabled: #
+# - The system stack is unchanged #
+# - The fsave frame contains the adjusted src op for opclass 0,2 #
+# Overflow Exception disabled: #
+# - The system stack is unchanged #
+# - The "exception present" flag in the fsave frame is cleared #
+# #
+# ALGORITHM *********************************************************** #
+# On the 060, if an FP overflow is present as the result of any #
+# instruction, the 060 will take an overflow exception whether the #
+# exception is enabled or disabled in the FPCR. For the disabled case, #
+# this handler emulates the instruction to determine what the correct #
+# default result should be for the operation. This default result is #
+# then stored in either the FP regfile, data regfile, or memory. #
+# Finally, the handler exits through the "callout" _fpsp_done() #
+# denoting that no exceptional conditions exist within the machine. #
+# If the exception is enabled, then this handler must create the #
+# exceptional operand and place it in the fsave state frame, and store #
+# the default result (only if the instruction is opclass 3). For #
+# exceptions enabled, this handler must exit through the "callout" #
+# _real_ovfl() so that the operating system enabled overflow handler #
+# can handle this case. #
+# Two other conditions exist. First, if overflow was disabled #
+# but the inexact exception was enabled, this handler must exit #
+# through the "callout" _real_inex() regardless of whether the result #
+# was inexact. #
+# Also, in the case of an opclass three instruction where #
+# overflow was disabled and the trace exception was enabled, this #
+# handler must exit through the "callout" _real_trace(). #
+# #
+#########################################################################
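+# the flow above, restated as a sketch in the C-like comment style used
+# elsewhere in this file:
+#
+#	emulate the op and store the default result;
+#	if (ovfl enabled in FPCR)	exit through _real_ovfl();
+#	else if (inex2 enabled in FPCR)	exit through _real_inex();
+#	else				exit through _fpsp_done();
+#	/* opclass 3 w/ trace enabled exits through _real_trace() instead */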
+
+ global _fpsp_ovfl
+_fpsp_ovfl:
+
+#$# sub.l &24,%sp # make room for src/dst
+
+ link.w %a6,&-LOCAL_SIZE # init stack frame
+
+ fsave FP_SRC(%a6) # grab the "busy" frame
+
+ movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
+ fmovm.l %fpcr,%fpsr,%fpiar,USER_FPCR(%a6) # save ctrl regs
+ fmovm.x &0xc0,EXC_FPREGS(%a6) # save fp0-fp1 on stack
+
+# the FPIAR holds the "current PC" of the faulting instruction
+ mov.l USER_FPIAR(%a6),EXC_EXTWPTR(%a6)
+ mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
+ addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
+ bsr.l _imem_read_long # fetch the instruction words
+ mov.l %d0,EXC_OPWORD(%a6)
+
+##############################################################################
+
+ btst &0x5,EXC_CMDREG(%a6) # is instr an fmove out?
+ bne.w fovfl_out
+
+
+ lea FP_SRC(%a6),%a0 # pass: ptr to src op
+ bsr.l fix_skewed_ops # fix src op
+
+# since, I believe, only NORMs and DENORMs can come through here,
+# maybe we can avoid the subroutine call.
+ lea FP_SRC(%a6),%a0 # pass: ptr to src op
+ bsr.l set_tag_x # tag the operand type
+ mov.b %d0,STAG(%a6) # maybe NORM,DENORM
+
+# bit five of the fp extension word separates the monadic and dyadic operations
+# that can pass through fpsp_ovfl(). remember that fcmp, ftst, and fsincos
+# will never take this exception.
+ btst &0x5,1+EXC_CMDREG(%a6) # is operation monadic or dyadic?
+ beq.b fovfl_extract # monadic
+
+ bfextu EXC_CMDREG(%a6){&6:&3},%d0 # dyadic; load dst reg
+ bsr.l load_fpn2 # load dst into FP_DST
+
+ lea FP_DST(%a6),%a0 # pass: ptr to dst op
+ bsr.l set_tag_x # tag the operand type
+ cmpi.b %d0,&UNNORM # is operand an UNNORM?
+ bne.b fovfl_op2_done # no
+ bsr.l unnorm_fix # yes; convert to NORM,DENORM,or ZERO
+fovfl_op2_done:
+ mov.b %d0,DTAG(%a6) # save dst optype tag
+
+fovfl_extract:
+
+#$# mov.l FP_SRC_EX(%a6),TRAP_SRCOP_EX(%a6)
+#$# mov.l FP_SRC_HI(%a6),TRAP_SRCOP_HI(%a6)
+#$# mov.l FP_SRC_LO(%a6),TRAP_SRCOP_LO(%a6)
+#$# mov.l FP_DST_EX(%a6),TRAP_DSTOP_EX(%a6)
+#$# mov.l FP_DST_HI(%a6),TRAP_DSTOP_HI(%a6)
+#$# mov.l FP_DST_LO(%a6),TRAP_DSTOP_LO(%a6)
+
+ clr.l %d0
+ mov.b FPCR_MODE(%a6),%d0 # pass rnd prec/mode
+
+ mov.b 1+EXC_CMDREG(%a6),%d1
+ andi.w &0x007f,%d1 # extract extension
+
+	andi.l &0x00ff01ff,USER_FPSR(%a6) # zero all but accrued field
+
+ fmov.l &0x0,%fpcr # zero current control regs
+ fmov.l &0x0,%fpsr
+
+ lea FP_SRC(%a6),%a0
+ lea FP_DST(%a6),%a1
+
+# maybe we can make these entry points ONLY the OVFL entry points of each routine.
+ mov.l (tbl_unsupp.l,%pc,%d1.w*4),%d1 # fetch routine addr
+ jsr (tbl_unsupp.l,%pc,%d1.l*1)
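+# (dispatch note: tbl_unsupp holds self-relative longwords, so the mov.l
+# fetches "routine - tbl_unsupp" into d1 and the jsr then enters the routine
+# at tbl_unsupp+d1, keeping the whole dispatch pc-relative)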
+
+# the operation has been emulated. the result is in fp0.
+# the EXOP, if an exception occurred, is in fp1.
+# we must save the default result regardless of whether
+# traps are enabled or disabled.
+ bfextu EXC_CMDREG(%a6){&6:&3},%d0
+ bsr.l store_fpreg
+
+# the exceptional possibilities we have left ourselves with are ONLY overflow
+# and inexact. and, the inexact is such that overflow occurred and was disabled
+# but inexact was enabled.
+ btst &ovfl_bit,FPCR_ENABLE(%a6)
+ bne.b fovfl_ovfl_on
+
+ btst &inex2_bit,FPCR_ENABLE(%a6)
+ bne.b fovfl_inex_on
+
+ fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+
+ unlk %a6
+#$# add.l &24,%sp
+ bra.l _fpsp_done
+
+# overflow is enabled AND overflow, of course, occurred. so, we have the EXOP
+# in fp1. now, simply jump to _real_ovfl()!
+fovfl_ovfl_on:
+ fmovm.x &0x40,FP_SRC(%a6) # save EXOP (fp1) to stack
+
+ mov.w &0xe005,2+FP_SRC(%a6) # save exc status
+
+ fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+
+ frestore FP_SRC(%a6) # do this after fmovm,other f<op>s!
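+# (the ordering warning above matters because the frestored frame now holds
+# a pending exception status; any fp instruction issued after it, fmovm
+# included, would take that exception instead of the user's next f<op>)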
+
+ unlk %a6
+
+ bra.l _real_ovfl
+
+# overflow occurred but is disabled. meanwhile, inexact is enabled. therefore,
+# we must jump to real_inex().
+fovfl_inex_on:
+
+ fmovm.x &0x40,FP_SRC(%a6) # save EXOP (fp1) to stack
+
+ mov.b &0xc4,1+EXC_VOFF(%a6) # vector offset = 0xc4
+ mov.w &0xe001,2+FP_SRC(%a6) # save exc status
+
+ fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+
+ frestore FP_SRC(%a6) # do this after fmovm,other f<op>s!
+
+ unlk %a6
+
+ bra.l _real_inex
+
+########################################################################
+fovfl_out:
+
+
+#$# mov.l FP_SRC_EX(%a6),TRAP_SRCOP_EX(%a6)
+#$# mov.l FP_SRC_HI(%a6),TRAP_SRCOP_HI(%a6)
+#$# mov.l FP_SRC_LO(%a6),TRAP_SRCOP_LO(%a6)
+
+# the src operand is definitely a NORM(!), so tag it as such
+ mov.b &NORM,STAG(%a6) # set src optype tag
+
+ clr.l %d0
+ mov.b FPCR_MODE(%a6),%d0 # pass rnd prec/mode
+
+	and.l &0xffff00ff,USER_FPSR(%a6) # zero all but accrued field
+
+ fmov.l &0x0,%fpcr # zero current control regs
+ fmov.l &0x0,%fpsr
+
+ lea FP_SRC(%a6),%a0 # pass ptr to src operand
+
+ bsr.l fout
+
+ btst &ovfl_bit,FPCR_ENABLE(%a6)
+ bne.w fovfl_ovfl_on
+
+ btst &inex2_bit,FPCR_ENABLE(%a6)
+ bne.w fovfl_inex_on
+
+ fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+
+ unlk %a6
+#$# add.l &24,%sp
+
+ btst &0x7,(%sp) # is trace on?
+ beq.l _fpsp_done # no
+
+ fmov.l %fpiar,0x8(%sp) # "Current PC" is in FPIAR
+ mov.w &0x2024,0x6(%sp) # stk fmt = 0x2; voff = 0x024
+ bra.l _real_trace
+
+#########################################################################
+# XDEF **************************************************************** #
+# _fpsp_unfl(): 060FPSP entry point for FP Underflow exception. #
+# #
+# This handler should be the first code executed upon taking the #
+# FP Underflow exception in an operating system. #
+# #
+# XREF **************************************************************** #
+# _imem_read_long() - read instruction longword #
+# fix_skewed_ops() - adjust src operand in fsave frame #
+# set_tag_x() - determine optype of src/dst operands #
+# store_fpreg() - store opclass 0 or 2 result to FP regfile #
+# unnorm_fix() - change UNNORM operands to NORM or ZERO #
+# load_fpn2() - load dst operand from FP regfile #
+# fout() - emulate an opclass 3 instruction #
+#	tbl_unsupp - addr of table of emulation routines for opclass 0,2	#
+# _fpsp_done() - "callout" for 060FPSP exit (all work done!) #
+# _real_ovfl() - "callout" for Overflow exception enabled code #
+# _real_inex() - "callout" for Inexact exception enabled code #
+# _real_trace() - "callout" for Trace exception code #
+# #
+# INPUT *************************************************************** #
+# - The system stack contains the FP Unfl exception stack frame #
+# - The fsave frame contains the source operand #
+# #
+# OUTPUT ************************************************************** #
+# Underflow Exception enabled: #
+# - The system stack is unchanged #
+# - The fsave frame contains the adjusted src op for opclass 0,2 #
+# Underflow Exception disabled: #
+# - The system stack is unchanged #
+# - The "exception present" flag in the fsave frame is cleared #
+# #
+# ALGORITHM *********************************************************** #
+# On the 060, if an FP underflow is present as the result of any #
+# instruction, the 060 will take an underflow exception whether the #
+# exception is enabled or disabled in the FPCR. For the disabled case, #
+# this handler emulates the instruction to determine what the correct #
+# default result should be for the operation. This default result is #
+# then stored in either the FP regfile, data regfile, or memory. #
+# Finally, the handler exits through the "callout" _fpsp_done() #
+# denoting that no exceptional conditions exist within the machine. #
+# If the exception is enabled, then this handler must create the #
+# exceptional operand and place it in the fsave state frame, and store #
+# the default result (only if the instruction is opclass 3). For #
+# exceptions enabled, this handler must exit through the "callout" #
+# _real_unfl() so that the operating system enabled underflow handler #
+# can handle this case. #
+# Two other conditions exist. First, if underflow was disabled #
+# but the inexact exception was enabled and the result was inexact, #
+# this handler must exit through the "callout" _real_inex(). #
+# Also, in the case of an opclass three instruction where #
+# underflow was disabled and the trace exception was enabled, this #
+# handler must exit through the "callout" _real_trace(). #
+# #
+#########################################################################
+
+ global _fpsp_unfl
+_fpsp_unfl:
+
+#$# sub.l &24,%sp # make room for src/dst
+
+ link.w %a6,&-LOCAL_SIZE # init stack frame
+
+ fsave FP_SRC(%a6) # grab the "busy" frame
+
+ movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
+ fmovm.l %fpcr,%fpsr,%fpiar,USER_FPCR(%a6) # save ctrl regs
+ fmovm.x &0xc0,EXC_FPREGS(%a6) # save fp0-fp1 on stack
+
+# the FPIAR holds the "current PC" of the faulting instruction
+ mov.l USER_FPIAR(%a6),EXC_EXTWPTR(%a6)
+ mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
+ addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
+ bsr.l _imem_read_long # fetch the instruction words
+ mov.l %d0,EXC_OPWORD(%a6)
+
+##############################################################################
+
+ btst &0x5,EXC_CMDREG(%a6) # is instr an fmove out?
+ bne.w funfl_out
+
+
+ lea FP_SRC(%a6),%a0 # pass: ptr to src op
+ bsr.l fix_skewed_ops # fix src op
+
+ lea FP_SRC(%a6),%a0 # pass: ptr to src op
+ bsr.l set_tag_x # tag the operand type
+ mov.b %d0,STAG(%a6) # maybe NORM,DENORM
+
+# bit five of the fp ext word separates the monadic and dyadic operations
+# that can pass through fpsp_unfl(). remember that fcmp and ftst
+# will never take this exception.
+ btst &0x5,1+EXC_CMDREG(%a6) # is op monadic or dyadic?
+ beq.b funfl_extract # monadic
+
+# now, what's left that's not dyadic is fsincos. we can distinguish it
+# from all dyadics by the 0110xxx bit pattern
+ btst &0x4,1+EXC_CMDREG(%a6) # is op an fsincos?
+ bne.b funfl_extract # yes
+
+ bfextu EXC_CMDREG(%a6){&6:&3},%d0 # dyadic; load dst reg
+ bsr.l load_fpn2 # load dst into FP_DST
+
+ lea FP_DST(%a6),%a0 # pass: ptr to dst op
+ bsr.l set_tag_x # tag the operand type
+ cmpi.b %d0,&UNNORM # is operand an UNNORM?
+ bne.b funfl_op2_done # no
+ bsr.l unnorm_fix # yes; convert to NORM,DENORM,or ZERO
+funfl_op2_done:
+ mov.b %d0,DTAG(%a6) # save dst optype tag
+
+funfl_extract:
+
+#$# mov.l FP_SRC_EX(%a6),TRAP_SRCOP_EX(%a6)
+#$# mov.l FP_SRC_HI(%a6),TRAP_SRCOP_HI(%a6)
+#$# mov.l FP_SRC_LO(%a6),TRAP_SRCOP_LO(%a6)
+#$# mov.l FP_DST_EX(%a6),TRAP_DSTOP_EX(%a6)
+#$# mov.l FP_DST_HI(%a6),TRAP_DSTOP_HI(%a6)
+#$# mov.l FP_DST_LO(%a6),TRAP_DSTOP_LO(%a6)
+
+ clr.l %d0
+ mov.b FPCR_MODE(%a6),%d0 # pass rnd prec/mode
+
+ mov.b 1+EXC_CMDREG(%a6),%d1
+ andi.w &0x007f,%d1 # extract extension
+
+	andi.l &0x00ff01ff,USER_FPSR(%a6) # zero all but accrued field
+
+ fmov.l &0x0,%fpcr # zero current control regs
+ fmov.l &0x0,%fpsr
+
+ lea FP_SRC(%a6),%a0
+ lea FP_DST(%a6),%a1
+
+# maybe we can make these entry points ONLY the UNFL entry points of each routine.
+ mov.l (tbl_unsupp.l,%pc,%d1.w*4),%d1 # fetch routine addr
+ jsr (tbl_unsupp.l,%pc,%d1.l*1)
+
+ bfextu EXC_CMDREG(%a6){&6:&3},%d0
+ bsr.l store_fpreg
+
+# The `060 FPU multiplier hardware is such that if the result of a
+# multiply operation is the smallest possible normalized number
+# (0x00000000_80000000_00000000), then the machine will take an
+# underflow exception. Since this is incorrect, we need to check
+# if our emulation, after re-doing the operation, decided that
+# no underflow was called for. We do these checks only in
+# funfl_{unfl,inex}_on() because w/ both exceptions disabled, this
+# special case will simply exit gracefully with the correct result.
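+# (for reference, 0x00000000_80000000_00000000 is 2^-16383 -- exponent
+# EXT_LO with the j-bit set -- i.e. the smallest extended precision NORM,
+# which is why no underflow is actually called for)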
+
+# the exceptional possibilities we have left ourselves with are ONLY underflow
+# and inexact. and, the inexact is such that underflow occurred and was disabled
+# but inexact was enabled.
+ btst &unfl_bit,FPCR_ENABLE(%a6)
+ bne.b funfl_unfl_on
+
+funfl_chkinex:
+ btst &inex2_bit,FPCR_ENABLE(%a6)
+ bne.b funfl_inex_on
+
+funfl_exit:
+ fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+
+ unlk %a6
+#$# add.l &24,%sp
+ bra.l _fpsp_done
+
+# underflow is enabled AND underflow, of course, occurred. so, we have the EXOP
+# in fp1 (don't forget to save fp0). what to do now?
+# well, we simply have to go to _real_unfl()!
+funfl_unfl_on:
+
+# The `060 FPU multiplier hardware is such that if the result of a
+# multiply operation is the smallest possible normalized number
+# (0x00000000_80000000_00000000), then the machine will take an
+# underflow exception. Since this is incorrect, we check here to see
+# if our emulation, after re-doing the operation, decided that
+# no underflow was called for.
+ btst &unfl_bit,FPSR_EXCEPT(%a6)
+ beq.w funfl_chkinex
+
+funfl_unfl_on2:
+ fmovm.x &0x40,FP_SRC(%a6) # save EXOP (fp1) to stack
+
+ mov.w &0xe003,2+FP_SRC(%a6) # save exc status
+
+ fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+
+ frestore FP_SRC(%a6) # do this after fmovm,other f<op>s!
+
+ unlk %a6
+
+ bra.l _real_unfl
+
+# underflow occurred but is disabled. meanwhile, inexact is enabled. therefore,
+# we must jump to real_inex().
+funfl_inex_on:
+
+# The `060 FPU multiplier hardware is such that if the result of a
+# multiply operation is the smallest possible normalized number
+# (0x00000000_80000000_00000000), then the machine will take an
+# underflow exception.
+# But, whether bogus or not, if inexact is enabled AND it occurred,
+# then we have to branch to real_inex.
+
+ btst &inex2_bit,FPSR_EXCEPT(%a6)
+ beq.w funfl_exit
+
+funfl_inex_on2:
+
+ fmovm.x &0x40,FP_SRC(%a6) # save EXOP to stack
+
+ mov.b &0xc4,1+EXC_VOFF(%a6) # vector offset = 0xc4
+ mov.w &0xe001,2+FP_SRC(%a6) # save exc status
+
+ fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+
+ frestore FP_SRC(%a6) # do this after fmovm,other f<op>s!
+
+ unlk %a6
+
+ bra.l _real_inex
+
+#######################################################################
+funfl_out:
+
+
+#$# mov.l FP_SRC_EX(%a6),TRAP_SRCOP_EX(%a6)
+#$# mov.l FP_SRC_HI(%a6),TRAP_SRCOP_HI(%a6)
+#$# mov.l FP_SRC_LO(%a6),TRAP_SRCOP_LO(%a6)
+
+# the src operand is definitely a NORM(!), so tag it as such
+ mov.b &NORM,STAG(%a6) # set src optype tag
+
+ clr.l %d0
+ mov.b FPCR_MODE(%a6),%d0 # pass rnd prec/mode
+
+	and.l &0xffff00ff,USER_FPSR(%a6) # zero all but accrued field
+
+ fmov.l &0x0,%fpcr # zero current control regs
+ fmov.l &0x0,%fpsr
+
+ lea FP_SRC(%a6),%a0 # pass ptr to src operand
+
+ bsr.l fout
+
+ btst &unfl_bit,FPCR_ENABLE(%a6)
+ bne.w funfl_unfl_on2
+
+ btst &inex2_bit,FPCR_ENABLE(%a6)
+ bne.w funfl_inex_on2
+
+ fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+
+ unlk %a6
+#$# add.l &24,%sp
+
+ btst &0x7,(%sp) # is trace on?
+ beq.l _fpsp_done # no
+
+ fmov.l %fpiar,0x8(%sp) # "Current PC" is in FPIAR
+ mov.w &0x2024,0x6(%sp) # stk fmt = 0x2; voff = 0x024
+ bra.l _real_trace
+
+#########################################################################
+# XDEF **************************************************************** #
+# _fpsp_unsupp(): 060FPSP entry point for FP "Unimplemented #
+# Data Type" exception. #
+# #
+# This handler should be the first code executed upon taking the #
+# FP Unimplemented Data Type exception in an operating system. #
+# #
+# XREF **************************************************************** #
+# _imem_read_{word,long}() - read instruction word/longword #
+# fix_skewed_ops() - adjust src operand in fsave frame #
+# set_tag_x() - determine optype of src/dst operands #
+# store_fpreg() - store opclass 0 or 2 result to FP regfile #
+# unnorm_fix() - change UNNORM operands to NORM or ZERO #
+# load_fpn2() - load dst operand from FP regfile #
+# load_fpn1() - load src operand from FP regfile #
+# fout() - emulate an opclass 3 instruction #
+# tbl_unsupp - add of table of emulation routines for opclass 0,2 #
+# _real_inex() - "callout" to operating system inexact handler #
+# _fpsp_done() - "callout" for exit; work all done #
+# _real_trace() - "callout" for Trace enabled exception #
+# funimp_skew() - adjust fsave src ops to "incorrect" value #
+# _real_snan() - "callout" for SNAN exception #
+# _real_operr() - "callout" for OPERR exception #
+# _real_ovfl() - "callout" for OVFL exception #
+# _real_unfl() - "callout" for UNFL exception #
+# get_packed() - fetch packed operand from memory #
+# #
+# INPUT *************************************************************** #
+# - The system stack contains the "Unimp Data Type" stk frame #
+# - The fsave frame contains the ssrc op (for UNNORM/DENORM) #
+# #
+# OUTPUT ************************************************************** #
+# If Inexact exception (opclass 3): #
+# - The system stack is changed to an Inexact exception stk frame #
+# If SNAN exception (opclass 3): #
+# - The system stack is changed to an SNAN exception stk frame #
+# If OPERR exception (opclass 3): #
+# - The system stack is changed to an OPERR exception stk frame #
+# If OVFL exception (opclass 3): #
+# - The system stack is changed to an OVFL exception stk frame #
+# If UNFL exception (opclass 3): #
+# - The system stack is changed to an UNFL exception stack frame #
+# If Trace exception enabled: #
+# - The system stack is changed to a Trace exception stack frame #
+# Else: (normal case) #
+# - Correct result has been stored as appropriate #
+# #
+# ALGORITHM *********************************************************** #
+# Two main instruction types can enter here: (1) DENORM or UNNORM #
+# unimplemented data types. These can be either opclass 0,2 or 3 #
+# instructions, and (2) PACKED unimplemented data format instructions #
+# also of opclasses 0,2, or 3. #
+# For UNNORM/DENORM opclass 0 and 2, the handler fetches the src #
+# operand from the fsave state frame and the dst operand (if dyadic) #
+# from the FP register file. The instruction is then emulated by #
+# choosing an emulation routine from a table of routines indexed by #
+# instruction type. Once the instruction has been emulated and result #
+# saved, then we check to see if any enabled exceptions resulted from #
+# instruction emulation. If none, then we exit through the "callout" #
+# _fpsp_done(). If there is an enabled FP exception, then we insert #
+# this exception into the FPU in the fsave state frame and then exit #
+# through _fpsp_done(). #
+# PACKED opclass 0 and 2 is similar in how the instruction is #
+# emulated and exceptions handled. The differences occur in how the #
+# handler loads the packed op (by calling get_packed() routine) and #
+# by the fact that a Trace exception could be pending for PACKED ops. #
+# If a Trace exception is pending, then the current exception stack #
+# frame is changed to a Trace exception stack frame and an exit is #
+# made through _real_trace(). #
+# For UNNORM/DENORM opclass 3, the actual move out to memory is #
+# performed by calling the routine fout(). If no exception should occur #
+# as the result of emulation, then an exit either occurs through #
+# _fpsp_done() or through _real_trace() if a Trace exception is pending #
+# (a Trace stack frame must be created here, too). If an FP exception #
+# should occur, then we must create an exception stack frame of that #
+# type and jump to either _real_snan(), _real_operr(), _real_inex(), #
+# _real_unfl(), or _real_ovfl() as appropriate. PACKED opclass 3 #
+# emulation is performed in a similar manner. #
+# #
+#########################################################################
+
+#
+# (1) DENORM and UNNORM (unimplemented) data types:
+#
+# post-instruction
+# *****************
+# * EA *
+# pre-instruction * *
+# ***************** *****************
+# * 0x0 * 0x0dc * * 0x3 * 0x0dc *
+# ***************** *****************
+# * Next * * Next *
+# * PC * * PC *
+# ***************** *****************
+# * SR * * SR *
+# ***************** *****************
+#
+# (2) PACKED format (unsupported) opclasses two and three:
+# *****************
+# * EA *
+# * *
+# *****************
+# * 0x2 * 0x0dc *
+# *****************
+# * Next *
+# * PC *
+# *****************
+# * SR *
+# *****************
+#
+ global _fpsp_unsupp
+_fpsp_unsupp:
+
+ link.w %a6,&-LOCAL_SIZE # init stack frame
+
+ fsave FP_SRC(%a6) # save fp state
+
+ movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
+ fmovm.l %fpcr,%fpsr,%fpiar,USER_FPCR(%a6) # save ctrl regs
+ fmovm.x &0xc0,EXC_FPREGS(%a6) # save fp0-fp1 on stack
+
+ btst &0x5,EXC_SR(%a6) # user or supervisor mode?
+ bne.b fu_s
+fu_u:
+ mov.l %usp,%a0 # fetch user stack pointer
+ mov.l %a0,EXC_A7(%a6) # save on stack
+ bra.b fu_cont
+# if the exception is an opclass zero or two unimplemented data type
+# exception, then the a7' calculated here is wrong since it doesn't
+# stack an ea. however, we don't need an a7' for this case anyways.
+fu_s:
+ lea 0x4+EXC_EA(%a6),%a0 # load old a7'
+ mov.l %a0,EXC_A7(%a6) # save on stack
+
+fu_cont:
+
+# the FPIAR holds the "current PC" of the faulting instruction
+# the FPIAR should be set correctly for ALL exceptions passing through
+# this point.
+ mov.l USER_FPIAR(%a6),EXC_EXTWPTR(%a6)
+ mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
+ addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
+ bsr.l _imem_read_long # fetch the instruction words
+ mov.l %d0,EXC_OPWORD(%a6) # store OPWORD and EXTWORD
+
+############################
+
+ clr.b SPCOND_FLG(%a6) # clear special condition flag
+
+# Separate opclass three (fpn-to-mem) ops since they have a different
+# stack frame and protocol.
+ btst &0x5,EXC_CMDREG(%a6) # is it an fmove out?
+ bne.w fu_out # yes
+
+# Separate packed opclass two instructions.
+ bfextu EXC_CMDREG(%a6){&0:&6},%d0
+ cmpi.b %d0,&0x13
+ beq.w fu_in_pack
+
+
+# I'm not sure at this point what FPSR bits are valid for this instruction.
+# so, since the emulation routines re-create them anyways, zero exception field
+ andi.l &0x00ff00ff,USER_FPSR(%a6) # zero exception field
+
+ fmov.l &0x0,%fpcr # zero current control regs
+ fmov.l &0x0,%fpsr
+
+# Opclass two w/ memory-to-fpn operation will have an incorrect extended
+# precision format if the src format was single or double and the
+# source data type was an INF, NAN, DENORM, or UNNORM
+ lea FP_SRC(%a6),%a0 # pass ptr to input
+ bsr.l fix_skewed_ops
+
+# we don't know whether the src operand or the dst operand (or both) is the
+# UNNORM or DENORM. call the function that tags the operand type. if the
+# input is an UNNORM, then convert it to a NORM, DENORM, or ZERO.
+ lea FP_SRC(%a6),%a0 # pass: ptr to src op
+ bsr.l set_tag_x # tag the operand type
+ cmpi.b %d0,&UNNORM # is operand an UNNORM?
+ bne.b fu_op2 # no
+ bsr.l unnorm_fix # yes; convert to NORM,DENORM,or ZERO
+
+fu_op2:
+ mov.b %d0,STAG(%a6) # save src optype tag
+
+ bfextu EXC_CMDREG(%a6){&6:&3},%d0 # dyadic; load dst reg
+
+# bit five of the fp extension word separates the monadic and dyadic operations
+# at this point
+ btst &0x5,1+EXC_CMDREG(%a6) # is operation monadic or dyadic?
+ beq.b fu_extract # monadic
+ cmpi.b 1+EXC_CMDREG(%a6),&0x3a # is operation an ftst?
+ beq.b fu_extract # yes, so it's monadic, too
+
+ bsr.l load_fpn2 # load dst into FP_DST
+
+ lea FP_DST(%a6),%a0 # pass: ptr to dst op
+ bsr.l set_tag_x # tag the operand type
+ cmpi.b %d0,&UNNORM # is operand an UNNORM?
+ bne.b fu_op2_done # no
+ bsr.l unnorm_fix # yes; convert to NORM,DENORM,or ZERO
+fu_op2_done:
+ mov.b %d0,DTAG(%a6) # save dst optype tag
+
+fu_extract:
+ clr.l %d0
+ mov.b FPCR_MODE(%a6),%d0 # fetch rnd mode/prec
+
+ bfextu 1+EXC_CMDREG(%a6){&1:&7},%d1 # extract extension
+
+ lea FP_SRC(%a6),%a0
+ lea FP_DST(%a6),%a1
+
+ mov.l (tbl_unsupp.l,%pc,%d1.l*4),%d1 # fetch routine addr
+ jsr (tbl_unsupp.l,%pc,%d1.l*1)
+
+#
+# Exceptions in order of precedence:
+# BSUN : none
+# SNAN : all dyadic ops
+# OPERR : fsqrt(-NORM)
+# OVFL : all except ftst,fcmp
+# UNFL : all except ftst,fcmp
+# DZ : fdiv
+# INEX2 : all except ftst,fcmp
+# INEX1 : none (packed doesn't go through here)
+#
+
+# we determine the highest priority exception(if any) set by the
+# emulation routine that has also been enabled by the user.
+ mov.b FPCR_ENABLE(%a6),%d0 # fetch exceptions set
+ bne.b fu_in_ena # some are enabled
+
+fu_in_cont:
+# fcmp and ftst do not store any result.
+ mov.b 1+EXC_CMDREG(%a6),%d0 # fetch extension
+ andi.b &0x38,%d0 # extract bits 3-5
+ cmpi.b %d0,&0x38 # is instr fcmp or ftst?
+ beq.b fu_in_exit # yes
+
+ bfextu EXC_CMDREG(%a6){&6:&3},%d0 # dyadic; load dst reg
+ bsr.l store_fpreg # store the result
+
+fu_in_exit:
+
+ fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0/fp1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+
+ unlk %a6
+
+ bra.l _fpsp_done
+
+fu_in_ena:
+ and.b FPSR_EXCEPT(%a6),%d0 # keep only ones enabled
+ bfffo %d0{&24:&8},%d0 # find highest priority exception
+ bne.b fu_in_exc # there is at least one set
+
+#
+# No exceptions occurred that were also enabled. Now:
+#
+# if (OVFL && ovfl_disabled && inexact_enabled) {
+# branch to _real_inex() (even if the result was exact!);
+# } else {
+# save the result in the proper fp reg (unless the op is fcmp or ftst);
+# return;
+# }
+#
+ btst &ovfl_bit,FPSR_EXCEPT(%a6) # was overflow set?
+ beq.b fu_in_cont # no
+
+fu_in_ovflchk:
+ btst &inex2_bit,FPCR_ENABLE(%a6) # was inexact enabled?
+ beq.b fu_in_cont # no
+ bra.w fu_in_exc_ovfl # go insert overflow frame
+
+#
+# An exception occurred and that exception was enabled:
+#
+# shift enabled exception field into lo byte of d0;
+# if (((INEX2 || INEX1) && inex_enabled && OVFL && ovfl_disabled) ||
+# ((INEX2 || INEX1) && inex_enabled && UNFL && unfl_disabled)) {
+# /*
+# * this is the case where we must call _real_inex() now or else
+# * there will be no other way to pass it the exceptional operand
+# */
+# call _real_inex();
+# } else {
+# restore exc state (SNAN||OPERR||OVFL||UNFL||DZ||INEX) into the FPU;
+# }
+#
+fu_in_exc:
+	subi.l &24,%d0 # fix offset to be 0-7
+ cmpi.b %d0,&0x6 # is exception INEX? (6)
+ bne.b fu_in_exc_exit # no
+
+# the enabled exception was inexact
+ btst &unfl_bit,FPSR_EXCEPT(%a6) # did disabled underflow occur?
+ bne.w fu_in_exc_unfl # yes
+ btst &ovfl_bit,FPSR_EXCEPT(%a6) # did disabled overflow occur?
+ bne.w fu_in_exc_ovfl # yes
+
+# here, we insert the correct fsave status value into the fsave frame for the
+# corresponding exception. the operand in the fsave frame should be the original
+# src operand.
+fu_in_exc_exit:
+ mov.l %d0,-(%sp) # save d0
+ bsr.l funimp_skew # skew sgl or dbl inputs
+ mov.l (%sp)+,%d0 # restore d0
+
+ mov.w (tbl_except.b,%pc,%d0.w*2),2+FP_SRC(%a6) # create exc status
+
+ fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0/fp1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+
+ frestore FP_SRC(%a6) # restore src op
+
+ unlk %a6
+
+ bra.l _fpsp_done
+
+tbl_except:
+ short 0xe000,0xe006,0xe004,0xe005
+ short 0xe003,0xe002,0xe001,0xe001
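+# (table order follows the bfffo priority index 0-7: bsun, snan, operr,
+# ovfl, unfl, dz, inex2, inex1; the values are the same fsave status words
+# planted individually elsewhere, e.g. 0xe005 = ovfl, 0xe003 = unfl,
+# 0xe001 = inex)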
+
+fu_in_exc_unfl:
+ mov.w &0x4,%d0
+ bra.b fu_in_exc_exit
+fu_in_exc_ovfl:
+ mov.w &0x03,%d0
+ bra.b fu_in_exc_exit
+
+# If the input operand to this operation was opclass two and a single
+# or double precision denorm, inf, or nan, the operand needs to be
+# "corrected" in order to have the proper equivalent extended precision
+# number.
+ global fix_skewed_ops
+fix_skewed_ops:
+ bfextu EXC_CMDREG(%a6){&0:&6},%d0 # extract opclass,src fmt
+ cmpi.b %d0,&0x11 # is class = 2 & fmt = sgl?
+ beq.b fso_sgl # yes
+ cmpi.b %d0,&0x15 # is class = 2 & fmt = dbl?
+ beq.b fso_dbl # yes
+ rts # no
+
+fso_sgl:
+ mov.w LOCAL_EX(%a0),%d0 # fetch src exponent
+ andi.w &0x7fff,%d0 # strip sign
+ cmpi.w %d0,&0x3f80 # is |exp| == $3f80?
+ beq.b fso_sgl_dnrm_zero # yes
+ cmpi.w %d0,&0x407f # no; is |exp| == $407f?
+ beq.b fso_infnan # yes
+ rts # no
+
+fso_sgl_dnrm_zero:
+ andi.l &0x7fffffff,LOCAL_HI(%a0) # clear j-bit
+ beq.b fso_zero # it's a skewed zero
+fso_sgl_dnrm:
+# here, we count on norm not to alter a0...
+ bsr.l norm # normalize mantissa
+ neg.w %d0 # -shft amt
+ addi.w &0x3f81,%d0 # adjust new exponent
+ andi.w &0x8000,LOCAL_EX(%a0) # clear old exponent
+ or.w %d0,LOCAL_EX(%a0) # insert new exponent
+ rts
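+# (example: a skewed single denorm arrives with exponent field 0x3f80; if
+# norm shifts the mantissa up by, say, 5 bits it hands back d0 = 5, so the
+# new exponent becomes 0x3f81 - 5 = 0x3f7c, the equivalent extended
+# precision exponent for that operand)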
+
+fso_zero:
+ andi.w &0x8000,LOCAL_EX(%a0) # clear bogus exponent
+ rts
+
+fso_infnan:
+ andi.b &0x7f,LOCAL_HI(%a0) # clear j-bit
+ ori.w &0x7fff,LOCAL_EX(%a0) # make exponent = $7fff
+ rts
+
+fso_dbl:
+ mov.w LOCAL_EX(%a0),%d0 # fetch src exponent
+ andi.w &0x7fff,%d0 # strip sign
+ cmpi.w %d0,&0x3c00 # is |exp| == $3c00?
+ beq.b fso_dbl_dnrm_zero # yes
+ cmpi.w %d0,&0x43ff # no; is |exp| == $43ff?
+ beq.b fso_infnan # yes
+ rts # no
+
+fso_dbl_dnrm_zero:
+ andi.l &0x7fffffff,LOCAL_HI(%a0) # clear j-bit
+ bne.b fso_dbl_dnrm # it's a skewed denorm
+ tst.l LOCAL_LO(%a0) # is it a zero?
+ beq.b fso_zero # yes
+fso_dbl_dnrm:
+# here, we count on norm not to alter a0...
+ bsr.l norm # normalize mantissa
+ neg.w %d0 # -shft amt
+ addi.w &0x3c01,%d0 # adjust new exponent
+ andi.w &0x8000,LOCAL_EX(%a0) # clear old exponent
+ or.w %d0,LOCAL_EX(%a0) # insert new exponent
+ rts
+
+#################################################################
+
+# fmove out took an unimplemented data type exception.
+# the src operand is in FP_SRC. Call _fout() to write out the result and
+# to determine which exceptions, if any, to take.
+fu_out:
+
+# Separate packed move outs from the UNNORM and DENORM move outs.
+ bfextu EXC_CMDREG(%a6){&3:&3},%d0
+ cmpi.b %d0,&0x3
+ beq.w fu_out_pack
+ cmpi.b %d0,&0x7
+ beq.w fu_out_pack
+
+
+# I'm not sure at this point what FPSR bits are valid for this instruction.
+# so, since the emulation routines re-create them anyways, zero exception field.
+# fmove out doesn't affect ccodes.
+ and.l &0xffff00ff,USER_FPSR(%a6) # zero exception field
+
+ fmov.l &0x0,%fpcr # zero current control regs
+ fmov.l &0x0,%fpsr
+
+# the src can ONLY be a DENORM or an UNNORM! so, don't make any big subroutine
+# call here. just figure out what it is...
+ mov.w FP_SRC_EX(%a6),%d0 # get exponent
+ andi.w &0x7fff,%d0 # strip sign
+ beq.b fu_out_denorm # it's a DENORM
+
+ lea FP_SRC(%a6),%a0
+ bsr.l unnorm_fix # yes; fix it
+
+ mov.b %d0,STAG(%a6)
+
+ bra.b fu_out_cont
+fu_out_denorm:
+ mov.b &DENORM,STAG(%a6)
+fu_out_cont:
+
+ clr.l %d0
+ mov.b FPCR_MODE(%a6),%d0 # fetch rnd mode/prec
+
+ lea FP_SRC(%a6),%a0 # pass ptr to src operand
+
+ mov.l (%a6),EXC_A6(%a6) # in case a6 changes
+ bsr.l fout # call fmove out routine
+
+# Exceptions in order of precedence:
+# BSUN : none
+# SNAN : none
+# OPERR : fmove.{b,w,l} out of large UNNORM
+# OVFL : fmove.{s,d}
+# UNFL : fmove.{s,d,x}
+# DZ : none
+# INEX2 : all
+# INEX1 : none (packed doesn't travel through here)
+
+# determine the highest priority exception(if any) set by the
+# emulation routine that has also been enabled by the user.
+ mov.b FPCR_ENABLE(%a6),%d0 # fetch exceptions enabled
+ bne.w fu_out_ena # some are enabled
+
+fu_out_done:
+
+ mov.l EXC_A6(%a6),(%a6) # in case a6 changed
+
+# on extended precision opclass three instructions using pre-decrement or
+# post-increment addressing mode, the address register is not updated. if the
+# address register was the stack pointer used from user mode, then let's update
+# it here. if it was used from supervisor mode, then we have to handle this
+# as a special case.
+ btst &0x5,EXC_SR(%a6)
+ bne.b fu_out_done_s
+
+ mov.l EXC_A7(%a6),%a0 # restore a7
+ mov.l %a0,%usp
+
+fu_out_done_cont:
+ fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0/fp1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+
+ unlk %a6
+
+ btst &0x7,(%sp) # is trace on?
+ bne.b fu_out_trace # yes
+
+ bra.l _fpsp_done
+
+# is the ea mode pre-decrement of the stack pointer from supervisor mode?
+# ("fmov.x fpm,-(a7)") if so,
+fu_out_done_s:
+ cmpi.b SPCOND_FLG(%a6),&mda7_flg
+ bne.b fu_out_done_cont
+
+# the extended precision result is still in fp0. but, we need to save it
+# somewhere on the stack until we can copy it to its final resting place.
+# here, we're counting on the top of the stack to be the old place-holders
+# for fp0/fp1 which have already been restored. that way, we can write
+# over those destinations with the shifted stack frame.
+ fmovm.x &0x80,FP_SRC(%a6) # put answer on stack
+
+ fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0/fp1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+
+ mov.l (%a6),%a6 # restore frame pointer
+
+ mov.l LOCAL_SIZE+EXC_SR(%sp),LOCAL_SIZE+EXC_SR-0xc(%sp)
+ mov.l LOCAL_SIZE+2+EXC_PC(%sp),LOCAL_SIZE+2+EXC_PC-0xc(%sp)
+
+# now, copy the result to the proper place on the stack
+ mov.l LOCAL_SIZE+FP_SRC_EX(%sp),LOCAL_SIZE+EXC_SR+0x0(%sp)
+ mov.l LOCAL_SIZE+FP_SRC_HI(%sp),LOCAL_SIZE+EXC_SR+0x4(%sp)
+ mov.l LOCAL_SIZE+FP_SRC_LO(%sp),LOCAL_SIZE+EXC_SR+0x8(%sp)
+
+ add.l &LOCAL_SIZE-0x8,%sp
+
+ btst &0x7,(%sp)
+ bne.b fu_out_trace
+
+ bra.l _fpsp_done
+
+fu_out_ena:
+ and.b FPSR_EXCEPT(%a6),%d0 # keep only ones enabled
+ bfffo %d0{&24:&8},%d0 # find highest priority exception
+ bne.b fu_out_exc # there is at least one set
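+# (bfffo scans d0 bits 24-31, so the returned offset encodes the exception
+# in priority order: 24=BSUN, 25=SNAN, 26=OPERR, 27=OVFL, 28=UNFL, 29=DZ,
+# 30=INEX2, 31=INEX1.)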
+
+# no exceptions were set.
+# if a disabled overflow occurred and inexact was enabled, then a branch
+# to _real_inex() is made (even if the result was exact).
+ btst &ovfl_bit,FPSR_EXCEPT(%a6) # was overflow set?
+ beq.w fu_out_done # no
+
+fu_out_ovflchk:
+ btst &inex2_bit,FPCR_ENABLE(%a6) # was inexact enabled?
+ beq.w fu_out_done # no
+ bra.w fu_inex # yes
+
+#
+# The fp move out that took the "Unimplemented Data Type" exception was
+# being traced. Since the stack frames are similar, get the "current" PC
+# from FPIAR and put it in the trace stack frame then jump to _real_trace().
+#
+# UNSUPP FRAME TRACE FRAME
+# ***************** *****************
+# * EA * * Current *
+# * * * PC *
+# ***************** *****************
+# * 0x3 * 0x0dc * * 0x2 * 0x024 *
+# ***************** *****************
+# * Next * * Next *
+# * PC * * PC *
+# ***************** *****************
+# * SR * * SR *
+# ***************** *****************
+#
+fu_out_trace:
+ mov.w &0x2024,0x6(%sp)
+ fmov.l %fpiar,0x8(%sp)
+ bra.l _real_trace
+
+# an exception occurred and that exception was enabled.
+fu_out_exc:
+ subi.l &24,%d0 # fix offset to be 0-8
+
+# we don't mess with the existing fsave frame. just re-insert it and
+# jump to the "_real_{}()" handler...
+ mov.w (tbl_fu_out.b,%pc,%d0.w*2),%d0
+ jmp (tbl_fu_out.b,%pc,%d0.w*1)
+
+ swbeg &0x8
+tbl_fu_out:
+ short tbl_fu_out - tbl_fu_out # BSUN can't happen
+ short tbl_fu_out - tbl_fu_out # SNAN can't happen
+ short fu_operr - tbl_fu_out # OPERR
+ short fu_ovfl - tbl_fu_out # OVFL
+ short fu_unfl - tbl_fu_out # UNFL
+ short tbl_fu_out - tbl_fu_out # DZ can't happen
+ short fu_inex - tbl_fu_out # INEX2
+ short tbl_fu_out - tbl_fu_out # INEX1 won't make it here
+
+# for snan,operr,ovfl,unfl, src op is still in FP_SRC so just
+# frestore it.
+fu_snan:
+ fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0/fp1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+
+ mov.w &0x30d8,EXC_VOFF(%a6) # vector offset = 0xd8
+ mov.w &0xe006,2+FP_SRC(%a6)
+
+ frestore FP_SRC(%a6)
+
+ unlk %a6
+
+
+ bra.l _real_snan
+
+fu_operr:
+ fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0/fp1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+
+ mov.w &0x30d0,EXC_VOFF(%a6) # vector offset = 0xd0
+ mov.w &0xe004,2+FP_SRC(%a6)
+
+ frestore FP_SRC(%a6)
+
+ unlk %a6
+
+
+ bra.l _real_operr
+
+fu_ovfl:
+ fmovm.x &0x40,FP_SRC(%a6) # save EXOP to the stack
+
+ fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0/fp1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+
+ mov.w &0x30d4,EXC_VOFF(%a6) # vector offset = 0xd4
+ mov.w &0xe005,2+FP_SRC(%a6)
+
+ frestore FP_SRC(%a6) # restore EXOP
+
+ unlk %a6
+
+ bra.l _real_ovfl
+
+# underflow can happen for extended precision. extended precision opclass
+# three instruction exceptions don't update the stack pointer. so, if the
+# exception occurred from user mode, then simply update a7 and exit normally.
+# if the exception occurred from supervisor mode, check if the <ea> mode
+# was -(a7); if so, the exception stack frame must be shifted "down".
+fu_unfl:
+ mov.l EXC_A6(%a6),(%a6) # restore a6
+
+ btst &0x5,EXC_SR(%a6)
+ bne.w fu_unfl_s
+
+ mov.l EXC_A7(%a6),%a0 # restore a7 whether we need
+ mov.l %a0,%usp # to or not...
+
+fu_unfl_cont:
+ fmovm.x &0x40,FP_SRC(%a6) # save EXOP to the stack
+
+ fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0/fp1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+
+ mov.w &0x30cc,EXC_VOFF(%a6) # vector offset = 0xcc
+ mov.w &0xe003,2+FP_SRC(%a6)
+
+ frestore FP_SRC(%a6) # restore EXOP
+
+ unlk %a6
+
+ bra.l _real_unfl
+
+fu_unfl_s:
+ cmpi.b SPCOND_FLG(%a6),&mda7_flg # was the <ea> mode -(sp)?
+ bne.b fu_unfl_cont
+
+# the extended precision result is still in fp0. but, we need to save it
+# somewhere on the stack until we can copy it to its final resting place
+# (where the exc frame is currently). make sure it's not at the top of the
+# frame or it will get overwritten when the exc stack frame is shifted "down".
+ fmovm.x &0x80,FP_SRC(%a6) # put answer on stack
+ fmovm.x &0x40,FP_DST(%a6) # put EXOP on stack
+
+ fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0/fp1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+
+ mov.w &0x30cc,EXC_VOFF(%a6) # vector offset = 0xcc
+ mov.w &0xe003,2+FP_DST(%a6)
+
+ frestore FP_DST(%a6) # restore EXOP
+
+ mov.l (%a6),%a6 # restore frame pointer
+
+ mov.l LOCAL_SIZE+EXC_SR(%sp),LOCAL_SIZE+EXC_SR-0xc(%sp)
+ mov.l LOCAL_SIZE+2+EXC_PC(%sp),LOCAL_SIZE+2+EXC_PC-0xc(%sp)
+ mov.l LOCAL_SIZE+EXC_EA(%sp),LOCAL_SIZE+EXC_EA-0xc(%sp)
+
+# now, copy the result to the proper place on the stack
+ mov.l LOCAL_SIZE+FP_SRC_EX(%sp),LOCAL_SIZE+EXC_SR+0x0(%sp)
+ mov.l LOCAL_SIZE+FP_SRC_HI(%sp),LOCAL_SIZE+EXC_SR+0x4(%sp)
+ mov.l LOCAL_SIZE+FP_SRC_LO(%sp),LOCAL_SIZE+EXC_SR+0x8(%sp)
+
+ add.l &LOCAL_SIZE-0x8,%sp
+
+ bra.l _real_unfl
+
+# fmove in and out enter here.
+fu_inex:
+ fmovm.x &0x40,FP_SRC(%a6) # save EXOP to the stack
+
+ fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0/fp1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+
+ mov.w &0x30c4,EXC_VOFF(%a6) # vector offset = 0xc4
+ mov.w &0xe001,2+FP_SRC(%a6)
+
+ frestore FP_SRC(%a6) # restore EXOP
+
+ unlk %a6
+
+
+ bra.l _real_inex
+
+#########################################################################
+#########################################################################
+fu_in_pack:
+
+
+# I'm not sure at this point what FPSR bits are valid for this instruction,
+# so, since the emulation routines re-create them anyway, zero the exception field.
+ andi.l &0x00ff00ff,USER_FPSR(%a6) # zero exception field
+
+ fmov.l &0x0,%fpcr # zero current control regs
+ fmov.l &0x0,%fpsr
+
+ bsr.l get_packed # fetch packed src operand
+
+ lea FP_SRC(%a6),%a0 # pass ptr to src
+ bsr.l set_tag_x # set src optype tag
+
+ mov.b %d0,STAG(%a6) # save src optype tag
+
+ bfextu EXC_CMDREG(%a6){&6:&3},%d0 # dyadic; load dst reg
+
+# bit five of the fp extension word separates the monadic and dyadic operations
+# at this point
+ btst &0x5,1+EXC_CMDREG(%a6) # is operation monadic or dyadic?
+ beq.b fu_extract_p # monadic
+ cmpi.b 1+EXC_CMDREG(%a6),&0x3a # is operation an ftst?
+ beq.b fu_extract_p # yes, so it's monadic, too
+
+ bsr.l load_fpn2 # load dst into FP_DST
+
+ lea FP_DST(%a6),%a0 # pass: ptr to dst op
+ bsr.l set_tag_x # tag the operand type
+ cmpi.b %d0,&UNNORM # is operand an UNNORM?
+ bne.b fu_op2_done_p # no
+ bsr.l unnorm_fix # yes; convert to NORM,DENORM,or ZERO
+fu_op2_done_p:
+ mov.b %d0,DTAG(%a6) # save dst optype tag
+
+fu_extract_p:
+ clr.l %d0
+ mov.b FPCR_MODE(%a6),%d0 # fetch rnd mode/prec
+
+ bfextu 1+EXC_CMDREG(%a6){&1:&7},%d1 # extract extension
+
+ lea FP_SRC(%a6),%a0
+ lea FP_DST(%a6),%a1
+
+ mov.l (tbl_unsupp.l,%pc,%d1.l*4),%d1 # fetch routine addr
+ jsr (tbl_unsupp.l,%pc,%d1.l*1)
+
+#
+# Exceptions in order of precedence:
+# BSUN : none
+# SNAN : all dyadic ops
+# OPERR : fsqrt(-NORM)
+# OVFL : all except ftst,fcmp
+# UNFL : all except ftst,fcmp
+# DZ : fdiv
+# INEX2 : all except ftst,fcmp
+# INEX1 : all
+#
+
+# we determine the highest priority exception (if any) set by the
+# emulation routine that has also been enabled by the user.
+ mov.b FPCR_ENABLE(%a6),%d0 # fetch exceptions enabled
+ bne.w fu_in_ena_p # some are enabled
+
+fu_in_cont_p:
+# fcmp and ftst do not store any result.
+ mov.b 1+EXC_CMDREG(%a6),%d0 # fetch extension
+ andi.b &0x38,%d0 # extract bits 3-5
+ cmpi.b %d0,&0x38 # is instr fcmp or ftst?
+ beq.b fu_in_exit_p # yes
+
+ bfextu EXC_CMDREG(%a6){&6:&3},%d0 # dyadic; load dst reg
+ bsr.l store_fpreg # store the result
+
+fu_in_exit_p:
+
+ btst &0x5,EXC_SR(%a6) # user or supervisor?
+ bne.w fu_in_exit_s_p # supervisor
+
+ mov.l EXC_A7(%a6),%a0 # update user a7
+ mov.l %a0,%usp
+
+fu_in_exit_cont_p:
+ fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0/fp1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+
+ unlk %a6 # unravel stack frame
+
+ btst &0x7,(%sp) # is trace on?
+ bne.w fu_trace_p # yes
+
+ bra.l _fpsp_done # exit to os
+
+# the exception occurred in supervisor mode. check to see if the
+# addressing mode was (a7)+. if so, we'll need to shift the
+# stack frame "up".
+fu_in_exit_s_p:
+ btst &mia7_bit,SPCOND_FLG(%a6) # was ea mode (a7)+
+ beq.b fu_in_exit_cont_p # no
+
+ fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0/fp1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+
+ unlk %a6 # unravel stack frame
+
+# shift the stack frame "up". we don't really care about the <ea> field.
+ mov.l 0x4(%sp),0x10(%sp)
+ mov.l 0x0(%sp),0xc(%sp)
+ add.l &0xc,%sp
+
+ btst &0x7,(%sp) # is trace on?
+ bne.w fu_trace_p # yes
+
+ bra.l _fpsp_done # exit to os
+
+fu_in_ena_p:
+ and.b FPSR_EXCEPT(%a6),%d0 # keep only ones enabled & set
+ bfffo %d0{&24:&8},%d0 # find highest priority exception
+ bne.b fu_in_exc_p # at least one was set
+
+#
+# No exceptions occurred that were also enabled. Now:
+#
+# if (OVFL && ovfl_disabled && inexact_enabled) {
+# branch to _real_inex() (even if the result was exact!);
+# } else {
+# save the result in the proper fp reg (unless the op is fcmp or ftst);
+# return;
+# }
+#
+ btst &ovfl_bit,FPSR_EXCEPT(%a6) # was overflow set?
+ beq.w fu_in_cont_p # no
+
+fu_in_ovflchk_p:
+ btst &inex2_bit,FPCR_ENABLE(%a6) # was inexact enabled?
+ beq.w fu_in_cont_p # no
+ bra.w fu_in_exc_ovfl_p # do _real_inex() now
+
+#
+# An exception occurred and that exception was enabled:
+#
+# shift enabled exception field into lo byte of d0;
+# if (((INEX2 || INEX1) && inex_enabled && OVFL && ovfl_disabled) ||
+# ((INEX2 || INEX1) && inex_enabled && UNFL && unfl_disabled)) {
+# /*
+# * this is the case where we must call _real_inex() now or else
+# * there will be no other way to pass it the exceptional operand
+# */
+# call _real_inex();
+# } else {
+# restore exc state (SNAN||OPERR||OVFL||UNFL||DZ||INEX) into the FPU;
+# }
+#
+fu_in_exc_p:
+ subi.l &24,%d0 # fix offset to be 0-8
+ cmpi.b %d0,&0x6 # is exception INEX? (6 or 7)
+ blt.b fu_in_exc_exit_p # no
+
+# the enabled exception was inexact
+ btst &unfl_bit,FPSR_EXCEPT(%a6) # did disabled underflow occur?
+ bne.w fu_in_exc_unfl_p # yes
+ btst &ovfl_bit,FPSR_EXCEPT(%a6) # did disabled overflow occur?
+ bne.w fu_in_exc_ovfl_p # yes
+
+# here, we insert the correct fsave status value into the fsave frame for the
+# corresponding exception. the operand in the fsave frame should be the original
+# src operand.
+# as a reminder for future predicted pain and agony, we are passing in the fsave
+# frame the "non-skewed" operand for cases of sgl and dbl src INFs, NANs, and
+# DENORMs. this is INCORRECT for enabled SNAN, which would give the user the
+# skewed SNAN!!!
+fu_in_exc_exit_p:
+ btst &0x5,EXC_SR(%a6) # user or supervisor?
+ bne.w fu_in_exc_exit_s_p # supervisor
+
+ mov.l EXC_A7(%a6),%a0 # update user a7
+ mov.l %a0,%usp
+
+fu_in_exc_exit_cont_p:
+ mov.w (tbl_except_p.b,%pc,%d0.w*2),2+FP_SRC(%a6)
+
+ fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0/fp1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+
+ frestore FP_SRC(%a6) # restore src op
+
+ unlk %a6
+
+ btst &0x7,(%sp) # is trace enabled?
+ bne.w fu_trace_p # yes
+
+ bra.l _fpsp_done
+
+tbl_except_p:
+ short 0xe000,0xe006,0xe004,0xe005
+ short 0xe003,0xe002,0xe001,0xe001
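+# the words above are the fsave status values stuffed for each priority
+# index: 0xe000 = unused (BSUN), 0xe006 = SNAN, 0xe004 = OPERR, 0xe005 = OVFL,
+# 0xe003 = UNFL, 0xe002 = DZ, 0xe001 = INEX2/INEX1.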
+
+fu_in_exc_ovfl_p:
+ mov.w &0x3,%d0
+ bra.w fu_in_exc_exit_p
+
+fu_in_exc_unfl_p:
+ mov.w &0x4,%d0
+ bra.w fu_in_exc_exit_p
+
+fu_in_exc_exit_s_p:
+ btst &mia7_bit,SPCOND_FLG(%a6)
+ beq.b fu_in_exc_exit_cont_p
+
+ mov.w (tbl_except_p.b,%pc,%d0.w*2),2+FP_SRC(%a6)
+
+ fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0/fp1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+
+ frestore FP_SRC(%a6) # restore src op
+
+ unlk %a6 # unravel stack frame
+
+# shift stack frame "up". who cares about <ea> field.
+ mov.l 0x4(%sp),0x10(%sp)
+ mov.l 0x0(%sp),0xc(%sp)
+ add.l &0xc,%sp
+
+ btst &0x7,(%sp) # is trace on?
+ bne.b fu_trace_p # yes
+
+ bra.l _fpsp_done # exit to os
+
+#
+# The opclass two PACKED instruction that took an "Unimplemented Data Type"
+# exception was being traced. Make the "current" PC the FPIAR and put it in the
+# trace stack frame then jump to _real_trace().
+#
+# UNSUPP FRAME TRACE FRAME
+# ***************** *****************
+# * EA * * Current *
+# * * * PC *
+# ***************** *****************
+# * 0x2 * 0x0dc * * 0x2 * 0x024 *
+# ***************** *****************
+# * Next * * Next *
+# * PC * * PC *
+# ***************** *****************
+# * SR * * SR *
+# ***************** *****************
+fu_trace_p:
+ mov.w &0x2024,0x6(%sp)
+ fmov.l %fpiar,0x8(%sp)
+
+ bra.l _real_trace
+
+#########################################################
+#########################################################
+fu_out_pack:
+
+
+# I'm not sure at this point what FPSR bits are valid for this instruction,
+# so, since the emulation routines re-create them anyway, zero the exception field.
+# fmove out doesn't affect ccodes.
+ and.l &0xffff00ff,USER_FPSR(%a6) # zero exception field
+
+ fmov.l &0x0,%fpcr # zero current control regs
+ fmov.l &0x0,%fpsr
+
+ bfextu EXC_CMDREG(%a6){&6:&3},%d0
+ bsr.l load_fpn1
+
+# unlike other opclass 3 unimplemented data type exceptions, packed must be
+# able to detect all operand types.
+ lea FP_SRC(%a6),%a0
+ bsr.l set_tag_x # tag the operand type
+ cmpi.b %d0,&UNNORM # is operand an UNNORM?
+ bne.b fu_op2_p # no
+ bsr.l unnorm_fix # yes; convert to NORM,DENORM,or ZERO
+
+fu_op2_p:
+ mov.b %d0,STAG(%a6) # save src optype tag
+
+ clr.l %d0
+ mov.b FPCR_MODE(%a6),%d0 # fetch rnd mode/prec
+
+ lea FP_SRC(%a6),%a0 # pass ptr to src operand
+
+ mov.l (%a6),EXC_A6(%a6) # in case a6 changes
+ bsr.l fout # call fmove out routine
+
+# Exceptions in order of precedence:
+# BSUN : no
+# SNAN : yes
+# OPERR : if ((k_factor > +17) || (dec. exp exceeds 3 digits))
+# OVFL : no
+# UNFL : no
+# DZ : no
+# INEX2 : yes
+# INEX1 : no
+
+# determine the highest priority exception (if any) set by the
+# emulation routine that has also been enabled by the user.
+ mov.b FPCR_ENABLE(%a6),%d0 # fetch exceptions enabled
+ bne.w fu_out_ena_p # some are enabled
+
+fu_out_exit_p:
+ mov.l EXC_A6(%a6),(%a6) # restore a6
+
+ btst &0x5,EXC_SR(%a6) # user or supervisor?
+ bne.b fu_out_exit_s_p # supervisor
+
+ mov.l EXC_A7(%a6),%a0 # update user a7
+ mov.l %a0,%usp
+
+fu_out_exit_cont_p:
+ fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0/fp1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+
+ unlk %a6 # unravel stack frame
+
+ btst &0x7,(%sp) # is trace on?
+ bne.w fu_trace_p # yes
+
+ bra.l _fpsp_done # exit to os
+
+# the exception occurred in supervisor mode. check to see if the
+# addressing mode was -(a7). if so, we'll need to shift the
+# stack frame "down".
+fu_out_exit_s_p:
+ btst &mda7_bit,SPCOND_FLG(%a6) # was ea mode -(a7)
+ beq.b fu_out_exit_cont_p # no
+
+ fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0/fp1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+
+ mov.l (%a6),%a6 # restore frame pointer
+
+ mov.l LOCAL_SIZE+EXC_SR(%sp),LOCAL_SIZE+EXC_SR-0xc(%sp)
+ mov.l LOCAL_SIZE+2+EXC_PC(%sp),LOCAL_SIZE+2+EXC_PC-0xc(%sp)
+
+# now, copy the result to the proper place on the stack
+ mov.l LOCAL_SIZE+FP_DST_EX(%sp),LOCAL_SIZE+EXC_SR+0x0(%sp)
+ mov.l LOCAL_SIZE+FP_DST_HI(%sp),LOCAL_SIZE+EXC_SR+0x4(%sp)
+ mov.l LOCAL_SIZE+FP_DST_LO(%sp),LOCAL_SIZE+EXC_SR+0x8(%sp)
+
+ add.l &LOCAL_SIZE-0x8,%sp
+
+ btst &0x7,(%sp)
+ bne.w fu_trace_p
+
+ bra.l _fpsp_done
+
+fu_out_ena_p:
+ and.b FPSR_EXCEPT(%a6),%d0 # keep only ones enabled
+ bfffo %d0{&24:&8},%d0 # find highest priority exception
+ beq.w fu_out_exit_p
+
+ mov.l EXC_A6(%a6),(%a6) # restore a6
+
+# an exception occurred and that exception was enabled.
+# the only exceptions possible on packed move out are INEX, OPERR, and SNAN.
+fu_out_exc_p:
+ cmpi.b %d0,&0x1a
+ bgt.w fu_inex_p2
+ beq.w fu_operr_p
+
+fu_snan_p:
+ btst &0x5,EXC_SR(%a6)
+ bne.b fu_snan_s_p
+
+ mov.l EXC_A7(%a6),%a0
+ mov.l %a0,%usp
+ bra.w fu_snan
+
+fu_snan_s_p:
+ cmpi.b SPCOND_FLG(%a6),&mda7_flg
+ bne.w fu_snan
+
+# the instruction was "fmove.p fpn,-(a7)" from supervisor mode.
+# the strategy is to move the exception frame "down" 12 bytes. then, we
+# can store the default result where the exception frame was.
+ fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0/fp1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+
+ mov.w &0x30d8,EXC_VOFF(%a6) # vector offset = 0xd0
+ mov.w &0xe006,2+FP_SRC(%a6) # set fsave status
+
+ frestore FP_SRC(%a6) # restore src operand
+
+ mov.l (%a6),%a6 # restore frame pointer
+
+ mov.l LOCAL_SIZE+EXC_SR(%sp),LOCAL_SIZE+EXC_SR-0xc(%sp)
+ mov.l LOCAL_SIZE+2+EXC_PC(%sp),LOCAL_SIZE+2+EXC_PC-0xc(%sp)
+ mov.l LOCAL_SIZE+EXC_EA(%sp),LOCAL_SIZE+EXC_EA-0xc(%sp)
+
+# now, we copy the default result to its proper location
+ mov.l LOCAL_SIZE+FP_DST_EX(%sp),LOCAL_SIZE+0x4(%sp)
+ mov.l LOCAL_SIZE+FP_DST_HI(%sp),LOCAL_SIZE+0x8(%sp)
+ mov.l LOCAL_SIZE+FP_DST_LO(%sp),LOCAL_SIZE+0xc(%sp)
+
+ add.l &LOCAL_SIZE-0x8,%sp
+
+
+ bra.l _real_snan
+
+fu_operr_p:
+ btst &0x5,EXC_SR(%a6)
+ bne.w fu_operr_p_s
+
+ mov.l EXC_A7(%a6),%a0
+ mov.l %a0,%usp
+ bra.w fu_operr
+
+fu_operr_p_s:
+ cmpi.b SPCOND_FLG(%a6),&mda7_flg
+ bne.w fu_operr
+
+# the instruction was "fmove.p fpn,-(a7)" from supervisor mode.
+# the strategy is to move the exception frame "down" 12 bytes. then, we
+# can store the default result where the exception frame was.
+ fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0/fp1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+
+ mov.w &0x30d0,EXC_VOFF(%a6) # vector offset = 0xd0
+ mov.w &0xe004,2+FP_SRC(%a6) # set fsave status
+
+ frestore FP_SRC(%a6) # restore src operand
+
+ mov.l (%a6),%a6 # restore frame pointer
+
+ mov.l LOCAL_SIZE+EXC_SR(%sp),LOCAL_SIZE+EXC_SR-0xc(%sp)
+ mov.l LOCAL_SIZE+2+EXC_PC(%sp),LOCAL_SIZE+2+EXC_PC-0xc(%sp)
+ mov.l LOCAL_SIZE+EXC_EA(%sp),LOCAL_SIZE+EXC_EA-0xc(%sp)
+
+# now, we copy the default result to its proper location
+ mov.l LOCAL_SIZE+FP_DST_EX(%sp),LOCAL_SIZE+0x4(%sp)
+ mov.l LOCAL_SIZE+FP_DST_HI(%sp),LOCAL_SIZE+0x8(%sp)
+ mov.l LOCAL_SIZE+FP_DST_LO(%sp),LOCAL_SIZE+0xc(%sp)
+
+ add.l &LOCAL_SIZE-0x8,%sp
+
+
+ bra.l _real_operr
+
+fu_inex_p2:
+ btst &0x5,EXC_SR(%a6)
+ bne.w fu_inex_s_p2
+
+ mov.l EXC_A7(%a6),%a0
+ mov.l %a0,%usp
+ bra.w fu_inex
+
+fu_inex_s_p2:
+ cmpi.b SPCOND_FLG(%a6),&mda7_flg
+ bne.w fu_inex
+
+# the instruction was "fmove.p fpn,-(a7)" from supervisor mode.
+# the strategy is to move the exception frame "down" 12 bytes. then, we
+# can store the default result where the exception frame was.
+ fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0/fp1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+
+ mov.w &0x30c4,EXC_VOFF(%a6) # vector offset = 0xc4
+ mov.w &0xe001,2+FP_SRC(%a6) # set fsave status
+
+ frestore FP_SRC(%a6) # restore src operand
+
+ mov.l (%a6),%a6 # restore frame pointer
+
+ mov.l LOCAL_SIZE+EXC_SR(%sp),LOCAL_SIZE+EXC_SR-0xc(%sp)
+ mov.l LOCAL_SIZE+2+EXC_PC(%sp),LOCAL_SIZE+2+EXC_PC-0xc(%sp)
+ mov.l LOCAL_SIZE+EXC_EA(%sp),LOCAL_SIZE+EXC_EA-0xc(%sp)
+
+# now, we copy the default result to its proper location
+ mov.l LOCAL_SIZE+FP_DST_EX(%sp),LOCAL_SIZE+0x4(%sp)
+ mov.l LOCAL_SIZE+FP_DST_HI(%sp),LOCAL_SIZE+0x8(%sp)
+ mov.l LOCAL_SIZE+FP_DST_LO(%sp),LOCAL_SIZE+0xc(%sp)
+
+ add.l &LOCAL_SIZE-0x8,%sp
+
+
+ bra.l _real_inex
+
+#########################################################################
+
+#
+# if we're stuffing a source operand back into an fsave frame, then we
+# have to make sure that, for single or double source operands, the
+# format stuffed is as weird as the hardware usually makes it.
+#
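+# worked example for the sgl case below: a DENORM whose extended exponent
+# is 0x3f80 (the largest that skews) gets its mantissa shifted right by
+# (0x3f81 - 0x3f80) = 1 bit, its j-bit set, and its exponent forced to
+# the "skewed" value 0x3f80.
+#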
+ global funimp_skew
+funimp_skew:
+ bfextu EXC_EXTWORD(%a6){&3:&3},%d0 # extract src specifier
+ cmpi.b %d0,&0x1 # was src sgl?
+ beq.b funimp_skew_sgl # yes
+ cmpi.b %d0,&0x5 # was src dbl?
+ beq.b funimp_skew_dbl # yes
+ rts
+
+funimp_skew_sgl:
+ mov.w FP_SRC_EX(%a6),%d0 # fetch DENORM exponent
+ andi.w &0x7fff,%d0 # strip sign
+ beq.b funimp_skew_sgl_not
+ cmpi.w %d0,&0x3f80
+ bgt.b funimp_skew_sgl_not
+ neg.w %d0 # make exponent negative
+ addi.w &0x3f81,%d0 # find amt to shift
+ mov.l FP_SRC_HI(%a6),%d1 # fetch DENORM hi(man)
+ lsr.l %d0,%d1 # shift it
+ bset &31,%d1 # set j-bit
+ mov.l %d1,FP_SRC_HI(%a6) # insert new hi(man)
+ andi.w &0x8000,FP_SRC_EX(%a6) # clear old exponent
+ ori.w &0x3f80,FP_SRC_EX(%a6) # insert new "skewed" exponent
+funimp_skew_sgl_not:
+ rts
+
+funimp_skew_dbl:
+ mov.w FP_SRC_EX(%a6),%d0 # fetch DENORM exponent
+ andi.w &0x7fff,%d0 # strip sign
+ beq.b funimp_skew_dbl_not
+ cmpi.w %d0,&0x3c00
+ bgt.b funimp_skew_dbl_not
+
+ tst.b FP_SRC_EX(%a6) # make "internal format"
+ smi.b 0x2+FP_SRC(%a6)
+ mov.w %d0,FP_SRC_EX(%a6) # insert exponent with cleared sign
+ clr.l %d0 # clear g,r,s
+ lea FP_SRC(%a6),%a0 # pass ptr to src op
+ mov.w &0x3c01,%d1 # pass denorm threshold
+ bsr.l dnrm_lp # denorm it
+ mov.w &0x3c00,%d0 # new exponent
+ tst.b 0x2+FP_SRC(%a6) # is sign set?
+ beq.b fss_dbl_denorm_done # no
+ bset &15,%d0 # set sign
+fss_dbl_denorm_done:
+ bset &0x7,FP_SRC_HI(%a6) # set j-bit
+ mov.w %d0,FP_SRC_EX(%a6) # insert new exponent
+funimp_skew_dbl_not:
+ rts
+
+#########################################################################
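+# _mem_write2() appears to service the extended-precision store for the
+# fmove out path: from user mode it just falls through to _dmem_write();
+# from supervisor mode the 12-byte operand is captured in FP_DST instead
+# (the caller later copies it onto the shifted exception frame) and d1 is
+# cleared to flag success.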
+ global _mem_write2
+_mem_write2:
+ btst &0x5,EXC_SR(%a6)
+ beq.l _dmem_write
+ mov.l 0x0(%a0),FP_DST_EX(%a6)
+ mov.l 0x4(%a0),FP_DST_HI(%a6)
+ mov.l 0x8(%a0),FP_DST_LO(%a6)
+ clr.l %d1
+ rts
+
+#########################################################################
+# XDEF **************************************************************** #
+# _fpsp_effadd(): 060FPSP entry point for FP "Unimplemented #
+# effective address" exception. #
+# #
+# This handler should be the first code executed upon taking the #
+# FP Unimplemented Effective Address exception in an operating #
+# system. #
+# #
+# XREF **************************************************************** #
+# _imem_read_long() - read instruction longword #
+# fix_skewed_ops() - adjust src operand in fsave frame #
+# set_tag_x() - determine optype of src/dst operands #
+# store_fpreg() - store opclass 0 or 2 result to FP regfile #
+# unnorm_fix() - change UNNORM operands to NORM or ZERO #
+# load_fpn2() - load dst operand from FP regfile #
+# tbl_unsupp - address of table of emulation routines for opclass 0,2 #
+# decbin() - convert packed data to FP binary data #
+# _real_fpu_disabled() - "callout" for "FPU disabled" exception #
+# _real_access() - "callout" for access error exception #
+# _mem_read() - read extended immediate operand from memory #
+# _fpsp_done() - "callout" for exit; work all done #
+# _real_trace() - "callout" for Trace enabled exception #
+# fmovm_dynamic() - emulate dynamic fmovm instruction #
+# fmovm_ctrl() - emulate fmovm control instruction #
+# #
+# INPUT *************************************************************** #
+# - The system stack contains the "Unimplemented <ea>" stk frame #
+# #
+# OUTPUT ************************************************************** #
+# If access error: #
+# - The system stack is changed to an access error stack frame #
+# If FPU disabled: #
+# - The system stack is changed to an FPU disabled stack frame #
+# If Trace exception enabled: #
+# - The system stack is changed to a Trace exception stack frame #
+# Else: (normal case) #
+# - None (correct result has been stored as appropriate) #
+# #
+# ALGORITHM *********************************************************** #
+# This handler covers 3 types of operations: #
+# (1) FP Instructions using extended precision or packed immediate #
+# addressing mode. #
+# (2) The "fmovm.x" instruction w/ dynamic register specification. #
+# (3) The "fmovm.l" instruction w/ 2 or 3 control registers. #
+# #
+# For immediate data operations, the data is read in w/ a #
+# _mem_read() "callout", converted to FP binary (if packed), and used #
+# as the source operand to the instruction specified by the instruction #
+# word. If no FP exception should be reported as a result of the #
+# emulation, then the result is stored to the destination register and #
+# the handler exits through _fpsp_done(). If an enabled exc has been #
+# signalled as a result of emulation, then an fsave state frame #
+# corresponding to the FP exception type must be entered into the 060 #
+# FPU before exiting. In either the enabled or disabled cases, we #
+# must also check if a Trace exception is pending, in which case, we #
+# must create a Trace exception stack frame from the current exception #
+# stack frame. If no Trace is pending, we simply exit through #
+# _fpsp_done(). #
+# For "fmovm.x", call the routine fmovm_dynamic() which will #
+# decode and emulate the instruction. No FP exceptions can be pending #
+# as a result of this operation emulation. A Trace exception can be #
+# pending, though, which means the current stack frame must be changed #
+# to a Trace stack frame and an exit made through _real_trace(). #
+# For the case of "fmovm.x Dn,-(a7)", where the offending instruction #
+# was executed from supervisor mode, this handler must store the FP #
+# register file values to the system stack by itself since #
+# fmovm_dynamic() can't handle this. A normal exit is made through #
+# _fpsp_done(). #
+# For "fmovm.l", fmovm_ctrl() is used to emulate the instruction. #
+# Again, a Trace exception may be pending and an exit made through #
+# _real_trace(). Else, a normal exit is made through _fpsp_done(). #
+# #
+# Before any of the above is attempted, it must be checked to #
+# see if the FPU is disabled. Since the "Unimp <ea>" exception is taken #
+# before the "FPU disabled" exception, but the "FPU disabled" exception #
+# has higher priority, we check the disabled bit in the PCR. If set, #
+# then we must create an 8 word "FPU disabled" exception stack frame #
+# from the current 4 word exception stack frame. This includes #
+# reproducing the effective address of the instruction to put on the #
+# new stack frame. #
+# #
+# In the process of all emulation work, if a _mem_read() #
+# "callout" returns a failing result indicating an access error, then #
+# we must create an access error stack frame from the current stack #
+# frame. This information includes a faulting address and a fault- #
+# status-longword. These are created within this handler. #
+# #
+#########################################################################
+
+ global _fpsp_effadd
+_fpsp_effadd:
+
+# This exception type takes priority over the "Line F Emulator"
+# exception. Therefore, the FPU could be disabled when entering here.
+# So, we must check to see if it's disabled and handle that case separately.
+ mov.l %d0,-(%sp) # save d0
+ movc %pcr,%d0 # load proc cr
+ btst &0x1,%d0 # is FPU disabled?
+ bne.w iea_disabled # yes
+ mov.l (%sp)+,%d0 # restore d0
+
+ link %a6,&-LOCAL_SIZE # init stack frame
+
+ movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
+ fmovm.l %fpcr,%fpsr,%fpiar,USER_FPCR(%a6) # save ctrl regs
+ fmovm.x &0xc0,EXC_FPREGS(%a6) # save fp0-fp1 on stack
+
+# PC of instruction that took the exception is the PC in the frame
+ mov.l EXC_PC(%a6),EXC_EXTWPTR(%a6)
+
+ mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
+ addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
+ bsr.l _imem_read_long # fetch the instruction words
+ mov.l %d0,EXC_OPWORD(%a6) # store OPWORD and EXTWORD
+
+#########################################################################
+
+ tst.w %d0 # is operation fmovem?
+ bmi.w iea_fmovm # yes
+
+#
+# here, we will have:
+# fabs fdabs fsabs facos fmod
+# fadd fdadd fsadd fasin frem
+# fcmp fatan fscale
+# fdiv fddiv fsdiv fatanh fsin
+# fint fcos fsincos
+# fintrz fcosh fsinh
+# fmove fdmove fsmove fetox ftan
+# fmul fdmul fsmul fetoxm1 ftanh
+# fneg fdneg fsneg fgetexp ftentox
+# fsgldiv fgetman ftwotox
+# fsglmul flog10
+# fsqrt flog2
+# fsub fdsub fssub flogn
+# ftst flognp1
+# which can all use f<op>.{x,p}
+# so, now it's immediate data extended precision AND PACKED FORMAT!
+#
+iea_op:
+ andi.l &0x00ff00ff,USER_FPSR(%a6)
+
+ btst &0xa,%d0 # is src fmt x or p?
+ bne.b iea_op_pack # packed
+
+
+ mov.l EXC_EXTWPTR(%a6),%a0 # pass: ptr to #<data>
+ lea FP_SRC(%a6),%a1 # pass: ptr to super addr
+ mov.l &0xc,%d0 # pass: 12 bytes
+ bsr.l _imem_read # read extended immediate
+
+ tst.l %d1 # did ifetch fail?
+ bne.w iea_iacc # yes
+
+ bra.b iea_op_setsrc
+
+iea_op_pack:
+
+ mov.l EXC_EXTWPTR(%a6),%a0 # pass: ptr to #<data>
+ lea FP_SRC(%a6),%a1 # pass: ptr to super dst
+ mov.l &0xc,%d0 # pass: 12 bytes
+ bsr.l _imem_read # read packed operand
+
+ tst.l %d1 # did ifetch fail?
+ bne.w iea_iacc # yes
+
+# The packed operand is an INF or a NAN if the exponent field is all ones.
+ bfextu FP_SRC(%a6){&1:&15},%d0 # get exp
+ cmpi.w %d0,&0x7fff # INF or NAN?
+ beq.b iea_op_setsrc # operand is an INF or NAN
+
+# The packed operand is a zero if the mantissa is all zero, else it's
+# a normal packed op.
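+# (in the packed format, the low nybble of byte 3 holds the single integer
+# digit and longwords 2 and 3 hold the 16 fraction digits, which is what
+# the three checks below test.)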
+ mov.b 3+FP_SRC(%a6),%d0 # get byte 4
+ andi.b &0x0f,%d0 # clear all but last nybble
+ bne.b iea_op_gp_not_spec # not a zero
+ tst.l FP_SRC_HI(%a6) # is lw 2 zero?
+ bne.b iea_op_gp_not_spec # not a zero
+ tst.l FP_SRC_LO(%a6) # is lw 3 zero?
+ beq.b iea_op_setsrc # operand is a ZERO
+iea_op_gp_not_spec:
+ lea FP_SRC(%a6),%a0 # pass: ptr to packed op
+ bsr.l decbin # convert to extended
+ fmovm.x &0x80,FP_SRC(%a6) # make this the srcop
+
+iea_op_setsrc:
+ addi.l &0xc,EXC_EXTWPTR(%a6) # update extension word pointer
+
+# FP_SRC now holds the src operand.
+ lea FP_SRC(%a6),%a0 # pass: ptr to src op
+ bsr.l set_tag_x # tag the operand type
+ mov.b %d0,STAG(%a6) # could be ANYTHING!!!
+ cmpi.b %d0,&UNNORM # is operand an UNNORM?
+ bne.b iea_op_getdst # no
+ bsr.l unnorm_fix # yes; convert to NORM/DENORM/ZERO
+ mov.b %d0,STAG(%a6) # set new optype tag
+iea_op_getdst:
+ clr.b STORE_FLG(%a6) # clear "store result" boolean
+
+ btst &0x5,1+EXC_CMDREG(%a6) # is operation monadic or dyadic?
+ beq.b iea_op_extract # monadic
+ btst &0x4,1+EXC_CMDREG(%a6) # is operation fsincos,ftst,fcmp?
+ bne.b iea_op_spec # yes
+
+iea_op_loaddst:
+ bfextu EXC_CMDREG(%a6){&6:&3},%d0 # fetch dst regno
+ bsr.l load_fpn2 # load dst operand
+
+ lea FP_DST(%a6),%a0 # pass: ptr to dst op
+ bsr.l set_tag_x # tag the operand type
+ mov.b %d0,DTAG(%a6) # could be ANYTHING!!!
+ cmpi.b %d0,&UNNORM # is operand an UNNORM?
+ bne.b iea_op_extract # no
+ bsr.l unnorm_fix # yes; convert to NORM/DENORM/ZERO
+ mov.b %d0,DTAG(%a6) # set new optype tag
+ bra.b iea_op_extract
+
+# the operation is fsincos, ftst, or fcmp. only fcmp is dyadic
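+# (extension opcodes: fsincos = 0x30-0x37, fcmp = 0x38, ftst = 0x3a;
+# bit 3 isolates fsincos, then bit 1 splits ftst from fcmp.)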
+iea_op_spec:
+ btst &0x3,1+EXC_CMDREG(%a6) # is operation fsincos?
+ beq.b iea_op_extract # yes
+# now, we're left with ftst and fcmp. so, first let's tag them so that they don't
+# store a result. then, only fcmp will branch back and pick up a dst operand.
+ st STORE_FLG(%a6) # don't store a final result
+ btst &0x1,1+EXC_CMDREG(%a6) # is operation fcmp?
+ beq.b iea_op_loaddst # yes
+
+iea_op_extract:
+ clr.l %d0
+ mov.b FPCR_MODE(%a6),%d0 # pass: rnd mode,prec
+
+ mov.b 1+EXC_CMDREG(%a6),%d1
+ andi.w &0x007f,%d1 # extract extension
+
+ fmov.l &0x0,%fpcr
+ fmov.l &0x0,%fpsr
+
+ lea FP_SRC(%a6),%a0
+ lea FP_DST(%a6),%a1
+
+ mov.l (tbl_unsupp.l,%pc,%d1.w*4),%d1 # fetch routine addr
+ jsr (tbl_unsupp.l,%pc,%d1.l*1)
+
+#
+# Exceptions in order of precedence:
+# BSUN : none
+# SNAN : all operations
+# OPERR : all reg-reg or mem-reg operations that can normally operr
+# OVFL : same as OPERR
+# UNFL : same as OPERR
+# DZ : same as OPERR
+# INEX2 : same as OPERR
+# INEX1 : all packed immediate operations
+#
+
+# we determine the highest priority exception (if any) set by the
+# emulation routine that has also been enabled by the user.
+ mov.b FPCR_ENABLE(%a6),%d0 # fetch exceptions enabled
+ bne.b iea_op_ena # some are enabled
+
+# now, we save the result, unless, of course, the operation was ftst or fcmp.
+# these don't save results.
+iea_op_save:
+ tst.b STORE_FLG(%a6) # does this op store a result?
+ bne.b iea_op_exit1 # exit with no frestore
+
+iea_op_store:
+ bfextu EXC_CMDREG(%a6){&6:&3},%d0 # fetch dst regno
+ bsr.l store_fpreg # store the result
+
+iea_op_exit1:
+ mov.l EXC_PC(%a6),USER_FPIAR(%a6) # set FPIAR to "Current PC"
+ mov.l EXC_EXTWPTR(%a6),EXC_PC(%a6) # set "Next PC" in exc frame
+
+ fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+
+ unlk %a6 # unravel the frame
+
+ btst &0x7,(%sp) # is trace on?
+ bne.w iea_op_trace # yes
+
+ bra.l _fpsp_done # exit to os
+
+iea_op_ena:
+ and.b FPSR_EXCEPT(%a6),%d0 # keep only ones enabled and set
+ bfffo %d0{&24:&8},%d0 # find highest priority exception
+ bne.b iea_op_exc # at least one was set
+
+# no exception occurred. now, did a disabled, exact overflow occur with inexact
+# enabled? if so, then we have to stuff an overflow frame into the FPU.
+ btst &ovfl_bit,FPSR_EXCEPT(%a6) # did overflow occur?
+ beq.b iea_op_save
+
+iea_op_ovfl:
+ btst &inex2_bit,FPCR_ENABLE(%a6) # is inexact enabled?
+ beq.b iea_op_store # no
+ bra.b iea_op_exc_ovfl # yes
+
+# an enabled exception occurred. we have to insert the exception type back into
+# the machine.
+iea_op_exc:
+ subi.l &24,%d0 # fix offset to be 0-8
+ cmpi.b %d0,&0x6 # is exception INEX?
+ bne.b iea_op_exc_force # no
+
+# the enabled exception was inexact. so, if it occurs with an overflow
+# or underflow that was disabled, then we have to force an overflow or
+# underflow frame.
+ btst &ovfl_bit,FPSR_EXCEPT(%a6) # did overflow occur?
+ bne.b iea_op_exc_ovfl # yes
+ btst &unfl_bit,FPSR_EXCEPT(%a6) # did underflow occur?
+ bne.b iea_op_exc_unfl # yes
+
+iea_op_exc_force:
+ mov.w (tbl_iea_except.b,%pc,%d0.w*2),2+FP_SRC(%a6)
+ bra.b iea_op_exit2 # exit with frestore
+
+tbl_iea_except:
+ short 0xe002, 0xe006, 0xe004, 0xe005
+ short 0xe003, 0xe002, 0xe001, 0xe001
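+# same layout as the other fsave status tables: entries indexed by bfffo
+# priority; entry 0 (BSUN) is unused here.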
+
+iea_op_exc_ovfl:
+ mov.w &0xe005,2+FP_SRC(%a6)
+ bra.b iea_op_exit2
+
+iea_op_exc_unfl:
+ mov.w &0xe003,2+FP_SRC(%a6)
+
+iea_op_exit2:
+ mov.l EXC_PC(%a6),USER_FPIAR(%a6) # set FPIAR to "Current PC"
+ mov.l EXC_EXTWPTR(%a6),EXC_PC(%a6) # set "Next PC" in exc frame
+
+ fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+
+ frestore FP_SRC(%a6) # restore exceptional state
+
+ unlk %a6 # unravel the frame
+
+ btst &0x7,(%sp) # is trace on?
+ bne.b iea_op_trace # yes
+
+ bra.l _fpsp_done # exit to os
+
+#
+# The opclass two instruction that took an "Unimplemented Effective Address"
+# exception was being traced. Take the "current" PC from the FPIAR and put it
+# in the trace stack frame, then jump to _real_trace().
+#
+# UNIMP EA FRAME TRACE FRAME
+# ***************** *****************
+# * 0x0 * 0x0f0 * * Current *
+# ***************** * PC *
+# * Current * *****************
+# * PC * * 0x2 * 0x024 *
+# ***************** *****************
+# * SR * * Next *
+# ***************** * PC *
+# *****************
+# * SR *
+# *****************
+iea_op_trace:
+ mov.l (%sp),-(%sp) # shift stack frame "down"
+ mov.w 0x8(%sp),0x4(%sp)
+ mov.w &0x2024,0x6(%sp) # stk fmt = 0x2; voff = 0x024
+ fmov.l %fpiar,0x8(%sp) # "Current PC" is in FPIAR
+
+ bra.l _real_trace
+
+#########################################################################
+iea_fmovm:
+ btst &14,%d0 # ctrl or data reg
+ beq.w iea_fmovm_ctrl
+
+iea_fmovm_data:
+
+ btst &0x5,EXC_SR(%a6) # user or supervisor mode
+ bne.b iea_fmovm_data_s
+
+iea_fmovm_data_u:
+ mov.l %usp,%a0
+ mov.l %a0,EXC_A7(%a6) # store current a7
+ bsr.l fmovm_dynamic # do dynamic fmovm
+ mov.l EXC_A7(%a6),%a0 # load possibly new a7
+ mov.l %a0,%usp # update usp
+ bra.w iea_fmovm_exit
+
+iea_fmovm_data_s:
+ clr.b SPCOND_FLG(%a6)
+ lea 0x2+EXC_VOFF(%a6),%a0
+ mov.l %a0,EXC_A7(%a6)
+ bsr.l fmovm_dynamic # do dynamic fmovm
+
+ cmpi.b SPCOND_FLG(%a6),&mda7_flg
+ beq.w iea_fmovm_data_predec
+ cmpi.b SPCOND_FLG(%a6),&mia7_flg
+ bne.w iea_fmovm_exit
+
+# right now, d0 = the size.
+# the data has been fetched from the supervisor stack, but we have not
+# incremented the stack pointer by the appropriate number of bytes.
+# do it here.
+iea_fmovm_data_postinc:
+ btst &0x7,EXC_SR(%a6)
+ bne.b iea_fmovm_data_pi_trace
+
+ mov.w EXC_SR(%a6),(EXC_SR,%a6,%d0)
+ mov.l EXC_EXTWPTR(%a6),(EXC_PC,%a6,%d0)
+ mov.w &0x00f0,(EXC_VOFF,%a6,%d0)
+
+ lea (EXC_SR,%a6,%d0),%a0
+ mov.l %a0,EXC_SR(%a6)
+
+ fmovm.x EXC_FP0(%a6),&0xc0 # restore fp0-fp1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+
+ unlk %a6
+ mov.l (%sp)+,%sp
+ bra.l _fpsp_done
+
+iea_fmovm_data_pi_trace:
+ mov.w EXC_SR(%a6),(EXC_SR-0x4,%a6,%d0)
+ mov.l EXC_EXTWPTR(%a6),(EXC_PC-0x4,%a6,%d0)
+ mov.w &0x2024,(EXC_VOFF-0x4,%a6,%d0)
+ mov.l EXC_PC(%a6),(EXC_VOFF+0x2-0x4,%a6,%d0)
+
+ lea (EXC_SR-0x4,%a6,%d0),%a0
+ mov.l %a0,EXC_SR(%a6)
+
+ fmovm.x EXC_FP0(%a6),&0xc0 # restore fp0-fp1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+
+ unlk %a6
+ mov.l (%sp)+,%sp
+ bra.l _real_trace
+
+# right now, d1 = size and d0 = the strg.
+iea_fmovm_data_predec:
+ mov.b %d1,EXC_VOFF(%a6) # store strg
+ mov.b %d0,0x1+EXC_VOFF(%a6) # store size
+
+ fmovm.x EXC_FP0(%a6),&0xc0 # restore fp0-fp1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+
+ mov.l (%a6),-(%sp) # make a copy of a6
+ mov.l %d0,-(%sp) # save d0
+ mov.l %d1,-(%sp) # save d1
+ mov.l EXC_EXTWPTR(%a6),-(%sp) # make a copy of Next PC
+
+ clr.l %d0
+ mov.b 0x1+EXC_VOFF(%a6),%d0 # fetch size
+ neg.l %d0 # get negative of size
+
+ btst &0x7,EXC_SR(%a6) # is trace enabled?
+ beq.b iea_fmovm_data_p2
+
+ mov.w EXC_SR(%a6),(EXC_SR-0x4,%a6,%d0)
+ mov.l EXC_PC(%a6),(EXC_VOFF-0x2,%a6,%d0)
+ mov.l (%sp)+,(EXC_PC-0x4,%a6,%d0)
+ mov.w &0x2024,(EXC_VOFF-0x4,%a6,%d0)
+
+ pea (%a6,%d0) # create final sp
+ bra.b iea_fmovm_data_p3
+
+iea_fmovm_data_p2:
+ mov.w EXC_SR(%a6),(EXC_SR,%a6,%d0)
+ mov.l (%sp)+,(EXC_PC,%a6,%d0)
+ mov.w &0x00f0,(EXC_VOFF,%a6,%d0)
+
+ pea (0x4,%a6,%d0) # create final sp
+
+iea_fmovm_data_p3:
+ clr.l %d1
+ mov.b EXC_VOFF(%a6),%d1 # fetch strg
+
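+# walk the register select string msb (fp0) to lsb (fp7): each set bit
+# stores one 12-byte extended register to the shifted frame and bumps
+# the frame offset in d0 by 0xc.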
+ tst.b %d1
+ bpl.b fm_1
+ fmovm.x &0x80,(0x4+0x8,%a6,%d0)
+ addi.l &0xc,%d0
+fm_1:
+ lsl.b &0x1,%d1
+ bpl.b fm_2
+ fmovm.x &0x40,(0x4+0x8,%a6,%d0)
+ addi.l &0xc,%d0
+fm_2:
+ lsl.b &0x1,%d1
+ bpl.b fm_3
+ fmovm.x &0x20,(0x4+0x8,%a6,%d0)
+ addi.l &0xc,%d0
+fm_3:
+ lsl.b &0x1,%d1
+ bpl.b fm_4
+ fmovm.x &0x10,(0x4+0x8,%a6,%d0)
+ addi.l &0xc,%d0
+fm_4:
+ lsl.b &0x1,%d1
+ bpl.b fm_5
+ fmovm.x &0x08,(0x4+0x8,%a6,%d0)
+ addi.l &0xc,%d0
+fm_5:
+ lsl.b &0x1,%d1
+ bpl.b fm_6
+ fmovm.x &0x04,(0x4+0x8,%a6,%d0)
+ addi.l &0xc,%d0
+fm_6:
+ lsl.b &0x1,%d1
+ bpl.b fm_7
+ fmovm.x &0x02,(0x4+0x8,%a6,%d0)
+ addi.l &0xc,%d0
+fm_7:
+ lsl.b &0x1,%d1
+ bpl.b fm_end
+ fmovm.x &0x01,(0x4+0x8,%a6,%d0)
+fm_end:
+ mov.l 0x4(%sp),%d1
+ mov.l 0x8(%sp),%d0
+ mov.l 0xc(%sp),%a6
+ mov.l (%sp)+,%sp
+
+ btst &0x7,(%sp) # is trace enabled?
+ beq.l _fpsp_done
+ bra.l _real_trace
+
+#########################################################################
+iea_fmovm_ctrl:
+
+ bsr.l fmovm_ctrl # load ctrl regs
+
+iea_fmovm_exit:
+ fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+
+ btst &0x7,EXC_SR(%a6) # is trace on?
+ bne.b iea_fmovm_trace # yes
+
+ mov.l EXC_EXTWPTR(%a6),EXC_PC(%a6) # set Next PC
+
+ unlk %a6 # unravel the frame
+
+ bra.l _fpsp_done # exit to os
+
+#
+# The control reg instruction that took an "Unimplemented Effective Address"
+# exception was being traced. The "Current PC" for the trace frame is the
+# PC stacked for Unimp EA. The "Next PC" is in EXC_EXTWPTR.
+# After fixing the stack frame, jump to _real_trace().
+#
+# UNIMP EA FRAME TRACE FRAME
+# ***************** *****************
+# * 0x0 * 0x0f0 * * Current *
+# ***************** * PC *
+# * Current * *****************
+# * PC * * 0x2 * 0x024 *
+# ***************** *****************
+# * SR * * Next *
+# ***************** * PC *
+# *****************
+# * SR *
+# *****************
+# this ain't a pretty solution, but it works:
+# -restore a6 (not with unlk)
+# -shift stack frame down over where old a6 used to be
+# -add LOCAL_SIZE to stack pointer
+iea_fmovm_trace:
+ mov.l (%a6),%a6 # restore frame pointer
+ mov.w EXC_SR+LOCAL_SIZE(%sp),0x0+LOCAL_SIZE(%sp)
+ mov.l EXC_PC+LOCAL_SIZE(%sp),0x8+LOCAL_SIZE(%sp)
+ mov.l EXC_EXTWPTR+LOCAL_SIZE(%sp),0x2+LOCAL_SIZE(%sp)
+ mov.w &0x2024,0x6+LOCAL_SIZE(%sp) # stk fmt = 0x2; voff = 0x024
+ add.l &LOCAL_SIZE,%sp # clear stack frame
+
+ bra.l _real_trace
+
+#########################################################################
+# The FPU is disabled and so we should really have taken the "Line
+# F Emulator" exception. So, here we create an 8-word stack frame
+# from our 4-word stack frame. This means we must calculate the length
+# of the faulting instruction to get the "next PC". This is trivial for
+# immediate operands but requires some extra work for fmovm dynamic
+# which can use most addressing modes.
+iea_disabled:
+ mov.l (%sp)+,%d0 # restore d0
+
+ link %a6,&-LOCAL_SIZE # init stack frame
+
+ movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
+
+# PC of instruction that took the exception is the PC in the frame
+ mov.l EXC_PC(%a6),EXC_EXTWPTR(%a6)
+ mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
+ addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
+ bsr.l _imem_read_long # fetch the instruction words
+ mov.l %d0,EXC_OPWORD(%a6) # store OPWORD and EXTWORD
+
+ tst.w %d0 # is instr fmovm?
+ bmi.b iea_dis_fmovm # yes
+# instruction is using an extended precision immediate operand. therefore,
+# the total instruction length is 16 bytes.
+iea_dis_immed:
+ mov.l &0x10,%d0 # 16 bytes of instruction
+ bra.b iea_dis_cont
+iea_dis_fmovm:
+ btst &0xe,%d0 # is instr fmovm ctrl
+ bne.b iea_dis_fmovm_data # no
+# the instruction is a fmovm.l with 2 or 3 registers.
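+# length = opword + extword (4 bytes) plus one longword of immediate data
+# per control register: 2 regs -> 0xc bytes; all 3 (list == 0x7) -> 0x10.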
+ bfextu %d0{&19:&3},%d1
+ mov.l &0xc,%d0
+ cmpi.b %d1,&0x7 # move all regs?
+ bne.b iea_dis_cont
+ addq.l &0x4,%d0
+ bra.b iea_dis_cont
+# the instruction is an fmovm.x dynamic which can use many addressing
+# modes and thus can have several different total instruction lengths.
+# call fmovm_calc_ea which will go through the ea calc process and,
+# as a by-product, will tell us how long the instruction is.
+iea_dis_fmovm_data:
+ clr.l %d0
+ bsr.l fmovm_calc_ea
+ mov.l EXC_EXTWPTR(%a6),%d0
+ sub.l EXC_PC(%a6),%d0
+iea_dis_cont:
+ mov.w %d0,EXC_VOFF(%a6) # store stack shift value
+
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+
+ unlk %a6
+
+# here, we actually create the 8-word frame from the 4-word frame,
+# with the "next PC" as additional info.
+# the <ea> field is let as undefined.
+ subq.l &0x8,%sp # make room for new stack
+ mov.l %d0,-(%sp) # save d0
+ mov.w 0xc(%sp),0x4(%sp) # move SR
+ mov.l 0xe(%sp),0x6(%sp) # move Current PC
+ clr.l %d0
+ mov.w 0x12(%sp),%d0
+ mov.l 0x6(%sp),0x10(%sp) # move Current PC
+ add.l %d0,0x6(%sp) # make Next PC
+ mov.w &0x402c,0xa(%sp) # insert offset,frame format
+ mov.l (%sp)+,%d0 # restore d0
+
+ bra.l _real_fpu_disabled
+
+##########
+
+iea_iacc:
+ movc %pcr,%d0
+ btst &0x1,%d0
+ bne.b iea_iacc_cont
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+ fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1 on stack
+iea_iacc_cont:
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+
+ unlk %a6
+
+ subq.w &0x8,%sp # make stack frame bigger
+ mov.l 0x8(%sp),(%sp) # store SR,hi(PC)
+ mov.w 0xc(%sp),0x4(%sp) # store lo(PC)
+ mov.w &0x4008,0x6(%sp) # store voff
+ mov.l 0x2(%sp),0x8(%sp) # store ea
+ mov.l &0x09428001,0xc(%sp) # store fslw
+
+iea_acc_done:
+ btst &0x5,(%sp) # user or supervisor mode?
+ beq.b iea_acc_done2 # user
+ bset &0x2,0xd(%sp) # set supervisor TM bit
+
+iea_acc_done2:
+ bra.l _real_access
+
+iea_dacc:
+ lea -LOCAL_SIZE(%a6),%sp
+
+ movc %pcr,%d1
+ btst &0x1,%d1
+ bne.b iea_dacc_cont
+ fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1 on stack
+ fmovm.l LOCAL_SIZE+USER_FPCR(%sp),%fpcr,%fpsr,%fpiar # restore ctrl regs
+iea_dacc_cont:
+ mov.l (%a6),%a6
+
+ mov.l 0x4+LOCAL_SIZE(%sp),-0x8+0x4+LOCAL_SIZE(%sp)
+ mov.w 0x8+LOCAL_SIZE(%sp),-0x8+0x8+LOCAL_SIZE(%sp)
+ mov.w &0x4008,-0x8+0xa+LOCAL_SIZE(%sp)
+ mov.l %a0,-0x8+0xc+LOCAL_SIZE(%sp)
+ mov.w %d0,-0x8+0x10+LOCAL_SIZE(%sp)
+ mov.w &0x0001,-0x8+0x12+LOCAL_SIZE(%sp)
+
+ movm.l LOCAL_SIZE+EXC_DREGS(%sp),&0x0303 # restore d0-d1/a0-a1
+ add.w &LOCAL_SIZE-0x4,%sp
+
+ bra.b iea_acc_done
+
+#########################################################################
+# XDEF **************************************************************** #
+# _fpsp_operr(): 060FPSP entry point for FP Operr exception. #
+# #
+# This handler should be the first code executed upon taking the #
+# FP Operand Error exception in an operating system. #
+# #
+# XREF **************************************************************** #
+# _imem_read_long() - read instruction longword #
+# fix_skewed_ops() - adjust src operand in fsave frame #
+# _real_operr() - "callout" to operating system operr handler #
+# _dmem_write_{byte,word,long}() - store data to mem (opclass 3) #
+# store_dreg_{b,w,l}() - store data to data regfile (opclass 3) #
+# facc_out_{b,w,l}() - store to memory took access error (opcl 3) #
+# #
+# INPUT *************************************************************** #
+# - The system stack contains the FP Operr exception frame #
+# - The fsave frame contains the source operand #
+# #
+# OUTPUT ************************************************************** #
+# No access error: #
+# - The system stack is unchanged #
+# - The fsave frame contains the adjusted src op for opclass 0,2 #
+# #
+# ALGORITHM *********************************************************** #
+# In a system where the FP Operr exception is enabled, the goal #
+# is to get to the handler specified at _real_operr(). But, on the 060, #
+# for opclass zero and two instruction taking this exception, the #
+# input operand in the fsave frame may be incorrect for some cases #
+# and needs to be corrected. This handler calls fix_skewed_ops() to #
+# do just this and then exits through _real_operr(). #
+# For opclass 3 instructions, the 060 doesn't store the default #
+# operr result out to memory or data register file as it should. #
+# This code must emulate the move out before finally exiting through #
+# _real_operr(). The move out, if to memory, is performed using #
+# _mem_write() "callout" routines that may return a failing result. #
+# In this special case, the handler must exit through facc_out() #
+# which creates an access error stack frame from the current operr #
+# stack frame. #
+# #
+#########################################################################
+
+ global _fpsp_operr
+_fpsp_operr:
+
+ link.w %a6,&-LOCAL_SIZE # init stack frame
+
+ fsave FP_SRC(%a6) # grab the "busy" frame
+
+ movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
+ fmovm.l %fpcr,%fpsr,%fpiar,USER_FPCR(%a6) # save ctrl regs
+ fmovm.x &0xc0,EXC_FPREGS(%a6) # save fp0-fp1 on stack
+
+# the FPIAR holds the "current PC" of the faulting instruction
+ mov.l USER_FPIAR(%a6),EXC_EXTWPTR(%a6)
+
+ mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
+ addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
+ bsr.l _imem_read_long # fetch the instruction words
+ mov.l %d0,EXC_OPWORD(%a6)
+
+##############################################################################
+
+ btst &13,%d0 # is instr an fmove out?
+ bne.b foperr_out # fmove out
+
+
+# here, we simply see if the operand in the fsave frame needs to be "unskewed".
+# this would be the case for opclass two operations with a source infinity or
+# denorm operand in the sgl or dbl format. NANs also become skewed, but can't
+# cause an operr so we don't need to check for them here.
+ lea FP_SRC(%a6),%a0 # pass: ptr to src op
+ bsr.l fix_skewed_ops # fix src op
+
+foperr_exit:
+ fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+
+ frestore FP_SRC(%a6)
+
+ unlk %a6
+ bra.l _real_operr
+
+########################################################################
+
+#
+# the hardware does not save the default result to memory on enabled
+# operand error exceptions. we do this here before passing control to
+# the user operand error handler.
+#
+# byte, word, and long destination format operations can pass
+# through here. we simply need to test the sign of the src
+# operand and save the appropriate minimum or maximum integer value
+# to the destination pointed to by the stacked effective address.
+#
+# although packed opclass three operations can take operand error
+# exceptions, they won't pass through here since they are caught
+# first by the unsupported data format exception handler. that handler
+# sends them directly to _real_operr() if necessary.
+#
+foperr_out:
+
+ mov.w FP_SRC_EX(%a6),%d1 # fetch exponent
+ andi.w &0x7fff,%d1
+ cmpi.w %d1,&0x7fff
+ bne.b foperr_out_not_qnan
+# the operand is either an infinity or a QNAN.
+ tst.l FP_SRC_LO(%a6)
+ bne.b foperr_out_qnan
+ mov.l FP_SRC_HI(%a6),%d1
+ andi.l &0x7fffffff,%d1
+ beq.b foperr_out_not_qnan
+foperr_out_qnan:
+ mov.l FP_SRC_HI(%a6),L_SCR1(%a6)
+ bra.b foperr_out_jmp
+
+foperr_out_not_qnan:
+ mov.l &0x7fffffff,%d1
+ tst.b FP_SRC_EX(%a6)
+ bpl.b foperr_out_not_qnan2
+ addq.l &0x1,%d1
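+# (0x7fffffff + 1 wraps to 0x80000000: positive operands get the maximum
+# long integer, negative operands the minimum)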
+foperr_out_not_qnan2:
+ mov.l %d1,L_SCR1(%a6)
+
+foperr_out_jmp:
+ bfextu %d0{&19:&3},%d0 # extract dst format field
+ mov.b 1+EXC_OPWORD(%a6),%d1 # extract <ea> mode,reg
+ mov.w (tbl_operr.b,%pc,%d0.w*2),%a0
+ jmp (tbl_operr.b,%pc,%a0)
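+# each tbl_operr entry is a word offset from the table base; the mov.w
+# above fetches it and the jmp adds it back to tbl_operr to reach the
+# proper handler.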
+
+tbl_operr:
+ short foperr_out_l - tbl_operr # long word integer
+ short tbl_operr - tbl_operr # sgl prec shouldn't happen
+ short tbl_operr - tbl_operr # ext prec shouldn't happen
+ short foperr_exit - tbl_operr # packed won't enter here
+ short foperr_out_w - tbl_operr # word integer
+ short tbl_operr - tbl_operr # dbl prec shouldn't happen
+ short foperr_out_b - tbl_operr # byte integer
+ short tbl_operr - tbl_operr # packed won't enter here
+
+foperr_out_b:
+ mov.b L_SCR1(%a6),%d0 # load positive default result
+ cmpi.b %d1,&0x7 # is <ea> mode a data reg?
+ ble.b foperr_out_b_save_dn # yes
+ mov.l EXC_EA(%a6),%a0 # pass: <ea> of default result
+ bsr.l _dmem_write_byte # write the default result
+
+ tst.l %d1 # did dstore fail?
+ bne.l facc_out_b # yes
+
+ bra.w foperr_exit
+foperr_out_b_save_dn:
+ andi.w &0x0007,%d1
+ bsr.l store_dreg_b # store result to regfile
+ bra.w foperr_exit
+
+foperr_out_w:
+ mov.w L_SCR1(%a6),%d0 # load positive default result
+ cmpi.b %d1,&0x7 # is <ea> mode a data reg?
+ ble.b foperr_out_w_save_dn # yes
+ mov.l EXC_EA(%a6),%a0 # pass: <ea> of default result
+ bsr.l _dmem_write_word # write the default result
+
+ tst.l %d1 # did dstore fail?
+ bne.l facc_out_w # yes
+
+ bra.w foperr_exit
+foperr_out_w_save_dn:
+ andi.w &0x0007,%d1
+ bsr.l store_dreg_w # store result to regfile
+ bra.w foperr_exit
+
+foperr_out_l:
+ mov.l L_SCR1(%a6),%d0 # load positive default result
+ cmpi.b %d1,&0x7 # is <ea> mode a data reg?
+ ble.b foperr_out_l_save_dn # yes
+ mov.l EXC_EA(%a6),%a0 # pass: <ea> of default result
+ bsr.l _dmem_write_long # write the default result
+
+ tst.l %d1 # did dstore fail?
+ bne.l facc_out_l # yes
+
+ bra.w foperr_exit
+foperr_out_l_save_dn:
+ andi.w &0x0007,%d1
+ bsr.l store_dreg_l # store result to regfile
+ bra.w foperr_exit
+
+#########################################################################
+# XDEF **************************************************************** #
+# _fpsp_snan(): 060FPSP entry point for FP SNAN exception. #
+# #
+# This handler should be the first code executed upon taking the #
+# FP Signalling NAN exception in an operating system. #
+# #
+# XREF **************************************************************** #
+# _imem_read_long() - read instruction longword #
+# fix_skewed_ops() - adjust src operand in fsave frame #
+# _real_snan() - "callout" to operating system SNAN handler #
+# _dmem_write_{byte,word,long}() - store data to mem (opclass 3) #
+# store_dreg_{b,w,l}() - store data to data regfile (opclass 3) #
+# facc_out_{b,w,l,d,x}() - store to mem took acc error (opcl 3) #
+# _calc_ea_fout() - fix An if <ea> is -() or ()+; also get <ea> #
+# #
+# INPUT *************************************************************** #
+# - The system stack contains the FP SNAN exception frame #
+# - The fsave frame contains the source operand #
+# #
+# OUTPUT ************************************************************** #
+# No access error: #
+# - The system stack is unchanged #
+# - The fsave frame contains the adjusted src op for opclass 0,2 #
+# #
+# ALGORITHM *********************************************************** #
+# In a system where the FP SNAN exception is enabled, the goal #
+# is to get to the handler specified at _real_snan(). But, on the 060, #
+# for opclass zero and two instructions taking this exception, the #
+# input operand in the fsave frame may be incorrect for some cases #
+# and needs to be corrected. This handler calls fix_skewed_ops() to #
+# do just this and then exits through _real_snan(). #
+# For opclass 3 instructions, the 060 doesn't store the default #
+# SNAN result out to memory or data register file as it should. #
+# This code must emulate the move out before finally exiting through #
+# _real_snan(). The move out, if to memory, is performed using #
+# _mem_write() "callout" routines that may return a failing result. #
+# In this special case, the handler must exit through facc_out() #
+# which creates an access error stack frame from the current SNAN #
+# stack frame. #
+# For the case of an extended precision opclass 3 instruction, #
+# if the effective addressing mode was -() or ()+, then the address #
+# register must get updated by calling _calc_ea_fout(). If the <ea> #
+# was -(a7) from supervisor mode, then the exception frame currently #
+# on the system stack must be carefully moved "down" to make room #
+# for the operand being moved. #
+# #
+#########################################################################
+
+ global _fpsp_snan
+_fpsp_snan:
+
+ link.w %a6,&-LOCAL_SIZE # init stack frame
+
+ fsave FP_SRC(%a6) # grab the "busy" frame
+
+ movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
+ fmovm.l %fpcr,%fpsr,%fpiar,USER_FPCR(%a6) # save ctrl regs
+ fmovm.x &0xc0,EXC_FPREGS(%a6) # save fp0-fp1 on stack
+
+# the FPIAR holds the "current PC" of the faulting instruction
+ mov.l USER_FPIAR(%a6),EXC_EXTWPTR(%a6)
+
+ mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
+ addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
+ bsr.l _imem_read_long # fetch the instruction words
+ mov.l %d0,EXC_OPWORD(%a6)
+
+##############################################################################
+
+ btst &13,%d0 # is instr an fmove out?
+ bne.w fsnan_out # fmove out
+
+
+# here, we simply see if the operand in the fsave frame needs to be "unskewed".
+# this would be the case for opclass two operations with a source infinity or
+# denorm operand in the sgl or dbl format. NANs also become skewed and must be
+# fixed here.
+ lea FP_SRC(%a6),%a0 # pass: ptr to src op
+ bsr.l fix_skewed_ops # fix src op
+
+fsnan_exit:
+ fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+
+ frestore FP_SRC(%a6)
+
+ unlk %a6
+ bra.l _real_snan
+
+########################################################################
+
+#
+# the hardware does not save the default result to memory on enabled
+# snan exceptions. we do this here before passing control to
+# the user snan handler.
+#
+# byte, word, long, and packed destination format operations can pass
+# through here. since packed format operations were already handled by
+# fpsp_unsupp(), we need to do nothing else for them here.
+# for byte, word, and long, we simply set the SNAN bit of the source
+# operand's mantissa and store the appropriately sized upper mantissa
+# bits to the destination pointed to by the stacked effective address.
+#
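+# as a rough C sketch of the default-result rule implemented below
+# (illustration only, not part of the FPSP): the result written out is
+# the SNAN itself with its quiet bit forced on, truncated to the dst
+# width. the quiet bit is the msb of the mantissa, so it lands at bit
+# 6/14/30 of the byte/word/longword taken from FP_SRC_HI:
+#
+#	#include <stdint.h>
+#
+#	uint8_t  snan_dflt_b(uint8_t  hi) { return hi | 0x40; }
+#	uint16_t snan_dflt_w(uint16_t hi) { return hi | 0x4000; }
+#	uint32_t snan_dflt_l(uint32_t hi) { return hi | 0x40000000u; }
+#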
+fsnan_out:
+
+ bfextu %d0{&19:&3},%d0 # extract dst format field
+ mov.b 1+EXC_OPWORD(%a6),%d1 # extract <ea> mode,reg
+ mov.w (tbl_snan.b,%pc,%d0.w*2),%a0
+ jmp (tbl_snan.b,%pc,%a0)
+
+tbl_snan:
+ short fsnan_out_l - tbl_snan # long word integer
+ short fsnan_out_s - tbl_snan # sgl prec shouldn't happen
+ short fsnan_out_x - tbl_snan # ext prec shouldn't happen
+ short tbl_snan - tbl_snan # packed needs no help
+ short fsnan_out_w - tbl_snan # word integer
+ short fsnan_out_d - tbl_snan # dbl prec shouldn't happen
+ short fsnan_out_b - tbl_snan # byte integer
+ short tbl_snan - tbl_snan # packed needs no help
+
+fsnan_out_b:
+ mov.b FP_SRC_HI(%a6),%d0 # load upper byte of SNAN
+ bset &6,%d0 # set SNAN bit
+ cmpi.b %d1,&0x7 # is <ea> mode a data reg?
+ ble.b fsnan_out_b_dn # yes
+ mov.l EXC_EA(%a6),%a0 # pass: <ea> of default result
+ bsr.l _dmem_write_byte # write the default result
+
+ tst.l %d1 # did dstore fail?
+ bne.l facc_out_b # yes
+
+ bra.w fsnan_exit
+fsnan_out_b_dn:
+ andi.w &0x0007,%d1
+ bsr.l store_dreg_b # store result to regfile
+ bra.w fsnan_exit
+
+fsnan_out_w:
+ mov.w FP_SRC_HI(%a6),%d0 # load upper word of SNAN
+ bset &14,%d0 # set SNAN bit
+ cmpi.b %d1,&0x7 # is <ea> mode a data reg?
+ ble.b fsnan_out_w_dn # yes
+ mov.l EXC_EA(%a6),%a0 # pass: <ea> of default result
+ bsr.l _dmem_write_word # write the default result
+
+ tst.l %d1 # did dstore fail?
+ bne.l facc_out_w # yes
+
+ bra.w fsnan_exit
+fsnan_out_w_dn:
+ andi.w &0x0007,%d1
+ bsr.l store_dreg_w # store result to regfile
+ bra.w fsnan_exit
+
+fsnan_out_l:
+ mov.l FP_SRC_HI(%a6),%d0 # load upper longword of SNAN
+ bset &30,%d0 # set SNAN bit
+ cmpi.b %d1,&0x7 # is <ea> mode a data reg?
+ ble.b fsnan_out_l_dn # yes
+ mov.l EXC_EA(%a6),%a0 # pass: <ea> of default result
+ bsr.l _dmem_write_long # write the default result
+
+ tst.l %d1 # did dstore fail?
+ bne.l facc_out_l # yes
+
+ bra.w fsnan_exit
+fsnan_out_l_dn:
+ andi.w &0x0007,%d1
+ bsr.l store_dreg_l # store result to regfile
+ bra.w fsnan_exit
+
+fsnan_out_s:
+ cmpi.b %d1,&0x7 # is <ea> mode a data reg?
+ ble.b fsnan_out_d_dn # yes
+ mov.l FP_SRC_EX(%a6),%d0 # fetch SNAN sign
+ andi.l &0x80000000,%d0 # keep sign
+ ori.l &0x7fc00000,%d0 # insert new exponent,SNAN bit
+ mov.l FP_SRC_HI(%a6),%d1 # load mantissa
+ lsr.l &0x8,%d1 # shift mantissa for sgl
+ or.l %d1,%d0 # create sgl SNAN
+ mov.l EXC_EA(%a6),%a0 # pass: <ea> of default result
+ bsr.l _dmem_write_long # write the default result
+
+ tst.l %d1 # did dstore fail?
+ bne.l facc_out_l # yes
+
+ bra.w fsnan_exit
+fsnan_out_d_dn:
+ mov.l FP_SRC_EX(%a6),%d0 # fetch SNAN sign
+ andi.l &0x80000000,%d0 # keep sign
+ ori.l &0x7fc00000,%d0 # insert new exponent,SNAN bit
+ mov.l %d1,-(%sp)
+ mov.l FP_SRC_HI(%a6),%d1 # load mantissa
+ lsr.l &0x8,%d1 # shift mantissa for sgl
+ or.l %d1,%d0 # create sgl SNAN
+ mov.l (%sp)+,%d1
+ andi.w &0x0007,%d1
+ bsr.l store_dreg_l # store result to regfile
+ bra.w fsnan_exit
+
+fsnan_out_d:
+ mov.l FP_SRC_EX(%a6),%d0 # fetch SNAN sign
+ andi.l &0x80000000,%d0 # keep sign
+ ori.l &0x7ff80000,%d0 # insert new exponent,SNAN bit
+ mov.l FP_SRC_HI(%a6),%d1 # load hi mantissa
+ mov.l %d0,FP_SCR0_EX(%a6) # store to temp space
+ mov.l &11,%d0 # load shift amt
+ lsr.l %d0,%d1
+ or.l %d1,FP_SCR0_EX(%a6) # create dbl hi
+ mov.l FP_SRC_HI(%a6),%d1 # load hi mantissa
+ andi.l &0x000007ff,%d1
+ ror.l %d0,%d1
+ mov.l %d1,FP_SCR0_HI(%a6) # store to temp space
+ mov.l FP_SRC_LO(%a6),%d1 # load lo mantissa
+ lsr.l %d0,%d1
+ or.l %d1,FP_SCR0_HI(%a6) # create dbl lo
+ lea FP_SCR0(%a6),%a0 # pass: ptr to operand
+ mov.l EXC_EA(%a6),%a1 # pass: dst addr
+ movq.l &0x8,%d0 # pass: size of 8 bytes
+ bsr.l _dmem_write # write the default result
+
+ tst.l %d1 # did dstore fail?
+ bne.l facc_out_d # yes
+
+ bra.w fsnan_exit
+
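+# a rough C sketch of the single/double repacking done by fsnan_out_s
+# and fsnan_out_d above (illustration only; "ex", "hi", "lo" stand for
+# FP_SRC_EX, FP_SRC_HI, and FP_SRC_LO):
+#
+#	#include <stdint.h>
+#
+#	uint32_t snan_to_sgl(uint32_t ex, uint32_t hi)
+#	{
+#		/* sign | all-ones exp + quiet bit | top mantissa bits */
+#		return (ex & 0x80000000u) | 0x7fc00000u | (hi >> 8);
+#	}
+#
+#	uint64_t snan_to_dbl(uint32_t ex, uint32_t hi, uint32_t lo)
+#	{
+#		uint32_t dhi = (ex & 0x80000000u) | 0x7ff80000u | (hi >> 11);
+#		uint32_t dlo = (hi << 21) | (lo >> 11);
+#		return ((uint64_t)dhi << 32) | dlo;
+#	}
+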
+# for extended precision, if the addressing mode is pre-decrement or
+# post-increment, then the address register did not get updated.
+# in addition, for pre-decrement, the stacked <ea> is incorrect.
+fsnan_out_x:
+ clr.b SPCOND_FLG(%a6) # clear special case flag
+
+ mov.w FP_SRC_EX(%a6),FP_SCR0_EX(%a6)
+ clr.w 2+FP_SCR0(%a6)
+ mov.l FP_SRC_HI(%a6),%d0
+ bset &30,%d0
+ mov.l %d0,FP_SCR0_HI(%a6)
+ mov.l FP_SRC_LO(%a6),FP_SCR0_LO(%a6)
+
+ btst &0x5,EXC_SR(%a6) # supervisor mode exception?
+ bne.b fsnan_out_x_s # yes
+
+ mov.l %usp,%a0 # fetch user stack pointer
+ mov.l %a0,EXC_A7(%a6) # save on stack for calc_ea()
+ mov.l (%a6),EXC_A6(%a6)
+
+ bsr.l _calc_ea_fout # find the correct ea,update An
+ mov.l %a0,%a1
+ mov.l %a0,EXC_EA(%a6) # stack correct <ea>
+
+ mov.l EXC_A7(%a6),%a0
+ mov.l %a0,%usp # restore user stack pointer
+ mov.l EXC_A6(%a6),(%a6)
+
+fsnan_out_x_save:
+ lea FP_SCR0(%a6),%a0 # pass: ptr to operand
+ movq.l &0xc,%d0 # pass: size of extended
+ bsr.l _dmem_write # write the default result
+
+ tst.l %d1 # did dstore fail?
+ bne.l facc_out_x # yes
+
+ bra.w fsnan_exit
+
+fsnan_out_x_s:
+ mov.l (%a6),EXC_A6(%a6)
+
+ bsr.l _calc_ea_fout # find the correct ea,update An
+ mov.l %a0,%a1
+ mov.l %a0,EXC_EA(%a6) # stack correct <ea>
+
+ mov.l EXC_A6(%a6),(%a6)
+
+ cmpi.b SPCOND_FLG(%a6),&mda7_flg # is <ea> mode -(a7)?
+ bne.b fsnan_out_x_save # no
+
+# the operation was "fmove.x SNAN,-(a7)" from supervisor mode.
+ fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+
+ frestore FP_SRC(%a6)
+
+ mov.l EXC_A6(%a6),%a6 # restore frame pointer
+
+ mov.l LOCAL_SIZE+EXC_SR(%sp),LOCAL_SIZE+EXC_SR-0xc(%sp)
+ mov.l LOCAL_SIZE+EXC_PC+0x2(%sp),LOCAL_SIZE+EXC_PC+0x2-0xc(%sp)
+ mov.l LOCAL_SIZE+EXC_EA(%sp),LOCAL_SIZE+EXC_EA-0xc(%sp)
+
+ mov.l LOCAL_SIZE+FP_SCR0_EX(%sp),LOCAL_SIZE+EXC_SR(%sp)
+ mov.l LOCAL_SIZE+FP_SCR0_HI(%sp),LOCAL_SIZE+EXC_PC+0x2(%sp)
+ mov.l LOCAL_SIZE+FP_SCR0_LO(%sp),LOCAL_SIZE+EXC_EA(%sp)
+
+ add.l &LOCAL_SIZE-0x8,%sp
+
+ bra.l _real_snan
+
+#########################################################################
+# XDEF **************************************************************** #
+# _fpsp_inex(): 060FPSP entry point for FP Inexact exception. #
+# #
+# This handler should be the first code executed upon taking the #
+# FP Inexact exception in an operating system. #
+# #
+# XREF **************************************************************** #
+# _imem_read_long() - read instruction longword #
+# fix_skewed_ops() - adjust src operand in fsave frame #
+# set_tag_x() - determine optype of src/dst operands #
+# store_fpreg() - store opclass 0 or 2 result to FP regfile #
+# unnorm_fix() - change UNNORM operands to NORM or ZERO #
+# load_fpn2() - load dst operand from FP regfile #
+# smovcr() - emulate an "fmovcr" instruction #
+# fout() - emulate an opclass 3 instruction #
+# tbl_unsupp - address of table of emulation routines for opclass 0,2 #
+# _real_inex() - "callout" to operating system inexact handler #
+# #
+# INPUT *************************************************************** #
+# - The system stack contains the FP Inexact exception frame #
+# - The fsave frame contains the source operand #
+# #
+# OUTPUT ************************************************************** #
+# - The system stack is unchanged #
+# - The fsave frame contains the adjusted src op for opclass 0,2 #
+# #
+# ALGORITHM *********************************************************** #
+# In a system where the FP Inexact exception is enabled, the goal #
+# is to get to the handler specified at _real_inex(). But, on the 060, #
+# for opclass zero and two instructions taking this exception, the #
+# hardware doesn't store the correct result to the destination FP #
+# register as did the '040 and '881/2. This handler must emulate the #
+# instruction in order to get this value and then store it to the #
+# correct register before calling _real_inex(). #
+# For opclass 3 instructions, the 060 doesn't store the default #
+# inexact result out to memory or data register file as it should. #
+# This code must emulate the move out by calling fout() before finally #
+# exiting through _real_inex(). #
+# #
+#########################################################################
+
+ global _fpsp_inex
+_fpsp_inex:
+
+ link.w %a6,&-LOCAL_SIZE # init stack frame
+
+ fsave FP_SRC(%a6) # grab the "busy" frame
+
+ movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
+ fmovm.l %fpcr,%fpsr,%fpiar,USER_FPCR(%a6) # save ctrl regs
+ fmovm.x &0xc0,EXC_FPREGS(%a6) # save fp0-fp1 on stack
+
+# the FPIAR holds the "current PC" of the faulting instruction
+ mov.l USER_FPIAR(%a6),EXC_EXTWPTR(%a6)
+
+ mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
+ addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
+ bsr.l _imem_read_long # fetch the instruction words
+ mov.l %d0,EXC_OPWORD(%a6)
+
+##############################################################################
+
+ btst &13,%d0 # is instr an fmove out?
+ bne.w finex_out # fmove out
+
+
+# the hardware, for "fabs" and "fneg" w/ a long source format, puts the
+# longword integer directly into the upper longword of the mantissa along
+# w/ an exponent value of 0x401e. we convert this to extended precision here.
+ bfextu %d0{&19:&3},%d0 # fetch instr size
+ bne.b finex_cont # instr size is not long
+ cmpi.w FP_SRC_EX(%a6),&0x401e # is exponent 0x401e?
+ bne.b finex_cont # no
+ fmov.l &0x0,%fpcr
+ fmov.l FP_SRC_HI(%a6),%fp0 # load integer src
+ fmov.x %fp0,FP_SRC(%a6) # store integer as extended precision
+ mov.w &0xe001,0x2+FP_SRC(%a6)
+
+finex_cont:
+ lea FP_SRC(%a6),%a0 # pass: ptr to src op
+ bsr.l fix_skewed_ops # fix src op
+
+# Here, we zero the ccode and exception byte field since we're going to
+# emulate the whole instruction. Notice, though, that we don't kill the
+# INEX1 bit. This is because a packed op has long since been converted
+# to extended before arriving here. Therefore, we need to retain the
+# INEX1 bit from when the operand was first converted.
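+# (FPSR layout: ccodes = bits 31-24, quotient = bits 23-16, exception
+# status byte = bits 15-8 with INEX1 at bit 8, accrued exceptions =
+# bits 7-3; so the mask below keeps quotient, INEX1, and the accrued
+# byte.)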
+ andi.l &0x00ff01ff,USER_FPSR(%a6) # zero ccodes/excepts; keep INEX1,accrued
+
+ fmov.l &0x0,%fpcr # zero current control regs
+ fmov.l &0x0,%fpsr
+
+ bfextu EXC_EXTWORD(%a6){&0:&6},%d1 # extract upper 6 of cmdreg
+ cmpi.b %d1,&0x17 # is op an fmovecr?
+ beq.w finex_fmovcr # yes
+
+ lea FP_SRC(%a6),%a0 # pass: ptr to src op
+ bsr.l set_tag_x # tag the operand type
+ mov.b %d0,STAG(%a6) # maybe NORM,DENORM
+
+# bits four and five of the fp extension word separate the monadic and dyadic
+# operations that can pass through fpsp_inex(). remember that fcmp and ftst
+# will never take this exception, but fsincos will.
+ btst &0x5,1+EXC_CMDREG(%a6) # is operation monadic or dyadic?
+ beq.b finex_extract # monadic
+
+ btst &0x4,1+EXC_CMDREG(%a6) # is operation an fsincos?
+ bne.b finex_extract # yes
+
+ bfextu EXC_CMDREG(%a6){&6:&3},%d0 # dyadic; load dst reg
+ bsr.l load_fpn2 # load dst into FP_DST
+
+ lea FP_DST(%a6),%a0 # pass: ptr to dst op
+ bsr.l set_tag_x # tag the operand type
+ cmpi.b %d0,&UNNORM # is operand an UNNORM?
+ bne.b finex_op2_done # no
+ bsr.l unnorm_fix # yes; convert to NORM,DENORM,or ZERO
+finex_op2_done:
+ mov.b %d0,DTAG(%a6) # save dst optype tag
+
+finex_extract:
+ clr.l %d0
+ mov.b FPCR_MODE(%a6),%d0 # pass rnd prec/mode
+
+ mov.b 1+EXC_CMDREG(%a6),%d1
+ andi.w &0x007f,%d1 # extract extension
+
+ lea FP_SRC(%a6),%a0
+ lea FP_DST(%a6),%a1
+
+ mov.l (tbl_unsupp.l,%pc,%d1.w*4),%d1 # fetch routine addr
+ jsr (tbl_unsupp.l,%pc,%d1.l*1)
+
+# the operation has been emulated. the result is in fp0.
+finex_save:
+ bfextu EXC_CMDREG(%a6){&6:&3},%d0
+ bsr.l store_fpreg
+
+finex_exit:
+ fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+
+ frestore FP_SRC(%a6)
+
+ unlk %a6
+ bra.l _real_inex
+
+finex_fmovcr:
+ clr.l %d0
+ mov.b FPCR_MODE(%a6),%d0 # pass rnd prec,mode
+ mov.b 1+EXC_CMDREG(%a6),%d1
+ andi.l &0x0000007f,%d1 # pass rom offset
+ bsr.l smovcr
+ bra.b finex_save
+
+########################################################################
+
+#
+# the hardware does not save the default result to memory on enabled
+# inexact exceptions. we do this here before passing control to
+# the user inexact handler.
+#
+# byte, word, and long destination format operations can pass
+# through here. so can double and single precision.
+# although packed opclass three operations can take inexact
+# exceptions, they won't pass through here since they are caught
+# first by the unsupported data format exception handler. that handler
+# sends them directly to _real_inex() if necessary.
+#
+finex_out:
+
+ mov.b &NORM,STAG(%a6) # src is a NORM
+
+ clr.l %d0
+ mov.b FPCR_MODE(%a6),%d0 # pass rnd prec,mode
+
+ andi.l &0xffff00ff,USER_FPSR(%a6) # zero exception field
+
+ lea FP_SRC(%a6),%a0 # pass ptr to src operand
+
+ bsr.l fout # store the default result
+
+ bra.b finex_exit
+
+#########################################################################
+# XDEF **************************************************************** #
+# _fpsp_dz(): 060FPSP entry point for FP DZ exception. #
+# #
+# This handler should be the first code executed upon taking #
+# the FP DZ exception in an operating system. #
+# #
+# XREF **************************************************************** #
+# _imem_read_long() - read instruction longword from memory #
+# fix_skewed_ops() - adjust fsave operand #
+# _real_dz() - "callout" exit point from FP DZ handler #
+# #
+# INPUT *************************************************************** #
+# - The system stack contains the FP DZ exception stack. #
+# - The fsave frame contains the source operand. #
+# #
+# OUTPUT ************************************************************** #
+# - The system stack contains the FP DZ exception stack. #
+# - The fsave frame contains the adjusted source operand. #
+# #
+# ALGORITHM *********************************************************** #
+# In a system where the DZ exception is enabled, the goal is to #
+# get to the handler specified at _real_dz(). But, on the 060, when the #
+# exception is taken, the input operand in the fsave state frame may #
+# be incorrect for some cases and need to be adjusted. So, this package #
+# adjusts the operand using fix_skewed_ops() and then branches to #
+# _real_dz(). #
+# #
+#########################################################################
+
+ global _fpsp_dz
+_fpsp_dz:
+
+ link.w %a6,&-LOCAL_SIZE # init stack frame
+
+ fsave FP_SRC(%a6) # grab the "busy" frame
+
+ movm.l &0x0303,EXC_DREGS(%a6) # save d0-d1/a0-a1
+ fmovm.l %fpcr,%fpsr,%fpiar,USER_FPCR(%a6) # save ctrl regs
+ fmovm.x &0xc0,EXC_FPREGS(%a6) # save fp0-fp1 on stack
+
+# the FPIAR holds the "current PC" of the faulting instruction
+ mov.l USER_FPIAR(%a6),EXC_EXTWPTR(%a6)
+
+ mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
+ addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
+ bsr.l _imem_read_long # fetch the instruction words
+ mov.l %d0,EXC_OPWORD(%a6)
+
+##############################################################################
+
+
+# here, we simply see if the operand in the fsave frame needs to be "unskewed".
+# this would be the case for opclass two operations with a source zero
+# in the sgl or dbl format.
+ lea FP_SRC(%a6),%a0 # pass: ptr to src op
+ bsr.l fix_skewed_ops # fix src op
+
+fdz_exit:
+ fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+
+ frestore FP_SRC(%a6)
+
+ unlk %a6
+ bra.l _real_dz
+
+#########################################################################
+# XDEF **************************************************************** #
+# _fpsp_fline(): 060FPSP entry point for "Line F emulator" #
+# exception when the "reduced" version of the #
+# FPSP is implemented that does not emulate #
+# FP unimplemented instructions. #
+# #
+# This handler should be the first code executed upon taking a #
+# "Line F Emulator" exception in an operating system integrating #
+# the reduced version of 060FPSP. #
+# #
+# XREF **************************************************************** #
+# _real_fpu_disabled() - Handle "FPU disabled" exceptions #
+# _real_fline() - Handle all other cases (treated equally) #
+# #
+# INPUT *************************************************************** #
+# - The system stack contains a "Line F Emulator" exception #
+# stack frame. #
+# #
+# OUTPUT ************************************************************** #
+# - The system stack is unchanged. #
+# #
+# ALGORITHM *********************************************************** #
+# When a "Line F Emulator" exception occurs in a system where #
+# "FPU Unimplemented" instructions will not be emulated, the exception #
+# can occur because the FPU is disabled or the instruction is to be #
+# classified as "Line F". This module determines which case exists and #
+# calls the appropriate "callout". #
+# #
+#########################################################################
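+
+# a rough C sketch of the dispatch below (illustration only): the word
+# at 0x6(%sp) is the frame format/vector word, (format<<12)|vec_offset.
+# 0x402c is a format $4 frame with vector offset 0x2c ("Line F
+# emulator"), the frame the 060 builds when the FPU is disabled. the
+# callout names mirror the labels used here:
+#
+#	#include <stdint.h>
+#
+#	extern void real_fpu_disabled(void), real_fline(void);
+#
+#	void fpsp_fline(uint16_t fmt_vec)
+#	{
+#		if (fmt_vec == 0x402c)
+#			real_fpu_disabled();
+#		else
+#			real_fline();
+#	}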
+
+ global _fpsp_fline
+_fpsp_fline:
+
+# check to see if the FPU is disabled. if so, jump to the OS entry
+# point for that condition.
+ cmpi.w 0x6(%sp),&0x402c
+ beq.l _real_fpu_disabled
+
+ bra.l _real_fline
+
+#########################################################################
+# XDEF **************************************************************** #
+# _dcalc_ea(): calc correct <ea> from <ea> stacked on exception #
+# #
+# XREF **************************************************************** #
+# inc_areg() - increment an address register #
+# dec_areg() - decrement an address register #
+# #
+# INPUT *************************************************************** #
+# d0 = number of bytes to adjust <ea> by #
+# #
+# OUTPUT ************************************************************** #
+# a0 = correct calculated <ea> #
+# #
+# ALGORITHM *********************************************************** #
+# "Dummy" CALCulate Effective Address: #
+# The stacked <ea> for FP unimplemented instructions and opclass #
+# two packed instructions is correct with the exception of... #
+# #
+# 1) -(An) : The register is not updated regardless of size. #
+# Also, for extended precision and packed, the #
+# stacked <ea> value is 8 bytes too big #
+# 2) (An)+ : The register is not updated. #
+# 3) #<data> : The upper longword of the immediate operand is #
+# stacked. b, w, l, and s sizes are completely #
+# stacked; d, x, and p are not. #
+# #
+#########################################################################
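+
+# a rough C sketch of the fixups above (illustration only; the mode
+# constants and the "fpiar" base are hypothetical stand-ins for the
+# opword decode and USER_FPIAR handling done below):
+#
+#	#include <stdint.h>
+#
+#	enum mode { POSTINC, PREDEC, IMM, OTHER };
+#
+#	uint32_t dcalc_ea(enum mode m, uint32_t opsize, uint32_t stacked_ea,
+#			  uint32_t *an, uint32_t fpiar)
+#	{
+#		switch (m) {
+#		case POSTINC:	/* (An)+ : update An, <ea> already right */
+#			*an += opsize;
+#			return stacked_ea;
+#		case PREDEC:	/* -(An) : update An; x/p <ea> is 8 too big */
+#			*an -= opsize;
+#			return (opsize == 12) ? stacked_ea - 8 : stacked_ea;
+#		case IMM:	/* #<data>: operand follows opword+extword */
+#			return fpiar + 4;
+#		default:
+#			return stacked_ea;
+#		}
+#	}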
+
+ global _dcalc_ea
+_dcalc_ea:
+ mov.l %d0, %a0 # move # bytes to %a0
+
+ mov.b 1+EXC_OPWORD(%a6), %d0 # fetch opcode word
+ mov.l %d0, %d1 # make a copy
+
+ andi.w &0x38, %d0 # extract mode field
+ andi.l &0x7, %d1 # extract reg field
+
+ cmpi.b %d0,&0x18 # is mode (An)+ ?
+ beq.b dcea_pi # yes
+
+ cmpi.b %d0,&0x20 # is mode -(An) ?
+ beq.b dcea_pd # yes
+
+ or.w %d1,%d0 # concat mode,reg
+ cmpi.b %d0,&0x3c # is mode #<data>?
+
+ beq.b dcea_imm # yes
+
+ mov.l EXC_EA(%a6),%a0 # return <ea>
+ rts
+
+# need to set immediate data flag here since we'll need to do
+# an imem_read to fetch this later.
+dcea_imm:
+ mov.b &immed_flg,SPCOND_FLG(%a6)
+ lea ([USER_FPIAR,%a6],0x4),%a0 # return <ea> of immediate operand
+ rts
+
+# here, the <ea> is stacked correctly. however, we must update the
+# address register...
+dcea_pi:
+ mov.l %a0,%d0 # pass amt to inc by
+ bsr.l inc_areg # inc addr register
+
+ mov.l EXC_EA(%a6),%a0 # stacked <ea> is correct
+ rts
+
+# the <ea> is stacked correctly for all but extended and packed, for
+# which the stacked <ea> is 8 bytes too large.
+# it would make no sense to have a pre-decrement to a7 in supervisor
+# mode so we don't even worry about this tricky case here : )
+dcea_pd:
+ mov.l %a0,%d0 # pass amt to dec by
+ bsr.l dec_areg # dec addr register
+
+ mov.l EXC_EA(%a6),%a0 # stacked <ea> is correct
+
+ cmpi.b %d0,&0xc # is opsize ext or packed?
+ beq.b dcea_pd2 # yes
+ rts
+dcea_pd2:
+ sub.l &0x8,%a0 # correct <ea>
+ mov.l %a0,EXC_EA(%a6) # put correct <ea> on stack
+ rts
+
+#########################################################################
+# XDEF **************************************************************** #
+# _calc_ea_fout(): calculate correct stacked <ea> for extended #
+# and packed data opclass 3 operations. #
+# #
+# XREF **************************************************************** #
+# None #
+# #
+# INPUT *************************************************************** #
+# None #
+# #
+# OUTPUT ************************************************************** #
+# a0 = correct effective address #
+# #
+# ALGORITHM *********************************************************** #
+# For opclass 3 extended and packed data operations, the <ea> #
+# stacked for the exception is incorrect for the -(an) addressing #
+# mode. Also, for both -(an) and (an)+, the address register itself #
+# must get updated. #
+# So, for -(an), we must subtract 8 off of the stacked <ea> value #
+# and return that value as the correct <ea> and store that value in An. #
+# For (an)+, the stacked <ea> is correct but we must adjust An by +12. #
+# #
+#########################################################################
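+
+# a rough C sketch of the two fixups above (illustration only; mode
+# constants as in the dcalc_ea sketch earlier):
+#
+#	uint32_t calc_ea_fout(enum mode m, uint32_t stacked_ea, uint32_t *an)
+#	{
+#		if (m == POSTINC) {	/* <ea> correct; An += 12 */
+#			*an += 12;
+#			return stacked_ea;
+#		}
+#		if (m == PREDEC) {	/* <ea> is 8 too big; An = fixed <ea> */
+#			*an = stacked_ea - 8;
+#			return *an;
+#		}
+#		return stacked_ea;
+#	}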
+
+# This calc_ea is currently used to retrieve the correct <ea>
+# for fmove outs of type extended and packed.
+ global _calc_ea_fout
+_calc_ea_fout:
+ mov.b 1+EXC_OPWORD(%a6),%d0 # fetch opcode word
+ mov.l %d0,%d1 # make a copy
+
+ andi.w &0x38,%d0 # extract mode field
+ andi.l &0x7,%d1 # extract reg field
+
+ cmpi.b %d0,&0x18 # is mode (An)+ ?
+ beq.b ceaf_pi # yes
+
+ cmpi.b %d0,&0x20 # is mode -(An) ?
+ beq.w ceaf_pd # yes
+
+ mov.l EXC_EA(%a6),%a0 # stacked <ea> is correct
+ rts
+
+# (An)+ : extended and packed fmove out
+# : stacked <ea> is correct
+# : "An" not updated
+ceaf_pi:
+ mov.w (tbl_ceaf_pi.b,%pc,%d1.w*2),%d1
+ mov.l EXC_EA(%a6),%a0
+ jmp (tbl_ceaf_pi.b,%pc,%d1.w*1)
+
+ swbeg &0x8
+tbl_ceaf_pi:
+ short ceaf_pi0 - tbl_ceaf_pi
+ short ceaf_pi1 - tbl_ceaf_pi
+ short ceaf_pi2 - tbl_ceaf_pi
+ short ceaf_pi3 - tbl_ceaf_pi
+ short ceaf_pi4 - tbl_ceaf_pi
+ short ceaf_pi5 - tbl_ceaf_pi
+ short ceaf_pi6 - tbl_ceaf_pi
+ short ceaf_pi7 - tbl_ceaf_pi
+
+ceaf_pi0:
+ addi.l &0xc,EXC_DREGS+0x8(%a6)
+ rts
+ceaf_pi1:
+ addi.l &0xc,EXC_DREGS+0xc(%a6)
+ rts
+ceaf_pi2:
+ add.l &0xc,%a2
+ rts
+ceaf_pi3:
+ add.l &0xc,%a3
+ rts
+ceaf_pi4:
+ add.l &0xc,%a4
+ rts
+ceaf_pi5:
+ add.l &0xc,%a5
+ rts
+ceaf_pi6:
+ addi.l &0xc,EXC_A6(%a6)
+ rts
+ceaf_pi7:
+ mov.b &mia7_flg,SPCOND_FLG(%a6)
+ addi.l &0xc,EXC_A7(%a6)
+ rts
+
+# -(An) : extended and packed fmove out
+# : stacked <ea> = actual <ea> + 8
+# : "An" not updated
+ceaf_pd:
+ mov.w (tbl_ceaf_pd.b,%pc,%d1.w*2),%d1
+ mov.l EXC_EA(%a6),%a0
+ sub.l &0x8,%a0
+ sub.l &0x8,EXC_EA(%a6)
+ jmp (tbl_ceaf_pd.b,%pc,%d1.w*1)
+
+ swbeg &0x8
+tbl_ceaf_pd:
+ short ceaf_pd0 - tbl_ceaf_pd
+ short ceaf_pd1 - tbl_ceaf_pd
+ short ceaf_pd2 - tbl_ceaf_pd
+ short ceaf_pd3 - tbl_ceaf_pd
+ short ceaf_pd4 - tbl_ceaf_pd
+ short ceaf_pd5 - tbl_ceaf_pd
+ short ceaf_pd6 - tbl_ceaf_pd
+ short ceaf_pd7 - tbl_ceaf_pd
+
+ceaf_pd0:
+ mov.l %a0,EXC_DREGS+0x8(%a6)
+ rts
+ceaf_pd1:
+ mov.l %a0,EXC_DREGS+0xc(%a6)
+ rts
+ceaf_pd2:
+ mov.l %a0,%a2
+ rts
+ceaf_pd3:
+ mov.l %a0,%a3
+ rts
+ceaf_pd4:
+ mov.l %a0,%a4
+ rts
+ceaf_pd5:
+ mov.l %a0,%a5
+ rts
+ceaf_pd6:
+ mov.l %a0,EXC_A6(%a6)
+ rts
+ceaf_pd7:
+ mov.l %a0,EXC_A7(%a6)
+ mov.b &mda7_flg,SPCOND_FLG(%a6)
+ rts
+
+#
+# This table holds the offsets of the emulation routines for each individual
+# math operation relative to the address of this table. Included are
+# routines like fadd/fmul/fabs. The transcendentals ARE NOT. This is because
+# this table is for the version of the 060FPSP without transcendentals.
+# The location within the table is determined by the extension bits of the
+# operation longword.
+#
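+# the entries below are stored self-relative ("routine - tbl_unsupp") so
+# that the table is position independent. a rough C sketch of the fetch-
+# and-jump done from fpsp_inex() above (illustration only):
+#
+#	#include <stdint.h>
+#
+#	extern const int32_t tbl[109];		/* self-relative offsets */
+#
+#	void dispatch(unsigned ext)		/* low 7 extension bits */
+#	{
+#		void (*fn)(void);
+#		fn = (void (*)(void))((const char *)tbl + tbl[ext]);
+#		fn();
+#	}
+#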
+
+ swbeg &109
+tbl_unsupp:
+ long fin - tbl_unsupp # 00: fmove
+ long fint - tbl_unsupp # 01: fint
+ long tbl_unsupp - tbl_unsupp # 02: fsinh
+ long fintrz - tbl_unsupp # 03: fintrz
+ long fsqrt - tbl_unsupp # 04: fsqrt
+ long tbl_unsupp - tbl_unsupp
+ long tbl_unsupp - tbl_unsupp # 06: flognp1
+ long tbl_unsupp - tbl_unsupp
+ long tbl_unsupp - tbl_unsupp # 08: fetoxm1
+ long tbl_unsupp - tbl_unsupp # 09: ftanh
+ long tbl_unsupp - tbl_unsupp # 0a: fatan
+ long tbl_unsupp - tbl_unsupp
+ long tbl_unsupp - tbl_unsupp # 0c: fasin
+ long tbl_unsupp - tbl_unsupp # 0d: fatanh
+ long tbl_unsupp - tbl_unsupp # 0e: fsin
+ long tbl_unsupp - tbl_unsupp # 0f: ftan
+ long tbl_unsupp - tbl_unsupp # 10: fetox
+ long tbl_unsupp - tbl_unsupp # 11: ftwotox
+ long tbl_unsupp - tbl_unsupp # 12: ftentox
+ long tbl_unsupp - tbl_unsupp
+ long tbl_unsupp - tbl_unsupp # 14: flogn
+ long tbl_unsupp - tbl_unsupp # 15: flog10
+ long tbl_unsupp - tbl_unsupp # 16: flog2
+ long tbl_unsupp - tbl_unsupp
+ long fabs - tbl_unsupp # 18: fabs
+ long tbl_unsupp - tbl_unsupp # 19: fcosh
+ long fneg - tbl_unsupp # 1a: fneg
+ long tbl_unsupp - tbl_unsupp
+ long tbl_unsupp - tbl_unsupp # 1c: facos
+ long tbl_unsupp - tbl_unsupp # 1d: fcos
+ long tbl_unsupp - tbl_unsupp # 1e: fgetexp
+ long tbl_unsupp - tbl_unsupp # 1f: fgetman
+ long fdiv - tbl_unsupp # 20: fdiv
+ long tbl_unsupp - tbl_unsupp # 21: fmod
+ long fadd - tbl_unsupp # 22: fadd
+ long fmul - tbl_unsupp # 23: fmul
+ long fsgldiv - tbl_unsupp # 24: fsgldiv
+ long tbl_unsupp - tbl_unsupp # 25: frem
+ long tbl_unsupp - tbl_unsupp # 26: fscale
+ long fsglmul - tbl_unsupp # 27: fsglmul
+ long fsub - tbl_unsupp # 28: fsub
+ long tbl_unsupp - tbl_unsupp
+ long tbl_unsupp - tbl_unsupp
+ long tbl_unsupp - tbl_unsupp
+ long tbl_unsupp - tbl_unsupp
+ long tbl_unsupp - tbl_unsupp
+ long tbl_unsupp - tbl_unsupp
+ long tbl_unsupp - tbl_unsupp
+ long tbl_unsupp - tbl_unsupp # 30: fsincos
+ long tbl_unsupp - tbl_unsupp # 31: fsincos
+ long tbl_unsupp - tbl_unsupp # 32: fsincos
+ long tbl_unsupp - tbl_unsupp # 33: fsincos
+ long tbl_unsupp - tbl_unsupp # 34: fsincos
+ long tbl_unsupp - tbl_unsupp # 35: fsincos
+ long tbl_unsupp - tbl_unsupp # 36: fsincos
+ long tbl_unsupp - tbl_unsupp # 37: fsincos
+ long fcmp - tbl_unsupp # 38: fcmp
+ long tbl_unsupp - tbl_unsupp
+ long ftst - tbl_unsupp # 3a: ftst
+ long tbl_unsupp - tbl_unsupp
+ long tbl_unsupp - tbl_unsupp
+ long tbl_unsupp - tbl_unsupp
+ long tbl_unsupp - tbl_unsupp
+ long tbl_unsupp - tbl_unsupp
+ long fsin - tbl_unsupp # 40: fsmove
+ long fssqrt - tbl_unsupp # 41: fssqrt
+ long tbl_unsupp - tbl_unsupp
+ long tbl_unsupp - tbl_unsupp
+ long fdin - tbl_unsupp # 44: fdmove
+ long fdsqrt - tbl_unsupp # 45: fdsqrt
+ long tbl_unsupp - tbl_unsupp
+ long tbl_unsupp - tbl_unsupp
+ long tbl_unsupp - tbl_unsupp
+ long tbl_unsupp - tbl_unsupp
+ long tbl_unsupp - tbl_unsupp
+ long tbl_unsupp - tbl_unsupp
+ long tbl_unsupp - tbl_unsupp
+ long tbl_unsupp - tbl_unsupp
+ long tbl_unsupp - tbl_unsupp
+ long tbl_unsupp - tbl_unsupp
+ long tbl_unsupp - tbl_unsupp
+ long tbl_unsupp - tbl_unsupp
+ long tbl_unsupp - tbl_unsupp
+ long tbl_unsupp - tbl_unsupp
+ long tbl_unsupp - tbl_unsupp
+ long tbl_unsupp - tbl_unsupp
+ long tbl_unsupp - tbl_unsupp
+ long tbl_unsupp - tbl_unsupp
+ long fsabs - tbl_unsupp # 58: fsabs
+ long tbl_unsupp - tbl_unsupp
+ long fsneg - tbl_unsupp # 5a: fsneg
+ long tbl_unsupp - tbl_unsupp
+ long fdabs - tbl_unsupp # 5c: fdabs
+ long tbl_unsupp - tbl_unsupp
+ long fdneg - tbl_unsupp # 5e: fdneg
+ long tbl_unsupp - tbl_unsupp
+ long fsdiv - tbl_unsupp # 60: fsdiv
+ long tbl_unsupp - tbl_unsupp
+ long fsadd - tbl_unsupp # 62: fsadd
+ long fsmul - tbl_unsupp # 63: fsmul
+ long fddiv - tbl_unsupp # 64: fddiv
+ long tbl_unsupp - tbl_unsupp
+ long fdadd - tbl_unsupp # 66: fdadd
+ long fdmul - tbl_unsupp # 67: fdmul
+ long fssub - tbl_unsupp # 68: fssub
+ long tbl_unsupp - tbl_unsupp
+ long tbl_unsupp - tbl_unsupp
+ long tbl_unsupp - tbl_unsupp
+ long fdsub - tbl_unsupp # 6c: fdsub
+
+#################################################
+# Add this here so non-fp modules can compile.
+# (smovcr is called from fpsp_inex.)
+ global smovcr
+smovcr:
+ bra.b smovcr
+
+#########################################################################
+# XDEF **************************************************************** #
+# fmovm_dynamic(): emulate "fmovm" dynamic instruction #
+# #
+# XREF **************************************************************** #
+# fetch_dreg() - fetch data register #
+# {i,d,}mem_read() - fetch data from memory #
+# _mem_write() - write data to memory #
+# iea_iacc() - instruction memory access error occurred #
+# iea_dacc() - data memory access error occurred #
+# restore() - restore An index regs if access error occurred #
+# #
+# INPUT *************************************************************** #
+# None #
+# #
+# OUTPUT ************************************************************** #
+# If instr is "fmovm Dn,-(A7)" from supervisor mode, #
+# d0 = size of dump #
+# d1 = Dn #
+# Else if instruction access error, #
+# d0 = FSLW #
+# Else if data access error, #
+# d0 = FSLW #
+# a0 = address of fault #
+# Else #
+# none. #
+# #
+# ALGORITHM *********************************************************** #
+# The effective address must be calculated since this is entered #
+# from an "Unimplemented Effective Address" exception handler. So, we #
+# have our own fcalc_ea() routine here. If an access error is flagged #
+# by a _{i,d,}mem_read() call, we must exit through the special #
+# handler. #
+# The data register is determined and its value loaded to get the #
+# string of FP registers affected. This value is used as an index into #
+# a lookup table such that we can determine the number of bytes #
+# involved. #
+# If the instruction is "fmovm.x <ea>,Dn", a _mem_read() is used #
+# to read in all FP values. Again, _mem_read() may fail and require a #
+# special exit. #
+# If the instruction is "fmovm.x DN,<ea>", a _mem_write() is used #
+# to write all FP values. _mem_write() may also fail. #
+# If the instruction is "fmovm.x DN,-(a7)" from supervisor mode, #
+# then we return the size of the dump and the string to the caller #
+# so that the move can occur outside of this routine. This special #
+# case is required so that moves to the system stack are handled #
+# correctly. #
+# #
+# DYNAMIC: #
+# fmovm.x dn, <ea> #
+# fmovm.x <ea>, dn #
+# #
+# <WORD 1> <WORD2> #
+# 1111 0010 00 |<ea>| 11@& 1000 0$$$ 0000 #
+# #
+# & = (0): predecrement addressing mode #
+# (1): postincrement or control addressing mode #
+# @ = (0): move listed regs from memory to the FPU #
+# (1): move listed regs from the FPU to memory #
+# $$$ : index of data register holding reg select mask #
+# #
+# NOTES: #
+# If the data register holds a zero, then the #
+# instruction is a nop. #
+# #
+#########################################################################
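+
+# a rough C sketch of the <WORD2> decode above (illustration only;
+# struct/field names are hypothetical):
+#
+#	#include <stdint.h>
+#
+#	struct fmovm_dyn {
+#		int to_mem;	/* @ : 1 = FPU -> memory        */
+#		int postinc;	/* & : 1 = postinc/control mode */
+#		int dreg;	/* $$$ : Dn holding reg mask    */
+#	};
+#
+#	struct fmovm_dyn decode(uint16_t w2)
+#	{
+#		struct fmovm_dyn d;
+#		d.to_mem  = (w2 >> 13) & 1;
+#		d.postinc = (w2 >> 12) & 1;
+#		d.dreg    = (w2 >> 4) & 7;
+#		return d;
+#	}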
+
+ global fmovm_dynamic
+fmovm_dynamic:
+
+# extract the data register in which the bit string resides...
+ mov.b 1+EXC_EXTWORD(%a6),%d1 # fetch extword
+ andi.w &0x70,%d1 # extract reg bits
+ lsr.b &0x4,%d1 # shift into lo bits
+
+# fetch the bit string into d0...
+ bsr.l fetch_dreg # fetch reg string
+
+ andi.l &0x000000ff,%d0 # keep only lo byte
+
+ mov.l %d0,-(%sp) # save strg
+ mov.b (tbl_fmovm_size.w,%pc,%d0),%d0
+ mov.l %d0,-(%sp) # save size
+ bsr.l fmovm_calc_ea # calculate <ea>
+ mov.l (%sp)+,%d0 # restore size
+ mov.l (%sp)+,%d1 # restore strg
+
+# if the bit string is a zero, then the operation is a no-op
+# but, make sure that we've calculated ea and advanced the opword pointer
+ beq.w fmovm_data_done
+
+# separate move ins from move outs...
+ btst &0x5,EXC_EXTWORD(%a6) # is it a move in or out?
+ beq.w fmovm_data_in # it's a move in
+
+#############
+# MOVE OUT: #
+#############
+fmovm_data_out:
+ btst &0x4,EXC_EXTWORD(%a6) # control or predecrement?
+ bne.w fmovm_out_ctrl # control
+
+############################
+fmovm_out_predec:
+# for predecrement mode, the bit string is the opposite of both control
+# operations and postincrement mode. (bit7 = FP7 ... bit0 = FP0)
+# here, we convert it to be just like the others...
+ mov.b (tbl_fmovm_convert.w,%pc,%d1.w*1),%d1
+
+ btst &0x5,EXC_SR(%a6) # user or supervisor mode?
+ beq.b fmovm_out_ctrl # user
+
+fmovm_out_predec_s:
+ cmpi.b SPCOND_FLG(%a6),&mda7_flg # is <ea> mode -(a7)?
+ bne.b fmovm_out_ctrl
+
+# the operation was unfortunately an "fmovm.x dn,-(sp)" called from
+# supervisor mode.
+# we're also passing "size" and "strg" back to the calling routine
+ rts
+
+############################
+fmovm_out_ctrl:
+ mov.l %a0,%a1 # move <ea> to a1
+
+ sub.l %d0,%sp # subtract size of dump
+ lea (%sp),%a0
+
+ tst.b %d1 # should FP0 be moved?
+ bpl.b fmovm_out_ctrl_fp1 # no
+
+ mov.l 0x0+EXC_FP0(%a6),(%a0)+ # yes
+ mov.l 0x4+EXC_FP0(%a6),(%a0)+
+ mov.l 0x8+EXC_FP0(%a6),(%a0)+
+
+fmovm_out_ctrl_fp1:
+ lsl.b &0x1,%d1 # should FP1 be moved?
+ bpl.b fmovm_out_ctrl_fp2 # no
+
+ mov.l 0x0+EXC_FP1(%a6),(%a0)+ # yes
+ mov.l 0x4+EXC_FP1(%a6),(%a0)+
+ mov.l 0x8+EXC_FP1(%a6),(%a0)+
+
+fmovm_out_ctrl_fp2:
+ lsl.b &0x1,%d1 # should FP2 be moved?
+ bpl.b fmovm_out_ctrl_fp3 # no
+
+ fmovm.x &0x20,(%a0) # yes
+ add.l &0xc,%a0
+
+fmovm_out_ctrl_fp3:
+ lsl.b &0x1,%d1 # should FP3 be moved?
+ bpl.b fmovm_out_ctrl_fp4 # no
+
+ fmovm.x &0x10,(%a0) # yes
+ add.l &0xc,%a0
+
+fmovm_out_ctrl_fp4:
+ lsl.b &0x1,%d1 # should FP4 be moved?
+ bpl.b fmovm_out_ctrl_fp5 # no
+
+ fmovm.x &0x08,(%a0) # yes
+ add.l &0xc,%a0
+
+fmovm_out_ctrl_fp5:
+ lsl.b &0x1,%d1 # should FP5 be moved?
+ bpl.b fmovm_out_ctrl_fp6 # no
+
+ fmovm.x &0x04,(%a0) # yes
+ add.l &0xc,%a0
+
+fmovm_out_ctrl_fp6:
+ lsl.b &0x1,%d1 # should FP6 be moved?
+ bpl.b fmovm_out_ctrl_fp7 # no
+
+ fmovm.x &0x02,(%a0) # yes
+ add.l &0xc,%a0
+
+fmovm_out_ctrl_fp7:
+ lsl.b &0x1,%d1 # should FP7 be moved?
+ bpl.b fmovm_out_ctrl_done # no
+
+ fmovm.x &0x01,(%a0) # yes
+ add.l &0xc,%a0
+
+fmovm_out_ctrl_done:
+ mov.l %a1,L_SCR1(%a6)
+
+ lea (%sp),%a0 # pass: supervisor src
+ mov.l %d0,-(%sp) # save size
+ bsr.l _dmem_write # copy data to user mem
+
+ mov.l (%sp)+,%d0
+ add.l %d0,%sp # clear fpreg data from stack
+
+ tst.l %d1 # did dstore err?
+ bne.w fmovm_out_err # yes
+
+ rts
+
+############
+# MOVE IN: #
+############
+fmovm_data_in:
+ mov.l %a0,L_SCR1(%a6)
+
+ sub.l %d0,%sp # make room for fpregs
+ lea (%sp),%a1
+
+ mov.l %d1,-(%sp) # save bit string for later
+ mov.l %d0,-(%sp) # save # of bytes
+
+ bsr.l _dmem_read # copy data from user mem
+
+ mov.l (%sp)+,%d0 # retrieve # of bytes
+
+ tst.l %d1 # did dfetch fail?
+ bne.w fmovm_in_err # yes
+
+ mov.l (%sp)+,%d1 # load bit string
+
+ lea (%sp),%a0 # addr of stack
+
+ tst.b %d1 # should FP0 be moved?
+ bpl.b fmovm_data_in_fp1 # no
+
+ mov.l (%a0)+,0x0+EXC_FP0(%a6) # yes
+ mov.l (%a0)+,0x4+EXC_FP0(%a6)
+ mov.l (%a0)+,0x8+EXC_FP0(%a6)
+
+fmovm_data_in_fp1:
+ lsl.b &0x1,%d1 # should FP1 be moved?
+ bpl.b fmovm_data_in_fp2 # no
+
+ mov.l (%a0)+,0x0+EXC_FP1(%a6) # yes
+ mov.l (%a0)+,0x4+EXC_FP1(%a6)
+ mov.l (%a0)+,0x8+EXC_FP1(%a6)
+
+fmovm_data_in_fp2:
+ lsl.b &0x1,%d1 # should FP2 be moved?
+ bpl.b fmovm_data_in_fp3 # no
+
+ fmovm.x (%a0)+,&0x20 # yes
+
+fmovm_data_in_fp3:
+ lsl.b &0x1,%d1 # should FP3 be moved?
+ bpl.b fmovm_data_in_fp4 # no
+
+ fmovm.x (%a0)+,&0x10 # yes
+
+fmovm_data_in_fp4:
+ lsl.b &0x1,%d1 # should FP4 be moved?
+ bpl.b fmovm_data_in_fp5 # no
+
+ fmovm.x (%a0)+,&0x08 # yes
+
+fmovm_data_in_fp5:
+ lsl.b &0x1,%d1 # should FP5 be moved?
+ bpl.b fmovm_data_in_fp6 # no
+
+ fmovm.x (%a0)+,&0x04 # yes
+
+fmovm_data_in_fp6:
+ lsl.b &0x1,%d1 # should FP6 be moved?
+ bpl.b fmovm_data_in_fp7 # no
+
+ fmovm.x (%a0)+,&0x02 # yes
+
+fmovm_data_in_fp7:
+ lsl.b &0x1,%d1 # should FP7 be moved?
+ bpl.b fmovm_data_in_done # no
+
+ fmovm.x (%a0)+,&0x01 # yes
+
+fmovm_data_in_done:
+ add.l %d0,%sp # remove fpregs from stack
+ rts
+
+#####################################
+
+fmovm_data_done:
+ rts
+
+##############################################################################
+
+#
+# table indexed by the operation's bit string that gives the number
+# of bytes that will be moved.
+#
+# number of bytes = (# of 1's in bit string) * 12(bytes/fpreg)
+#
+tbl_fmovm_size:
+ byte 0x00,0x0c,0x0c,0x18,0x0c,0x18,0x18,0x24
+ byte 0x0c,0x18,0x18,0x24,0x18,0x24,0x24,0x30
+ byte 0x0c,0x18,0x18,0x24,0x18,0x24,0x24,0x30
+ byte 0x18,0x24,0x24,0x30,0x24,0x30,0x30,0x3c
+ byte 0x0c,0x18,0x18,0x24,0x18,0x24,0x24,0x30
+ byte 0x18,0x24,0x24,0x30,0x24,0x30,0x30,0x3c
+ byte 0x18,0x24,0x24,0x30,0x24,0x30,0x30,0x3c
+ byte 0x24,0x30,0x30,0x3c,0x30,0x3c,0x3c,0x48
+ byte 0x0c,0x18,0x18,0x24,0x18,0x24,0x24,0x30
+ byte 0x18,0x24,0x24,0x30,0x24,0x30,0x30,0x3c
+ byte 0x18,0x24,0x24,0x30,0x24,0x30,0x30,0x3c
+ byte 0x24,0x30,0x30,0x3c,0x30,0x3c,0x3c,0x48
+ byte 0x18,0x24,0x24,0x30,0x24,0x30,0x30,0x3c
+ byte 0x24,0x30,0x30,0x3c,0x30,0x3c,0x3c,0x48
+ byte 0x24,0x30,0x30,0x3c,0x30,0x3c,0x3c,0x48
+ byte 0x30,0x3c,0x3c,0x48,0x3c,0x48,0x48,0x54
+ byte 0x0c,0x18,0x18,0x24,0x18,0x24,0x24,0x30
+ byte 0x18,0x24,0x24,0x30,0x24,0x30,0x30,0x3c
+ byte 0x18,0x24,0x24,0x30,0x24,0x30,0x30,0x3c
+ byte 0x24,0x30,0x30,0x3c,0x30,0x3c,0x3c,0x48
+ byte 0x18,0x24,0x24,0x30,0x24,0x30,0x30,0x3c
+ byte 0x24,0x30,0x30,0x3c,0x30,0x3c,0x3c,0x48
+ byte 0x24,0x30,0x30,0x3c,0x30,0x3c,0x3c,0x48
+ byte 0x30,0x3c,0x3c,0x48,0x3c,0x48,0x48,0x54
+ byte 0x18,0x24,0x24,0x30,0x24,0x30,0x30,0x3c
+ byte 0x24,0x30,0x30,0x3c,0x30,0x3c,0x3c,0x48
+ byte 0x24,0x30,0x30,0x3c,0x30,0x3c,0x3c,0x48
+ byte 0x30,0x3c,0x3c,0x48,0x3c,0x48,0x48,0x54
+ byte 0x24,0x30,0x30,0x3c,0x30,0x3c,0x3c,0x48
+ byte 0x30,0x3c,0x3c,0x48,0x3c,0x48,0x48,0x54
+ byte 0x30,0x3c,0x3c,0x48,0x3c,0x48,0x48,0x54
+ byte 0x3c,0x48,0x48,0x54,0x48,0x54,0x54,0x60
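+
+# a rough C sketch of how each entry in the table above is derived
+# (illustration only):
+#
+#	#include <stdint.h>
+#
+#	uint8_t fmovm_size(uint8_t mask)
+#	{
+#		unsigned n = 0;
+#		for (int b = 0; b < 8; b++)
+#			n += (mask >> b) & 1;
+#		return (uint8_t)(n * 12);	/* 12 bytes per fpreg */
+#	}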
+
+#
+# table to convert a pre-decrement bit string into a post-increment
+# or control bit string.
+# ex: 0x00 ==> 0x00
+# 0x01 ==> 0x80
+# 0x02 ==> 0x40
+# .
+# .
+# 0xfd ==> 0xbf
+# 0xfe ==> 0x7f
+# 0xff ==> 0xff
+#
+tbl_fmovm_convert:
+ byte 0x00,0x80,0x40,0xc0,0x20,0xa0,0x60,0xe0
+ byte 0x10,0x90,0x50,0xd0,0x30,0xb0,0x70,0xf0
+ byte 0x08,0x88,0x48,0xc8,0x28,0xa8,0x68,0xe8
+ byte 0x18,0x98,0x58,0xd8,0x38,0xb8,0x78,0xf8
+ byte 0x04,0x84,0x44,0xc4,0x24,0xa4,0x64,0xe4
+ byte 0x14,0x94,0x54,0xd4,0x34,0xb4,0x74,0xf4
+ byte 0x0c,0x8c,0x4c,0xcc,0x2c,0xac,0x6c,0xec
+ byte 0x1c,0x9c,0x5c,0xdc,0x3c,0xbc,0x7c,0xfc
+ byte 0x02,0x82,0x42,0xc2,0x22,0xa2,0x62,0xe2
+ byte 0x12,0x92,0x52,0xd2,0x32,0xb2,0x72,0xf2
+ byte 0x0a,0x8a,0x4a,0xca,0x2a,0xaa,0x6a,0xea
+ byte 0x1a,0x9a,0x5a,0xda,0x3a,0xba,0x7a,0xfa
+ byte 0x06,0x86,0x46,0xc6,0x26,0xa6,0x66,0xe6
+ byte 0x16,0x96,0x56,0xd6,0x36,0xb6,0x76,0xf6
+ byte 0x0e,0x8e,0x4e,0xce,0x2e,0xae,0x6e,0xee
+ byte 0x1e,0x9e,0x5e,0xde,0x3e,0xbe,0x7e,0xfe
+ byte 0x01,0x81,0x41,0xc1,0x21,0xa1,0x61,0xe1
+ byte 0x11,0x91,0x51,0xd1,0x31,0xb1,0x71,0xf1
+ byte 0x09,0x89,0x49,0xc9,0x29,0xa9,0x69,0xe9
+ byte 0x19,0x99,0x59,0xd9,0x39,0xb9,0x79,0xf9
+ byte 0x05,0x85,0x45,0xc5,0x25,0xa5,0x65,0xe5
+ byte 0x15,0x95,0x55,0xd5,0x35,0xb5,0x75,0xf5
+ byte 0x0d,0x8d,0x4d,0xcd,0x2d,0xad,0x6d,0xed
+ byte 0x1d,0x9d,0x5d,0xdd,0x3d,0xbd,0x7d,0xfd
+ byte 0x03,0x83,0x43,0xc3,0x23,0xa3,0x63,0xe3
+ byte 0x13,0x93,0x53,0xd3,0x33,0xb3,0x73,0xf3
+ byte 0x0b,0x8b,0x4b,0xcb,0x2b,0xab,0x6b,0xeb
+ byte 0x1b,0x9b,0x5b,0xdb,0x3b,0xbb,0x7b,0xfb
+ byte 0x07,0x87,0x47,0xc7,0x27,0xa7,0x67,0xe7
+ byte 0x17,0x97,0x57,0xd7,0x37,0xb7,0x77,0xf7
+ byte 0x0f,0x8f,0x4f,0xcf,0x2f,0xaf,0x6f,0xef
+ byte 0x1f,0x9f,0x5f,0xdf,0x3f,0xbf,0x7f,0xff
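+
+# a rough C sketch of the bit reversal the table above performs
+# (illustration only):
+#
+#	#include <stdint.h>
+#
+#	uint8_t fmovm_convert(uint8_t mask)
+#	{
+#		uint8_t out = 0;
+#		for (int b = 0; b < 8; b++)	/* bit0<->bit7, bit1<->bit6... */
+#			if (mask & (1 << b))
+#				out |= (uint8_t)(0x80 >> b);
+#		return out;
+#	}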
+
+ global fmovm_calc_ea
+###############################################
+# _fmovm_calc_ea: calculate effective address #
+###############################################
+fmovm_calc_ea:
+ mov.l %d0,%a0 # move # bytes to a0
+
+# currently, MODE and REG are taken from the EXC_OPWORD. this could be
+# easily changed if they were inputs passed in registers.
+ mov.w EXC_OPWORD(%a6),%d0 # fetch opcode word
+ mov.w %d0,%d1 # make a copy
+
+ andi.w &0x3f,%d0 # extract mode,reg fields
+ andi.l &0x7,%d1 # extract reg field
+
+# jump to the corresponding function for each {MODE,REG} pair.
+ mov.w (tbl_fea_mode.b,%pc,%d0.w*2),%d0 # fetch jmp distance
+ jmp (tbl_fea_mode.b,%pc,%d0.w*1) # jmp to correct ea mode
+
+ swbeg &64
+tbl_fea_mode:
+ short tbl_fea_mode - tbl_fea_mode
+ short tbl_fea_mode - tbl_fea_mode
+ short tbl_fea_mode - tbl_fea_mode
+ short tbl_fea_mode - tbl_fea_mode
+ short tbl_fea_mode - tbl_fea_mode
+ short tbl_fea_mode - tbl_fea_mode
+ short tbl_fea_mode - tbl_fea_mode
+ short tbl_fea_mode - tbl_fea_mode
+
+ short tbl_fea_mode - tbl_fea_mode
+ short tbl_fea_mode - tbl_fea_mode
+ short tbl_fea_mode - tbl_fea_mode
+ short tbl_fea_mode - tbl_fea_mode
+ short tbl_fea_mode - tbl_fea_mode
+ short tbl_fea_mode - tbl_fea_mode
+ short tbl_fea_mode - tbl_fea_mode
+ short tbl_fea_mode - tbl_fea_mode
+
+ short faddr_ind_a0 - tbl_fea_mode
+ short faddr_ind_a1 - tbl_fea_mode
+ short faddr_ind_a2 - tbl_fea_mode
+ short faddr_ind_a3 - tbl_fea_mode
+ short faddr_ind_a4 - tbl_fea_mode
+ short faddr_ind_a5 - tbl_fea_mode
+ short faddr_ind_a6 - tbl_fea_mode
+ short faddr_ind_a7 - tbl_fea_mode
+
+ short faddr_ind_p_a0 - tbl_fea_mode
+ short faddr_ind_p_a1 - tbl_fea_mode
+ short faddr_ind_p_a2 - tbl_fea_mode
+ short faddr_ind_p_a3 - tbl_fea_mode
+ short faddr_ind_p_a4 - tbl_fea_mode
+ short faddr_ind_p_a5 - tbl_fea_mode
+ short faddr_ind_p_a6 - tbl_fea_mode
+ short faddr_ind_p_a7 - tbl_fea_mode
+
+ short faddr_ind_m_a0 - tbl_fea_mode
+ short faddr_ind_m_a1 - tbl_fea_mode
+ short faddr_ind_m_a2 - tbl_fea_mode
+ short faddr_ind_m_a3 - tbl_fea_mode
+ short faddr_ind_m_a4 - tbl_fea_mode
+ short faddr_ind_m_a5 - tbl_fea_mode
+ short faddr_ind_m_a6 - tbl_fea_mode
+ short faddr_ind_m_a7 - tbl_fea_mode
+
+ short faddr_ind_disp_a0 - tbl_fea_mode
+ short faddr_ind_disp_a1 - tbl_fea_mode
+ short faddr_ind_disp_a2 - tbl_fea_mode
+ short faddr_ind_disp_a3 - tbl_fea_mode
+ short faddr_ind_disp_a4 - tbl_fea_mode
+ short faddr_ind_disp_a5 - tbl_fea_mode
+ short faddr_ind_disp_a6 - tbl_fea_mode
+ short faddr_ind_disp_a7 - tbl_fea_mode
+
+ short faddr_ind_ext - tbl_fea_mode
+ short faddr_ind_ext - tbl_fea_mode
+ short faddr_ind_ext - tbl_fea_mode
+ short faddr_ind_ext - tbl_fea_mode
+ short faddr_ind_ext - tbl_fea_mode
+ short faddr_ind_ext - tbl_fea_mode
+ short faddr_ind_ext - tbl_fea_mode
+ short faddr_ind_ext - tbl_fea_mode
+
+ short fabs_short - tbl_fea_mode
+ short fabs_long - tbl_fea_mode
+ short fpc_ind - tbl_fea_mode
+ short fpc_ind_ext - tbl_fea_mode
+ short tbl_fea_mode - tbl_fea_mode
+ short tbl_fea_mode - tbl_fea_mode
+ short tbl_fea_mode - tbl_fea_mode
+ short tbl_fea_mode - tbl_fea_mode
+
+###################################
+# Address register indirect: (An) #
+###################################
+faddr_ind_a0:
+ mov.l EXC_DREGS+0x8(%a6),%a0 # Get current a0
+ rts
+
+faddr_ind_a1:
+ mov.l EXC_DREGS+0xc(%a6),%a0 # Get current a1
+ rts
+
+faddr_ind_a2:
+ mov.l %a2,%a0 # Get current a2
+ rts
+
+faddr_ind_a3:
+ mov.l %a3,%a0 # Get current a3
+ rts
+
+faddr_ind_a4:
+ mov.l %a4,%a0 # Get current a4
+ rts
+
+faddr_ind_a5:
+ mov.l %a5,%a0 # Get current a5
+ rts
+
+faddr_ind_a6:
+ mov.l (%a6),%a0 # Get current a6
+ rts
+
+faddr_ind_a7:
+ mov.l EXC_A7(%a6),%a0 # Get current a7
+ rts
+
+#####################################################
+# Address register indirect w/ postincrement: (An)+ #
+#####################################################
+faddr_ind_p_a0:
+ mov.l EXC_DREGS+0x8(%a6),%d0 # Get current a0
+ mov.l %d0,%d1
+ add.l %a0,%d1 # Increment
+ mov.l %d1,EXC_DREGS+0x8(%a6) # Save incr value
+ mov.l %d0,%a0
+ rts
+
+faddr_ind_p_a1:
+ mov.l EXC_DREGS+0xc(%a6),%d0 # Get current a1
+ mov.l %d0,%d1
+ add.l %a0,%d1 # Increment
+ mov.l %d1,EXC_DREGS+0xc(%a6) # Save incr value
+ mov.l %d0,%a0
+ rts
+
+faddr_ind_p_a2:
+ mov.l %a2,%d0 # Get current a2
+ mov.l %d0,%d1
+ add.l %a0,%d1 # Increment
+ mov.l %d1,%a2 # Save incr value
+ mov.l %d0,%a0
+ rts
+
+faddr_ind_p_a3:
+ mov.l %a3,%d0 # Get current a3
+ mov.l %d0,%d1
+ add.l %a0,%d1 # Increment
+ mov.l %d1,%a3 # Save incr value
+ mov.l %d0,%a0
+ rts
+
+faddr_ind_p_a4:
+ mov.l %a4,%d0 # Get current a4
+ mov.l %d0,%d1
+ add.l %a0,%d1 # Increment
+ mov.l %d1,%a4 # Save incr value
+ mov.l %d0,%a0
+ rts
+
+faddr_ind_p_a5:
+ mov.l %a5,%d0 # Get current a5
+ mov.l %d0,%d1
+ add.l %a0,%d1 # Increment
+ mov.l %d1,%a5 # Save incr value
+ mov.l %d0,%a0
+ rts
+
+faddr_ind_p_a6:
+ mov.l (%a6),%d0 # Get current a6
+ mov.l %d0,%d1
+ add.l %a0,%d1 # Increment
+ mov.l %d1,(%a6) # Save incr value
+ mov.l %d0,%a0
+ rts
+
+faddr_ind_p_a7:
+ mov.b &mia7_flg,SPCOND_FLG(%a6) # set "special case" flag
+
+ mov.l EXC_A7(%a6),%d0 # Get current a7
+ mov.l %d0,%d1
+ add.l %a0,%d1 # Increment
+ mov.l %d1,EXC_A7(%a6) # Save incr value
+ mov.l %d0,%a0
+ rts
+
+####################################################
+# Address register indirect w/ predecrement: -(An) #
+####################################################
+faddr_ind_m_a0:
+ mov.l EXC_DREGS+0x8(%a6),%d0 # Get current a0
+ sub.l %a0,%d0 # Decrement
+ mov.l %d0,EXC_DREGS+0x8(%a6) # Save decr value
+ mov.l %d0,%a0
+ rts
+
+faddr_ind_m_a1:
+ mov.l EXC_DREGS+0xc(%a6),%d0 # Get current a1
+ sub.l %a0,%d0 # Decrement
+ mov.l %d0,EXC_DREGS+0xc(%a6) # Save decr value
+ mov.l %d0,%a0
+ rts
+
+faddr_ind_m_a2:
+ mov.l %a2,%d0 # Get current a2
+ sub.l %a0,%d0 # Decrement
+ mov.l %d0,%a2 # Save decr value
+ mov.l %d0,%a0
+ rts
+
+faddr_ind_m_a3:
+ mov.l %a3,%d0 # Get current a3
+ sub.l %a0,%d0 # Decrement
+ mov.l %d0,%a3 # Save decr value
+ mov.l %d0,%a0
+ rts
+
+faddr_ind_m_a4:
+ mov.l %a4,%d0 # Get current a4
+ sub.l %a0,%d0 # Decrement
+ mov.l %d0,%a4 # Save decr value
+ mov.l %d0,%a0
+ rts
+
+faddr_ind_m_a5:
+ mov.l %a5,%d0 # Get current a5
+ sub.l %a0,%d0 # Decrement
+ mov.l %d0,%a5 # Save decr value
+ mov.l %d0,%a0
+ rts
+
+faddr_ind_m_a6:
+ mov.l (%a6),%d0 # Get current a6
+ sub.l %a0,%d0 # Decrement
+ mov.l %d0,(%a6) # Save decr value
+ mov.l %d0,%a0
+ rts
+
+faddr_ind_m_a7:
+ mov.b &mda7_flg,SPCOND_FLG(%a6) # set "special case" flag
+
+ mov.l EXC_A7(%a6),%d0 # Get current a7
+ sub.l %a0,%d0 # Decrement
+ mov.l %d0,EXC_A7(%a6) # Save decr value
+ mov.l %d0,%a0
+ rts
+
+########################################################
+# Address register indirect w/ displacement: (d16, An) #
+########################################################
+faddr_ind_disp_a0:
+ mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
+ addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
+ bsr.l _imem_read_word
+
+ tst.l %d1 # did ifetch fail?
+ bne.l iea_iacc # yes
+
+ mov.w %d0,%a0 # sign extend displacement
+
+ add.l EXC_DREGS+0x8(%a6),%a0 # a0 + d16
+ rts
+
+faddr_ind_disp_a1:
+ mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
+ addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
+ bsr.l _imem_read_word
+
+ tst.l %d1 # did ifetch fail?
+ bne.l iea_iacc # yes
+
+ mov.w %d0,%a0 # sign extend displacement
+
+ add.l EXC_DREGS+0xc(%a6),%a0 # a1 + d16
+ rts
+
+faddr_ind_disp_a2:
+ mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
+ addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
+ bsr.l _imem_read_word
+
+ tst.l %d1 # did ifetch fail?
+ bne.l iea_iacc # yes
+
+ mov.w %d0,%a0 # sign extend displacement
+
+ add.l %a2,%a0 # a2 + d16
+ rts
+
+faddr_ind_disp_a3:
+ mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
+ addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
+ bsr.l _imem_read_word
+
+ tst.l %d1 # did ifetch fail?
+ bne.l iea_iacc # yes
+
+ mov.w %d0,%a0 # sign extend displacement
+
+ add.l %a3,%a0 # a3 + d16
+ rts
+
+faddr_ind_disp_a4:
+ mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
+ addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
+ bsr.l _imem_read_word
+
+ tst.l %d1 # did ifetch fail?
+ bne.l iea_iacc # yes
+
+ mov.w %d0,%a0 # sign extend displacement
+
+ add.l %a4,%a0 # a4 + d16
+ rts
+
+faddr_ind_disp_a5:
+ mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
+ addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
+ bsr.l _imem_read_word
+
+ tst.l %d1 # did ifetch fail?
+ bne.l iea_iacc # yes
+
+ mov.w %d0,%a0 # sign extend displacement
+
+ add.l %a5,%a0 # a5 + d16
+ rts
+
+faddr_ind_disp_a6:
+ mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
+ addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
+ bsr.l _imem_read_word
+
+ tst.l %d1 # did ifetch fail?
+ bne.l iea_iacc # yes
+
+ mov.w %d0,%a0 # sign extend displacement
+
+ add.l (%a6),%a0 # a6 + d16
+ rts
+
+faddr_ind_disp_a7:
+ mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
+ addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
+ bsr.l _imem_read_word
+
+ tst.l %d1 # did ifetch fail?
+ bne.l iea_iacc # yes
+
+ mov.w %d0,%a0 # sign extend displacement
+
+ add.l EXC_A7(%a6),%a0 # a7 + d16
+ rts
+
+########################################################################
+# Address register indirect w/ index(8-bit displacement): (d8, An, Xn) #
+# " " " w/ " (base displacement): (bd, An, Xn) #
+# Memory indirect postindexed: ([bd, An], Xn, od) #
+# Memory indirect preindexed: ([bd, An, Xn], od) #
+########################################################################
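+
+# a rough C sketch of the full-format <ea> calculation done below and in
+# fcalc_mem_ind() (illustration only; suppressed index/base/bd/od fields
+# are passed in as zero, and read_long() is a hypothetical stand-in for
+# _dmem_read_long()):
+#
+#	#include <stdint.h>
+#
+#	extern uint32_t read_long(uint32_t addr);
+#
+#	uint32_t calc_ext_ea(uint32_t base, uint32_t index, unsigned scale,
+#			     int32_t bd, int32_t od, int mem_ind, int post)
+#	{
+#		uint32_t xi = index << scale;
+#		if (!mem_ind)				/* (bd,An,Xn*sc) */
+#			return base + bd + xi;
+#		if (post)				/* ([bd,An],Xn,od) */
+#			return read_long(base + bd) + xi + od;
+#		return read_long(base + bd + xi) + od;	/* ([bd,An,Xn],od) */
+#	}
+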
+faddr_ind_ext:
+ addq.l &0x8,%d1
+ bsr.l fetch_dreg # fetch base areg
+ mov.l %d0,-(%sp)
+
+ mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
+ addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
+ bsr.l _imem_read_word # fetch extword in d0
+
+ tst.l %d1 # did ifetch fail?
+ bne.l iea_iacc # yes
+
+ mov.l (%sp)+,%a0
+
+ btst &0x8,%d0
+ bne.w fcalc_mem_ind
+
+ mov.l %d0,L_SCR1(%a6) # hold opword
+
+ mov.l %d0,%d1
+ rol.w &0x4,%d1
+ andi.w &0xf,%d1 # extract index regno
+
+# count on fetch_dreg() not to alter a0...
+ bsr.l fetch_dreg # fetch index
+
+ mov.l %d2,-(%sp) # save d2
+ mov.l L_SCR1(%a6),%d2 # fetch opword
+
+ btst &0xb,%d2 # is it word or long?
+ bne.b faii8_long
+ ext.l %d0 # sign extend word index
+faii8_long:
+ mov.l %d2,%d1
+ rol.w &0x7,%d1
+ andi.l &0x3,%d1 # extract scale value
+
+ lsl.l %d1,%d0 # shift index by scale
+
+ extb.l %d2 # sign extend displacement
+ add.l %d2,%d0 # index + disp
+ add.l %d0,%a0 # An + (index + disp)
+
+ mov.l (%sp)+,%d2 # restore old d2
+ rts
+
+###########################
+# Absolute short: (XXX).W #
+###########################
+fabs_short:
+ mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
+ addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
+ bsr.l _imem_read_word # fetch short address
+
+ tst.l %d1 # did ifetch fail?
+ bne.l iea_iacc # yes
+
+ mov.w %d0,%a0 # return <ea> in a0
+ rts
+
+##########################
+# Absolute long: (XXX).L #
+##########################
+fabs_long:
+ mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
+ addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
+ bsr.l _imem_read_long # fetch long address
+
+ tst.l %d1 # did ifetch fail?
+ bne.l iea_iacc # yes
+
+ mov.l %d0,%a0 # return <ea> in a0
+ rts
+
+#######################################################
+# Program counter indirect w/ displacement: (d16, PC) #
+#######################################################
+fpc_ind:
+ mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
+ addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
+ bsr.l _imem_read_word # fetch word displacement
+
+ tst.l %d1 # did ifetch fail?
+ bne.l iea_iacc # yes
+
+ mov.w %d0,%a0 # sign extend displacement
+
+ add.l EXC_EXTWPTR(%a6),%a0 # pc + d16
+
+# the extension word ptr has already been advanced past the displacement
+# word; the pc base is the address of that word, so back up by 2.
+ subq.l &0x2,%a0 # adjust <ea>
+ rts
+
+##########################################################
+# PC indirect w/ index(8-bit displacement): (d8, PC, An) #
+# " " w/ " (base displacement): (bd, PC, An) #
+# PC memory indirect postindexed: ([bd, PC], Xn, od) #
+# PC memory indirect preindexed: ([bd, PC, Xn], od) #
+##########################################################
+fpc_ind_ext:
+ mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
+ addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
+ bsr.l _imem_read_word # fetch ext word
+
+ tst.l %d1 # did ifetch fail?
+ bne.l iea_iacc # yes
+
+ mov.l EXC_EXTWPTR(%a6),%a0 # put base in a0
+ subq.l &0x2,%a0 # adjust base
+
+ btst &0x8,%d0 # is disp only 8 bits?
+ bne.w fcalc_mem_ind # calc memory indirect
+
+ mov.l %d0,L_SCR1(%a6) # store opword
+
+ mov.l %d0,%d1 # make extword copy
+ rol.w &0x4,%d1 # rotate reg num into place
+ andi.w &0xf,%d1 # extract register number
+
+# count on fetch_dreg() not to alter a0...
+ bsr.l fetch_dreg # fetch index
+
+ mov.l %d2,-(%sp) # save d2
+ mov.l L_SCR1(%a6),%d2 # fetch opword
+
+ btst &0xb,%d2 # is index word or long?
+ bne.b fpii8_long # long
+ ext.l %d0 # sign extend word index
+fpii8_long:
+ mov.l %d2,%d1
+ rol.w &0x7,%d1 # rotate scale value into place
+ andi.l &0x3,%d1 # extract scale value
+
+ lsl.l %d1,%d0 # shift index by scale
+
+ extb.l %d2 # sign extend displacement
+ add.l %d2,%d0 # disp + index
+ add.l %d0,%a0 # An + (index + disp)
+
+ mov.l (%sp)+,%d2 # restore temp register
+ rts
+
+# d2 = index
+# d3 = base
+# d4 = od
+# d5 = extword
+fcalc_mem_ind:
+ btst &0x6,%d0 # is the index suppressed?
+ beq.b fcalc_index
+
+ movm.l &0x3c00,-(%sp) # save d2-d5
+
+ mov.l %d0,%d5 # put extword in d5
+ mov.l %a0,%d3 # put base in d3
+
+ clr.l %d2 # yes, so index = 0
+ bra.b fbase_supp_ck
+
+# index:
+fcalc_index:
+ mov.l %d0,L_SCR1(%a6) # save d0 (opword)
+ bfextu %d0{&16:&4},%d1 # fetch dreg index
+ bsr.l fetch_dreg
+
+ movm.l &0x3c00,-(%sp) # save d2-d5
+ mov.l %d0,%d2 # put index in d2
+ mov.l L_SCR1(%a6),%d5
+ mov.l %a0,%d3
+
+ btst &0xb,%d5 # is index word or long?
+ bne.b fno_ext
+ ext.l %d2
+
+fno_ext:
+ bfextu %d5{&21:&2},%d0
+ lsl.l %d0,%d2
+
+# base address (passed as parameter in d3):
+# we clear the value here if it should actually be suppressed.
+fbase_supp_ck:
+ btst &0x7,%d5 # is the bd suppressed?
+ beq.b fno_base_sup
+ clr.l %d3
+
+# base displacement:
+fno_base_sup:
+ bfextu %d5{&26:&2},%d0 # get bd size
+# beq.l fmovm_error # if (size == 0) it's reserved
+
+ cmpi.b %d0,&0x2
+ blt.b fno_bd
+ beq.b fget_word_bd
+
+ mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
+ addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
+ bsr.l _imem_read_long
+
+ tst.l %d1 # did ifetch fail?
+ bne.l fcea_iacc # yes
+
+ bra.b fchk_ind
+
+fget_word_bd:
+ mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
+ addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
+ bsr.l _imem_read_word
+
+ tst.l %d1 # did ifetch fail?
+ bne.l fcea_iacc # yes
+
+ ext.l %d0 # sign extend bd
+
+fchk_ind:
+ add.l %d0,%d3 # base += bd
+
+# outer displacement:
+fno_bd:
+ bfextu %d5{&30:&2},%d0 # is od suppressed?
+ beq.w faii_bd
+
+ cmpi.b %d0,&0x2
+ blt.b fnull_od
+ beq.b fword_od
+
+ mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
+ addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
+ bsr.l _imem_read_long
+
+ tst.l %d1 # did ifetch fail?
+ bne.l fcea_iacc # yes
+
+ bra.b fadd_them
+
+fword_od:
+ mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
+ addq.l &0x2,EXC_EXTWPTR(%a6) # incr instruction ptr
+ bsr.l _imem_read_word
+
+ tst.l %d1 # did ifetch fail?
+ bne.l fcea_iacc # yes
+
+ ext.l %d0 # sign extend od
+ bra.b fadd_them
+
+fnull_od:
+ clr.l %d0
+
+fadd_them:
+ mov.l %d0,%d4
+
+ btst &0x2,%d5 # pre or post indexing?
+ beq.b fpre_indexed
+
+ mov.l %d3,%a0
+ bsr.l _dmem_read_long
+
+ tst.l %d1 # did dfetch fail?
+ bne.w fcea_err # yes
+
+ add.l %d2,%d0 # <ea> += index
+ add.l %d4,%d0 # <ea> += od
+ bra.b fdone_ea
+
+fpre_indexed:
+ add.l %d2,%d3 # preindexing
+ mov.l %d3,%a0
+ bsr.l _dmem_read_long
+
+ tst.l %d1 # did dfetch fail?
+ bne.w fcea_err # yes
+
+ add.l %d4,%d0 # ea += od
+ bra.b fdone_ea
+
+faii_bd:
+ add.l %d2,%d3 # ea = (base + bd) + index
+ mov.l %d3,%d0
+fdone_ea:
+ mov.l %d0,%a0
+
+ movm.l (%sp)+,&0x003c # restore d2-d5
+ rts
+
+#########################################################
+fcea_err:
+ mov.l %d3,%a0
+
+ movm.l (%sp)+,&0x003c # restore d2-d5
+ mov.w &0x0101,%d0
+ bra.l iea_dacc
+
+fcea_iacc:
+ movm.l (%sp)+,&0x003c # restore d2-d5
+ bra.l iea_iacc
+
+fmovm_out_err:
+ bsr.l restore
+ mov.w &0x00e1,%d0
+ bra.b fmovm_err
+
+fmovm_in_err:
+ bsr.l restore
+ mov.w &0x0161,%d0
+
+fmovm_err:
+ mov.l L_SCR1(%a6),%a0
+ bra.l iea_dacc
+
+#########################################################################
+# XDEF **************************************************************** #
+# fmovm_ctrl(): emulate fmovm.l of control registers instr #
+# #
+# XREF **************************************************************** #
+# _imem_read_long() - read longword from memory #
+# iea_iacc() - _imem_read_long() failed; error recovery #
+# #
+# INPUT *************************************************************** #
+# None #
+# #
+# OUTPUT ************************************************************** #
+# If _imem_read_long() doesn't fail: #
+# USER_FPCR(a6) = new FPCR value #
+# USER_FPSR(a6) = new FPSR value #
+# USER_FPIAR(a6) = new FPIAR value #
+# #
+# ALGORITHM *********************************************************** #
+# Decode the instruction type by looking at the extension word #
+# in order to see how many control registers to fetch from memory. #
+# Fetch them using _imem_read_long(). If this fetch fails, exit through #
+# the special access error exit handler iea_iacc(). #
+# #
+# Instruction word decoding: #
+# #
+# fmovem.l #<data>, {FPIAR&|FPCR&|FPSR} #
+# #
+# WORD1 WORD2 #
+# 1111 0010 00 111100 100$ $$00 0000 0000 #
+# #
+# $$$ (100): FPCR #
+# (010): FPSR #
+# (001): FPIAR #
+# (000): FPIAR #
+# #
+#########################################################################
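+
+# A rough C model of the decode below ("ext" stands for the high byte
+# of WORD2, and imem_read_long() for the _imem_read_long() sequence):
+#
+#	if (ext & 0x10) fpcr  = imem_read_long();	/* $$$ bit 2: FPCR  */
+#	if (ext & 0x08) fpsr  = imem_read_long();	/* $$$ bit 1: FPSR  */
+#	if (ext & 0x04) fpiar = imem_read_long();	/* $$$ bit 0: FPIAR */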
+
+ global fmovm_ctrl
+fmovm_ctrl:
+ mov.b EXC_EXTWORD(%a6),%d0 # fetch reg select bits
+ cmpi.b %d0,&0x9c # fpcr & fpsr & fpiar ?
+ beq.w fctrl_in_7 # yes
+ cmpi.b %d0,&0x98 # fpcr & fpsr ?
+ beq.w fctrl_in_6 # yes
+ cmpi.b %d0,&0x94 # fpcr & fpiar ?
+ beq.b fctrl_in_5 # yes
+
+# fmovem.l #<data>, fpsr/fpiar
+fctrl_in_3:
+ mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
+ addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
+ bsr.l _imem_read_long # fetch FPSR from mem
+
+ tst.l %d1 # did ifetch fail?
+ bne.l iea_iacc # yes
+
+ mov.l %d0,USER_FPSR(%a6) # store new FPSR to stack
+ mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
+ addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
+ bsr.l _imem_read_long # fetch FPIAR from mem
+
+ tst.l %d1 # did ifetch fail?
+ bne.l iea_iacc # yes
+
+ mov.l %d0,USER_FPIAR(%a6) # store new FPIAR to stack
+ rts
+
+# fmovem.l #<data>, fpcr/fpiar
+fctrl_in_5:
+ mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
+ addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
+ bsr.l _imem_read_long # fetch FPCR from mem
+
+ tst.l %d1 # did ifetch fail?
+ bne.l iea_iacc # yes
+
+ mov.l %d0,USER_FPCR(%a6) # store new FPCR to stack
+ mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
+ addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
+ bsr.l _imem_read_long # fetch FPIAR from mem
+
+ tst.l %d1 # did ifetch fail?
+ bne.l iea_iacc # yes
+
+ mov.l %d0,USER_FPIAR(%a6) # store new FPIAR to stack
+ rts
+
+# fmovem.l #<data>, fpcr/fpsr
+fctrl_in_6:
+ mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
+ addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
+ bsr.l _imem_read_long # fetch FPCR from mem
+
+ tst.l %d1 # did ifetch fail?
+ bne.l iea_iacc # yes
+
+ mov.l %d0,USER_FPCR(%a6) # store new FPCR to mem
+ mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
+ addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
+ bsr.l _imem_read_long # fetch FPSR from mem
+
+ tst.l %d1 # did ifetch fail?
+ bne.l iea_iacc # yes
+
+ mov.l %d0,USER_FPSR(%a6) # store new FPSR to mem
+ rts
+
+# fmovem.l #<data>, fpcr/fpsr/fpiar
+fctrl_in_7:
+ mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
+ addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
+ bsr.l _imem_read_long # fetch FPCR from mem
+
+ tst.l %d1 # did ifetch fail?
+ bne.l iea_iacc # yes
+
+ mov.l %d0,USER_FPCR(%a6) # store new FPCR to mem
+ mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
+ addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
+ bsr.l _imem_read_long # fetch FPSR from mem
+
+ tst.l %d1 # did ifetch fail?
+ bne.l iea_iacc # yes
+
+ mov.l %d0,USER_FPSR(%a6) # store new FPSR to mem
+ mov.l EXC_EXTWPTR(%a6),%a0 # fetch instruction addr
+ addq.l &0x4,EXC_EXTWPTR(%a6) # incr instruction ptr
+ bsr.l _imem_read_long # fetch FPIAR from mem
+
+ tst.l %d1 # did ifetch fail?
+ bne.l iea_iacc # yes
+
+ mov.l %d0,USER_FPIAR(%a6) # store new FPIAR to mem
+ rts
+
+##########################################################################
+
+#########################################################################
+# XDEF **************************************************************** #
+# addsub_scaler2(): scale inputs to fadd/fsub such that no #
+# OVFL/UNFL exceptions will result #
+# #
+# XREF **************************************************************** #
+# norm() - normalize mantissa after adjusting exponent #
+# #
+# INPUT *************************************************************** #
+# FP_SRC(a6) = fp op1(src) #
+# FP_DST(a6) = fp op2(dst) #
+# #
+# OUTPUT ************************************************************** #
+# FP_SRC(a6) = fp op1 scaled(src) #
+# FP_DST(a6) = fp op2 scaled(dst) #
+# d0 = scale amount #
+# #
+# ALGORITHM *********************************************************** #
+# If the DST exponent is > the SRC exponent, set the DST exponent #
+# equal to 0x3fff and scale the SRC exponent by the value that the #
+# DST exponent was scaled by. If the SRC exponent is greater or equal, #
+# do the opposite. Return this scale factor in d0. #
+# If the two exponents differ by > the number of mantissa bits #
+# plus two, then set the smallest exponent to a very small value as a #
+# quick shortcut. #
+# #
+#########################################################################
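+
+# In rough C terms, the dst_exp > src_exp leg below does (the other
+# leg is symmetric with src and dst swapped):
+#
+#	scale   = 0x3fff - dst_exp;	/* from scale_to_zero_dst() */
+#	dst_exp = 0x3fff;
+#	if (dst_exp_old - src_exp >= mantissalen + 2)
+#		src_exp = 1;		/* quick shortcut: src is tiny */
+#	else
+#		src_exp += scale;	/* slide src by the same amount */
+#	return scale;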
+
+ global addsub_scaler2
+addsub_scaler2:
+ mov.l SRC_HI(%a0),FP_SCR0_HI(%a6)
+ mov.l DST_HI(%a1),FP_SCR1_HI(%a6)
+ mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
+ mov.l DST_LO(%a1),FP_SCR1_LO(%a6)
+ mov.w SRC_EX(%a0),%d0
+ mov.w DST_EX(%a1),%d1
+ mov.w %d0,FP_SCR0_EX(%a6)
+ mov.w %d1,FP_SCR1_EX(%a6)
+
+ andi.w &0x7fff,%d0
+ andi.w &0x7fff,%d1
+ mov.w %d0,L_SCR1(%a6) # store src exponent
+ mov.w %d1,2+L_SCR1(%a6) # store dst exponent
+
+ cmp.w %d0, %d1 # is src exp >= dst exp?
+ bge.l src_exp_ge2
+
+# dst exp is > src exp; scale dst to exp = 0x3fff
+dst_exp_gt2:
+ bsr.l scale_to_zero_dst
+ mov.l %d0,-(%sp) # save scale factor
+
+ cmpi.b STAG(%a6),&DENORM # is src denormalized?
+ bne.b cmpexp12
+
+ lea FP_SCR0(%a6),%a0
+ bsr.l norm # normalize the denorm; result is new exp
+ neg.w %d0 # new exp = -(shft val)
+ mov.w %d0,L_SCR1(%a6) # insert new exp
+
+cmpexp12:
+ mov.w 2+L_SCR1(%a6),%d0
+ subi.w &mantissalen+2,%d0 # subtract mantissalen+2 from larger exp
+
+ cmp.w %d0,L_SCR1(%a6) # is difference >= len(mantissa)+2?
+ bge.b quick_scale12
+
+ mov.w L_SCR1(%a6),%d0
+ add.w 0x2(%sp),%d0 # scale src exponent by scale factor
+ mov.w FP_SCR0_EX(%a6),%d1
+ and.w &0x8000,%d1
+ or.w %d1,%d0 # concat {sgn,new exp}
+ mov.w %d0,FP_SCR0_EX(%a6) # insert new src exponent
+
+ mov.l (%sp)+,%d0 # return SCALE factor
+ rts
+
+quick_scale12:
+ andi.w &0x8000,FP_SCR0_EX(%a6) # zero src exponent
+ bset &0x0,1+FP_SCR0_EX(%a6) # set exp = 1
+
+ mov.l (%sp)+,%d0 # return SCALE factor
+ rts
+
+# src exp is >= dst exp; scale src to exp = 0x3fff
+src_exp_ge2:
+ bsr.l scale_to_zero_src
+ mov.l %d0,-(%sp) # save scale factor
+
+ cmpi.b DTAG(%a6),&DENORM # is dst denormalized?
+ bne.b cmpexp22
+ lea FP_SCR1(%a6),%a0
+ bsr.l norm # normalize the denorm; result is new exp
+ neg.w %d0 # new exp = -(shft val)
+ mov.w %d0,2+L_SCR1(%a6) # insert new exp
+
+cmpexp22:
+ mov.w L_SCR1(%a6),%d0
+ subi.w &mantissalen+2,%d0 # subtract mantissalen+2 from larger exp
+
+ cmp.w %d0,2+L_SCR1(%a6) # is difference >= len(mantissa)+2?
+ bge.b quick_scale22
+
+ mov.w 2+L_SCR1(%a6),%d0
+ add.w 0x2(%sp),%d0 # scale dst exponent by scale factor
+ mov.w FP_SCR1_EX(%a6),%d1
+ andi.w &0x8000,%d1
+ or.w %d1,%d0 # concat {sgn,new exp}
+ mov.w %d0,FP_SCR1_EX(%a6) # insert new dst exponent
+
+ mov.l (%sp)+,%d0 # return SCALE factor
+ rts
+
+quick_scale22:
+ andi.w &0x8000,FP_SCR1_EX(%a6) # zero dst exponent
+ bset &0x0,1+FP_SCR1_EX(%a6) # set exp = 1
+
+ mov.l (%sp)+,%d0 # return SCALE factor
+ rts
+
+##########################################################################
+
+#########################################################################
+# XDEF **************************************************************** #
+# scale_to_zero_src(): scale the exponent of extended precision #
+# value at FP_SCR0(a6). #
+# #
+# XREF **************************************************************** #
+# norm() - normalize the mantissa if the operand was a DENORM #
+# #
+# INPUT *************************************************************** #
+# FP_SCR0(a6) = extended precision operand to be scaled #
+# #
+# OUTPUT ************************************************************** #
+# FP_SCR0(a6) = scaled extended precision operand #
+# d0 = scale value #
+# #
+# ALGORITHM *********************************************************** #
+# Set the exponent of the input operand to 0x3fff. Save the value #
+# of the difference between the original and new exponent. Then, #
+# normalize the operand if it was a DENORM. Add this normalization #
+# value to the previous value. Return the result. #
+# #
+#########################################################################
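+
+# A small C model of the scale computation (BIAS = 0x3fff; for a
+# DENORM the effective exponent is minus the shift count from norm()):
+#
+#	exp = se & 0x7fff;		/* biased exponent */
+#	se  = (se & 0x8000) | 0x3fff;	/* force exp to BIAS */
+#	if (tag == DENORM)
+#		exp = -norm(&man);	/* normalize first */
+#	return 0x3fff - exp;		/* scale factor */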
+
+ global scale_to_zero_src
+scale_to_zero_src:
+ mov.w FP_SCR0_EX(%a6),%d1 # extract operand's {sgn,exp}
+ mov.w %d1,%d0 # make a copy
+
+ andi.l &0x7fff,%d1 # extract operand's exponent
+
+ andi.w &0x8000,%d0 # extract operand's sgn
+ or.w &0x3fff,%d0 # insert new operand's exponent(=0)
+
+ mov.w %d0,FP_SCR0_EX(%a6) # insert biased exponent
+
+ cmpi.b STAG(%a6),&DENORM # is operand a DENORM?
+ beq.b stzs_denorm # normalize the DENORM
+
+stzs_norm:
+ mov.l &0x3fff,%d0
+ sub.l %d1,%d0 # scale = BIAS + (-exp)
+
+ rts
+
+stzs_denorm:
+ lea FP_SCR0(%a6),%a0 # pass ptr to src op
+ bsr.l norm # normalize denorm
+ neg.l %d0 # new exponent = -(shft val)
+ mov.l %d0,%d1 # new exponent for scale calc below
+ bra.b stzs_norm # finish scaling
+
+###
+
+#########################################################################
+# XDEF **************************************************************** #
+# scale_sqrt(): scale the input operand exponent so a subsequent #
+# fsqrt operation won't take an exception. #
+# #
+# XREF **************************************************************** #
+# norm() - normalize the mantissa if the operand was a DENORM #
+# #
+# INPUT *************************************************************** #
+# FP_SCR0(a6) = extended precision operand to be scaled #
+# #
+# OUTPUT ************************************************************** #
+# FP_SCR0(a6) = scaled extended precision operand #
+# d0 = scale value #
+# #
+# ALGORITHM *********************************************************** #
+# If the input operand is a DENORM, normalize it. #
+# If the exponent of the input operand is even, set the exponent #
+# to 0x3ffe and return a scale factor of "(0x3ffe-exp)/2". If the #
+# exponent of the input operand is odd, set the exponent to 0x3fff and #
+# return a scale factor of "(0x3fff-exp)/2". #
+# #
+#########################################################################
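+
+# The idea in C (the difference is always even, so the division is
+# exact; a negative scale means the exponent was above the bias):
+#
+#	if (exp & 0x1) {		/* odd exponent */
+#		new_exp = 0x3fff;
+#		scale   = (0x3fff - exp) / 2;
+#	} else {			/* even exponent */
+#		new_exp = 0x3ffe;
+#		scale   = (0x3ffe - exp) / 2;
+#	}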
+
+ global scale_sqrt
+scale_sqrt:
+ cmpi.b STAG(%a6),&DENORM # is operand a DENORM?
+ beq.b ss_denorm # normalize the DENORM
+
+ mov.w FP_SCR0_EX(%a6),%d1 # extract operand's {sgn,exp}
+ andi.l &0x7fff,%d1 # extract operand's exponent
+
+ andi.w &0x8000,FP_SCR0_EX(%a6) # extract operand's sgn
+
+ btst &0x0,%d1 # is exp even or odd?
+ beq.b ss_norm_even
+
+ ori.w &0x3fff,FP_SCR0_EX(%a6) # insert new operand's exponent(=0)
+
+ mov.l &0x3fff,%d0
+ sub.l %d1,%d0 # scale = BIAS + (-exp)
+ asr.l &0x1,%d0 # divide scale factor by 2
+ rts
+
+ss_norm_even:
+ ori.w &0x3ffe,FP_SCR0_EX(%a6) # insert new operand's exponent(=0)
+
+ mov.l &0x3ffe,%d0
+ sub.l %d1,%d0 # scale = BIAS + (-exp)
+ asr.l &0x1,%d0 # divide scale factor by 2
+ rts
+
+ss_denorm:
+ lea FP_SCR0(%a6),%a0 # pass ptr to src op
+ bsr.l norm # normalize denorm
+
+ btst &0x0,%d0 # is exp even or odd?
+ beq.b ss_denorm_even
+
+ ori.w &0x3fff,FP_SCR0_EX(%a6) # insert new operand's exponent(=0)
+
+ add.l &0x3fff,%d0
+ asr.l &0x1,%d0 # divide scale factor by 2
+ rts
+
+ss_denorm_even:
+ ori.w &0x3ffe,FP_SCR0_EX(%a6) # insert new operand's exponent(=0)
+
+ add.l &0x3ffe,%d0
+ asr.l &0x1,%d0 # divide scale factor by 2
+ rts
+
+###
+
+#########################################################################
+# XDEF **************************************************************** #
+# scale_to_zero_dst(): scale the exponent of extended precision #
+# value at FP_SCR1(a6). #
+# #
+# XREF **************************************************************** #
+# norm() - normalize the mantissa if the operand was a DENORM #
+# #
+# INPUT *************************************************************** #
+# FP_SCR1(a6) = extended precision operand to be scaled #
+# #
+# OUTPUT ************************************************************** #
+# FP_SCR1(a6) = scaled extended precision operand #
+# d0 = scale value #
+# #
+# ALGORITHM *********************************************************** #
+# Set the exponent of the input operand to 0x3fff. Save the value #
+# of the difference between the original and new exponent. Then, #
+# normalize the operand if it was a DENORM. Add this normalization #
+# value to the previous value. Return the result. #
+# #
+#########################################################################
+
+ global scale_to_zero_dst
+scale_to_zero_dst:
+ mov.w FP_SCR1_EX(%a6),%d1 # extract operand's {sgn,exp}
+ mov.w %d1,%d0 # make a copy
+
+ andi.l &0x7fff,%d1 # extract operand's exponent
+
+ andi.w &0x8000,%d0 # extract operand's sgn
+ or.w &0x3fff,%d0 # insert new operand's exponent(=0)
+
+ mov.w %d0,FP_SCR1_EX(%a6) # insert biased exponent
+
+ cmpi.b DTAG(%a6),&DENORM # is operand a DENORM?
+ beq.b stzd_denorm # normalize the DENORM
+
+stzd_norm:
+ mov.l &0x3fff,%d0
+ sub.l %d1,%d0 # scale = BIAS + (-exp)
+ rts
+
+stzd_denorm:
+ lea FP_SCR1(%a6),%a0 # pass ptr to dst op
+ bsr.l norm # normalize denorm
+ neg.l %d0 # new exponent = -(shft val)
+ mov.l %d0,%d1 # new exponent for scale calc below
+ bra.b stzd_norm # finish scaling
+
+##########################################################################
+
+#########################################################################
+# XDEF **************************************************************** #
+# res_qnan(): return default result w/ QNAN operand for dyadic #
+# res_snan(): return default result w/ SNAN operand for dyadic #
+# res_qnan_1op(): return dflt result w/ QNAN operand for monadic #
+# res_snan_1op(): return dflt result w/ SNAN operand for monadic #
+# #
+# XREF **************************************************************** #
+# None #
+# #
+# INPUT *************************************************************** #
+# FP_SRC(a6) = pointer to extended precision src operand #
+# FP_DST(a6) = pointer to extended precision dst operand #
+# #
+# OUTPUT ************************************************************** #
+# fp0 = default result #
+# #
+# ALGORITHM *********************************************************** #
+# If either operand (but not both operands) of an operation is a #
+# nonsignalling NAN, then that NAN is returned as the result. If both #
+# operands are nonsignalling NANs, then the destination operand #
+# nonsignalling NAN is returned as the result. #
+# If either operand to an operation is a signalling NAN (SNAN), #
+# then, the SNAN bit is set in the FPSR EXC byte. If the SNAN trap #
+# enable bit is set in the FPCR, then the trap is taken and the #
+# destination is not modified. If the SNAN trap enable bit is not set, #
+# then the SNAN is converted to a nonsignalling NAN (by setting the #
+# SNAN bit in the operand to one), and the operation continues as #
+# described in the preceding paragraph, for nonsignalling NANs. #
+# Make sure the appropriate FPSR bits are set before exiting. #
+# #
+#########################################################################
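+
+# The selection in C-like form (QUIET is mantissa bit 62, set with the
+# bset &0x6 below; the monadic entry points skip the DTAG tests):
+#
+#	if (dtag == SNAN) { fpsr |= NAN|SNAN|AIOP; dst |= QUIET; return dst; }
+#	if (dtag == QNAN) { if (stag == SNAN) fpsr |= SNAN|AIOP;
+#			    fpsr |= NAN; return dst; }
+#	if (stag == SNAN) { fpsr |= NAN|SNAN|AIOP; src |= QUIET; return src; }
+#	fpsr |= NAN; return src;	/* src is a QNAN */
+#
+# and finally the N ccode is set if the NAN being returned is negative.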
+
+ global res_qnan
+ global res_snan
+res_qnan:
+res_snan:
+ cmp.b DTAG(%a6), &SNAN # is the dst an SNAN?
+ beq.b dst_snan2
+ cmp.b DTAG(%a6), &QNAN # is the dst a QNAN?
+ beq.b dst_qnan2
+src_nan:
+ cmp.b STAG(%a6), &QNAN
+ beq.b src_qnan2
+ global res_snan_1op
+res_snan_1op:
+src_snan2:
+ bset &0x6, FP_SRC_HI(%a6) # set SNAN bit
+ or.l &nan_mask+aiop_mask+snan_mask, USER_FPSR(%a6)
+ lea FP_SRC(%a6), %a0
+ bra.b nan_comp
+ global res_qnan_1op
+res_qnan_1op:
+src_qnan2:
+ or.l &nan_mask, USER_FPSR(%a6)
+ lea FP_SRC(%a6), %a0
+ bra.b nan_comp
+dst_snan2:
+ or.l &nan_mask+aiop_mask+snan_mask, USER_FPSR(%a6)
+ bset &0x6, FP_DST_HI(%a6) # set SNAN bit
+ lea FP_DST(%a6), %a0
+ bra.b nan_comp
+dst_qnan2:
+ lea FP_DST(%a6), %a0
+ cmp.b STAG(%a6), &SNAN
+ bne nan_done
+ or.l &aiop_mask+snan_mask, USER_FPSR(%a6)
+nan_done:
+ or.l &nan_mask, USER_FPSR(%a6)
+nan_comp:
+ btst &0x7, FTEMP_EX(%a0) # is NAN neg?
+ beq.b nan_not_neg
+ or.l &neg_mask, USER_FPSR(%a6)
+nan_not_neg:
+ fmovm.x (%a0), &0x80
+ rts
+
+#########################################################################
+# XDEF **************************************************************** #
+# res_operr(): return default result during operand error #
+# #
+# XREF **************************************************************** #
+# None #
+# #
+# INPUT *************************************************************** #
+# None #
+# #
+# OUTPUT ************************************************************** #
+# fp0 = default operand error result #
+# #
+# ALGORITHM *********************************************************** #
+# A nonsignalling NAN is returned as the default result when #
+# an operand error occurs for the following cases: #
+# #
+# Multiply: (Infinity x Zero) #
+# Divide : (Zero / Zero) || (Infinity / Infinity) #
+# #
+#########################################################################
+
+ global res_operr
+res_operr:
+ or.l &nan_mask+operr_mask+aiop_mask, USER_FPSR(%a6)
+ fmovm.x nan_return(%pc), &0x80
+ rts
+
+nan_return:
+ long 0x7fff0000, 0xffffffff, 0xffffffff
+
+#########################################################################
+# XDEF **************************************************************** #
+# _denorm(): denormalize an intermediate result #
+# #
+# XREF **************************************************************** #
+# None #
+# #
+# INPUT *************************************************************** #
+# a0 = points to the operand to be denormalized #
+# (in the internal extended format) #
+# #
+# d0 = rounding precision #
+# #
+# OUTPUT ************************************************************** #
+# a0 = pointer to the denormalized result #
+# (in the internal extended format) #
+# #
+# d0 = guard,round,sticky #
+# #
+# ALGORITHM *********************************************************** #
+# According to the exponent underflow threshold for the given #
+# precision, shift the mantissa bits to the right in order to raise the #
+# exponent of the operand to the threshold value. While shifting the #
+# mantissa bits right, maintain the value of the guard, round, and #
+# sticky bits. #
+# other notes: #
+# (1) _denorm() is called by the underflow routines #
+# (2) _denorm() does NOT affect the status register #
+# #
+#########################################################################
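+
+# A bit-at-a-time C model of the shift (the code below gets the same
+# result with bitfield extracts in three block-sized cases):
+#
+#	while (exp < threshold) {	/* man = 64-bit mantissa */
+#		s |= r;			/* old round folds into sticky */
+#		r  = g;			/* old guard becomes round */
+#		g  = man & 0x1;		/* lsb becomes guard */
+#		man >>= 1;
+#		exp++;
+#	}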
+
+#
+# table of exponent threshold values for each precision
+#
+tbl_thresh:
+ short 0x0
+ short sgl_thresh
+ short dbl_thresh
+
+ global _denorm
+_denorm:
+#
+# Load the exponent threshold for the precision selected and check
+# to see if (threshold - exponent) is > 65 in which case we can
+# simply calculate the sticky bit and zero the mantissa. otherwise
+# we have to call the denormalization routine.
+#
+ lsr.b &0x2, %d0 # shift prec to lo bits
+ mov.w (tbl_thresh.b,%pc,%d0.w*2), %d1 # load prec threshold
+ mov.w %d1, %d0 # copy d1 into d0
+ sub.w FTEMP_EX(%a0), %d0 # diff = threshold - exp
+ cmpi.w %d0, &66 # is diff > 65? (mant + g,r bits)
+ bpl.b denorm_set_stky # yes; just calc sticky
+
+ clr.l %d0 # clear g,r,s
+ btst &inex2_bit, FPSR_EXCEPT(%a6) # was INEX2 set?
+ beq.b denorm_call # no; don't change anything
+ bset &29, %d0 # yes; set sticky bit
+
+denorm_call:
+ bsr.l dnrm_lp # denormalize the number
+ rts
+
+#
+# all bits would have been shifted off during the denorm, so simply
+# calculate if the sticky should be set and clear the entire mantissa.
+#
+denorm_set_stky:
+ mov.l &0x20000000, %d0 # set sticky bit in return value
+ mov.w %d1, FTEMP_EX(%a0) # load exp with threshold
+ clr.l FTEMP_HI(%a0) # zero hi(mantissa)
+ clr.l FTEMP_LO(%a0) # zero lo(mantissa)
+ rts
+
+# #
+# dnrm_lp(): denormalize exponent/mantissa to the specified threshold #
+# #
+# INPUT: #
+# %a0 : points to the operand to be denormalized #
+# %d0{31:29} : initial guard,round,sticky #
+# %d1{15:0} : denormalization threshold #
+# OUTPUT: #
+# %a0 : points to the denormalized operand #
+# %d0{31:29} : final guard,round,sticky #
+# #
+
+# *** Local Equates *** #
+set GRS, L_SCR2 # g,r,s temp storage
+set FTEMP_LO2, L_SCR1 # FTEMP_LO copy
+
+ global dnrm_lp
+dnrm_lp:
+
+#
+# make a copy of FTEMP_LO and place the g,r,s bits directly after it
+# in memory so as to make the bitfield extraction for denormalization easier.
+#
+ mov.l FTEMP_LO(%a0), FTEMP_LO2(%a6) # make FTEMP_LO copy
+ mov.l %d0, GRS(%a6) # place g,r,s after it
+
+#
+# check to see how much less than the underflow threshold the operand
+# exponent is.
+#
+ mov.l %d1, %d0 # copy the denorm threshold
+ sub.w FTEMP_EX(%a0), %d1 # d1 = threshold - uns exponent
+ ble.b dnrm_no_lp # d1 <= 0
+ cmpi.w %d1, &0x20 # is ( 0 <= d1 < 32) ?
+ blt.b case_1 # yes
+ cmpi.w %d1, &0x40 # is (32 <= d1 < 64) ?
+ blt.b case_2 # yes
+ bra.w case_3 # (d1 >= 64)
+
+#
+# No denormalization necessary
+#
+dnrm_no_lp:
+ mov.l GRS(%a6), %d0 # restore original g,r,s
+ rts
+
+#
+# case (0<d1<32)
+#
+# %d0 = denorm threshold
+# %d1 = "n" = amt to shift
+#
+# ---------------------------------------------------------
+# | FTEMP_HI | FTEMP_LO |grs000.........000|
+# ---------------------------------------------------------
+# <-(32 - n)-><-(n)-><-(32 - n)-><-(n)-><-(32 - n)-><-(n)->
+# \ \ \ \
+# \ \ \ \
+# \ \ \ \
+# \ \ \ \
+# \ \ \ \
+# \ \ \ \
+# \ \ \ \
+# \ \ \ \
+# <-(n)-><-(32 - n)-><------(32)-------><------(32)------->
+# ---------------------------------------------------------
+# |0.....0| NEW_HI | NEW_FTEMP_LO |grs |
+# ---------------------------------------------------------
+#
+case_1:
+ mov.l %d2, -(%sp) # create temp storage
+
+ mov.w %d0, FTEMP_EX(%a0) # exponent = denorm threshold
+ mov.l &32, %d0
+ sub.w %d1, %d0 # %d0 = 32 - %d1
+
+ cmpi.w %d1, &29 # is shft amt >= 29
+ blt.b case1_extract # no; no fix needed
+ mov.b GRS(%a6), %d2
+ or.b %d2, 3+FTEMP_LO2(%a6)
+
+case1_extract:
+ bfextu FTEMP_HI(%a0){&0:%d0}, %d2 # %d2 = new FTEMP_HI
+ bfextu FTEMP_HI(%a0){%d0:&32}, %d1 # %d1 = new FTEMP_LO
+ bfextu FTEMP_LO2(%a6){%d0:&32}, %d0 # %d0 = new G,R,S
+
+ mov.l %d2, FTEMP_HI(%a0) # store new FTEMP_HI
+ mov.l %d1, FTEMP_LO(%a0) # store new FTEMP_LO
+
+ bftst %d0{&2:&30} # were bits shifted off?
+ beq.b case1_sticky_clear # no; go finish
+ bset &rnd_stky_bit, %d0 # yes; set sticky bit
+
+case1_sticky_clear:
+ and.l &0xe0000000, %d0 # clear all but G,R,S
+ mov.l (%sp)+, %d2 # restore temp register
+ rts
+
+#
+# case (32<=d1<64)
+#
+# %d0 = denorm threshold
+# %d1 = "n" = amt to shift
+#
+# ---------------------------------------------------------
+# | FTEMP_HI | FTEMP_LO |grs000.........000|
+# ---------------------------------------------------------
+# <-(32 - n)-><-(n)-><-(32 - n)-><-(n)-><-(32 - n)-><-(n)->
+# \ \ \
+# \ \ \
+# \ \ -------------------
+# \ -------------------- \
+# ------------------- \ \
+# \ \ \
+# \ \ \
+# \ \ \
+# <-------(32)------><-(n)-><-(32 - n)-><------(32)------->
+# ---------------------------------------------------------
+# |0...............0|0....0| NEW_LO |grs |
+# ---------------------------------------------------------
+#
+case_2:
+ mov.l %d2, -(%sp) # create temp storage
+
+ mov.w %d0, FTEMP_EX(%a0) # exponent = denorm threshold
+ subi.w &0x20, %d1 # %d1 now between 0 and 32
+ mov.l &0x20, %d0
+ sub.w %d1, %d0 # %d0 = 32 - %d1
+
+# subtle step here: OR the g,r,s into the bottom of the FTEMP_LO copy to
+# minimize the number of bits to check for the sticky detect.
+# it only plays a role in shift amounts of 61-63.
+ mov.b GRS(%a6), %d2
+ or.b %d2, 3+FTEMP_LO2(%a6)
+
+ bfextu FTEMP_HI(%a0){&0:%d0}, %d2 # %d2 = new FTEMP_LO
+ bfextu FTEMP_HI(%a0){%d0:&32}, %d1 # %d1 = new G,R,S
+
+ bftst %d1{&2:&30} # were any bits shifted off?
+ bne.b case2_set_sticky # yes; set sticky bit
+ bftst FTEMP_LO2(%a6){%d0:&31} # were any bits shifted off?
+ bne.b case2_set_sticky # yes; set sticky bit
+
+ mov.l %d1, %d0 # move new G,R,S to %d0
+ bra.b case2_end
+
+case2_set_sticky:
+ mov.l %d1, %d0 # move new G,R,S to %d0
+ bset &rnd_stky_bit, %d0 # set sticky bit
+
+case2_end:
+ clr.l FTEMP_HI(%a0) # store FTEMP_HI = 0
+ mov.l %d2, FTEMP_LO(%a0) # store FTEMP_LO
+ and.l &0xe0000000, %d0 # clear all but G,R,S
+
+ mov.l (%sp)+,%d2 # restore temp register
+ rts
+
+#
+# case (d1>=64)
+#
+# %d0 = denorm threshold
+# %d1 = amt to shift
+#
+case_3:
+ mov.w %d0, FTEMP_EX(%a0) # insert denorm threshold
+
+ cmpi.w %d1, &65 # is shift amt > 65?
+ blt.b case3_64 # no; it's == 64
+ beq.b case3_65 # no; it's == 65
+
+#
+# case (d1>65)
+#
+# Shift value is > 65 and out of range. All bits are shifted off.
+# Return a zero mantissa with the sticky bit set
+#
+ clr.l FTEMP_HI(%a0) # clear hi(mantissa)
+ clr.l FTEMP_LO(%a0) # clear lo(mantissa)
+ mov.l &0x20000000, %d0 # set sticky bit
+ rts
+
+#
+# case (d1 == 64)
+#
+# ---------------------------------------------------------
+# | FTEMP_HI | FTEMP_LO |grs000.........000|
+# ---------------------------------------------------------
+# <-------(32)------>
+# \ \
+# \ \
+# \ \
+# \ ------------------------------
+# ------------------------------- \
+# \ \
+# \ \
+# \ \
+# <-------(32)------>
+# ---------------------------------------------------------
+# |0...............0|0................0|grs |
+# ---------------------------------------------------------
+#
+case3_64:
+ mov.l FTEMP_HI(%a0), %d0 # fetch hi(mantissa)
+ mov.l %d0, %d1 # make a copy
+ and.l &0xc0000000, %d0 # extract G,R
+ and.l &0x3fffffff, %d1 # extract other bits
+
+ bra.b case3_complete
+
+#
+# case (d1 == 65)
+#
+# ---------------------------------------------------------
+# | FTEMP_HI | FTEMP_LO |grs000.........000|
+# ---------------------------------------------------------
+# <-------(32)------>
+# \ \
+# \ \
+# \ \
+# \ ------------------------------
+# -------------------------------- \
+# \ \
+# \ \
+# \ \
+# <-------(31)----->
+# ---------------------------------------------------------
+# |0...............0|0................0|0rs |
+# ---------------------------------------------------------
+#
+case3_65:
+ mov.l FTEMP_HI(%a0), %d0 # fetch hi(mantissa)
+ mov.l %d0, %d1 # make a copy
+ and.l &0x80000000, %d0 # extract R bit
+ lsr.l &0x1, %d0 # shift high bit into R bit
+ and.l &0x7fffffff, %d1 # extract other bits
+
+case3_complete:
+# the last operation was an "and" of the bits shifted off, so the condition
+# codes are already set; branch accordingly.
+ bne.b case3_set_sticky # yes; go set new sticky
+ tst.l FTEMP_LO(%a0) # were any bits shifted off?
+ bne.b case3_set_sticky # yes; go set new sticky
+ tst.b GRS(%a6) # were any bits shifted off?
+ bne.b case3_set_sticky # yes; go set new sticky
+
+#
+# no bits were shifted off, so don't set the sticky bit.
+# the guard and round bits already in %d0 are kept;
+# the entire mantissa is zero.
+#
+ clr.l FTEMP_HI(%a0) # clear hi(mantissa)
+ clr.l FTEMP_LO(%a0) # clear lo(mantissa)
+ rts
+
+#
+# some bits were shifted off so set the sticky bit.
+# the entire mantissa is zero.
+#
+case3_set_sticky:
+ bset &rnd_stky_bit,%d0 # set new sticky bit
+ clr.l FTEMP_HI(%a0) # clear hi(mantissa)
+ clr.l FTEMP_LO(%a0) # clear lo(mantissa)
+ rts
+
+#########################################################################
+# XDEF **************************************************************** #
+# _round(): round result according to precision/mode #
+# #
+# XREF **************************************************************** #
+# None #
+# #
+# INPUT *************************************************************** #
+# a0 = ptr to input operand in internal extended format #
+# d1(hi) = contains rounding precision: #
+# ext = $0000xxxx #
+# sgl = $0004xxxx #
+# dbl = $0008xxxx #
+# d1(lo) = contains rounding mode: #
+# RN = $xxxx0000 #
+# RZ = $xxxx0001 #
+# RM = $xxxx0002 #
+# RP = $xxxx0003 #
+# d0{31:29} = contains the g,r,s bits (extended) #
+# #
+# OUTPUT ************************************************************** #
+# a0 = pointer to rounded result #
+# #
+# ALGORITHM *********************************************************** #
+# On return the value pointed to by a0 is correctly rounded, #
+# a0 is preserved and the g-r-s bits in d0 are cleared. #
+# The result is not typed - the tag field is invalid. The #
+# result is still in the internal extended format. #
+# #
+# The INEX bit of USER_FPSR will be set if the rounded result was #
+# inexact (i.e. if any of the g-r-s bits were set). #
+# #
+#########################################################################
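+
+# The round-to-nearest case in C terms (RZ always truncates; RM/RP add
+# one to the l-bit only when the sign matches the rounding direction):
+#
+#	if (grs) {			/* inexact: INEX2/AINEX get set */
+#		if (mode == RN && g) {
+#			man += 1;	/* add one to the l-bit */
+#			if (!r && !s)
+#				man &= ~0x1;	/* tie: round to even */
+#			/* a carry out of man bumps the exponent */
+#		}
+#	}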
+
+ global _round
+_round:
+#
+# ext_grs() looks at the rounding precision and sets the appropriate
+# G,R,S bits.
+# If (G,R,S == 0) then result is exact and round is done, else set
+# the inex flag in status reg and continue.
+#
+ bsr.l ext_grs # extract G,R,S
+
+ tst.l %d0 # are G,R,S zero?
+ beq.w truncate # yes; round is complete
+
+ or.w &inx2a_mask, 2+USER_FPSR(%a6) # set inex2/ainex
+
+#
+# Use rounding mode as an index into a jump table for these modes.
+# All of the following assumes grs != 0.
+#
+ mov.w (tbl_mode.b,%pc,%d1.w*2), %a1 # load jump offset
+ jmp (tbl_mode.b,%pc,%a1) # jmp to rnd mode handler
+
+tbl_mode:
+ short rnd_near - tbl_mode
+ short truncate - tbl_mode # RZ always truncates
+ short rnd_mnus - tbl_mode
+ short rnd_plus - tbl_mode
+
+#################################################################
+# ROUND PLUS INFINITY #
+# #
+# If sign of fp number = 0 (positive), then add 1 to l. #
+#################################################################
+rnd_plus:
+ tst.b FTEMP_SGN(%a0) # check for sign
+ bmi.w truncate # if negative then truncate
+
+ mov.l &0xffffffff, %d0 # force g,r,s to be all f's
+ swap %d1 # set up d1 for round prec.
+
+ cmpi.b %d1, &s_mode # is prec = sgl?
+ beq.w add_sgl # yes
+ bgt.w add_dbl # no; it's dbl
+ bra.w add_ext # no; it's ext
+
+#################################################################
+# ROUND MINUS INFINITY #
+# #
+# If sign of fp number = 1 (negative), then add 1 to l. #
+#################################################################
+rnd_mnus:
+ tst.b FTEMP_SGN(%a0) # check for sign
+ bpl.w truncate # if positive then truncate
+
+ mov.l &0xffffffff, %d0 # force g,r,s to be all f's
+ swap %d1 # set up d1 for round prec.
+
+ cmpi.b %d1, &s_mode # is prec = sgl?
+ beq.w add_sgl # yes
+ bgt.w add_dbl # no; it's dbl
+ bra.w add_ext # no; it's ext
+
+#################################################################
+# ROUND NEAREST #
+# #
+# If (g=1), then add 1 to l and if (r=s=0), then clear l #
+# Note that this will round to even in case of a tie. #
+#################################################################
+rnd_near:
+ asl.l &0x1, %d0 # shift g-bit to c-bit
+ bcc.w truncate # if (g=0) then truncate
+
+ swap %d1 # set up d1 for round prec.
+
+ cmpi.b %d1, &s_mode # is prec = sgl?
+ beq.w add_sgl # yes
+ bgt.w add_dbl # no; it's dbl
+ bra.w add_ext # no; it's ext
+
+# *** LOCAL EQUATES ***
+set ad_1_sgl, 0x00000100 # constant to add 1 to l-bit in sgl prec
+set ad_1_dbl, 0x00000800 # constant to add 1 to l-bit in dbl prec
+
+#########################
+# ADD SINGLE #
+#########################
+add_sgl:
+ add.l &ad_1_sgl, FTEMP_HI(%a0)
+ bcc.b scc_clr # no mantissa overflow
+ roxr.w FTEMP_HI(%a0) # shift v-bit back in
+ roxr.w FTEMP_HI+2(%a0) # shift v-bit back in
+ add.w &0x1, FTEMP_EX(%a0) # and incr exponent
+scc_clr:
+ tst.l %d0 # test for rs = 0
+ bne.b sgl_done
+ and.w &0xfe00, FTEMP_HI+2(%a0) # clear the l-bit
+sgl_done:
+ and.l &0xffffff00, FTEMP_HI(%a0) # truncate bits beyond sgl limit
+ clr.l FTEMP_LO(%a0) # clear lo(mantissa)
+ rts
+
+#########################
+# ADD EXTENDED #
+#########################
+add_ext:
+ addq.l &1,FTEMP_LO(%a0) # add 1 to l-bit
+ bcc.b xcc_clr # test for carry out
+ addq.l &1,FTEMP_HI(%a0) # propagate carry
+ bcc.b xcc_clr
+ roxr.w FTEMP_HI(%a0) # mant is 0 so restore v-bit
+ roxr.w FTEMP_HI+2(%a0) # mant is 0 so restore v-bit
+ roxr.w FTEMP_LO(%a0)
+ roxr.w FTEMP_LO+2(%a0)
+ add.w &0x1,FTEMP_EX(%a0) # and inc exp
+xcc_clr:
+ tst.l %d0 # test rs = 0
+ bne.b add_ext_done
+ and.b &0xfe,FTEMP_LO+3(%a0) # clear the l bit
+add_ext_done:
+ rts
+
+#########################
+# ADD DOUBLE #
+#########################
+add_dbl:
+ add.l &ad_1_dbl, FTEMP_LO(%a0) # add 1 to lsb
+ bcc.b dcc_clr # no carry
+ addq.l &0x1, FTEMP_HI(%a0) # propagate carry
+ bcc.b dcc_clr # no carry
+
+ roxr.w FTEMP_HI(%a0) # mant is 0 so restore v-bit
+ roxr.w FTEMP_HI+2(%a0) # mant is 0 so restore v-bit
+ roxr.w FTEMP_LO(%a0)
+ roxr.w FTEMP_LO+2(%a0)
+ addq.w &0x1, FTEMP_EX(%a0) # incr exponent
+dcc_clr:
+ tst.l %d0 # test for rs = 0
+ bne.b dbl_done
+ and.w &0xf000, FTEMP_LO+2(%a0) # clear the l-bit
+
+dbl_done:
+ and.l &0xfffff800,FTEMP_LO(%a0) # truncate bits beyond dbl limit
+ rts
+
+###########################
+# Truncate all other bits #
+###########################
+truncate:
+ swap %d1 # select rnd prec
+
+ cmpi.b %d1, &s_mode # is prec sgl?
+ beq.w sgl_done # yes
+ bgt.b dbl_done # no; it's dbl
+ rts # no; it's ext
+
+
+#
+# ext_grs(): extract guard, round and sticky bits according to
+# rounding precision.
+#
+# INPUT
+# d0 = extended precision g,r,s (in d0{31:29})
+# d1 = {PREC,ROUND}
+# OUTPUT
+# d0{31:29} = guard, round, sticky
+#
+# ext_grs() extracts the guard, round, and sticky bits according to the
+# selected rounding precision. It is called by the _round() subroutine
+# only. All registers except d0 are kept intact. d0 becomes the
+# updated guard,round,sticky in d0{31:29}.
+#
+# Notes: ext_grs() uses the round PREC, and therefore has to swap d1
+# prior to usage, and needs to restore d1 to its original value. this
+# routine is tightly tied to the round routine and not meant to
+# uphold standard subroutine calling practices.
+#
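+#
+# In C terms, for a 64-bit mantissa "man" (a sketch; the code below
+# works on FTEMP_HI and FTEMP_LO separately):
+#
+#	if (prec == sgl) {
+#		g_r = (man >> 38) & 0x3;		/* man bits 39..38 */
+#		s   = ((man & 0x3fffffffffULL) != 0) || (old_grs != 0);
+#	} else {	/* dbl */
+#		g_r = (man >> 9) & 0x3;			/* man bits 10..9 */
+#		s   = ((man & 0x1ff) != 0) || (old_grs != 0);
+#	}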
+
+ext_grs:
+ swap %d1 # have d1.w point to round precision
+ tst.b %d1 # is rnd prec = extended?
+ bne.b ext_grs_not_ext # no; go handle sgl or dbl
+
+#
+# %d0 actually already holds g,r,s since _round() had it before calling
+# this function. so, as long as we don't disturb it, we are "returning" it.
+#
+ext_grs_ext:
+ swap %d1 # yes; return to correct positions
+ rts
+
+ext_grs_not_ext:
+ movm.l &0x3000, -(%sp) # make some temp registers {d2/d3}
+
+ cmpi.b %d1, &s_mode # is rnd prec = sgl?
+ bne.b ext_grs_dbl # no; go handle dbl
+
+#
+# sgl:
+# 96 64 40 32 0
+# -----------------------------------------------------
+# | EXP |XXXXXXX| |xx | |grs|
+# -----------------------------------------------------
+# <--(24)--->nn\ /
+# ee ---------------------
+# ww |
+# v
+# gr new sticky
+#
+ext_grs_sgl:
+ bfextu FTEMP_HI(%a0){&24:&2}, %d3 # sgl prec. g-r are 2 bits right
+ mov.l &30, %d2 # of the sgl prec. limits
+ lsl.l %d2, %d3 # shift g-r bits to MSB of d3
+ mov.l FTEMP_HI(%a0), %d2 # get word 2 for s-bit test
+ and.l &0x0000003f, %d2 # s bit is the or of all other
+ bne.b ext_grs_st_stky # bits to the right of g-r
+ tst.l FTEMP_LO(%a0) # test lower mantissa
+ bne.b ext_grs_st_stky # if any are set, set sticky
+ tst.l %d0 # test original g,r,s
+ bne.b ext_grs_st_stky # if any are set, set sticky
+ bra.b ext_grs_end_sd # if words 3 and 4 are clr, exit
+
+#
+# dbl:
+# 96 64 32 11 0
+# -----------------------------------------------------
+# | EXP |XXXXXXX| | |xx |grs|
+# -----------------------------------------------------
+# nn\ /
+# ee -------
+# ww |
+# v
+# gr new sticky
+#
+ext_grs_dbl:
+ bfextu FTEMP_LO(%a0){&21:&2}, %d3 # dbl-prec. g-r are 2 bits right
+ mov.l &30, %d2 # of the dbl prec. limits
+ lsl.l %d2, %d3 # shift g-r bits to the MSB of d3
+ mov.l FTEMP_LO(%a0), %d2 # get lower mantissa for s-bit test
+ and.l &0x000001ff, %d2 # s bit is the or-ing of all
+ bne.b ext_grs_st_stky # other bits to the right of g-r
+ tst.l %d0 # test word original g,r,s
+ bne.b ext_grs_st_stky # if any are set, set sticky
+ bra.b ext_grs_end_sd # if clear, exit
+
+ext_grs_st_stky:
+ bset &rnd_stky_bit, %d3 # set sticky bit
+ext_grs_end_sd:
+ mov.l %d3, %d0 # return grs to d0
+
+ movm.l (%sp)+, &0xc # restore scratch registers {d2/d3}
+
+ swap %d1 # restore d1 to original
+ rts
+
+#########################################################################
+# norm(): normalize the mantissa of an extended precision input. the #
+# input operand should not be normalized already. #
+# #
+# XDEF **************************************************************** #
+# norm() #
+# #
+# XREF **************************************************************** #
+# none #
+# #
+# INPUT *************************************************************** #
+# a0 = pointer fp extended precision operand to normalize #
+# #
+# OUTPUT ************************************************************** #
+# d0 = number of bit positions the mantissa was shifted #
+# a0 = the input operand's mantissa is normalized; the exponent #
+# is unchanged. #
+# #
+#########################################################################
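+
+# Equivalent C (the code uses bfffo rather than a loop to find the
+# shift count; the caller must pass a nonzero mantissa):
+#
+#	shift = 0;
+#	while (!(man >> 63)) {		/* msb (the j-bit) clear? */
+#		man <<= 1;
+#		shift++;
+#	}
+#	return shift;			/* returned in d0 */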
+ global norm
+norm:
+ mov.l %d2, -(%sp) # create some temp regs
+ mov.l %d3, -(%sp)
+
+ mov.l FTEMP_HI(%a0), %d0 # load hi(mantissa)
+ mov.l FTEMP_LO(%a0), %d1 # load lo(mantissa)
+
+ bfffo %d0{&0:&32}, %d2 # how many places to shift?
+ beq.b norm_lo # hi(man) is all zeroes!
+
+norm_hi:
+ lsl.l %d2, %d0 # left shift hi(man)
+ bfextu %d1{&0:%d2}, %d3 # extract lo bits
+
+ or.l %d3, %d0 # create hi(man)
+ lsl.l %d2, %d1 # create lo(man)
+
+ mov.l %d0, FTEMP_HI(%a0) # store new hi(man)
+ mov.l %d1, FTEMP_LO(%a0) # store new lo(man)
+
+ mov.l %d2, %d0 # return shift amount
+
+ mov.l (%sp)+, %d3 # restore temp regs
+ mov.l (%sp)+, %d2
+
+ rts
+
+norm_lo:
+ bfffo %d1{&0:&32}, %d2 # how many places to shift?
+ lsl.l %d2, %d1 # shift lo(man)
+ add.l &32, %d2 # add 32 to shft amount
+
+ mov.l %d1, FTEMP_HI(%a0) # store hi(man)
+ clr.l FTEMP_LO(%a0) # lo(man) is now zero
+
+ mov.l %d2, %d0 # return shift amount
+
+ mov.l (%sp)+, %d3 # restore temp regs
+ mov.l (%sp)+, %d2
+
+ rts
+
+#########################################################################
+# unnorm_fix(): - changes an UNNORM to one of NORM, DENORM, or ZERO #
+# - returns corresponding optype tag #
+# #
+# XDEF **************************************************************** #
+# unnorm_fix() #
+# #
+# XREF **************************************************************** #
+# norm() - normalize the mantissa #
+# #
+# INPUT *************************************************************** #
+# a0 = pointer to unnormalized extended precision number #
+# #
+# OUTPUT ************************************************************** #
+# d0 = optype tag - is corrected to one of NORM, DENORM, or ZERO #
+# a0 = input operand has been converted to a norm, denorm, or #
+# zero; both the exponent and mantissa are changed. #
+# #
+#########################################################################
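+
+# The classification in C terms (clz = number of leading mantissa
+# zeroes, as computed by the bfffo instructions below):
+#
+#	if (man == 0)        { exp = 0;     tag = ZERO; }
+#	else if (clz <= exp) { man <<= clz; exp -= clz; tag = NORM; }
+#	else                 { man <<= exp; exp = 0;    tag = DENORM; }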
+
+ global unnorm_fix
+unnorm_fix:
+ bfffo FTEMP_HI(%a0){&0:&32}, %d0 # how many shifts are needed?
+ bne.b unnorm_shift # hi(man) is not all zeroes
+
+#
+# hi(man) is all zeroes so see if any bits in lo(man) are set
+#
+unnorm_chk_lo:
+ bfffo FTEMP_LO(%a0){&0:&32}, %d0 # is operand really a zero?
+ beq.w unnorm_zero # yes
+
+ add.w &32, %d0 # no; fix shift distance
+
+#
+# d0 = # shifts needed for complete normalization
+#
+unnorm_shift:
+ clr.l %d1 # clear top word
+ mov.w FTEMP_EX(%a0), %d1 # extract exponent
+ and.w &0x7fff, %d1 # strip off sgn
+
+ cmp.w %d0, %d1 # will denorm push exp < 0?
+ bgt.b unnorm_nrm_zero # yes; denorm only until exp = 0
+
+#
+# exponent would not go < 0. therefore, number stays normalized
+#
+ sub.w %d0, %d1 # shift exponent value
+ mov.w FTEMP_EX(%a0), %d0 # load old exponent
+ and.w &0x8000, %d0 # save old sign
+ or.w %d0, %d1 # {sgn,new exp}
+ mov.w %d1, FTEMP_EX(%a0) # insert new exponent
+
+ bsr.l norm # normalize UNNORM
+
+ mov.b &NORM, %d0 # return new optype tag
+ rts
+
+#
+# exponent would go < 0, so only denormalize until exp = 0
+#
+unnorm_nrm_zero:
+ cmp.b %d1, &32 # is exp <= 32?
+ bgt.b unnorm_nrm_zero_lrg # no; go handle large exponent
+
+ bfextu FTEMP_HI(%a0){%d1:&32}, %d0 # extract new hi(man)
+ mov.l %d0, FTEMP_HI(%a0) # save new hi(man)
+
+ mov.l FTEMP_LO(%a0), %d0 # fetch old lo(man)
+ lsl.l %d1, %d0 # extract new lo(man)
+ mov.l %d0, FTEMP_LO(%a0) # save new lo(man)
+
+ and.w &0x8000, FTEMP_EX(%a0) # set exp = 0
+
+ mov.b &DENORM, %d0 # return new optype tag
+ rts
+
+#
+# only mantissa bits set are in lo(man)
+#
+unnorm_nrm_zero_lrg:
+ sub.w &32, %d1 # adjust shft amt by 32
+
+ mov.l FTEMP_LO(%a0), %d0 # fetch old lo(man)
+ lsl.l %d1, %d0 # left shift lo(man)
+
+ mov.l %d0, FTEMP_HI(%a0) # store new hi(man)
+ clr.l FTEMP_LO(%a0) # lo(man) = 0
+
+ and.w &0x8000, FTEMP_EX(%a0) # set exp = 0
+
+ mov.b &DENORM, %d0 # return new optype tag
+ rts
+
+#
+# whole mantissa is zero so this UNNORM is actually a zero
+#
+unnorm_zero:
+ and.w &0x8000, FTEMP_EX(%a0) # force exponent to zero
+
+ mov.b &ZERO, %d0 # fix optype tag
+ rts
+
+#########################################################################
+# XDEF **************************************************************** #
+# set_tag_x(): return the optype of the input ext fp number #
+# #
+# XREF **************************************************************** #
+# None #
+# #
+# INPUT *************************************************************** #
+# a0 = pointer to extended precision operand #
+# #
+# OUTPUT ************************************************************** #
+# d0 = value of type tag #
+# one of: NORM, INF, QNAN, SNAN, DENORM, UNNORM, ZERO #
+# #
+# ALGORITHM *********************************************************** #
+# Simply test the exponent, j-bit, and mantissa values to #
+# determine the type of operand. #
+# If it's an unnormalized zero, alter the operand and force it #
+# to be a normal zero. #
+# #
+#########################################################################
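+
+# The tests in C terms (j = mantissa bit 63, q = mantissa bit 62; the
+# mantissa msb is a don't-care when deciding between INF and NAN):
+#
+#	if (exp == 0x7fff)
+#		tag = (man << 1) ? (q ? QNAN : SNAN) : INF;
+#	else if (j)   tag = NORM;
+#	else if (exp) tag = man ? UNNORM : ZERO; /* unnorm zero: exp cleared */
+#	else          tag = man ? DENORM : ZERO;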
+
+ global set_tag_x
+set_tag_x:
+ mov.w FTEMP_EX(%a0), %d0 # extract exponent
+ andi.w &0x7fff, %d0 # strip off sign
+ cmpi.w %d0, &0x7fff # is (EXP == MAX)?
+ beq.b inf_or_nan_x
+not_inf_or_nan_x:
+ btst &0x7,FTEMP_HI(%a0)
+ beq.b not_norm_x
+is_norm_x:
+ mov.b &NORM, %d0
+ rts
+not_norm_x:
+ tst.w %d0 # is exponent = 0?
+ bne.b is_unnorm_x
+not_unnorm_x:
+ tst.l FTEMP_HI(%a0)
+ bne.b is_denorm_x
+ tst.l FTEMP_LO(%a0)
+ bne.b is_denorm_x
+is_zero_x:
+ mov.b &ZERO, %d0
+ rts
+is_denorm_x:
+ mov.b &DENORM, %d0
+ rts
+# we must now distinguish "unnormalized zeroes", which we
+# must convert to true zeroes.
+is_unnorm_x:
+ tst.l FTEMP_HI(%a0)
+ bne.b is_unnorm_reg_x
+ tst.l FTEMP_LO(%a0)
+ bne.b is_unnorm_reg_x
+# it's an "unnormalized zero". let's convert it to an actual zero...
+ andi.w &0x8000,FTEMP_EX(%a0) # clear exponent
+ mov.b &ZERO, %d0
+ rts
+is_unnorm_reg_x:
+ mov.b &UNNORM, %d0
+ rts
+inf_or_nan_x:
+ tst.l FTEMP_LO(%a0)
+ bne.b is_nan_x
+ mov.l FTEMP_HI(%a0), %d0
+ and.l &0x7fffffff, %d0 # msb is a don't care!
+ bne.b is_nan_x
+is_inf_x:
+ mov.b &INF, %d0
+ rts
+is_nan_x:
+ btst &0x6, FTEMP_HI(%a0)
+ beq.b is_snan_x
+ mov.b &QNAN, %d0
+ rts
+is_snan_x:
+ mov.b &SNAN, %d0
+ rts
+
+#########################################################################
+# XDEF **************************************************************** #
+# set_tag_d(): return the optype of the input dbl fp number #
+# #
+# XREF **************************************************************** #
+# None #
+# #
+# INPUT *************************************************************** #
+# a0 = points to double precision operand #
+# #
+# OUTPUT ************************************************************** #
+# d0 = value of type tag #
+# one of: NORM, INF, QNAN, SNAN, DENORM, ZERO #
+# #
+# ALGORITHM *********************************************************** #
+# Simply test the exponent, j-bit, and mantissa values to #
+# determine the type of operand. #
+# #
+#########################################################################
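+
+# In C, for the raw 64-bit IEEE double "d" (fraction bit 51 is the
+# quiet bit tested with btst &19 below):
+#
+#	exp  = (d >> 52) & 0x7ff;
+#	frac = d & 0x000fffffffffffffULL;
+#	if (exp == 0x7ff) tag = frac ? ((frac >> 51) ? QNAN : SNAN) : INF;
+#	else if (exp)     tag = NORM;
+#	else              tag = frac ? DENORM : ZERO;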
+
+ global set_tag_d
+set_tag_d:
+ mov.l FTEMP(%a0), %d0
+ mov.l %d0, %d1
+
+ andi.l &0x7ff00000, %d0
+ beq.b zero_or_denorm_d
+
+ cmpi.l %d0, &0x7ff00000
+ beq.b inf_or_nan_d
+
+is_norm_d:
+ mov.b &NORM, %d0
+ rts
+zero_or_denorm_d:
+ and.l &0x000fffff, %d1
+ bne is_denorm_d
+ tst.l 4+FTEMP(%a0)
+ bne is_denorm_d
+is_zero_d:
+ mov.b &ZERO, %d0
+ rts
+is_denorm_d:
+ mov.b &DENORM, %d0
+ rts
+inf_or_nan_d:
+ and.l &0x000fffff, %d1
+ bne is_nan_d
+ tst.l 4+FTEMP(%a0)
+ bne is_nan_d
+is_inf_d:
+ mov.b &INF, %d0
+ rts
+is_nan_d:
+ btst &19, %d1
+ bne is_qnan_d
+is_snan_d:
+ mov.b &SNAN, %d0
+ rts
+is_qnan_d:
+ mov.b &QNAN, %d0
+ rts
+
+#########################################################################
+# XDEF **************************************************************** #
+# set_tag_s(): return the optype of the input sgl fp number #
+# #
+# XREF **************************************************************** #
+# None #
+# #
+# INPUT *************************************************************** #
+# a0 = pointer to single precision operand #
+# #
+# OUTPUT ************************************************************** #
+# d0 = value of type tag #
+# one of: NORM, INF, QNAN, SNAN, DENORM, ZERO #
+# #
+# ALGORITHM *********************************************************** #
+# Simply test the exponent, j-bit, and mantissa values to #
+# determine the type of operand. #
+# #
+#########################################################################
+
+ global set_tag_s
+set_tag_s:
+ mov.l FTEMP(%a0), %d0
+ mov.l %d0, %d1
+
+ andi.l &0x7f800000, %d0
+ beq.b zero_or_denorm_s
+
+ cmpi.l %d0, &0x7f800000
+ beq.b inf_or_nan_s
+
+is_norm_s:
+ mov.b &NORM, %d0
+ rts
+zero_or_denorm_s:
+ and.l &0x007fffff, %d1
+ bne is_denorm_s
+is_zero_s:
+ mov.b &ZERO, %d0
+ rts
+is_denorm_s:
+ mov.b &DENORM, %d0
+ rts
+inf_or_nan_s:
+ and.l &0x007fffff, %d1
+ bne is_nan_s
+is_inf_s:
+ mov.b &INF, %d0
+ rts
+is_nan_s:
+ btst &22, %d1
+ bne is_qnan_s
+is_snan_s:
+ mov.b &SNAN, %d0
+ rts
+is_qnan_s:
+ mov.b &QNAN, %d0
+ rts
+
+#########################################################################
+# XDEF **************************************************************** #
+# unf_res(): routine to produce default underflow result of a #
+# scaled extended precision number; this is used by #
+# fadd/fdiv/fmul/etc. emulation routines. #
+# unf_res4(): same as above but for fsglmul/fsgldiv which use #
+# single round prec and extended prec mode. #
+# #
+# XREF **************************************************************** #
+# _denorm() - denormalize according to scale factor #
+# _round() - round denormalized number according to rnd prec #
+# #
+# INPUT *************************************************************** #
+# a0 = pointer to extended precision operand #
+# d0 = scale factor #
+# d1 = rounding precision/mode #
+# #
+# OUTPUT ************************************************************** #
+# a0 = pointer to default underflow result in extended precision #
+# d0.b = result FPSR_cc which caller may or may not want to save #
+# #
+# ALGORITHM *********************************************************** #
+# Convert the input operand to "internal format" which means the #
+# exponent is extended to 16 bits and the sign is stored in the unused #
+# portion of the extended precision operand. Denormalize the number #
+# according to the scale factor passed in d0. Then, round the #
+# denormalized result. #
+# Set the FPSR_exc bits as appropriate but return the cc bits in #
+# d0 in case the caller doesn't want to save them (as is the case for #
+# fmove out). #
+# unf_res4() for fsglmul/fsgldiv forces the denorm to extended #
+# precision and the rounding mode to single. #
+# #
+#########################################################################
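+#
+# The overall flow in C-like form (a sketch; denorm() and round()
+# stand for the _denorm() and _round() routines above):
+#
+#	exp = (se & 0x7fff) - scale;	/* 16 bits; may go negative */
+#	grs = denorm(&man, &exp, thresh[prec]);
+#	round(&man, &exp, prec, mode, grs);
+#	/* then restore the sign, return Z in d0.b if man rounded to
+#	 * zero, and set AUNFL if the rounding set INEX2 */
+#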
+ global unf_res
+unf_res:
+ mov.l %d1, -(%sp) # save rnd prec,mode on stack
+
+ btst &0x7, FTEMP_EX(%a0) # make "internal" format
+ sne FTEMP_SGN(%a0)
+
+ mov.w FTEMP_EX(%a0), %d1 # extract exponent
+ and.w &0x7fff, %d1
+ sub.w %d0, %d1
+ mov.w %d1, FTEMP_EX(%a0) # insert 16 bit exponent
+
+ mov.l %a0, -(%sp) # save operand ptr during calls
+
+ mov.l 0x4(%sp),%d0 # pass rnd prec.
+ andi.w &0x00c0,%d0
+ lsr.w &0x4,%d0
+ bsr.l _denorm # denorm result
+
+ mov.l (%sp),%a0
+ mov.w 0x6(%sp),%d1 # load prec:mode into %d1
+ andi.w &0xc0,%d1 # extract rnd prec
+ lsr.w &0x4,%d1
+ swap %d1
+ mov.w 0x6(%sp),%d1
+ andi.w &0x30,%d1
+ lsr.w &0x4,%d1
+ bsr.l _round # round the denorm
+
+ mov.l (%sp)+, %a0
+
+# result is now rounded properly. convert back to normal format
+ bclr &0x7, FTEMP_EX(%a0) # clear sgn first; may have residue
+ tst.b FTEMP_SGN(%a0) # is "internal result" sign set?
+ beq.b unf_res_chkifzero # no; result is positive
+ bset &0x7, FTEMP_EX(%a0) # set result sgn
+ clr.b FTEMP_SGN(%a0) # clear temp sign
+
+# the number may have become zero after rounding. set ccodes accordingly.
+unf_res_chkifzero:
+ clr.l %d0
+ tst.l FTEMP_HI(%a0) # is value now a zero?
+ bne.b unf_res_cont # no
+ tst.l FTEMP_LO(%a0)
+ bne.b unf_res_cont # no
+# bset &z_bit, FPSR_CC(%a6) # yes; set zero ccode bit
+ bset &z_bit, %d0 # yes; set zero ccode bit
+
+unf_res_cont:
+
+#
+# can inex1 also be set along with unfl and inex2???
+#
+# we know that underflow has occurred. aunfl should be set if INEX2 is also set.
+#
+ btst &inex2_bit, FPSR_EXCEPT(%a6) # is INEX2 set?
+ beq.b unf_res_end # no
+ bset &aunfl_bit, FPSR_AEXCEPT(%a6) # yes; set aunfl
+
+unf_res_end:
+ add.l &0x4, %sp # clear stack
+ rts
+
+# unf_res() for fsglmul() and fsgldiv().
+ global unf_res4
+unf_res4:
+ mov.l %d1,-(%sp) # save rnd prec,mode on stack
+
+ btst &0x7,FTEMP_EX(%a0) # make "internal" format
+ sne FTEMP_SGN(%a0)
+
+ mov.w FTEMP_EX(%a0),%d1 # extract exponent
+ and.w &0x7fff,%d1
+ sub.w %d0,%d1
+ mov.w %d1,FTEMP_EX(%a0) # insert 16 bit exponent
+
+ mov.l %a0,-(%sp) # save operand ptr during calls
+
+ clr.l %d0 # force rnd prec = ext
+ bsr.l _denorm # denorm result
+
+ mov.l (%sp),%a0
+ mov.w &s_mode,%d1 # force rnd prec = sgl
+ swap %d1
+ mov.w 0x6(%sp),%d1 # load rnd mode
+ andi.w &0x30,%d1 # extract rnd mode
+ lsr.w &0x4,%d1
+ bsr.l _round # round the denorm
+
+ mov.l (%sp)+,%a0
+
+# result is now rounded properly. convert back to normal format
+ bclr &0x7,FTEMP_EX(%a0) # clear sgn first; may have residue
+ tst.b FTEMP_SGN(%a0) # is "internal result" sign set?
+ beq.b unf_res4_chkifzero # no; result is positive
+ bset &0x7,FTEMP_EX(%a0) # set result sgn
+ clr.b FTEMP_SGN(%a0) # clear temp sign
+
+# the number may have become zero after rounding. set ccodes accordingly.
+unf_res4_chkifzero:
+ clr.l %d0
+ tst.l FTEMP_HI(%a0) # is value now a zero?
+ bne.b unf_res4_cont # no
+ tst.l FTEMP_LO(%a0)
+ bne.b unf_res4_cont # no
+# bset &z_bit,FPSR_CC(%a6) # yes; set zero ccode bit
+ bset &z_bit,%d0 # yes; set zero ccode bit
+
+unf_res4_cont:
+
+#
+# can inex1 also be set along with unfl and inex2???
+#
+# we know that underflow has occurred. aunfl should be set if INEX2 is also set.
+#
+ btst &inex2_bit,FPSR_EXCEPT(%a6) # is INEX2 set?
+ beq.b unf_res4_end # no
+ bset &aunfl_bit,FPSR_AEXCEPT(%a6) # yes; set aunfl
+
+unf_res4_end:
+ add.l &0x4,%sp # clear stack
+ rts
+
+#########################################################################
+# XDEF **************************************************************** #
+# ovf_res(): routine to produce the default overflow result of #
+# an overflowing number. #
+# ovf_res2(): same as above but the rnd mode/prec are passed #
+# differently. #
+# #
+# XREF **************************************************************** #
+# none #
+# #
+# INPUT *************************************************************** #
+# d1.b = '-1' => (-); '0' => (+) #
+# ovf_res(): #
+# d0 = rnd mode/prec #
+# ovf_res2(): #
+# hi(d0) = rnd prec #
+# lo(d0) = rnd mode #
+# #
+# OUTPUT ************************************************************** #
+# a0 = points to extended precision result #
+# d0.b = condition code bits #
+# #
+# ALGORITHM *********************************************************** #
+# The default overflow result can be determined by the sign of #
+# the result and the rounding mode/prec in effect. These bits are #
+# concatenated together to create an index into the default result #
+# table. A pointer to the correct result is returned in a0. The #
+# resulting condition codes are returned in d0 in case the caller #
+# doesn't want FPSR_cc altered (as is the case for fmove out). #
+# #
+#########################################################################
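+
+# Index construction in C terms (each tbl_ovfl_result entry below is
+# 16 bytes, hence the doubled index with the *8 scale):
+#
+#	idx    = (sign_neg << 4) | (prec << 2) | mode;
+#	ccodes = tbl_ovfl_cc[idx];		/* returned in d0.b */
+#	result = &tbl_ovfl_result[idx];		/* returned in a0 */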
+
+ global ovf_res
+ovf_res:
+ andi.w &0x10,%d1 # keep result sign
+ lsr.b &0x4,%d0 # shift prec/mode
+ or.b %d0,%d1 # concat the two
+ mov.w %d1,%d0 # make a copy
+ lsl.b &0x1,%d1 # multiply d1 by 2
+ bra.b ovf_res_load
+
+ global ovf_res2
+ovf_res2:
+ and.w &0x10, %d1 # keep result sign
+ or.b %d0, %d1 # insert rnd mode
+ swap %d0
+ or.b %d0, %d1 # insert rnd prec
+ mov.w %d1, %d0 # make a copy
+ lsl.b &0x1, %d1 # shift left by 1
+
+#
+# use the rounding mode, precision, and result sign as an index into the
+# two tables below to fetch the default result and the result ccodes.
+#
+ovf_res_load:
+ mov.b (tbl_ovfl_cc.b,%pc,%d0.w*1), %d0 # fetch result ccodes
+ lea (tbl_ovfl_result.b,%pc,%d1.w*8), %a0 # return result ptr
+
+ rts
+
+tbl_ovfl_cc:
+ byte 0x2, 0x0, 0x0, 0x2
+ byte 0x2, 0x0, 0x0, 0x2
+ byte 0x2, 0x0, 0x0, 0x2
+ byte 0x0, 0x0, 0x0, 0x0
+ byte 0x2+0x8, 0x8, 0x2+0x8, 0x8
+ byte 0x2+0x8, 0x8, 0x2+0x8, 0x8
+ byte 0x2+0x8, 0x8, 0x2+0x8, 0x8
+
+tbl_ovfl_result:
+ long 0x7fff0000,0x00000000,0x00000000,0x00000000 # +INF; RN
+ long 0x7ffe0000,0xffffffff,0xffffffff,0x00000000 # +EXT; RZ
+ long 0x7ffe0000,0xffffffff,0xffffffff,0x00000000 # +EXT; RM
+ long 0x7fff0000,0x00000000,0x00000000,0x00000000 # +INF; RP
+
+ long 0x7fff0000,0x00000000,0x00000000,0x00000000 # +INF; RN
+ long 0x407e0000,0xffffff00,0x00000000,0x00000000 # +SGL; RZ
+ long 0x407e0000,0xffffff00,0x00000000,0x00000000 # +SGL; RM
+ long 0x7fff0000,0x00000000,0x00000000,0x00000000 # +INF; RP
+
+ long 0x7fff0000,0x00000000,0x00000000,0x00000000 # +INF; RN
+ long 0x43fe0000,0xffffffff,0xfffff800,0x00000000 # +DBL; RZ
+ long 0x43fe0000,0xffffffff,0xfffff800,0x00000000 # +DBL; RM
+ long 0x7fff0000,0x00000000,0x00000000,0x00000000 # +INF; RP
+
+ long 0x00000000,0x00000000,0x00000000,0x00000000
+ long 0x00000000,0x00000000,0x00000000,0x00000000
+ long 0x00000000,0x00000000,0x00000000,0x00000000
+ long 0x00000000,0x00000000,0x00000000,0x00000000
+
+ long 0xffff0000,0x00000000,0x00000000,0x00000000 # -INF; RN
+ long 0xfffe0000,0xffffffff,0xffffffff,0x00000000 # -EXT; RZ
+ long 0xffff0000,0x00000000,0x00000000,0x00000000 # -INF; RM
+ long 0xfffe0000,0xffffffff,0xffffffff,0x00000000 # -EXT; RP
+
+ long 0xffff0000,0x00000000,0x00000000,0x00000000 # -INF; RN
+ long 0xc07e0000,0xffffff00,0x00000000,0x00000000 # -SGL; RZ
+ long 0xffff0000,0x00000000,0x00000000,0x00000000 # -INF; RM
+ long 0xc07e0000,0xffffff00,0x00000000,0x00000000 # -SGL; RP
+
+ long 0xffff0000,0x00000000,0x00000000,0x00000000 # -INF; RN
+ long 0xc3fe0000,0xffffffff,0xfffff800,0x00000000 # -DBL; RZ
+ long 0xffff0000,0x00000000,0x00000000,0x00000000 # -INF; RM
+ long 0xc3fe0000,0xffffffff,0xfffff800,0x00000000 # -DBL; RP
+
+#########################################################################
+# XDEF **************************************************************** #
+# fout(): move from fp register to memory or data register #
+# #
+# XREF **************************************************************** #
+# _round() - needed to create EXOP for sgl/dbl precision #
+# norm() - needed to create EXOP for extended precision #
+# ovf_res() - create default overflow result for sgl/dbl precision#
+# unf_res() - create default underflow result for sgl/dbl prec. #
+# dst_dbl() - create rounded dbl precision result. #
+# dst_sgl() - create rounded sgl precision result. #
+# fetch_dreg() - fetch dynamic k-factor reg for packed. #
+# bindec() - convert FP binary number to packed number. #
+# _mem_write() - write data to memory. #
+# _mem_write2() - write data to memory unless supv mode -(a7) exc.#
+# _dmem_write_{byte,word,long}() - write data to memory. #
+# store_dreg_{b,w,l}() - store data to data register file. #
+# facc_out_{b,w,l,d,x}() - data access error occurred. #
+# #
+# INPUT *************************************************************** #
+# a0 = pointer to extended precision source operand #
+# d0 = round prec,mode #
+# #
+# OUTPUT ************************************************************** #
+# fp0 : intermediate underflow or overflow result if #
+# OVFL/UNFL occurred for a sgl or dbl operand #
+# #
+# ALGORITHM *********************************************************** #
+# This routine is accessed by many handlers that need to do an #
+# opclass three move of an operand out to memory. #
+# Decode an fmove out (opclass 3) instruction to determine if #
+# it's b,w,l,s,d,x, or p in size. b,w,l can be stored to either a data #
+# register or memory. The algorithm uses a standard "fmove" to create #
+# the rounded result. Also, since exceptions are disabled, this also #
+# creates the correct OPERR default result if appropriate.		#
+# For sgl or dbl precision, overflow or underflow can occur. If #
+# either occurs and is enabled, the EXOP is created and returned.	#
+# For extended precision, the stacked <ea> must be fixed along #
+# w/ the address index register as appropriate w/ _calc_ea_fout(). If #
+# the source is a denorm and if underflow is enabled, an EXOP must be #
+# created. #
+# For packed, the k-factor must be fetched from the instruction #
+# word or a data register. The <ea> must be fixed as w/ extended #
+# precision. Then, bindec() is called to create the appropriate #
+# packed result. #
+# If at any time an access error is flagged by one of the move- #
+# to-memory routines, then a special exit must be made so that the #
+# access error can be handled properly. #
+# #
+#########################################################################
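+
+# as a hedged C model of the dispatch below (the handler names are the
+# labels in tbl_fout; the decode itself is the bfextu of bits 12-10 of
+# the command word):
+#
+#	switch ((cmdreg >> 10) & 0x7) {
+#	case 0: fout_long(); break;	/* fmove.l */
+#	case 1: fout_sgl();  break;	/* fmove.s */
+#	case 2: fout_ext();  break;	/* fmove.x */
+#	case 3: fout_pack(); break;	/* fmove.p (static k-factor)  */
+#	case 4: fout_word(); break;	/* fmove.w */
+#	case 5: fout_dbl();  break;	/* fmove.d */
+#	case 6: fout_byte(); break;	/* fmove.b */
+#	case 7: fout_pack(); break;	/* fmove.p (dynamic k-factor) */
+#	}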
+
+ global fout
+fout:
+ bfextu EXC_CMDREG(%a6){&3:&3},%d1 # extract dst fmt
+ mov.w (tbl_fout.b,%pc,%d1.w*2),%a1 # use as index
+ jmp (tbl_fout.b,%pc,%a1) # jump to routine
+
+ swbeg &0x8
+tbl_fout:
+ short fout_long - tbl_fout
+ short fout_sgl - tbl_fout
+ short fout_ext - tbl_fout
+ short fout_pack - tbl_fout
+ short fout_word - tbl_fout
+ short fout_dbl - tbl_fout
+ short fout_byte - tbl_fout
+ short fout_pack - tbl_fout
+
+#################################################################
+# fmove.b out ###################################################
+#################################################################
+
+# Only "Unimplemented Data Type" exceptions enter here. The operand
+# is either a DENORM or a NORM.
+fout_byte:
+ tst.b STAG(%a6) # is operand normalized?
+ bne.b fout_byte_denorm # no
+
+ fmovm.x SRC(%a0),&0x80 # load value
+
+fout_byte_norm:
+ fmov.l %d0,%fpcr # insert rnd prec,mode
+
+ fmov.b %fp0,%d0 # exec move out w/ correct rnd mode
+
+ fmov.l &0x0,%fpcr # clear FPCR
+ fmov.l %fpsr,%d1 # fetch FPSR
+ or.w %d1,2+USER_FPSR(%a6) # save new exc,accrued bits
+
+ mov.b 1+EXC_OPWORD(%a6),%d1 # extract dst mode
+ andi.b &0x38,%d1 # is mode == 0? (Dreg dst)
+ beq.b fout_byte_dn # must save to integer regfile
+
+ mov.l EXC_EA(%a6),%a0 # stacked <ea> is correct
+ bsr.l _dmem_write_byte # write byte
+
+ tst.l %d1 # did dstore fail?
+ bne.l facc_out_b # yes
+
+ rts
+
+fout_byte_dn:
+ mov.b 1+EXC_OPWORD(%a6),%d1 # extract Dn
+ andi.w &0x7,%d1
+ bsr.l store_dreg_b
+ rts
+
+fout_byte_denorm:
+ mov.l SRC_EX(%a0),%d1
+ andi.l &0x80000000,%d1 # keep DENORM sign
+ ori.l &0x00800000,%d1 # make smallest sgl
+ fmov.s %d1,%fp0
+ bra.b fout_byte_norm
+
+#################################################################
+# fmove.w out ###################################################
+#################################################################
+
+# Only "Unimplemented Data Type" exceptions enter here. The operand
+# is either a DENORM or a NORM.
+fout_word:
+ tst.b STAG(%a6) # is operand normalized?
+ bne.b fout_word_denorm # no
+
+ fmovm.x SRC(%a0),&0x80 # load value
+
+fout_word_norm:
+ fmov.l %d0,%fpcr # insert rnd prec:mode
+
+ fmov.w %fp0,%d0 # exec move out w/ correct rnd mode
+
+ fmov.l &0x0,%fpcr # clear FPCR
+ fmov.l %fpsr,%d1 # fetch FPSR
+ or.w %d1,2+USER_FPSR(%a6) # save new exc,accrued bits
+
+ mov.b 1+EXC_OPWORD(%a6),%d1 # extract dst mode
+ andi.b &0x38,%d1 # is mode == 0? (Dreg dst)
+ beq.b fout_word_dn # must save to integer regfile
+
+ mov.l EXC_EA(%a6),%a0 # stacked <ea> is correct
+ bsr.l _dmem_write_word # write word
+
+ tst.l %d1 # did dstore fail?
+ bne.l facc_out_w # yes
+
+ rts
+
+fout_word_dn:
+ mov.b 1+EXC_OPWORD(%a6),%d1 # extract Dn
+ andi.w &0x7,%d1
+ bsr.l store_dreg_w
+ rts
+
+fout_word_denorm:
+ mov.l SRC_EX(%a0),%d1
+ andi.l &0x80000000,%d1 # keep DENORM sign
+ ori.l &0x00800000,%d1 # make smallest sgl
+ fmov.s %d1,%fp0
+ bra.b fout_word_norm
+
+#################################################################
+# fmove.l out ###################################################
+#################################################################
+
+# Only "Unimplemented Data Type" exceptions enter here. The operand
+# is either a DENORM or a NORM.
+fout_long:
+ tst.b STAG(%a6) # is operand normalized?
+ bne.b fout_long_denorm # no
+
+ fmovm.x SRC(%a0),&0x80 # load value
+
+fout_long_norm:
+ fmov.l %d0,%fpcr # insert rnd prec:mode
+
+ fmov.l %fp0,%d0 # exec move out w/ correct rnd mode
+
+ fmov.l &0x0,%fpcr # clear FPCR
+ fmov.l %fpsr,%d1 # fetch FPSR
+ or.w %d1,2+USER_FPSR(%a6) # save new exc,accrued bits
+
+fout_long_write:
+ mov.b 1+EXC_OPWORD(%a6),%d1 # extract dst mode
+ andi.b &0x38,%d1 # is mode == 0? (Dreg dst)
+ beq.b fout_long_dn # must save to integer regfile
+
+ mov.l EXC_EA(%a6),%a0 # stacked <ea> is correct
+ bsr.l _dmem_write_long # write long
+
+ tst.l %d1 # did dstore fail?
+ bne.l facc_out_l # yes
+
+ rts
+
+fout_long_dn:
+ mov.b 1+EXC_OPWORD(%a6),%d1 # extract Dn
+ andi.w &0x7,%d1
+ bsr.l store_dreg_l
+ rts
+
+fout_long_denorm:
+ mov.l SRC_EX(%a0),%d1
+ andi.l &0x80000000,%d1 # keep DENORM sign
+ ori.l &0x00800000,%d1 # make smallest sgl
+ fmov.s %d1,%fp0
+ bra.b fout_long_norm
+
+#################################################################
+# fmove.x out ###################################################
+#################################################################
+
+# Only "Unimplemented Data Type" exceptions enter here. The operand
+# is either a DENORM or a NORM.
+# The DENORM causes an Underflow exception.
+fout_ext:
+
+# we copy the extended precision result to FP_SCR0 so that the reserved
+# 16-bit field gets zeroed. we do this since we promise not to disturb
+# what's at SRC(a0).
+ mov.w SRC_EX(%a0),FP_SCR0_EX(%a6)
+ clr.w 2+FP_SCR0_EX(%a6) # clear reserved field
+ mov.l SRC_HI(%a0),FP_SCR0_HI(%a6)
+ mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
+
+ fmovm.x SRC(%a0),&0x80 # return result
+
+ bsr.l _calc_ea_fout # fix stacked <ea>
+
+ mov.l %a0,%a1 # pass: dst addr
+ lea FP_SCR0(%a6),%a0 # pass: src addr
+ mov.l &0xc,%d0 # pass: opsize is 12 bytes
+
+# we must not yet write the extended precision data to the stack
+# in the pre-decrement case from supervisor mode or else we'll corrupt
+# the stack frame. so, leave it in FP_SRC for now and deal with it later...
+ cmpi.b SPCOND_FLG(%a6),&mda7_flg
+ beq.b fout_ext_a7
+
+ bsr.l _dmem_write # write ext prec number to memory
+
+ tst.l %d1 # did dstore fail?
+ bne.w fout_ext_err # yes
+
+ tst.b STAG(%a6) # is operand normalized?
+ bne.b fout_ext_denorm # no
+ rts
+
+# the number is a DENORM. must set the underflow exception bit
+fout_ext_denorm:
+ bset &unfl_bit,FPSR_EXCEPT(%a6) # set underflow exc bit
+
+ mov.b FPCR_ENABLE(%a6),%d0
+ andi.b &0x0a,%d0 # is UNFL or INEX enabled?
+ bne.b fout_ext_exc # yes
+ rts
+
+# we don't want to do the write if the exception occurred in supervisor mode
+# so _mem_write2() handles this for us.
+fout_ext_a7:
+ bsr.l _mem_write2 # write ext prec number to memory
+
+ tst.l %d1 # did dstore fail?
+ bne.w fout_ext_err # yes
+
+ tst.b STAG(%a6) # is operand normalized?
+ bne.b fout_ext_denorm # no
+ rts
+
+fout_ext_exc:
+ lea FP_SCR0(%a6),%a0
+ bsr.l norm # normalize the mantissa
+ neg.w %d0 # new exp = -(shft amt)
+ andi.w &0x7fff,%d0
+ andi.w &0x8000,FP_SCR0_EX(%a6) # keep only old sign
+ or.w %d0,FP_SCR0_EX(%a6) # insert new exponent
+ fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
+ rts
+
+fout_ext_err:
+ mov.l EXC_A6(%a6),(%a6) # fix stacked a6
+ bra.l facc_out_x
+
+#########################################################################
+# fmove.s out ###########################################################
+#########################################################################
+fout_sgl:
+ andi.b &0x30,%d0 # clear rnd prec
+ ori.b &s_mode*0x10,%d0 # insert sgl prec
+ mov.l %d0,L_SCR3(%a6) # save rnd prec,mode on stack
+
+#
+# operand is a normalized number. first, we check to see if the move out
+# would cause either an underflow or overflow. these cases are handled
+# separately. otherwise, set the FPCR to the proper rounding mode and
+# execute the move.
+#
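+# as a hedged C sketch of the test below (SGL_HI/SGL_LO are the largest
+# and smallest biased extended exponents that still fit single prec):
+#
+#	unsigned e = src_ex & 0x7fff;		/* strip sign	    */
+#	if (e > SGL_HI)  goto ovfl;		/* will overflow    */
+#	if (e == SGL_HI) goto may_ovfl;		/* rounding decides */
+#	if (e < SGL_LO)  goto unfl;		/* will underflow   */
+#	/* else in range: a plain "fmov.s" does it */
+#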
+ mov.w SRC_EX(%a0),%d0 # extract exponent
+ andi.w &0x7fff,%d0 # strip sign
+
+ cmpi.w %d0,&SGL_HI # will operand overflow?
+ bgt.w fout_sgl_ovfl # yes; go handle OVFL
+ beq.w fout_sgl_may_ovfl # maybe; go handle possible OVFL
+ cmpi.w %d0,&SGL_LO # will operand underflow?
+ blt.w fout_sgl_unfl # yes; go handle underflow
+
+#
+# NORMs(in range) can be stored out by a simple "fmov.s"
+# Unnormalized inputs can come through this point.
+#
+fout_sgl_exg:
+ fmovm.x SRC(%a0),&0x80 # fetch fop from stack
+
+ fmov.l L_SCR3(%a6),%fpcr # set FPCR
+ fmov.l &0x0,%fpsr # clear FPSR
+
+ fmov.s %fp0,%d0 # store does convert and round
+
+ fmov.l &0x0,%fpcr # clear FPCR
+ fmov.l %fpsr,%d1 # save FPSR
+
+ or.w %d1,2+USER_FPSR(%a6) # set possible inex2/ainex
+
+fout_sgl_exg_write:
+ mov.b 1+EXC_OPWORD(%a6),%d1 # extract dst mode
+ andi.b &0x38,%d1 # is mode == 0? (Dreg dst)
+ beq.b fout_sgl_exg_write_dn # must save to integer regfile
+
+ mov.l EXC_EA(%a6),%a0 # stacked <ea> is correct
+ bsr.l _dmem_write_long # write long
+
+ tst.l %d1 # did dstore fail?
+ bne.l facc_out_l # yes
+
+ rts
+
+fout_sgl_exg_write_dn:
+ mov.b 1+EXC_OPWORD(%a6),%d1 # extract Dn
+ andi.w &0x7,%d1
+ bsr.l store_dreg_l
+ rts
+
+#
+# here, we know that the operand would UNFL if moved out to single prec,
+# so, denorm and round and then use generic store single routine to
+# write the value to memory.
+#
+fout_sgl_unfl:
+ bset &unfl_bit,FPSR_EXCEPT(%a6) # set UNFL
+
+ mov.w SRC_EX(%a0),FP_SCR0_EX(%a6)
+ mov.l SRC_HI(%a0),FP_SCR0_HI(%a6)
+ mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
+ mov.l %a0,-(%sp)
+
+ clr.l %d0 # pass: S.F. = 0
+
+ cmpi.b STAG(%a6),&DENORM # fetch src optype tag
+ bne.b fout_sgl_unfl_cont # let DENORMs fall through
+
+ lea FP_SCR0(%a6),%a0
+ bsr.l norm # normalize the DENORM
+
+fout_sgl_unfl_cont:
+ lea FP_SCR0(%a6),%a0 # pass: ptr to operand
+ mov.l L_SCR3(%a6),%d1 # pass: rnd prec,mode
+ bsr.l unf_res # calc default underflow result
+
+ lea FP_SCR0(%a6),%a0 # pass: ptr to fop
+ bsr.l dst_sgl # convert to single prec
+
+ mov.b 1+EXC_OPWORD(%a6),%d1 # extract dst mode
+ andi.b &0x38,%d1 # is mode == 0? (Dreg dst)
+ beq.b fout_sgl_unfl_dn # must save to integer regfile
+
+ mov.l EXC_EA(%a6),%a0 # stacked <ea> is correct
+ bsr.l _dmem_write_long # write long
+
+ tst.l %d1 # did dstore fail?
+ bne.l facc_out_l # yes
+
+ bra.b fout_sgl_unfl_chkexc
+
+fout_sgl_unfl_dn:
+ mov.b 1+EXC_OPWORD(%a6),%d1 # extract Dn
+ andi.w &0x7,%d1
+ bsr.l store_dreg_l
+
+fout_sgl_unfl_chkexc:
+ mov.b FPCR_ENABLE(%a6),%d1
+ andi.b &0x0a,%d1 # is UNFL or INEX enabled?
+ bne.w fout_sd_exc_unfl # yes
+ addq.l &0x4,%sp
+ rts
+
+#
+# it's definitely an overflow so call ovf_res to get the correct answer
+#
+fout_sgl_ovfl:
+ tst.b 3+SRC_HI(%a0) # is result inexact?
+ bne.b fout_sgl_ovfl_inex2
+ tst.l SRC_LO(%a0) # is result inexact?
+ bne.b fout_sgl_ovfl_inex2
+ ori.w &ovfl_inx_mask,2+USER_FPSR(%a6) # set ovfl/aovfl/ainex
+ bra.b fout_sgl_ovfl_cont
+fout_sgl_ovfl_inex2:
+ ori.w &ovfinx_mask,2+USER_FPSR(%a6) # set ovfl/aovfl/ainex/inex2
+
+fout_sgl_ovfl_cont:
+ mov.l %a0,-(%sp)
+
+# call ovf_res() w/ sgl prec and the correct rnd mode to create the default
+# overflow result. DON'T save the returned ccodes from ovf_res() since
+# fmove out doesn't alter them.
+ tst.b SRC_EX(%a0) # is operand negative?
+ smi %d1 # set if so
+ mov.l L_SCR3(%a6),%d0 # pass: sgl prec,rnd mode
+ bsr.l ovf_res # calc OVFL result
+ fmovm.x (%a0),&0x80 # load default overflow result
+ fmov.s %fp0,%d0 # store to single
+
+ mov.b 1+EXC_OPWORD(%a6),%d1 # extract dst mode
+ andi.b &0x38,%d1 # is mode == 0? (Dreg dst)
+ beq.b fout_sgl_ovfl_dn # must save to integer regfile
+
+ mov.l EXC_EA(%a6),%a0 # stacked <ea> is correct
+ bsr.l _dmem_write_long # write long
+
+ tst.l %d1 # did dstore fail?
+ bne.l facc_out_l # yes
+
+ bra.b fout_sgl_ovfl_chkexc
+
+fout_sgl_ovfl_dn:
+ mov.b 1+EXC_OPWORD(%a6),%d1 # extract Dn
+ andi.w &0x7,%d1
+ bsr.l store_dreg_l
+
+fout_sgl_ovfl_chkexc:
+ mov.b FPCR_ENABLE(%a6),%d1
+ andi.b &0x0a,%d1 # is UNFL or INEX enabled?
+ bne.w fout_sd_exc_ovfl # yes
+ addq.l &0x4,%sp
+ rts
+
+#
+# move out MAY overflow:
+# (1) force the exp to 0x3fff
+# (2) do a move w/ appropriate rnd mode
+# (3) if exp still equals zero, then insert original exponent
+# for the correct result.
+# if exp now equals one, then it overflowed so call ovf_res.
+#
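+# a hedged C sketch of this probe (round_to() is hypothetical): with
+# the exponent forced to the bias, the rounded magnitude stays below
+# 2.0 unless rounding carried out of the mantissa:
+#
+#	x = src; x.exp = 0x3fff;		/* true exponent 0  */
+#	r = round_to(x, prec, mode);		/* the fmov.x below */
+#	if (fabsl(r) < 2.0)
+#		goto in_range;			/* finish as a NORM */
+#	goto overflow;				/* mantissa carried */
+#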
+fout_sgl_may_ovfl:
+ mov.w SRC_EX(%a0),%d1 # fetch current sign
+ andi.w &0x8000,%d1 # keep it,clear exp
+	ori.w		&0x3fff,%d1		# insert bias exp (true exp = 0)
+ mov.w %d1,FP_SCR0_EX(%a6) # insert scaled exp
+ mov.l SRC_HI(%a0),FP_SCR0_HI(%a6) # copy hi(man)
+ mov.l SRC_LO(%a0),FP_SCR0_LO(%a6) # copy lo(man)
+
+ fmov.l L_SCR3(%a6),%fpcr # set FPCR
+
+ fmov.x FP_SCR0(%a6),%fp0 # force fop to be rounded
+ fmov.l &0x0,%fpcr # clear FPCR
+
+ fabs.x %fp0 # need absolute value
+ fcmp.b %fp0,&0x2 # did exponent increase?
+ fblt.w fout_sgl_exg # no; go finish NORM
+ bra.w fout_sgl_ovfl # yes; go handle overflow
+
+################
+
+fout_sd_exc_unfl:
+ mov.l (%sp)+,%a0
+
+ mov.w SRC_EX(%a0),FP_SCR0_EX(%a6)
+ mov.l SRC_HI(%a0),FP_SCR0_HI(%a6)
+ mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
+
+ cmpi.b STAG(%a6),&DENORM # was src a DENORM?
+ bne.b fout_sd_exc_cont # no
+
+ lea FP_SCR0(%a6),%a0
+ bsr.l norm
+ neg.l %d0
+ andi.w &0x7fff,%d0
+ bfins %d0,FP_SCR0_EX(%a6){&1:&15}
+ bra.b fout_sd_exc_cont
+
+fout_sd_exc:
+fout_sd_exc_ovfl:
+ mov.l (%sp)+,%a0 # restore a0
+
+ mov.w SRC_EX(%a0),FP_SCR0_EX(%a6)
+ mov.l SRC_HI(%a0),FP_SCR0_HI(%a6)
+ mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
+
+fout_sd_exc_cont:
+ bclr &0x7,FP_SCR0_EX(%a6) # clear sign bit
+ sne.b 2+FP_SCR0_EX(%a6) # set internal sign bit
+ lea FP_SCR0(%a6),%a0 # pass: ptr to DENORM
+
+	mov.b		3+L_SCR3(%a6),%d1	# fetch rnd prec,mode byte
+	lsr.b		&0x4,%d1		# shift prec,mode to lo bits
+	andi.w		&0x0c,%d1		# extract rnd prec
+	swap		%d1			# put rnd prec in hi word
+	mov.b		3+L_SCR3(%a6),%d1	# refetch rnd prec,mode byte
+	lsr.b		&0x4,%d1		# shift prec,mode to lo bits
+	andi.w		&0x03,%d1		# extract rnd mode in lo word
+ clr.l %d0 # pass: zero g,r,s
+ bsr.l _round # round the DENORM
+
+ tst.b 2+FP_SCR0_EX(%a6) # is EXOP negative?
+ beq.b fout_sd_exc_done # no
+ bset &0x7,FP_SCR0_EX(%a6) # yes
+
+fout_sd_exc_done:
+ fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
+ rts
+
+#################################################################
+# fmove.d out ###################################################
+#################################################################
+fout_dbl:
+ andi.b &0x30,%d0 # clear rnd prec
+ ori.b &d_mode*0x10,%d0 # insert dbl prec
+ mov.l %d0,L_SCR3(%a6) # save rnd prec,mode on stack
+
+#
+# operand is a normalized number. first, we check to see if the move out
+# would cause either an underflow or overflow. these cases are handled
+# separately. otherwise, set the FPCR to the proper rounding mode and
+# execute the move.
+#
+ mov.w SRC_EX(%a0),%d0 # extract exponent
+ andi.w &0x7fff,%d0 # strip sign
+
+ cmpi.w %d0,&DBL_HI # will operand overflow?
+ bgt.w fout_dbl_ovfl # yes; go handle OVFL
+ beq.w fout_dbl_may_ovfl # maybe; go handle possible OVFL
+ cmpi.w %d0,&DBL_LO # will operand underflow?
+ blt.w fout_dbl_unfl # yes; go handle underflow
+
+#
+# NORMs(in range) can be stored out by a simple "fmov.d"
+# Unnormalized inputs can come through this point.
+#
+fout_dbl_exg:
+ fmovm.x SRC(%a0),&0x80 # fetch fop from stack
+
+ fmov.l L_SCR3(%a6),%fpcr # set FPCR
+ fmov.l &0x0,%fpsr # clear FPSR
+
+ fmov.d %fp0,L_SCR1(%a6) # store does convert and round
+
+ fmov.l &0x0,%fpcr # clear FPCR
+ fmov.l %fpsr,%d0 # save FPSR
+
+ or.w %d0,2+USER_FPSR(%a6) # set possible inex2/ainex
+
+ mov.l EXC_EA(%a6),%a1 # pass: dst addr
+ lea L_SCR1(%a6),%a0 # pass: src addr
+ movq.l &0x8,%d0 # pass: opsize is 8 bytes
+ bsr.l _dmem_write # store dbl fop to memory
+
+ tst.l %d1 # did dstore fail?
+ bne.l facc_out_d # yes
+
+ rts # no; so we're finished
+
+#
+# here, we know that the operand would UNFL if moved out to double prec,
+# so, denorm and round and then use generic store double routine to
+# write the value to memory.
+#
+fout_dbl_unfl:
+ bset &unfl_bit,FPSR_EXCEPT(%a6) # set UNFL
+
+ mov.w SRC_EX(%a0),FP_SCR0_EX(%a6)
+ mov.l SRC_HI(%a0),FP_SCR0_HI(%a6)
+ mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
+ mov.l %a0,-(%sp)
+
+ clr.l %d0 # pass: S.F. = 0
+
+ cmpi.b STAG(%a6),&DENORM # fetch src optype tag
+ bne.b fout_dbl_unfl_cont # let DENORMs fall through
+
+ lea FP_SCR0(%a6),%a0
+ bsr.l norm # normalize the DENORM
+
+fout_dbl_unfl_cont:
+ lea FP_SCR0(%a6),%a0 # pass: ptr to operand
+ mov.l L_SCR3(%a6),%d1 # pass: rnd prec,mode
+ bsr.l unf_res # calc default underflow result
+
+ lea FP_SCR0(%a6),%a0 # pass: ptr to fop
+	bsr.l		dst_dbl			# convert to dbl prec
+ mov.l %d0,L_SCR1(%a6)
+ mov.l %d1,L_SCR2(%a6)
+
+ mov.l EXC_EA(%a6),%a1 # pass: dst addr
+ lea L_SCR1(%a6),%a0 # pass: src addr
+ movq.l &0x8,%d0 # pass: opsize is 8 bytes
+ bsr.l _dmem_write # store dbl fop to memory
+
+ tst.l %d1 # did dstore fail?
+ bne.l facc_out_d # yes
+
+ mov.b FPCR_ENABLE(%a6),%d1
+ andi.b &0x0a,%d1 # is UNFL or INEX enabled?
+ bne.w fout_sd_exc_unfl # yes
+ addq.l &0x4,%sp
+ rts
+
+#
+# it's definitely an overflow so call ovf_res to get the correct answer
+#
+fout_dbl_ovfl:
+ mov.w 2+SRC_LO(%a0),%d0
+ andi.w &0x7ff,%d0
+ bne.b fout_dbl_ovfl_inex2
+
+ ori.w &ovfl_inx_mask,2+USER_FPSR(%a6) # set ovfl/aovfl/ainex
+ bra.b fout_dbl_ovfl_cont
+fout_dbl_ovfl_inex2:
+ ori.w &ovfinx_mask,2+USER_FPSR(%a6) # set ovfl/aovfl/ainex/inex2
+
+fout_dbl_ovfl_cont:
+ mov.l %a0,-(%sp)
+
+# call ovf_res() w/ dbl prec and the correct rnd mode to create the default
+# overflow result. DON'T save the returned ccodes from ovf_res() since
+# fmove out doesn't alter them.
+ tst.b SRC_EX(%a0) # is operand negative?
+ smi %d1 # set if so
+ mov.l L_SCR3(%a6),%d0 # pass: dbl prec,rnd mode
+ bsr.l ovf_res # calc OVFL result
+ fmovm.x (%a0),&0x80 # load default overflow result
+ fmov.d %fp0,L_SCR1(%a6) # store to double
+
+ mov.l EXC_EA(%a6),%a1 # pass: dst addr
+ lea L_SCR1(%a6),%a0 # pass: src addr
+ movq.l &0x8,%d0 # pass: opsize is 8 bytes
+ bsr.l _dmem_write # store dbl fop to memory
+
+ tst.l %d1 # did dstore fail?
+ bne.l facc_out_d # yes
+
+ mov.b FPCR_ENABLE(%a6),%d1
+ andi.b &0x0a,%d1 # is UNFL or INEX enabled?
+ bne.w fout_sd_exc_ovfl # yes
+ addq.l &0x4,%sp
+ rts
+
+#
+# move out MAY overflow:
+# (1) force the exp to 0x3fff
+# (2) do a move w/ appropriate rnd mode
+# (3) if exp still equals zero, then insert original exponent
+# for the correct result.
+# if exp now equals one, then it overflowed so call ovf_res.
+#
+fout_dbl_may_ovfl:
+ mov.w SRC_EX(%a0),%d1 # fetch current sign
+ andi.w &0x8000,%d1 # keep it,clear exp
+	ori.w		&0x3fff,%d1		# insert bias exp (true exp = 0)
+ mov.w %d1,FP_SCR0_EX(%a6) # insert scaled exp
+ mov.l SRC_HI(%a0),FP_SCR0_HI(%a6) # copy hi(man)
+ mov.l SRC_LO(%a0),FP_SCR0_LO(%a6) # copy lo(man)
+
+ fmov.l L_SCR3(%a6),%fpcr # set FPCR
+
+ fmov.x FP_SCR0(%a6),%fp0 # force fop to be rounded
+ fmov.l &0x0,%fpcr # clear FPCR
+
+ fabs.x %fp0 # need absolute value
+ fcmp.b %fp0,&0x2 # did exponent increase?
+ fblt.w fout_dbl_exg # no; go finish NORM
+ bra.w fout_dbl_ovfl # yes; go handle overflow
+
+#########################################################################
+# XDEF **************************************************************** #
+# dst_dbl(): create double precision value from extended prec. #
+# #
+# XREF **************************************************************** #
+# None #
+# #
+# INPUT *************************************************************** #
+# a0 = pointer to source operand in extended precision #
+# #
+# OUTPUT ************************************************************** #
+# d0 = hi(double precision result) #
+# d1 = lo(double precision result) #
+# #
+# ALGORITHM *********************************************************** #
+# #
+# Changes extended precision to double precision. #
+# Note: no attempt is made to round the extended value to double. #
+# dbl_sign = ext_sign #
+#	dbl_exp = ext_exp - $3fff(ext bias) + $3ff(dbl bias)		#
+# get rid of ext integer bit #
+#	dbl_mant = ext_mant{62:11}					#
+# #
+# --------------- --------------- --------------- #
+# extended -> |s| exp | |1| ms mant | | ls mant | #
+# --------------- --------------- --------------- #
+# 95 64 63 62 32 31 11 0 #
+# | | #
+# | | #
+# | | #
+# v v #
+# --------------- --------------- #
+# double -> |s|exp| mant | | mant | #
+# --------------- --------------- #
+# 63 51 32 31 0 #
+# #
+#########################################################################
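+
+# a hedged C equivalent (illustrative only; truncates like the code
+# below, no rounding, and omits the denorm "-1" exponent adjustment):
+#
+#	#include <stdint.h>
+#	uint64_t dst_dbl_bits(uint16_t ex, uint64_t man /* j-bit at 63 */)
+#	{
+#		uint64_t e = (uint64_t)((ex & 0x7fff) - 0x3fff + 0x3ff);
+#		uint64_t s = (uint64_t)(ex >> 15) << 63;
+#		uint64_t m = (man << 1) >> 12; /* drop j-bit, keep 52 bits */
+#		return s | (e << 52) | m;
+#	}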
+
+dst_dbl:
+ clr.l %d0 # clear d0
+ mov.w FTEMP_EX(%a0),%d0 # get exponent
+ subi.w &EXT_BIAS,%d0 # subtract extended precision bias
+ addi.w &DBL_BIAS,%d0 # add double precision bias
+ tst.b FTEMP_HI(%a0) # is number a denorm?
+ bmi.b dst_get_dupper # no
+ subq.w &0x1,%d0 # yes; denorm bias = DBL_BIAS - 1
+dst_get_dupper:
+ swap %d0 # d0 now in upper word
+ lsl.l &0x4,%d0 # d0 in proper place for dbl prec exp
+ tst.b FTEMP_EX(%a0) # test sign
+	bpl.b		dst_get_dman		# if positive, go process mantissa
+ bset &0x1f,%d0 # if negative, set sign
+dst_get_dman:
+ mov.l FTEMP_HI(%a0),%d1 # get ms mantissa
+ bfextu %d1{&1:&20},%d1 # get upper 20 bits of ms
+ or.l %d1,%d0 # put these bits in ms word of double
+ mov.l %d0,L_SCR1(%a6) # put the new exp back on the stack
+ mov.l FTEMP_HI(%a0),%d1 # get ms mantissa
+ mov.l &21,%d0 # load shift count
+ lsl.l %d0,%d1 # put lower 11 bits in upper bits
+ mov.l %d1,L_SCR2(%a6) # build lower lword in memory
+ mov.l FTEMP_LO(%a0),%d1 # get ls mantissa
+ bfextu %d1{&0:&21},%d0 # get ls 21 bits of double
+ mov.l L_SCR2(%a6),%d1
+ or.l %d0,%d1 # put them in double result
+ mov.l L_SCR1(%a6),%d0
+ rts
+
+#########################################################################
+# XDEF **************************************************************** #
+# dst_sgl(): create single precision value from extended prec #
+# #
+# XREF **************************************************************** #
+# #
+# INPUT *************************************************************** #
+# a0 = pointer to source operand in extended precision #
+# #
+# OUTPUT ************************************************************** #
+# d0 = single precision result #
+# #
+# ALGORITHM *********************************************************** #
+# #
+# Changes extended precision to single precision. #
+# sgl_sign = ext_sign #
+# sgl_exp = ext_exp - $3fff(ext bias) + $7f(sgl bias) #
+# get rid of ext integer bit #
+#	sgl_mant = ext_mant{62:40}					#
+# #
+# --------------- --------------- --------------- #
+# extended -> |s| exp | |1| ms mant | | ls mant | #
+# --------------- --------------- --------------- #
+# 95 64 63 62 40 32 31 12 0 #
+# | | #
+# | | #
+# | | #
+# v v #
+# --------------- #
+# single -> |s|exp| mant | #
+# --------------- #
+# 31 22 0 #
+# #
+#########################################################################
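+
+# a hedged C equivalent (same caveats as the dst_dbl sketch above):
+#
+#	uint32_t dst_sgl_bits(uint16_t ex, uint32_t man_hi /* j-bit at 31 */)
+#	{
+#		uint32_t e = (uint32_t)((ex & 0x7fff) - 0x3fff + 0x7f);
+#		uint32_t s = (uint32_t)(ex >> 15) << 31;
+#		return s | (e << 23) | ((man_hi >> 8) & 0x7fffff);
+#	}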
+
+dst_sgl:
+ clr.l %d0
+ mov.w FTEMP_EX(%a0),%d0 # get exponent
+ subi.w &EXT_BIAS,%d0 # subtract extended precision bias
+ addi.w &SGL_BIAS,%d0 # add single precision bias
+ tst.b FTEMP_HI(%a0) # is number a denorm?
+ bmi.b dst_get_supper # no
+ subq.w &0x1,%d0 # yes; denorm bias = SGL_BIAS - 1
+dst_get_supper:
+ swap %d0 # put exp in upper word of d0
+ lsl.l &0x7,%d0 # shift it into single exp bits
+ tst.b FTEMP_EX(%a0) # test sign
+ bpl.b dst_get_sman # if positive, continue
+ bset &0x1f,%d0 # if negative, put in sign first
+dst_get_sman:
+ mov.l FTEMP_HI(%a0),%d1 # get ms mantissa
+ andi.l &0x7fffff00,%d1 # get upper 23 bits of ms
+ lsr.l &0x8,%d1 # and put them flush right
+ or.l %d1,%d0 # put these bits in ms word of single
+ rts
+
+##############################################################################
+fout_pack:
+ bsr.l _calc_ea_fout # fetch the <ea>
+ mov.l %a0,-(%sp)
+
+ mov.b STAG(%a6),%d0 # fetch input type
+ bne.w fout_pack_not_norm # input is not NORM
+
+fout_pack_norm:
+ btst &0x4,EXC_CMDREG(%a6) # static or dynamic?
+ beq.b fout_pack_s # static
+
+fout_pack_d:
+ mov.b 1+EXC_CMDREG(%a6),%d1 # fetch dynamic reg
+ lsr.b &0x4,%d1
+ andi.w &0x7,%d1
+
+ bsr.l fetch_dreg # fetch Dn w/ k-factor
+
+ bra.b fout_pack_type
+fout_pack_s:
+ mov.b 1+EXC_CMDREG(%a6),%d0 # fetch static field
+
+fout_pack_type:
+ bfexts %d0{&25:&7},%d0 # extract k-factor
+ mov.l %d0,-(%sp)
+
+ lea FP_SRC(%a6),%a0 # pass: ptr to input
+
+# bindec is currently scrambling FP_SRC for denorm inputs.
+# we'll have to change this, but for now, tough luck!!!
+ bsr.l bindec # convert xprec to packed
+
+# andi.l &0xcfff000f,FP_SCR0(%a6) # clear unused fields
+ andi.l &0xcffff00f,FP_SCR0(%a6) # clear unused fields
+
+ mov.l (%sp)+,%d0
+
+ tst.b 3+FP_SCR0_EX(%a6)
+ bne.b fout_pack_set
+ tst.l FP_SCR0_HI(%a6)
+ bne.b fout_pack_set
+ tst.l FP_SCR0_LO(%a6)
+ bne.b fout_pack_set
+
+# add the extra condition that only if the k-factor was zero, too, should
+# we zero the exponent
+ tst.l %d0
+ bne.b fout_pack_set
+# "mantissa" is all zero which means that the answer is zero. but, the '040
+# algorithm allows the exponent to be non-zero. the 881/2 do not. therefore,
+# if the mantissa is zero, I will zero the exponent, too.
+# the question now is whether the exponent's sign bit is allowed to be non-zero
+# for a zero, also...
+ andi.w &0xf000,FP_SCR0(%a6)
+
+fout_pack_set:
+
+ lea FP_SCR0(%a6),%a0 # pass: src addr
+
+fout_pack_write:
+ mov.l (%sp)+,%a1 # pass: dst addr
+ mov.l &0xc,%d0 # pass: opsize is 12 bytes
+
+ cmpi.b SPCOND_FLG(%a6),&mda7_flg
+ beq.b fout_pack_a7
+
+ bsr.l _dmem_write # write ext prec number to memory
+
+ tst.l %d1 # did dstore fail?
+ bne.w fout_ext_err # yes
+
+ rts
+
+# we don't want to do the write if the exception occurred in supervisor mode
+# so _mem_write2() handles this for us.
+fout_pack_a7:
+ bsr.l _mem_write2 # write ext prec number to memory
+
+ tst.l %d1 # did dstore fail?
+ bne.w fout_ext_err # yes
+
+ rts
+
+fout_pack_not_norm:
+ cmpi.b %d0,&DENORM # is it a DENORM?
+ beq.w fout_pack_norm # yes
+ lea FP_SRC(%a6),%a0
+ clr.w 2+FP_SRC_EX(%a6)
+ cmpi.b %d0,&SNAN # is it an SNAN?
+ beq.b fout_pack_snan # yes
+ bra.b fout_pack_write # no
+
+fout_pack_snan:
+ ori.w &snaniop2_mask,FPSR_EXCEPT(%a6) # set SNAN/AIOP
+ bset &0x6,FP_SRC_HI(%a6) # set snan bit
+ bra.b fout_pack_write
+
+#########################################################################
+# XDEF **************************************************************** #
+# fmul(): emulates the fmul instruction #
+# fsmul(): emulates the fsmul instruction #
+# fdmul(): emulates the fdmul instruction #
+# #
+# XREF **************************************************************** #
+# scale_to_zero_src() - scale src exponent to zero #
+# scale_to_zero_dst() - scale dst exponent to zero #
+# unf_res() - return default underflow result #
+# ovf_res() - return default overflow result #
+# res_qnan() - return QNAN result #
+# res_snan() - return SNAN result #
+# #
+# INPUT *************************************************************** #
+# a0 = pointer to extended precision source operand #
+# a1 = pointer to extended precision destination operand #
+# d0 rnd prec,mode #
+# #
+# OUTPUT ************************************************************** #
+# fp0 = result #
+# fp1 = EXOP (if exception occurred) #
+# #
+# ALGORITHM *********************************************************** #
+# Handle NANs, infinities, and zeroes as special cases. Divide #
+# norms/denorms into ext/sgl/dbl precision. #
+# For norms/denorms, scale the exponents such that a multiply #
+# instruction won't cause an exception. Use the regular fmul to #
+# compute a result. Check if the regular operands would have taken #
+# an exception. If so, return the default overflow/underflow result #
+# and return the EXOP if exceptions are enabled. Else, scale the #
+# result operand to the proper exponent. #
+# #
+#########################################################################
+
+ align 0x10
+tbl_fmul_ovfl:
+ long 0x3fff - 0x7ffe # ext_max
+ long 0x3fff - 0x407e # sgl_max
+ long 0x3fff - 0x43fe # dbl_max
+tbl_fmul_unfl:
+ long 0x3fff + 0x0001 # ext_unfl
+ long 0x3fff - 0x3f80 # sgl_unfl
+ long 0x3fff - 0x3c00 # dbl_unfl
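+
+# a hedged C model of the pre-check below: scale_to_zero_*() forces
+# each biased exponent to 0x3fff (true exp 0) and returns the shift,
+# so the unscaled result's biased exponent is roughly 0x3fff - SF and
+# SF alone predicts the outcome against the thresholds above:
+#
+#	long sf = scale1 + scale2;		      /* SCALE_FACTOR  */
+#	if (sf <  tbl_fmul_ovfl[prec]) goto ovfl;     /* must ovfl     */
+#	if (sf == tbl_fmul_ovfl[prec]) goto may_ovfl; /* rnd decides   */
+#	if (sf >  tbl_fmul_unfl[prec]) goto unfl;     /* must unfl     */
+#	if (sf == tbl_fmul_unfl[prec]) goto may_unfl; /* rnd decides   */
+#	goto normal;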
+
+ global fsmul
+fsmul:
+ andi.b &0x30,%d0 # clear rnd prec
+ ori.b &s_mode*0x10,%d0 # insert sgl prec
+ bra.b fmul
+
+ global fdmul
+fdmul:
+ andi.b &0x30,%d0
+ ori.b &d_mode*0x10,%d0 # insert dbl prec
+
+ global fmul
+fmul:
+ mov.l %d0,L_SCR3(%a6) # store rnd info
+
+ clr.w %d1
+ mov.b DTAG(%a6),%d1
+ lsl.b &0x3,%d1
+ or.b STAG(%a6),%d1 # combine src tags
+ bne.w fmul_not_norm # optimize on non-norm input
+
+fmul_norm:
+ mov.w DST_EX(%a1),FP_SCR1_EX(%a6)
+ mov.l DST_HI(%a1),FP_SCR1_HI(%a6)
+ mov.l DST_LO(%a1),FP_SCR1_LO(%a6)
+
+ mov.w SRC_EX(%a0),FP_SCR0_EX(%a6)
+ mov.l SRC_HI(%a0),FP_SCR0_HI(%a6)
+ mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
+
+ bsr.l scale_to_zero_src # scale src exponent
+ mov.l %d0,-(%sp) # save scale factor 1
+
+ bsr.l scale_to_zero_dst # scale dst exponent
+
+ add.l %d0,(%sp) # SCALE_FACTOR = scale1 + scale2
+
+ mov.w 2+L_SCR3(%a6),%d1 # fetch precision
+ lsr.b &0x6,%d1 # shift to lo bits
+ mov.l (%sp)+,%d0 # load S.F.
+ cmp.l %d0,(tbl_fmul_ovfl.w,%pc,%d1.w*4) # would result ovfl?
+ beq.w fmul_may_ovfl # result may rnd to overflow
+ blt.w fmul_ovfl # result will overflow
+
+ cmp.l %d0,(tbl_fmul_unfl.w,%pc,%d1.w*4) # would result unfl?
+	beq.w		fmul_may_unfl		# result may underflow
+ bgt.w fmul_unfl # result will underflow
+
+#
+# NORMAL:
+# - the result of the multiply operation will neither overflow nor underflow.
+# - do the multiply to the proper precision and rounding mode.
+# - scale the result exponent using the scale factor. if both operands were
+# normalized then we really don't need to go through this scaling. but for now,
+# this will do.
+#
+fmul_normal:
+ fmovm.x FP_SCR1(%a6),&0x80 # load dst operand
+
+ fmov.l L_SCR3(%a6),%fpcr # set FPCR
+ fmov.l &0x0,%fpsr # clear FPSR
+
+ fmul.x FP_SCR0(%a6),%fp0 # execute multiply
+
+ fmov.l %fpsr,%d1 # save status
+ fmov.l &0x0,%fpcr # clear FPCR
+
+ or.l %d1,USER_FPSR(%a6) # save INEX2,N
+
+fmul_normal_exit:
+ fmovm.x &0x80,FP_SCR0(%a6) # store out result
+ mov.l %d2,-(%sp) # save d2
+ mov.w FP_SCR0_EX(%a6),%d1 # load {sgn,exp}
+ mov.l %d1,%d2 # make a copy
+ andi.l &0x7fff,%d1 # strip sign
+ andi.w &0x8000,%d2 # keep old sign
+ sub.l %d0,%d1 # add scale factor
+ or.w %d2,%d1 # concat old sign,new exp
+ mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent
+ mov.l (%sp)+,%d2 # restore d2
+ fmovm.x FP_SCR0(%a6),&0x80 # return default result in fp0
+ rts
+
+#
+# OVERFLOW:
+# - the result of the multiply operation is an overflow.
+# - do the multiply to the proper precision and rounding mode in order to
+# set the inexact bits.
+# - calculate the default result and return it in fp0.
+# - if overflow or inexact is enabled, we need a multiply result rounded to
+# extended precision. if the original operation was extended, then we have this
+# result. if the original operation was single or double, we have to do another
+# multiply using extended precision and the correct rounding mode. the result
+# of this operation then has its exponent scaled by -0x6000 to create the
+# exceptional operand.
+#
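+# a hedged sketch of the EXOP exponent fix-up done below: undo the
+# operand scaling, then bias by -0x6000 so the exceptional operand's
+# exponent lands back in extended range:
+#
+#	exop_exp = ((res_exp & 0x7fff) - sf - 0x6000) & 0x7fff;
+#	exop_ex  = old_sign | exop_exp;
+#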
+fmul_ovfl:
+ fmovm.x FP_SCR1(%a6),&0x80 # load dst operand
+
+ fmov.l L_SCR3(%a6),%fpcr # set FPCR
+ fmov.l &0x0,%fpsr # clear FPSR
+
+ fmul.x FP_SCR0(%a6),%fp0 # execute multiply
+
+ fmov.l %fpsr,%d1 # save status
+ fmov.l &0x0,%fpcr # clear FPCR
+
+ or.l %d1,USER_FPSR(%a6) # save INEX2,N
+
+# save setting this until now because this is where fmul_may_ovfl may jump in
+fmul_ovfl_tst:
+ or.l &ovfl_inx_mask,USER_FPSR(%a6) # set ovfl/aovfl/ainex
+
+ mov.b FPCR_ENABLE(%a6),%d1
+ andi.b &0x13,%d1 # is OVFL or INEX enabled?
+ bne.b fmul_ovfl_ena # yes
+
+# calculate the default result
+fmul_ovfl_dis:
+ btst &neg_bit,FPSR_CC(%a6) # is result negative?
+ sne %d1 # set sign param accordingly
+ mov.l L_SCR3(%a6),%d0 # pass rnd prec,mode
+ bsr.l ovf_res # calculate default result
+ or.b %d0,FPSR_CC(%a6) # set INF,N if applicable
+ fmovm.x (%a0),&0x80 # return default result in fp0
+ rts
+
+#
+# OVFL is enabled; Create EXOP:
+# - if precision is extended, then we have the EXOP. simply bias the exponent
+# with an extra -0x6000. if the precision is single or double, we need to
+# calculate a result rounded to extended precision.
+#
+fmul_ovfl_ena:
+ mov.l L_SCR3(%a6),%d1
+ andi.b &0xc0,%d1 # test the rnd prec
+ bne.b fmul_ovfl_ena_sd # it's sgl or dbl
+
+fmul_ovfl_ena_cont:
+ fmovm.x &0x80,FP_SCR0(%a6) # move result to stack
+
+ mov.l %d2,-(%sp) # save d2
+ mov.w FP_SCR0_EX(%a6),%d1 # fetch {sgn,exp}
+ mov.w %d1,%d2 # make a copy
+ andi.l &0x7fff,%d1 # strip sign
+ sub.l %d0,%d1 # add scale factor
+ subi.l &0x6000,%d1 # subtract bias
+ andi.w &0x7fff,%d1 # clear sign bit
+ andi.w &0x8000,%d2 # keep old sign
+ or.w %d2,%d1 # concat old sign,new exp
+ mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent
+ mov.l (%sp)+,%d2 # restore d2
+ fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
+ bra.b fmul_ovfl_dis
+
+fmul_ovfl_ena_sd:
+ fmovm.x FP_SCR1(%a6),&0x80 # load dst operand
+
+ mov.l L_SCR3(%a6),%d1
+ andi.b &0x30,%d1 # keep rnd mode only
+ fmov.l %d1,%fpcr # set FPCR
+
+ fmul.x FP_SCR0(%a6),%fp0 # execute multiply
+
+ fmov.l &0x0,%fpcr # clear FPCR
+ bra.b fmul_ovfl_ena_cont
+
+#
+# may OVERFLOW:
+# - the result of the multiply operation MAY overflow.
+# - do the multiply to the proper precision and rounding mode in order to
+# set the inexact bits.
+# - calculate the default result and return it in fp0.
+#
+fmul_may_ovfl:
+ fmovm.x FP_SCR1(%a6),&0x80 # load dst op
+
+ fmov.l L_SCR3(%a6),%fpcr # set FPCR
+ fmov.l &0x0,%fpsr # clear FPSR
+
+ fmul.x FP_SCR0(%a6),%fp0 # execute multiply
+
+ fmov.l %fpsr,%d1 # save status
+ fmov.l &0x0,%fpcr # clear FPCR
+
+ or.l %d1,USER_FPSR(%a6) # save INEX2,N
+
+ fabs.x %fp0,%fp1 # make a copy of result
+ fcmp.b %fp1,&0x2 # is |result| >= 2.b?
+ fbge.w fmul_ovfl_tst # yes; overflow has occurred
+
+# no, it didn't overflow; we have correct result
+ bra.w fmul_normal_exit
+
+#
+# UNDERFLOW:
+# - the result of the multiply operation is an underflow.
+# - do the multiply to the proper precision and rounding mode in order to
+# set the inexact bits.
+# - calculate the default result and return it in fp0.
+# - if overflow or inexact is enabled, we need a multiply result rounded to
+# extended precision. if the original operation was extended, then we have this
+# result. if the original operation was single or double, we have to do another
+# multiply using extended precision and the correct rounding mode. the result
+# of this operation then has its exponent scaled by -0x6000 to create the
+# exceptional operand.
+#
+fmul_unfl:
+ bset &unfl_bit,FPSR_EXCEPT(%a6) # set unfl exc bit
+
+# for fun, let's use only extended precision, round to zero. then, let
+# the unf_res() routine figure out all the rest.
+# that way we will get the correct answer.
+ fmovm.x FP_SCR1(%a6),&0x80 # load dst operand
+
+ fmov.l &rz_mode*0x10,%fpcr # set FPCR
+ fmov.l &0x0,%fpsr # clear FPSR
+
+ fmul.x FP_SCR0(%a6),%fp0 # execute multiply
+
+ fmov.l %fpsr,%d1 # save status
+ fmov.l &0x0,%fpcr # clear FPCR
+
+ or.l %d1,USER_FPSR(%a6) # save INEX2,N
+
+ mov.b FPCR_ENABLE(%a6),%d1
+ andi.b &0x0b,%d1 # is UNFL or INEX enabled?
+ bne.b fmul_unfl_ena # yes
+
+fmul_unfl_dis:
+ fmovm.x &0x80,FP_SCR0(%a6) # store out result
+
+ lea FP_SCR0(%a6),%a0 # pass: result addr
+ mov.l L_SCR3(%a6),%d1 # pass: rnd prec,mode
+ bsr.l unf_res # calculate default result
+	or.b		%d0,FPSR_CC(%a6)	# unf_res may have set 'Z'
+ fmovm.x FP_SCR0(%a6),&0x80 # return default result in fp0
+ rts
+
+#
+# UNFL is enabled.
+#
+fmul_unfl_ena:
+ fmovm.x FP_SCR1(%a6),&0x40 # load dst op
+
+ mov.l L_SCR3(%a6),%d1
+ andi.b &0xc0,%d1 # is precision extended?
+ bne.b fmul_unfl_ena_sd # no, sgl or dbl
+
+# if the rnd mode is anything but RZ, then we have to re-do the above
+# multiplication because we used RZ for all.
+ fmov.l L_SCR3(%a6),%fpcr # set FPCR
+
+fmul_unfl_ena_cont:
+ fmov.l &0x0,%fpsr # clear FPSR
+
+ fmul.x FP_SCR0(%a6),%fp1 # execute multiply
+
+ fmov.l &0x0,%fpcr # clear FPCR
+
+ fmovm.x &0x40,FP_SCR0(%a6) # save result to stack
+ mov.l %d2,-(%sp) # save d2
+ mov.w FP_SCR0_EX(%a6),%d1 # fetch {sgn,exp}
+ mov.l %d1,%d2 # make a copy
+ andi.l &0x7fff,%d1 # strip sign
+ andi.w &0x8000,%d2 # keep old sign
+ sub.l %d0,%d1 # add scale factor
+ addi.l &0x6000,%d1 # add bias
+ andi.w &0x7fff,%d1
+ or.w %d2,%d1 # concat old sign,new exp
+ mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent
+ mov.l (%sp)+,%d2 # restore d2
+ fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
+ bra.w fmul_unfl_dis
+
+fmul_unfl_ena_sd:
+ mov.l L_SCR3(%a6),%d1
+ andi.b &0x30,%d1 # use only rnd mode
+ fmov.l %d1,%fpcr # set FPCR
+
+ bra.b fmul_unfl_ena_cont
+
+# MAY UNDERFLOW:
+# -use the correct rounding mode and precision. this code favors operations
+# that do not underflow.
+fmul_may_unfl:
+ fmovm.x FP_SCR1(%a6),&0x80 # load dst operand
+
+ fmov.l L_SCR3(%a6),%fpcr # set FPCR
+ fmov.l &0x0,%fpsr # clear FPSR
+
+ fmul.x FP_SCR0(%a6),%fp0 # execute multiply
+
+ fmov.l %fpsr,%d1 # save status
+ fmov.l &0x0,%fpcr # clear FPCR
+
+ or.l %d1,USER_FPSR(%a6) # save INEX2,N
+
+ fabs.x %fp0,%fp1 # make a copy of result
+ fcmp.b %fp1,&0x2 # is |result| > 2.b?
+ fbgt.w fmul_normal_exit # no; no underflow occurred
+ fblt.w fmul_unfl # yes; underflow occurred
+
+#
+# we still don't know if underflow occurred. result is ~ equal to 2. but,
+# we don't know if the result was an underflow that rounded up to a 2 or
+# a normalized number that rounded down to a 2. so, redo the entire operation
+# using RZ as the rounding mode to see what the pre-rounded result is.
+# this case should be relatively rare.
+#
+ fmovm.x FP_SCR1(%a6),&0x40 # load dst operand
+
+ mov.l L_SCR3(%a6),%d1
+ andi.b &0xc0,%d1 # keep rnd prec
+ ori.b &rz_mode*0x10,%d1 # insert RZ
+
+ fmov.l %d1,%fpcr # set FPCR
+ fmov.l &0x0,%fpsr # clear FPSR
+
+ fmul.x FP_SCR0(%a6),%fp1 # execute multiply
+
+ fmov.l &0x0,%fpcr # clear FPCR
+ fabs.x %fp1 # make absolute value
+ fcmp.b %fp1,&0x2 # is |result| < 2.b?
+ fbge.w fmul_normal_exit # no; no underflow occurred
+ bra.w fmul_unfl # yes, underflow occurred
+
+################################################################################
+
+#
+# Multiply: inputs are not both normalized; what are they?
+#
+fmul_not_norm:
+ mov.w (tbl_fmul_op.b,%pc,%d1.w*2),%d1
+ jmp (tbl_fmul_op.b,%pc,%d1.w)
+
+ swbeg &48
+tbl_fmul_op:
+ short fmul_norm - tbl_fmul_op # NORM x NORM
+ short fmul_zero - tbl_fmul_op # NORM x ZERO
+ short fmul_inf_src - tbl_fmul_op # NORM x INF
+ short fmul_res_qnan - tbl_fmul_op # NORM x QNAN
+ short fmul_norm - tbl_fmul_op # NORM x DENORM
+ short fmul_res_snan - tbl_fmul_op # NORM x SNAN
+ short tbl_fmul_op - tbl_fmul_op #
+ short tbl_fmul_op - tbl_fmul_op #
+
+ short fmul_zero - tbl_fmul_op # ZERO x NORM
+ short fmul_zero - tbl_fmul_op # ZERO x ZERO
+ short fmul_res_operr - tbl_fmul_op # ZERO x INF
+ short fmul_res_qnan - tbl_fmul_op # ZERO x QNAN
+ short fmul_zero - tbl_fmul_op # ZERO x DENORM
+ short fmul_res_snan - tbl_fmul_op # ZERO x SNAN
+ short tbl_fmul_op - tbl_fmul_op #
+ short tbl_fmul_op - tbl_fmul_op #
+
+ short fmul_inf_dst - tbl_fmul_op # INF x NORM
+ short fmul_res_operr - tbl_fmul_op # INF x ZERO
+ short fmul_inf_dst - tbl_fmul_op # INF x INF
+ short fmul_res_qnan - tbl_fmul_op # INF x QNAN
+ short fmul_inf_dst - tbl_fmul_op # INF x DENORM
+ short fmul_res_snan - tbl_fmul_op # INF x SNAN
+ short tbl_fmul_op - tbl_fmul_op #
+ short tbl_fmul_op - tbl_fmul_op #
+
+ short fmul_res_qnan - tbl_fmul_op # QNAN x NORM
+ short fmul_res_qnan - tbl_fmul_op # QNAN x ZERO
+ short fmul_res_qnan - tbl_fmul_op # QNAN x INF
+ short fmul_res_qnan - tbl_fmul_op # QNAN x QNAN
+ short fmul_res_qnan - tbl_fmul_op # QNAN x DENORM
+ short fmul_res_snan - tbl_fmul_op # QNAN x SNAN
+ short tbl_fmul_op - tbl_fmul_op #
+ short tbl_fmul_op - tbl_fmul_op #
+
+ short fmul_norm - tbl_fmul_op # NORM x NORM
+ short fmul_zero - tbl_fmul_op # NORM x ZERO
+ short fmul_inf_src - tbl_fmul_op # NORM x INF
+ short fmul_res_qnan - tbl_fmul_op # NORM x QNAN
+ short fmul_norm - tbl_fmul_op # NORM x DENORM
+ short fmul_res_snan - tbl_fmul_op # NORM x SNAN
+ short tbl_fmul_op - tbl_fmul_op #
+ short tbl_fmul_op - tbl_fmul_op #
+
+ short fmul_res_snan - tbl_fmul_op # SNAN x NORM
+ short fmul_res_snan - tbl_fmul_op # SNAN x ZERO
+ short fmul_res_snan - tbl_fmul_op # SNAN x INF
+ short fmul_res_snan - tbl_fmul_op # SNAN x QNAN
+ short fmul_res_snan - tbl_fmul_op # SNAN x DENORM
+ short fmul_res_snan - tbl_fmul_op # SNAN x SNAN
+ short tbl_fmul_op - tbl_fmul_op #
+ short tbl_fmul_op - tbl_fmul_op #
+
+fmul_res_operr:
+ bra.l res_operr
+fmul_res_snan:
+ bra.l res_snan
+fmul_res_qnan:
+ bra.l res_qnan
+
+#
+# Multiply: (Zero x Zero) || (Zero x norm) || (Zero x denorm)
+#
+ global fmul_zero # global for fsglmul
+fmul_zero:
+ mov.b SRC_EX(%a0),%d0 # exclusive or the signs
+ mov.b DST_EX(%a1),%d1
+ eor.b %d0,%d1
+ bpl.b fmul_zero_p # result ZERO is pos.
+fmul_zero_n:
+ fmov.s &0x80000000,%fp0 # load -ZERO
+ mov.b &z_bmask+neg_bmask,FPSR_CC(%a6) # set Z/N
+ rts
+fmul_zero_p:
+ fmov.s &0x00000000,%fp0 # load +ZERO
+ mov.b &z_bmask,FPSR_CC(%a6) # set Z
+ rts
+
+#
+# Multiply: (inf x inf) || (inf x norm) || (inf x denorm)
+#
+# Note: The j-bit for an infinity is a don't-care. However, to be
+# strictly compatible w/ the 68881/882, we make sure to return an
+# INF w/ the j-bit set if the input INF j-bit was set. Destination
+# INFs take priority.
+#
+ global fmul_inf_dst # global for fsglmul
+fmul_inf_dst:
+ fmovm.x DST(%a1),&0x80 # return INF result in fp0
+ mov.b SRC_EX(%a0),%d0 # exclusive or the signs
+ mov.b DST_EX(%a1),%d1
+ eor.b %d0,%d1
+ bpl.b fmul_inf_dst_p # result INF is pos.
+fmul_inf_dst_n:
+ fabs.x %fp0 # clear result sign
+ fneg.x %fp0 # set result sign
+ mov.b &inf_bmask+neg_bmask,FPSR_CC(%a6) # set INF/N
+ rts
+fmul_inf_dst_p:
+ fabs.x %fp0 # clear result sign
+ mov.b &inf_bmask,FPSR_CC(%a6) # set INF
+ rts
+
+ global fmul_inf_src # global for fsglmul
+fmul_inf_src:
+ fmovm.x SRC(%a0),&0x80 # return INF result in fp0
+ mov.b SRC_EX(%a0),%d0 # exclusive or the signs
+ mov.b DST_EX(%a1),%d1
+ eor.b %d0,%d1
+ bpl.b fmul_inf_dst_p # result INF is pos.
+ bra.b fmul_inf_dst_n
+
+#########################################################################
+# XDEF **************************************************************** #
+# fin(): emulates the fmove instruction #
+# fsin(): emulates the fsmove instruction #
+# fdin(): emulates the fdmove instruction #
+# #
+# XREF **************************************************************** #
+# norm() - normalize mantissa for EXOP on denorm #
+# scale_to_zero_src() - scale src exponent to zero #
+# ovf_res() - return default overflow result #
+# unf_res() - return default underflow result #
+# res_qnan_1op() - return QNAN result #
+# res_snan_1op() - return SNAN result #
+# #
+# INPUT *************************************************************** #
+# a0 = pointer to extended precision source operand #
+# d0 = round prec/mode #
+# #
+# OUTPUT ************************************************************** #
+# fp0 = result #
+# fp1 = EXOP (if exception occurred) #
+# #
+# ALGORITHM *********************************************************** #
+# Handle NANs, infinities, and zeroes as special cases. Divide #
+# norms into extended, single, and double precision. #
+# Norms can be emulated w/ a regular fmove instruction. For #
+# sgl/dbl, must scale exponent and perform an "fmove". Check to see #
+# if the result would have overflowed/underflowed. If so, use unf_res() #
+# or ovf_res() to return the default result. Also return EXOP if #
+# exception is enabled. If no exception, return the default result. #
+# Unnorms don't pass through here. #
+# #
+#########################################################################
+
+ global fsin
+fsin:
+ andi.b &0x30,%d0 # clear rnd prec
+ ori.b &s_mode*0x10,%d0 # insert sgl precision
+ bra.b fin
+
+ global fdin
+fdin:
+ andi.b &0x30,%d0 # clear rnd prec
+ ori.b &d_mode*0x10,%d0 # insert dbl precision
+
+ global fin
+fin:
+ mov.l %d0,L_SCR3(%a6) # store rnd info
+
+ mov.b STAG(%a6),%d1 # fetch src optype tag
+ bne.w fin_not_norm # optimize on non-norm input
+
+#
+# FP MOVE IN: NORMs and DENORMs ONLY!
+#
+fin_norm:
+ andi.b &0xc0,%d0 # is precision extended?
+ bne.w fin_not_ext # no, so go handle dbl or sgl
+
+#
+# precision selected is extended. so...we cannot get an underflow
+# or overflow because of rounding to the correct precision. so...
+# skip the scaling and unscaling...
+#
+ tst.b SRC_EX(%a0) # is the operand negative?
+ bpl.b fin_norm_done # no
+ bset &neg_bit,FPSR_CC(%a6) # yes, so set 'N' ccode bit
+fin_norm_done:
+ fmovm.x SRC(%a0),&0x80 # return result in fp0
+ rts
+
+#
+# for an extended precision DENORM, the UNFL exception bit is set
+# the accrued bit is NOT set in this instance(no inexactness!)
+#
+fin_denorm:
+ andi.b &0xc0,%d0 # is precision extended?
+ bne.w fin_not_ext # no, so go handle dbl or sgl
+
+ bset &unfl_bit,FPSR_EXCEPT(%a6) # set unfl exc bit
+ tst.b SRC_EX(%a0) # is the operand negative?
+ bpl.b fin_denorm_done # no
+ bset &neg_bit,FPSR_CC(%a6) # yes, so set 'N' ccode bit
+fin_denorm_done:
+ fmovm.x SRC(%a0),&0x80 # return result in fp0
+ btst &unfl_bit,FPCR_ENABLE(%a6) # is UNFL enabled?
+ bne.b fin_denorm_unfl_ena # yes
+ rts
+
+#
+# the input is an extended DENORM and underflow is enabled in the FPCR.
+# normalize the mantissa and add the bias of 0x6000 to the resulting negative
+# exponent and insert back into the operand.
+#
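+# a hedged sketch of that fix-up (norm() returns the shift count):
+#
+#	exop_ex = ((0x6000 - shift) & 0x7fff) | (old_ex & 0x8000);
+#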
+fin_denorm_unfl_ena:
+ mov.w SRC_EX(%a0),FP_SCR0_EX(%a6)
+ mov.l SRC_HI(%a0),FP_SCR0_HI(%a6)
+ mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
+ lea FP_SCR0(%a6),%a0 # pass: ptr to operand
+ bsr.l norm # normalize result
+ neg.w %d0 # new exponent = -(shft val)
+ addi.w &0x6000,%d0 # add new bias to exponent
+ mov.w FP_SCR0_EX(%a6),%d1 # fetch old sign,exp
+ andi.w &0x8000,%d1 # keep old sign
+ andi.w &0x7fff,%d0 # clear sign position
+	or.w		%d1,%d0			# concat new exp,old sign
+ mov.w %d0,FP_SCR0_EX(%a6) # insert new exponent
+ fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
+ rts
+
+#
+# operand is to be rounded to single or double precision
+#
+fin_not_ext:
+ cmpi.b %d0,&s_mode*0x10 # separate sgl/dbl prec
+ bne.b fin_dbl
+
+#
+# operand is to be rounded to single precision
+#
+fin_sgl:
+ mov.w SRC_EX(%a0),FP_SCR0_EX(%a6)
+ mov.l SRC_HI(%a0),FP_SCR0_HI(%a6)
+ mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
+ bsr.l scale_to_zero_src # calculate scale factor
+
+ cmpi.l %d0,&0x3fff-0x3f80 # will move in underflow?
+ bge.w fin_sd_unfl # yes; go handle underflow
+ cmpi.l %d0,&0x3fff-0x407e # will move in overflow?
+ beq.w fin_sd_may_ovfl # maybe; go check
+ blt.w fin_sd_ovfl # yes; go handle overflow
+
+#
+# operand will NOT overflow or underflow when moved into the fp reg file
+#
+fin_sd_normal:
+ fmov.l &0x0,%fpsr # clear FPSR
+ fmov.l L_SCR3(%a6),%fpcr # set FPCR
+
+ fmov.x FP_SCR0(%a6),%fp0 # perform move
+
+ fmov.l %fpsr,%d1 # save FPSR
+ fmov.l &0x0,%fpcr # clear FPCR
+
+ or.l %d1,USER_FPSR(%a6) # save INEX2,N
+
+fin_sd_normal_exit:
+ mov.l %d2,-(%sp) # save d2
+ fmovm.x &0x80,FP_SCR0(%a6) # store out result
+ mov.w FP_SCR0_EX(%a6),%d1 # load {sgn,exp}
+ mov.w %d1,%d2 # make a copy
+ andi.l &0x7fff,%d1 # strip sign
+ sub.l %d0,%d1 # add scale factor
+ andi.w &0x8000,%d2 # keep old sign
+ or.w %d1,%d2 # concat old sign,new exponent
+ mov.w %d2,FP_SCR0_EX(%a6) # insert new exponent
+ mov.l (%sp)+,%d2 # restore d2
+ fmovm.x FP_SCR0(%a6),&0x80 # return result in fp0
+ rts
+
+#
+# operand is to be rounded to double precision
+#
+fin_dbl:
+ mov.w SRC_EX(%a0),FP_SCR0_EX(%a6)
+ mov.l SRC_HI(%a0),FP_SCR0_HI(%a6)
+ mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
+ bsr.l scale_to_zero_src # calculate scale factor
+
+ cmpi.l %d0,&0x3fff-0x3c00 # will move in underflow?
+ bge.w fin_sd_unfl # yes; go handle underflow
+ cmpi.l %d0,&0x3fff-0x43fe # will move in overflow?
+ beq.w fin_sd_may_ovfl # maybe; go check
+ blt.w fin_sd_ovfl # yes; go handle overflow
+	bra.w		fin_sd_normal		# no; go handle normalized op
+
+#
+# operand WILL underflow when moved in to the fp register file
+#
+fin_sd_unfl:
+ bset &unfl_bit,FPSR_EXCEPT(%a6) # set unfl exc bit
+
+ tst.b FP_SCR0_EX(%a6) # is operand negative?
+ bpl.b fin_sd_unfl_tst
+ bset &neg_bit,FPSR_CC(%a6) # set 'N' ccode bit
+
+# if underflow or inexact is enabled, then go calculate the EXOP first.
+fin_sd_unfl_tst:
+ mov.b FPCR_ENABLE(%a6),%d1
+ andi.b &0x0b,%d1 # is UNFL or INEX enabled?
+ bne.b fin_sd_unfl_ena # yes
+
+fin_sd_unfl_dis:
+ lea FP_SCR0(%a6),%a0 # pass: result addr
+ mov.l L_SCR3(%a6),%d1 # pass: rnd prec,mode
+ bsr.l unf_res # calculate default result
+ or.b %d0,FPSR_CC(%a6) # unf_res may have set 'Z'
+ fmovm.x FP_SCR0(%a6),&0x80 # return default result in fp0
+ rts
+
+#
+# operand will underflow AND underflow or inexact is enabled.
+# therefore, we must return the result rounded to extended precision.
+#
+fin_sd_unfl_ena:
+ mov.l FP_SCR0_HI(%a6),FP_SCR1_HI(%a6)
+ mov.l FP_SCR0_LO(%a6),FP_SCR1_LO(%a6)
+ mov.w FP_SCR0_EX(%a6),%d1 # load current exponent
+
+ mov.l %d2,-(%sp) # save d2
+ mov.w %d1,%d2 # make a copy
+ andi.l &0x7fff,%d1 # strip sign
+ sub.l %d0,%d1 # subtract scale factor
+ andi.w &0x8000,%d2 # extract old sign
+ addi.l &0x6000,%d1 # add new bias
+ andi.w &0x7fff,%d1
+ or.w %d1,%d2 # concat old sign,new exp
+ mov.w %d2,FP_SCR1_EX(%a6) # insert new exponent
+ fmovm.x FP_SCR1(%a6),&0x40 # return EXOP in fp1
+ mov.l (%sp)+,%d2 # restore d2
+ bra.b fin_sd_unfl_dis
+
+#
+# operand WILL overflow.
+#
+fin_sd_ovfl:
+ fmov.l &0x0,%fpsr # clear FPSR
+ fmov.l L_SCR3(%a6),%fpcr # set FPCR
+
+ fmov.x FP_SCR0(%a6),%fp0 # perform move
+
+ fmov.l &0x0,%fpcr # clear FPCR
+ fmov.l %fpsr,%d1 # save FPSR
+
+ or.l %d1,USER_FPSR(%a6) # save INEX2,N
+
+fin_sd_ovfl_tst:
+ or.l &ovfl_inx_mask,USER_FPSR(%a6) # set ovfl/aovfl/ainex
+
+ mov.b FPCR_ENABLE(%a6),%d1
+ andi.b &0x13,%d1 # is OVFL or INEX enabled?
+ bne.b fin_sd_ovfl_ena # yes
+
+#
+# OVFL is not enabled; therefore, we must create the default result by
+# calling ovf_res().
+#
+fin_sd_ovfl_dis:
+ btst &neg_bit,FPSR_CC(%a6) # is result negative?
+ sne %d1 # set sign param accordingly
+ mov.l L_SCR3(%a6),%d0 # pass: prec,mode
+ bsr.l ovf_res # calculate default result
+ or.b %d0,FPSR_CC(%a6) # set INF,N if applicable
+ fmovm.x (%a0),&0x80 # return default result in fp0
+ rts
+
+#
+# OVFL is enabled.
+# the INEX2 bit has already been updated by the round to the correct precision.
+# now, round to extended(and don't alter the FPSR).
+#
+fin_sd_ovfl_ena:
+ mov.l %d2,-(%sp) # save d2
+ mov.w FP_SCR0_EX(%a6),%d1 # fetch {sgn,exp}
+ mov.l %d1,%d2 # make a copy
+ andi.l &0x7fff,%d1 # strip sign
+ andi.w &0x8000,%d2 # keep old sign
+ sub.l %d0,%d1 # add scale factor
+ sub.l &0x6000,%d1 # subtract bias
+ andi.w &0x7fff,%d1
+ or.w %d2,%d1
+ mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent
+ mov.l (%sp)+,%d2 # restore d2
+ fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
+ bra.b fin_sd_ovfl_dis
+
+#
+# the move in MAY overflow. so...
+#
+fin_sd_may_ovfl:
+ fmov.l &0x0,%fpsr # clear FPSR
+ fmov.l L_SCR3(%a6),%fpcr # set FPCR
+
+ fmov.x FP_SCR0(%a6),%fp0 # perform the move
+
+ fmov.l %fpsr,%d1 # save status
+ fmov.l &0x0,%fpcr # clear FPCR
+
+ or.l %d1,USER_FPSR(%a6) # save INEX2,N
+
+ fabs.x %fp0,%fp1 # make a copy of result
+ fcmp.b %fp1,&0x2 # is |result| >= 2.b?
+ fbge.w fin_sd_ovfl_tst # yes; overflow has occurred
+
+# no, it didn't overflow; we have correct result
+ bra.w fin_sd_normal_exit
+
+##########################################################################
+
+#
+# operand is not a NORM: check its optype and branch accordingly
+#
+fin_not_norm:
+ cmpi.b %d1,&DENORM # weed out DENORM
+ beq.w fin_denorm
+ cmpi.b %d1,&SNAN # weed out SNANs
+ beq.l res_snan_1op
+ cmpi.b %d1,&QNAN # weed out QNANs
+ beq.l res_qnan_1op
+
+#
+# do the fmove in; at this point, only possible ops are ZERO and INF.
+# use fmov to determine ccodes.
+# prec:mode should be zero at this point but it won't affect answer anyway.
+#
+ fmov.x SRC(%a0),%fp0 # do fmove in
+ fmov.l %fpsr,%d0 # no exceptions possible
+ rol.l &0x8,%d0 # put ccodes in lo byte
+ mov.b %d0,FPSR_CC(%a6) # insert correct ccodes
+ rts
+
+#########################################################################
+# XDEF **************************************************************** #
+# fdiv(): emulates the fdiv instruction #
+# fsdiv(): emulates the fsdiv instruction #
+# fddiv(): emulates the fddiv instruction #
+# #
+# XREF **************************************************************** #
+# scale_to_zero_src() - scale src exponent to zero #
+# scale_to_zero_dst() - scale dst exponent to zero #
+# unf_res() - return default underflow result #
+# ovf_res() - return default overflow result #
+# res_qnan() - return QNAN result #
+# res_snan() - return SNAN result #
+# #
+# INPUT *************************************************************** #
+# a0 = pointer to extended precision source operand #
+# a1 = pointer to extended precision destination operand #
+# d0 rnd prec,mode #
+# #
+# OUTPUT ************************************************************** #
+# fp0 = result #
+# fp1 = EXOP (if exception occurred) #
+# #
+# ALGORITHM *********************************************************** #
+# Handle NANs, infinities, and zeroes as special cases. Divide #
+# norms/denorms into ext/sgl/dbl precision. #
+# For norms/denorms, scale the exponents such that a divide #
+# instruction won't cause an exception. Use the regular fdiv to #
+# compute a result. Check if the regular operands would have taken #
+# an exception. If so, return the default overflow/underflow result #
+# and return the EXOP if exceptions are enabled. Else, scale the #
+# result operand to the proper exponent. #
+# #
+#########################################################################
+
+ align 0x10
+tbl_fdiv_unfl:
+ long 0x3fff - 0x0000 # ext_unfl
+ long 0x3fff - 0x3f81 # sgl_unfl
+ long 0x3fff - 0x3c01 # dbl_unfl
+
+tbl_fdiv_ovfl:
+ long 0x3fff - 0x7ffe # ext overflow exponent
+ long 0x3fff - 0x407e # sgl overflow exponent
+ long 0x3fff - 0x43fe # dbl overflow exponent
+
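+# a hedged C model of how divide combines the scale factors (contrast
+# fmul, which adds them): quotient_exp = dst_exp - src_exp, so
+#
+#	long sf = scale_dst - scale_src;	      /* neg/add below */
+#	if (sf <= tbl_fdiv_ovfl[prec]) goto may_ovfl; /* verify there  */
+#	if (sf == tbl_fdiv_unfl[prec]) goto may_unfl;
+#	if (sf >  tbl_fdiv_unfl[prec]) goto unfl;
+#	goto normal;
+#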
+ global fsdiv
+fsdiv:
+ andi.b &0x30,%d0 # clear rnd prec
+ ori.b &s_mode*0x10,%d0 # insert sgl prec
+ bra.b fdiv
+
+ global fddiv
+fddiv:
+ andi.b &0x30,%d0 # clear rnd prec
+ ori.b &d_mode*0x10,%d0 # insert dbl prec
+
+ global fdiv
+fdiv:
+ mov.l %d0,L_SCR3(%a6) # store rnd info
+
+ clr.w %d1
+ mov.b DTAG(%a6),%d1
+ lsl.b &0x3,%d1
+ or.b STAG(%a6),%d1 # combine src tags
+
+ bne.w fdiv_not_norm # optimize on non-norm input
+
+#
+# DIVIDE: NORMs and DENORMs ONLY!
+#
+fdiv_norm:
+ mov.w DST_EX(%a1),FP_SCR1_EX(%a6)
+ mov.l DST_HI(%a1),FP_SCR1_HI(%a6)
+ mov.l DST_LO(%a1),FP_SCR1_LO(%a6)
+
+ mov.w SRC_EX(%a0),FP_SCR0_EX(%a6)
+ mov.l SRC_HI(%a0),FP_SCR0_HI(%a6)
+ mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
+
+ bsr.l scale_to_zero_src # scale src exponent
+ mov.l %d0,-(%sp) # save scale factor 1
+
+ bsr.l scale_to_zero_dst # scale dst exponent
+
+	neg.l		(%sp)			# SCALE FACTOR = scale2 - scale1
+ add.l %d0,(%sp)
+
+ mov.w 2+L_SCR3(%a6),%d1 # fetch precision
+ lsr.b &0x6,%d1 # shift to lo bits
+ mov.l (%sp)+,%d0 # load S.F.
+ cmp.l %d0,(tbl_fdiv_ovfl.b,%pc,%d1.w*4) # will result overflow?
+	ble.w		fdiv_may_ovfl		# result may overflow
+
+ cmp.l %d0,(tbl_fdiv_unfl.w,%pc,%d1.w*4) # will result underflow?
+ beq.w fdiv_may_unfl # maybe
+ bgt.w fdiv_unfl # yes; go handle underflow
+
+fdiv_normal:
+ fmovm.x FP_SCR1(%a6),&0x80 # load dst op
+
+	fmov.l		L_SCR3(%a6),%fpcr	# set FPCR
+ fmov.l &0x0,%fpsr # clear FPSR
+
+ fdiv.x FP_SCR0(%a6),%fp0 # perform divide
+
+ fmov.l %fpsr,%d1 # save FPSR
+ fmov.l &0x0,%fpcr # clear FPCR
+
+ or.l %d1,USER_FPSR(%a6) # save INEX2,N
+
+fdiv_normal_exit:
+ fmovm.x &0x80,FP_SCR0(%a6) # store result on stack
+ mov.l %d2,-(%sp) # store d2
+ mov.w FP_SCR0_EX(%a6),%d1 # load {sgn,exp}
+ mov.l %d1,%d2 # make a copy
+ andi.l &0x7fff,%d1 # strip sign
+ andi.w &0x8000,%d2 # keep old sign
+ sub.l %d0,%d1 # add scale factor
+ or.w %d2,%d1 # concat old sign,new exp
+ mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent
+ mov.l (%sp)+,%d2 # restore d2
+ fmovm.x FP_SCR0(%a6),&0x80 # return result in fp0
+ rts
+
+tbl_fdiv_ovfl2:
+ long 0x7fff
+ long 0x407f
+ long 0x43ff
+
+fdiv_no_ovfl:
+ mov.l (%sp)+,%d0 # restore scale factor
+ bra.b fdiv_normal_exit
+
+fdiv_may_ovfl:
+ mov.l %d0,-(%sp) # save scale factor
+
+ fmovm.x FP_SCR1(%a6),&0x80 # load dst op
+
+ fmov.l L_SCR3(%a6),%fpcr # set FPCR
+ fmov.l &0x0,%fpsr # set FPSR
+
+ fdiv.x FP_SCR0(%a6),%fp0 # execute divide
+
+ fmov.l %fpsr,%d0
+ fmov.l &0x0,%fpcr
+
+ or.l %d0,USER_FPSR(%a6) # save INEX,N
+
+ fmovm.x &0x01,-(%sp) # save result to stack
+ mov.w (%sp),%d0 # fetch new exponent
+ add.l &0xc,%sp # clear result from stack
+ andi.l &0x7fff,%d0 # strip sign
+ sub.l (%sp),%d0 # add scale factor
+	cmp.l		%d0,(tbl_fdiv_ovfl2.b,%pc,%d1.w*4) # overflow?
+	blt.b		fdiv_no_ovfl		# no; finish normally
+	mov.l		(%sp)+,%d0		# yes; pop scale factor
+
+fdiv_ovfl_tst:
+ or.l &ovfl_inx_mask,USER_FPSR(%a6) # set ovfl/aovfl/ainex
+
+ mov.b FPCR_ENABLE(%a6),%d1
+ andi.b &0x13,%d1 # is OVFL or INEX enabled?
+ bne.b fdiv_ovfl_ena # yes
+
+fdiv_ovfl_dis:
+ btst &neg_bit,FPSR_CC(%a6) # is result negative?
+ sne %d1 # set sign param accordingly
+ mov.l L_SCR3(%a6),%d0 # pass prec:rnd
+ bsr.l ovf_res # calculate default result
+ or.b %d0,FPSR_CC(%a6) # set INF if applicable
+ fmovm.x (%a0),&0x80 # return default result in fp0
+ rts
+
+fdiv_ovfl_ena:
+ mov.l L_SCR3(%a6),%d1
+ andi.b &0xc0,%d1 # is precision extended?
+ bne.b fdiv_ovfl_ena_sd # no, do sgl or dbl
+
+fdiv_ovfl_ena_cont:
+ fmovm.x &0x80,FP_SCR0(%a6) # move result to stack
+
+ mov.l %d2,-(%sp) # save d2
+ mov.w FP_SCR0_EX(%a6),%d1 # fetch {sgn,exp}
+ mov.w %d1,%d2 # make a copy
+ andi.l &0x7fff,%d1 # strip sign
+ sub.l %d0,%d1 # add scale factor
+ subi.l &0x6000,%d1 # subtract bias
+ andi.w &0x7fff,%d1 # clear sign bit
+ andi.w &0x8000,%d2 # keep old sign
+ or.w %d2,%d1 # concat old sign,new exp
+ mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent
+ mov.l (%sp)+,%d2 # restore d2
+ fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
+ bra.b fdiv_ovfl_dis
+
+fdiv_ovfl_ena_sd:
+ fmovm.x FP_SCR1(%a6),&0x80 # load dst operand
+
+ mov.l L_SCR3(%a6),%d1
+ andi.b &0x30,%d1 # keep rnd mode
+ fmov.l %d1,%fpcr # set FPCR
+
+ fdiv.x FP_SCR0(%a6),%fp0 # execute divide
+
+ fmov.l &0x0,%fpcr # clear FPCR
+ bra.b fdiv_ovfl_ena_cont
+
+fdiv_unfl:
+ bset &unfl_bit,FPSR_EXCEPT(%a6) # set unfl exc bit
+
+ fmovm.x FP_SCR1(%a6),&0x80 # load dst op
+
+ fmov.l &rz_mode*0x10,%fpcr # set FPCR
+ fmov.l &0x0,%fpsr # clear FPSR
+
+ fdiv.x FP_SCR0(%a6),%fp0 # execute divide
+
+ fmov.l %fpsr,%d1 # save status
+ fmov.l &0x0,%fpcr # clear FPCR
+
+ or.l %d1,USER_FPSR(%a6) # save INEX2,N
+
+ mov.b FPCR_ENABLE(%a6),%d1
+ andi.b &0x0b,%d1 # is UNFL or INEX enabled?
+ bne.b fdiv_unfl_ena # yes
+
+fdiv_unfl_dis:
+ fmovm.x &0x80,FP_SCR0(%a6) # store out result
+
+ lea FP_SCR0(%a6),%a0 # pass: result addr
+ mov.l L_SCR3(%a6),%d1 # pass: rnd prec,mode
+ bsr.l unf_res # calculate default result
+ or.b %d0,FPSR_CC(%a6) # 'Z' may have been set
+ fmovm.x FP_SCR0(%a6),&0x80 # return default result in fp0
+ rts
+
+#
+# UNFL is enabled.
+#
+fdiv_unfl_ena:
+ fmovm.x FP_SCR1(%a6),&0x40 # load dst op
+
+ mov.l L_SCR3(%a6),%d1
+ andi.b &0xc0,%d1 # is precision extended?
+ bne.b fdiv_unfl_ena_sd # no, sgl or dbl
+
+ fmov.l L_SCR3(%a6),%fpcr # set FPCR
+
+fdiv_unfl_ena_cont:
+ fmov.l &0x0,%fpsr # clear FPSR
+
+ fdiv.x FP_SCR0(%a6),%fp1 # execute divide
+
+ fmov.l &0x0,%fpcr # clear FPCR
+
+ fmovm.x &0x40,FP_SCR0(%a6) # save result to stack
+ mov.l %d2,-(%sp) # save d2
+ mov.w FP_SCR0_EX(%a6),%d1 # fetch {sgn,exp}
+ mov.l %d1,%d2 # make a copy
+ andi.l &0x7fff,%d1 # strip sign
+ andi.w &0x8000,%d2 # keep old sign
+	sub.l		%d0,%d1			# add scale factor
+ addi.l &0x6000,%d1 # add bias
+ andi.w &0x7fff,%d1
+ or.w %d2,%d1 # concat old sign,new exp
+ mov.w %d1,FP_SCR0_EX(%a6) # insert new exp
+ mov.l (%sp)+,%d2 # restore d2
+ fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
+ bra.w fdiv_unfl_dis
+
+fdiv_unfl_ena_sd:
+ mov.l L_SCR3(%a6),%d1
+ andi.b &0x30,%d1 # use only rnd mode
+ fmov.l %d1,%fpcr # set FPCR
+
+ bra.b fdiv_unfl_ena_cont
+
+#
+# the divide operation MAY underflow:
+#
+fdiv_may_unfl:
+ fmovm.x FP_SCR1(%a6),&0x80 # load dst op
+
+ fmov.l L_SCR3(%a6),%fpcr # set FPCR
+ fmov.l &0x0,%fpsr # clear FPSR
+
+ fdiv.x FP_SCR0(%a6),%fp0 # execute divide
+
+ fmov.l %fpsr,%d1 # save status
+ fmov.l &0x0,%fpcr # clear FPCR
+
+ or.l %d1,USER_FPSR(%a6) # save INEX2,N
+
+ fabs.x %fp0,%fp1 # make a copy of result
+ fcmp.b %fp1,&0x1 # is |result| > 1.b?
+ fbgt.w fdiv_normal_exit # no; no underflow occurred
+ fblt.w fdiv_unfl # yes; underflow occurred
+
+#
+# we still don't know if underflow occurred. result is ~ equal to 1. but,
+# we don't know if the result was an underflow that rounded up to a 1
+# or a normalized number that rounded down to a 1. so, redo the entire
+# operation using RZ as the rounding mode to see what the pre-rounded
+# result is. this case should be relatively rare.
+#
+ fmovm.x FP_SCR1(%a6),&0x40 # load dst op into fp1
+
+ mov.l L_SCR3(%a6),%d1
+ andi.b &0xc0,%d1 # keep rnd prec
+ ori.b &rz_mode*0x10,%d1 # insert RZ
+
+ fmov.l %d1,%fpcr # set FPCR
+ fmov.l &0x0,%fpsr # clear FPSR
+
+ fdiv.x FP_SCR0(%a6),%fp1 # execute divide
+
+ fmov.l &0x0,%fpcr # clear FPCR
+ fabs.x %fp1 # make absolute value
+ fcmp.b %fp1,&0x1 # is |result| < 1.b?
+ fbge.w fdiv_normal_exit # no; no underflow occurred
+ bra.w fdiv_unfl # yes; underflow occurred
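+
+# note: RZ never increases the magnitude, so a pre-rounded quotient just
+# below 1.0 stays below 1.0 under RZ; the compare above therefore
+# cleanly separates a true underflow from a normalized result that
+# merely rounded up to 1.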
+
+############################################################################
+
+#
+# Divide: inputs are not both normalized; what are they?
+#
+fdiv_not_norm:
+ mov.w (tbl_fdiv_op.b,%pc,%d1.w*2),%d1
+ jmp (tbl_fdiv_op.b,%pc,%d1.w*1)
+
+ swbeg &48
+tbl_fdiv_op:
+ short fdiv_norm - tbl_fdiv_op # NORM / NORM
+ short fdiv_inf_load - tbl_fdiv_op # NORM / ZERO
+ short fdiv_zero_load - tbl_fdiv_op # NORM / INF
+ short fdiv_res_qnan - tbl_fdiv_op # NORM / QNAN
+ short fdiv_norm - tbl_fdiv_op # NORM / DENORM
+ short fdiv_res_snan - tbl_fdiv_op # NORM / SNAN
+ short tbl_fdiv_op - tbl_fdiv_op #
+ short tbl_fdiv_op - tbl_fdiv_op #
+
+ short fdiv_zero_load - tbl_fdiv_op # ZERO / NORM
+ short fdiv_res_operr - tbl_fdiv_op # ZERO / ZERO
+ short fdiv_zero_load - tbl_fdiv_op # ZERO / INF
+ short fdiv_res_qnan - tbl_fdiv_op # ZERO / QNAN
+ short fdiv_zero_load - tbl_fdiv_op # ZERO / DENORM
+ short fdiv_res_snan - tbl_fdiv_op # ZERO / SNAN
+ short tbl_fdiv_op - tbl_fdiv_op #
+ short tbl_fdiv_op - tbl_fdiv_op #
+
+ short fdiv_inf_dst - tbl_fdiv_op # INF / NORM
+ short fdiv_inf_dst - tbl_fdiv_op # INF / ZERO
+ short fdiv_res_operr - tbl_fdiv_op # INF / INF
+ short fdiv_res_qnan - tbl_fdiv_op # INF / QNAN
+ short fdiv_inf_dst - tbl_fdiv_op # INF / DENORM
+ short fdiv_res_snan - tbl_fdiv_op # INF / SNAN
+ short tbl_fdiv_op - tbl_fdiv_op #
+ short tbl_fdiv_op - tbl_fdiv_op #
+
+ short fdiv_res_qnan - tbl_fdiv_op # QNAN / NORM
+ short fdiv_res_qnan - tbl_fdiv_op # QNAN / ZERO
+ short fdiv_res_qnan - tbl_fdiv_op # QNAN / INF
+ short fdiv_res_qnan - tbl_fdiv_op # QNAN / QNAN
+ short fdiv_res_qnan - tbl_fdiv_op # QNAN / DENORM
+ short fdiv_res_snan - tbl_fdiv_op # QNAN / SNAN
+ short tbl_fdiv_op - tbl_fdiv_op #
+ short tbl_fdiv_op - tbl_fdiv_op #
+
+ short fdiv_norm - tbl_fdiv_op # DENORM / NORM
+ short fdiv_inf_load - tbl_fdiv_op # DENORM / ZERO
+ short fdiv_zero_load - tbl_fdiv_op # DENORM / INF
+ short fdiv_res_qnan - tbl_fdiv_op # DENORM / QNAN
+ short fdiv_norm - tbl_fdiv_op # DENORM / DENORM
+ short fdiv_res_snan - tbl_fdiv_op # DENORM / SNAN
+ short tbl_fdiv_op - tbl_fdiv_op #
+ short tbl_fdiv_op - tbl_fdiv_op #
+
+ short fdiv_res_snan - tbl_fdiv_op # SNAN / NORM
+ short fdiv_res_snan - tbl_fdiv_op # SNAN / ZERO
+ short fdiv_res_snan - tbl_fdiv_op # SNAN / INF
+ short fdiv_res_snan - tbl_fdiv_op # SNAN / QNAN
+ short fdiv_res_snan - tbl_fdiv_op # SNAN / DENORM
+ short fdiv_res_snan - tbl_fdiv_op # SNAN / SNAN
+ short tbl_fdiv_op - tbl_fdiv_op #
+ short tbl_fdiv_op - tbl_fdiv_op #
+
+fdiv_res_qnan:
+ bra.l res_qnan
+fdiv_res_snan:
+ bra.l res_snan
+fdiv_res_operr:
+ bra.l res_operr
+
+ global fdiv_zero_load # global for fsgldiv
+fdiv_zero_load:
+ mov.b SRC_EX(%a0),%d0 # result sign is exclusive
+ mov.b DST_EX(%a1),%d1 # or of input signs.
+ eor.b %d0,%d1
+ bpl.b fdiv_zero_load_p # result is positive
+ fmov.s &0x80000000,%fp0 # load a -ZERO
+ mov.b &z_bmask+neg_bmask,FPSR_CC(%a6) # set Z/N
+ rts
+fdiv_zero_load_p:
+ fmov.s &0x00000000,%fp0 # load a +ZERO
+ mov.b &z_bmask,FPSR_CC(%a6) # set Z
+ rts
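+
+# (the eor.b above leaves the result sign in the msb of %d1, which is
+# exactly what the bpl test checks)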
+
+#
+# The destination was In Range and the source was a ZERO. The result,
+# therefore, is an INF w/ the proper sign.
+# So, determine the sign and return a new INF (w/ the j-bit cleared).
+#
+ global fdiv_inf_load # global for fsgldiv
+fdiv_inf_load:
+	ori.w		&dz_mask+adz_mask,2+USER_FPSR(%a6) # set DZ/ADZ
+ mov.b SRC_EX(%a0),%d0 # load both signs
+ mov.b DST_EX(%a1),%d1
+ eor.b %d0,%d1
+ bpl.b fdiv_inf_load_p # result is positive
+ fmov.s &0xff800000,%fp0 # make result -INF
+ mov.b &inf_bmask+neg_bmask,FPSR_CC(%a6) # set INF/N
+ rts
+fdiv_inf_load_p:
+ fmov.s &0x7f800000,%fp0 # make result +INF
+ mov.b &inf_bmask,FPSR_CC(%a6) # set INF
+ rts
+
+#
+# The destination was an INF w/ an In Range or ZERO source, the result is
+# an INF w/ the proper sign.
+# The 68881/882 returns the destination INF w/ the new sign (if the j-bit
+# of the dst INF is set, then the j-bit of the result INF is also set).
+#
+ global fdiv_inf_dst # global for fsgldiv
+fdiv_inf_dst:
+ mov.b DST_EX(%a1),%d0 # load both signs
+ mov.b SRC_EX(%a0),%d1
+ eor.b %d0,%d1
+ bpl.b fdiv_inf_dst_p # result is positive
+
+ fmovm.x DST(%a1),&0x80 # return result in fp0
+ fabs.x %fp0 # clear sign bit
+ fneg.x %fp0 # set sign bit
+ mov.b &inf_bmask+neg_bmask,FPSR_CC(%a6) # set INF/NEG
+ rts
+
+fdiv_inf_dst_p:
+ fmovm.x DST(%a1),&0x80 # return result in fp0
+ fabs.x %fp0 # return positive INF
+ mov.b &inf_bmask,FPSR_CC(%a6) # set INF
+ rts
+
+#########################################################################
+# XDEF **************************************************************** #
+# fneg(): emulates the fneg instruction #
+# fsneg(): emulates the fsneg instruction #
+# fdneg(): emulates the fdneg instruction #
+# #
+# XREF **************************************************************** #
+# norm() - normalize a denorm to provide EXOP #
+# scale_to_zero_src() - scale sgl/dbl source exponent #
+# ovf_res() - return default overflow result #
+# unf_res() - return default underflow result #
+# res_qnan_1op() - return QNAN result #
+# res_snan_1op() - return SNAN result #
+# #
+# INPUT *************************************************************** #
+# a0 = pointer to extended precision source operand #
+# d0 = rnd prec,mode #
+# #
+# OUTPUT ************************************************************** #
+# fp0 = result #
+# fp1 = EXOP (if exception occurred) #
+# #
+# ALGORITHM *********************************************************** #
+# Handle NANs, zeroes, and infinities as special cases. Separate #
+# norms/denorms into ext/sgl/dbl precisions. Extended precision can be #
+# emulated by simply setting sign bit. Sgl/dbl operands must be scaled #
+# and an actual fneg performed to see if overflow/underflow would have #
+# occurred. If so, return default underflow/overflow result. Else, #
+# scale the result exponent and return result. FPSR gets set based on #
+# the result value. #
+# #
+#########################################################################
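+
+# note for the sgl/dbl paths below: the scale factor returned by
+# scale_to_zero_src() is checked against 0x3fff-0x3f80 (sgl) or
+# 0x3fff-0x3c00 (dbl) for underflow and against 0x3fff-0x407e (sgl) or
+# 0x3fff-0x43fe (dbl) for overflow before the fneg is performed.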
+
+ global fsneg
+fsneg:
+ andi.b &0x30,%d0 # clear rnd prec
+ ori.b &s_mode*0x10,%d0 # insert sgl precision
+ bra.b fneg
+
+ global fdneg
+fdneg:
+ andi.b &0x30,%d0 # clear rnd prec
+ ori.b &d_mode*0x10,%d0 # insert dbl prec
+
+ global fneg
+fneg:
+ mov.l %d0,L_SCR3(%a6) # store rnd info
+ mov.b STAG(%a6),%d1
+ bne.w fneg_not_norm # optimize on non-norm input
+
+#
+# NEGATE SIGN : norms and denorms ONLY!
+#
+fneg_norm:
+ andi.b &0xc0,%d0 # is precision extended?
+ bne.w fneg_not_ext # no; go handle sgl or dbl
+
+#
+# precision selected is extended. so...we can not get an underflow
+# or overflow because of rounding to the correct precision. so...
+# skip the scaling and unscaling...
+#
+ mov.l SRC_HI(%a0),FP_SCR0_HI(%a6)
+ mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
+ mov.w SRC_EX(%a0),%d0
+ eori.w &0x8000,%d0 # negate sign
+ bpl.b fneg_norm_load # sign is positive
+ mov.b &neg_bmask,FPSR_CC(%a6) # set 'N' ccode bit
+fneg_norm_load:
+ mov.w %d0,FP_SCR0_EX(%a6)
+ fmovm.x FP_SCR0(%a6),&0x80 # return result in fp0
+ rts
+
+#
+# for an extended precision DENORM, the UNFL exception bit is set
+# the accrued bit is NOT set in this instance(no inexactness!)
+#
+fneg_denorm:
+ andi.b &0xc0,%d0 # is precision extended?
+ bne.b fneg_not_ext # no; go handle sgl or dbl
+
+ bset &unfl_bit,FPSR_EXCEPT(%a6) # set unfl exc bit
+
+ mov.l SRC_HI(%a0),FP_SCR0_HI(%a6)
+ mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
+ mov.w SRC_EX(%a0),%d0
+ eori.w &0x8000,%d0 # negate sign
+ bpl.b fneg_denorm_done # no
+ mov.b &neg_bmask,FPSR_CC(%a6) # yes, set 'N' ccode bit
+fneg_denorm_done:
+ mov.w %d0,FP_SCR0_EX(%a6)
+ fmovm.x FP_SCR0(%a6),&0x80 # return default result in fp0
+
+ btst &unfl_bit,FPCR_ENABLE(%a6) # is UNFL enabled?
+ bne.b fneg_ext_unfl_ena # yes
+ rts
+
+#
+# the input is an extended DENORM and underflow is enabled in the FPCR.
+# normalize the mantissa and add the bias of 0x6000 to the resulting negative
+# exponent and insert back into the operand.
+#
+fneg_ext_unfl_ena:
+ lea FP_SCR0(%a6),%a0 # pass: ptr to operand
+ bsr.l norm # normalize result
+ neg.w %d0 # new exponent = -(shft val)
+ addi.w &0x6000,%d0 # add new bias to exponent
+ mov.w FP_SCR0_EX(%a6),%d1 # fetch old sign,exp
+ andi.w &0x8000,%d1 # keep old sign
+ andi.w &0x7fff,%d0 # clear sign position
+ or.w %d1,%d0 # concat old sign, new exponent
+ mov.w %d0,FP_SCR0_EX(%a6) # insert new exponent
+ fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
+ rts
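+
+# example: if norm() shifted the mantissa up by 3 bits, the true
+# exponent is -3, so the EXOP exponent stored above is
+# -3 + 0x6000 = 0x5ffd.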
+
+#
+# operand is either single or double
+#
+fneg_not_ext:
+ cmpi.b %d0,&s_mode*0x10 # separate sgl/dbl prec
+ bne.b fneg_dbl
+
+#
+# operand is to be rounded to single precision
+#
+fneg_sgl:
+ mov.w SRC_EX(%a0),FP_SCR0_EX(%a6)
+ mov.l SRC_HI(%a0),FP_SCR0_HI(%a6)
+ mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
+ bsr.l scale_to_zero_src # calculate scale factor
+
+ cmpi.l %d0,&0x3fff-0x3f80 # will move in underflow?
+ bge.w fneg_sd_unfl # yes; go handle underflow
+ cmpi.l %d0,&0x3fff-0x407e # will move in overflow?
+ beq.w fneg_sd_may_ovfl # maybe; go check
+ blt.w fneg_sd_ovfl # yes; go handle overflow
+
+#
+# operand will NOT overflow or underflow when moved in to the fp reg file
+#
+fneg_sd_normal:
+ fmov.l &0x0,%fpsr # clear FPSR
+ fmov.l L_SCR3(%a6),%fpcr # set FPCR
+
+ fneg.x FP_SCR0(%a6),%fp0 # perform negation
+
+ fmov.l %fpsr,%d1 # save FPSR
+ fmov.l &0x0,%fpcr # clear FPCR
+
+ or.l %d1,USER_FPSR(%a6) # save INEX2,N
+
+fneg_sd_normal_exit:
+ mov.l %d2,-(%sp) # save d2
+ fmovm.x &0x80,FP_SCR0(%a6) # store out result
+ mov.w FP_SCR0_EX(%a6),%d1 # load sgn,exp
+ mov.w %d1,%d2 # make a copy
+ andi.l &0x7fff,%d1 # strip sign
+ sub.l %d0,%d1 # add scale factor
+ andi.w &0x8000,%d2 # keep old sign
+ or.w %d1,%d2 # concat old sign,new exp
+ mov.w %d2,FP_SCR0_EX(%a6) # insert new exponent
+ mov.l (%sp)+,%d2 # restore d2
+ fmovm.x FP_SCR0(%a6),&0x80 # return result in fp0
+ rts
+
+#
+# operand is to be rounded to double precision
+#
+fneg_dbl:
+ mov.w SRC_EX(%a0),FP_SCR0_EX(%a6)
+ mov.l SRC_HI(%a0),FP_SCR0_HI(%a6)
+ mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
+ bsr.l scale_to_zero_src # calculate scale factor
+
+ cmpi.l %d0,&0x3fff-0x3c00 # will move in underflow?
+ bge.b fneg_sd_unfl # yes; go handle underflow
+ cmpi.l %d0,&0x3fff-0x43fe # will move in overflow?
+ beq.w fneg_sd_may_ovfl # maybe; go check
+ blt.w fneg_sd_ovfl # yes; go handle overflow
+	bra.w		fneg_sd_normal		# no; go handle normalized op
+
+#
+# operand WILL underflow when moved in to the fp register file
+#
+fneg_sd_unfl:
+ bset &unfl_bit,FPSR_EXCEPT(%a6) # set unfl exc bit
+
+ eori.b &0x80,FP_SCR0_EX(%a6) # negate sign
+ bpl.b fneg_sd_unfl_tst
+ bset &neg_bit,FPSR_CC(%a6) # set 'N' ccode bit
+
+# if underflow or inexact is enabled, go calculate EXOP first.
+fneg_sd_unfl_tst:
+ mov.b FPCR_ENABLE(%a6),%d1
+ andi.b &0x0b,%d1 # is UNFL or INEX enabled?
+ bne.b fneg_sd_unfl_ena # yes
+
+fneg_sd_unfl_dis:
+ lea FP_SCR0(%a6),%a0 # pass: result addr
+ mov.l L_SCR3(%a6),%d1 # pass: rnd prec,mode
+ bsr.l unf_res # calculate default result
+ or.b %d0,FPSR_CC(%a6) # unf_res may have set 'Z'
+ fmovm.x FP_SCR0(%a6),&0x80 # return default result in fp0
+ rts
+
+#
+# operand will underflow AND underflow is enabled.
+# therefore, we must return the result rounded to extended precision.
+#
+fneg_sd_unfl_ena:
+ mov.l FP_SCR0_HI(%a6),FP_SCR1_HI(%a6)
+ mov.l FP_SCR0_LO(%a6),FP_SCR1_LO(%a6)
+ mov.w FP_SCR0_EX(%a6),%d1 # load current exponent
+
+ mov.l %d2,-(%sp) # save d2
+ mov.l %d1,%d2 # make a copy
+ andi.l &0x7fff,%d1 # strip sign
+ andi.w &0x8000,%d2 # keep old sign
+ sub.l %d0,%d1 # subtract scale factor
+ addi.l &0x6000,%d1 # add new bias
+ andi.w &0x7fff,%d1
+ or.w %d2,%d1 # concat new sign,new exp
+ mov.w %d1,FP_SCR1_EX(%a6) # insert new exp
+ fmovm.x FP_SCR1(%a6),&0x40 # return EXOP in fp1
+ mov.l (%sp)+,%d2 # restore d2
+ bra.b fneg_sd_unfl_dis
+
+#
+# operand WILL overflow.
+#
+fneg_sd_ovfl:
+ fmov.l &0x0,%fpsr # clear FPSR
+ fmov.l L_SCR3(%a6),%fpcr # set FPCR
+
+ fneg.x FP_SCR0(%a6),%fp0 # perform negation
+
+ fmov.l &0x0,%fpcr # clear FPCR
+ fmov.l %fpsr,%d1 # save FPSR
+
+ or.l %d1,USER_FPSR(%a6) # save INEX2,N
+
+fneg_sd_ovfl_tst:
+ or.l &ovfl_inx_mask,USER_FPSR(%a6) # set ovfl/aovfl/ainex
+
+ mov.b FPCR_ENABLE(%a6),%d1
+ andi.b &0x13,%d1 # is OVFL or INEX enabled?
+ bne.b fneg_sd_ovfl_ena # yes
+
+#
+# OVFL is not enabled; therefore, we must create the default result by
+# calling ovf_res().
+#
+fneg_sd_ovfl_dis:
+ btst &neg_bit,FPSR_CC(%a6) # is result negative?
+ sne %d1 # set sign param accordingly
+ mov.l L_SCR3(%a6),%d0 # pass: prec,mode
+ bsr.l ovf_res # calculate default result
+ or.b %d0,FPSR_CC(%a6) # set INF,N if applicable
+ fmovm.x (%a0),&0x80 # return default result in fp0
+ rts
+
+#
+# OVFL is enabled.
+# the INEX2 bit has already been updated by the round to the correct precision.
+# now, round to extended(and don't alter the FPSR).
+#
+fneg_sd_ovfl_ena:
+ mov.l %d2,-(%sp) # save d2
+ mov.w FP_SCR0_EX(%a6),%d1 # fetch {sgn,exp}
+ mov.l %d1,%d2 # make a copy
+ andi.l &0x7fff,%d1 # strip sign
+ andi.w &0x8000,%d2 # keep old sign
+ sub.l %d0,%d1 # add scale factor
+ subi.l &0x6000,%d1 # subtract bias
+ andi.w &0x7fff,%d1
+ or.w %d2,%d1 # concat sign,exp
+ mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent
+ fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
+ mov.l (%sp)+,%d2 # restore d2
+ bra.b fneg_sd_ovfl_dis
+
+#
+# the move in MAY overflow. so...
+#
+fneg_sd_may_ovfl:
+ fmov.l &0x0,%fpsr # clear FPSR
+ fmov.l L_SCR3(%a6),%fpcr # set FPCR
+
+ fneg.x FP_SCR0(%a6),%fp0 # perform negation
+
+ fmov.l %fpsr,%d1 # save status
+ fmov.l &0x0,%fpcr # clear FPCR
+
+ or.l %d1,USER_FPSR(%a6) # save INEX2,N
+
+ fabs.x %fp0,%fp1 # make a copy of result
+ fcmp.b %fp1,&0x2 # is |result| >= 2.b?
+ fbge.w fneg_sd_ovfl_tst # yes; overflow has occurred
+
+# no, it didn't overflow; we have correct result
+ bra.w fneg_sd_normal_exit
+
+##########################################################################
+
+#
+# input is not normalized; what is it?
+#
+fneg_not_norm:
+ cmpi.b %d1,&DENORM # weed out DENORM
+ beq.w fneg_denorm
+ cmpi.b %d1,&SNAN # weed out SNAN
+ beq.l res_snan_1op
+ cmpi.b %d1,&QNAN # weed out QNAN
+ beq.l res_qnan_1op
+
+#
+# do the fneg; at this point, only possible ops are ZERO and INF.
+# use fneg to determine ccodes.
+# prec:mode should be zero at this point, but it won't affect the answer anyway.
+#
+ fneg.x SRC_EX(%a0),%fp0 # do fneg
+ fmov.l %fpsr,%d0
+ rol.l &0x8,%d0 # put ccodes in lo byte
+ mov.b %d0,FPSR_CC(%a6) # insert correct ccodes
+ rts
+
+#########################################################################
+# XDEF **************************************************************** #
+#	ftst(): emulates the ftst instruction				#
+# #
+# XREF **************************************************************** #
+# res{s,q}nan_1op() - set NAN result for monadic instruction #
+# #
+# INPUT *************************************************************** #
+# a0 = pointer to extended precision source operand #
+# #
+# OUTPUT ************************************************************** #
+# none #
+# #
+# ALGORITHM *********************************************************** #
+#	Check the source operand tag (STAG) and set the FPSR ccodes	#
+#	according to the operand type and sign.				#
+# #
+#########################################################################
+
+ global ftst
+ftst:
+ mov.b STAG(%a6),%d1
+ bne.b ftst_not_norm # optimize on non-norm input
+
+#
+# Norm:
+#
+ftst_norm:
+ tst.b SRC_EX(%a0) # is operand negative?
+ bmi.b ftst_norm_m # yes
+ rts
+ftst_norm_m:
+ mov.b &neg_bmask,FPSR_CC(%a6) # set 'N' ccode bit
+ rts
+
+#
+# input is not normalized; what is it?
+#
+ftst_not_norm:
+ cmpi.b %d1,&ZERO # weed out ZERO
+ beq.b ftst_zero
+ cmpi.b %d1,&INF # weed out INF
+ beq.b ftst_inf
+ cmpi.b %d1,&SNAN # weed out SNAN
+ beq.l res_snan_1op
+ cmpi.b %d1,&QNAN # weed out QNAN
+ beq.l res_qnan_1op
+
+#
+# Denorm:
+#
+ftst_denorm:
+ tst.b SRC_EX(%a0) # is operand negative?
+ bmi.b ftst_denorm_m # yes
+ rts
+ftst_denorm_m:
+ mov.b &neg_bmask,FPSR_CC(%a6) # set 'N' ccode bit
+ rts
+
+#
+# Infinity:
+#
+ftst_inf:
+ tst.b SRC_EX(%a0) # is operand negative?
+ bmi.b ftst_inf_m # yes
+ftst_inf_p:
+ mov.b &inf_bmask,FPSR_CC(%a6) # set 'I' ccode bit
+ rts
+ftst_inf_m:
+ mov.b &inf_bmask+neg_bmask,FPSR_CC(%a6) # set 'I','N' ccode bits
+ rts
+
+#
+# Zero:
+#
+ftst_zero:
+ tst.b SRC_EX(%a0) # is operand negative?
+ bmi.b ftst_zero_m # yes
+ftst_zero_p:
+	mov.b		&z_bmask,FPSR_CC(%a6)	# set 'Z' ccode bit
+ rts
+ftst_zero_m:
+ mov.b &z_bmask+neg_bmask,FPSR_CC(%a6) # set 'Z','N' ccode bits
+ rts
+
+#########################################################################
+# XDEF **************************************************************** #
+# fint(): emulates the fint instruction #
+# #
+# XREF **************************************************************** #
+# res_{s,q}nan_1op() - set NAN result for monadic operation #
+# #
+# INPUT *************************************************************** #
+# a0 = pointer to extended precision source operand #
+# d0 = round precision/mode #
+# #
+# OUTPUT ************************************************************** #
+# fp0 = result #
+# #
+# ALGORITHM *********************************************************** #
+# Separate according to operand type. Unnorms don't pass through #
+# here. For norms, load the rounding mode/prec, execute a "fint", then #
+# store the resulting FPSR bits. #
+# For denorms, force the j-bit to a one and do the same as for #
+# norms. Denorms are so low that the answer will either be a zero or a #
+# one. #
+# For zeroes/infs/NANs, return the same while setting the FPSR #
+# as appropriate. #
+# #
+#########################################################################
+
+ global fint
+fint:
+ mov.b STAG(%a6),%d1
+ bne.b fint_not_norm # optimize on non-norm input
+
+#
+# Norm:
+#
+fint_norm:
+ andi.b &0x30,%d0 # set prec = ext
+
+ fmov.l %d0,%fpcr # set FPCR
+ fmov.l &0x0,%fpsr # clear FPSR
+
+ fint.x SRC(%a0),%fp0 # execute fint
+
+ fmov.l &0x0,%fpcr # clear FPCR
+ fmov.l %fpsr,%d0 # save FPSR
+ or.l %d0,USER_FPSR(%a6) # set exception bits
+
+ rts
+
+#
+# input is not normalized; what is it?
+#
+fint_not_norm:
+ cmpi.b %d1,&ZERO # weed out ZERO
+ beq.b fint_zero
+ cmpi.b %d1,&INF # weed out INF
+ beq.b fint_inf
+ cmpi.b %d1,&DENORM # weed out DENORM
+ beq.b fint_denorm
+ cmpi.b %d1,&SNAN # weed out SNAN
+ beq.l res_snan_1op
+ bra.l res_qnan_1op # weed out QNAN
+
+#
+# Denorm:
+#
+# for DENORMs, the result will be either (+/-)ZERO or (+/-)1.
+# also, the INEX2 and AINEX exception bits will be set.
+# so, we could either set these manually or force the DENORM
+# to a very small NORM and ship it to the NORM routine.
+# I do the latter.
+#
+fint_denorm:
+ mov.w SRC_EX(%a0),FP_SCR0_EX(%a6) # copy sign, zero exp
+ mov.b &0x80,FP_SCR0_HI(%a6) # force DENORM ==> small NORM
+ lea FP_SCR0(%a6),%a0
+ bra.b fint_norm
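+
+# example: any positive DENORM becomes a tiny positive NORM above, so
+# the fint produces +0.0 (RN/RZ/RM) or +1.0 (RP) with INEX2/AINEX set,
+# which is exactly the DENORM behavior described above.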
+
+#
+# Zero:
+#
+fint_zero:
+ tst.b SRC_EX(%a0) # is ZERO negative?
+ bmi.b fint_zero_m # yes
+fint_zero_p:
+ fmov.s &0x00000000,%fp0 # return +ZERO in fp0
+ mov.b &z_bmask,FPSR_CC(%a6) # set 'Z' ccode bit
+ rts
+fint_zero_m:
+ fmov.s &0x80000000,%fp0 # return -ZERO in fp0
+ mov.b &z_bmask+neg_bmask,FPSR_CC(%a6) # set 'Z','N' ccode bits
+ rts
+
+#
+# Infinity:
+#
+fint_inf:
+ fmovm.x SRC(%a0),&0x80 # return result in fp0
+ tst.b SRC_EX(%a0) # is INF negative?
+ bmi.b fint_inf_m # yes
+fint_inf_p:
+ mov.b &inf_bmask,FPSR_CC(%a6) # set 'I' ccode bit
+ rts
+fint_inf_m:
+ mov.b &inf_bmask+neg_bmask,FPSR_CC(%a6) # set 'N','I' ccode bits
+ rts
+
+#########################################################################
+# XDEF **************************************************************** #
+# fintrz(): emulates the fintrz instruction #
+# #
+# XREF **************************************************************** #
+# res_{s,q}nan_1op() - set NAN result for monadic operation #
+# #
+# INPUT *************************************************************** #
+# a0 = pointer to extended precision source operand #
+# d0 = round precision/mode #
+# #
+# OUTPUT ************************************************************** #
+# fp0 = result #
+# #
+# ALGORITHM *********************************************************** #
+# Separate according to operand type. Unnorms don't pass through #
+# here. For norms, load the rounding mode/prec, execute a "fintrz", #
+# then store the resulting FPSR bits. #
+# For denorms, force the j-bit to a one and do the same as for #
+# norms. Denorms are so low that the answer will either be a zero or a #
+# one. #
+# For zeroes/infs/NANs, return the same while setting the FPSR #
+# as appropriate. #
+# #
+#########################################################################
+
+ global fintrz
+fintrz:
+ mov.b STAG(%a6),%d1
+ bne.b fintrz_not_norm # optimize on non-norm input
+
+#
+# Norm:
+#
+fintrz_norm:
+ fmov.l &0x0,%fpsr # clear FPSR
+
+ fintrz.x SRC(%a0),%fp0 # execute fintrz
+
+ fmov.l %fpsr,%d0 # save FPSR
+ or.l %d0,USER_FPSR(%a6) # set exception bits
+
+ rts
+
+#
+# input is not normalized; what is it?
+#
+fintrz_not_norm:
+ cmpi.b %d1,&ZERO # weed out ZERO
+ beq.b fintrz_zero
+ cmpi.b %d1,&INF # weed out INF
+ beq.b fintrz_inf
+ cmpi.b %d1,&DENORM # weed out DENORM
+ beq.b fintrz_denorm
+ cmpi.b %d1,&SNAN # weed out SNAN
+ beq.l res_snan_1op
+ bra.l res_qnan_1op # weed out QNAN
+
+#
+# Denorm:
+#
+# for DENORMs, the result will be (+/-)ZERO.
+# also, the INEX2 and AINEX exception bits will be set.
+# so, we could either set these manually or force the DENORM
+# to a very small NORM and ship it to the NORM routine.
+# I do the latter.
+#
+fintrz_denorm:
+ mov.w SRC_EX(%a0),FP_SCR0_EX(%a6) # copy sign, zero exp
+ mov.b &0x80,FP_SCR0_HI(%a6) # force DENORM ==> small NORM
+ lea FP_SCR0(%a6),%a0
+ bra.b fintrz_norm
+
+#
+# Zero:
+#
+fintrz_zero:
+ tst.b SRC_EX(%a0) # is ZERO negative?
+ bmi.b fintrz_zero_m # yes
+fintrz_zero_p:
+ fmov.s &0x00000000,%fp0 # return +ZERO in fp0
+ mov.b &z_bmask,FPSR_CC(%a6) # set 'Z' ccode bit
+ rts
+fintrz_zero_m:
+ fmov.s &0x80000000,%fp0 # return -ZERO in fp0
+ mov.b &z_bmask+neg_bmask,FPSR_CC(%a6) # set 'Z','N' ccode bits
+ rts
+
+#
+# Infinity:
+#
+fintrz_inf:
+ fmovm.x SRC(%a0),&0x80 # return result in fp0
+ tst.b SRC_EX(%a0) # is INF negative?
+ bmi.b fintrz_inf_m # yes
+fintrz_inf_p:
+ mov.b &inf_bmask,FPSR_CC(%a6) # set 'I' ccode bit
+ rts
+fintrz_inf_m:
+ mov.b &inf_bmask+neg_bmask,FPSR_CC(%a6) # set 'N','I' ccode bits
+ rts
+
+#########################################################################
+# XDEF **************************************************************** #
+# fabs(): emulates the fabs instruction #
+# fsabs(): emulates the fsabs instruction #
+# fdabs(): emulates the fdabs instruction #
+# #
+# XREF **************************************************************** #
+# norm() - normalize denorm mantissa to provide EXOP #
+#	scale_to_zero_src() - make exponent = 0; get scale factor	#
+# unf_res() - calculate underflow result #
+# ovf_res() - calculate overflow result #
+# res_{s,q}nan_1op() - set NAN result for monadic operation #
+# #
+# INPUT *************************************************************** #
+# a0 = pointer to extended precision source operand #
+# d0 = rnd precision/mode #
+# #
+# OUTPUT ************************************************************** #
+# fp0 = result #
+# fp1 = EXOP (if exception occurred) #
+# #
+# ALGORITHM *********************************************************** #
+# Handle NANs, infinities, and zeroes as special cases. Divide #
+# norms into extended, single, and double precision. #
+# Simply clear sign for extended precision norm. Ext prec denorm #
+# gets an EXOP created for it since it's an underflow. #
+# Double and single precision can overflow and underflow. First, #
+# scale the operand such that the exponent is zero. Perform an "fabs" #
+# using the correct rnd mode/prec. Check to see if the original #
+# exponent would take an exception. If so, use unf_res() or ovf_res() #
+# to calculate the default result. Also, create the EXOP for the #
+# exceptional case. If no exception should occur, insert the correct #
+# result exponent and return. #
+# Unnorms don't pass through here. #
+# #
+#########################################################################
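+
+# note: in extended precision the absolute value below is just a clear
+# of sign bit 15 in the exponent word and is always exact; only the
+# sgl/dbl paths can round and therefore overflow or underflow.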
+
+ global fsabs
+fsabs:
+ andi.b &0x30,%d0 # clear rnd prec
+ ori.b &s_mode*0x10,%d0 # insert sgl precision
+ bra.b fabs
+
+ global fdabs
+fdabs:
+ andi.b &0x30,%d0 # clear rnd prec
+ ori.b &d_mode*0x10,%d0 # insert dbl precision
+
+ global fabs
+fabs:
+ mov.l %d0,L_SCR3(%a6) # store rnd info
+ mov.b STAG(%a6),%d1
+ bne.w fabs_not_norm # optimize on non-norm input
+
+#
+# ABSOLUTE VALUE: norms and denorms ONLY!
+#
+fabs_norm:
+ andi.b &0xc0,%d0 # is precision extended?
+ bne.b fabs_not_ext # no; go handle sgl or dbl
+
+#
+# precision selected is extended. so...we can not get an underflow
+# or overflow because of rounding to the correct precision. so...
+# skip the scaling and unscaling...
+#
+ mov.l SRC_HI(%a0),FP_SCR0_HI(%a6)
+ mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
+ mov.w SRC_EX(%a0),%d1
+ bclr &15,%d1 # force absolute value
+ mov.w %d1,FP_SCR0_EX(%a6) # insert exponent
+ fmovm.x FP_SCR0(%a6),&0x80 # return result in fp0
+ rts
+
+#
+# for an extended precision DENORM, the UNFL exception bit is set
+# the accrued bit is NOT set in this instance(no inexactness!)
+#
+fabs_denorm:
+ andi.b &0xc0,%d0 # is precision extended?
+ bne.b fabs_not_ext # no
+
+ bset &unfl_bit,FPSR_EXCEPT(%a6) # set unfl exc bit
+
+ mov.l SRC_HI(%a0),FP_SCR0_HI(%a6)
+ mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
+ mov.w SRC_EX(%a0),%d0
+ bclr &15,%d0 # clear sign
+ mov.w %d0,FP_SCR0_EX(%a6) # insert exponent
+
+ fmovm.x FP_SCR0(%a6),&0x80 # return default result in fp0
+
+ btst &unfl_bit,FPCR_ENABLE(%a6) # is UNFL enabled?
+ bne.b fabs_ext_unfl_ena
+ rts
+
+#
+# the input is an extended DENORM and underflow is enabled in the FPCR.
+# normalize the mantissa and add the bias of 0x6000 to the resulting negative
+# exponent and insert back into the operand.
+#
+fabs_ext_unfl_ena:
+ lea FP_SCR0(%a6),%a0 # pass: ptr to operand
+ bsr.l norm # normalize result
+ neg.w %d0 # new exponent = -(shft val)
+ addi.w &0x6000,%d0 # add new bias to exponent
+ mov.w FP_SCR0_EX(%a6),%d1 # fetch old sign,exp
+ andi.w &0x8000,%d1 # keep old sign
+ andi.w &0x7fff,%d0 # clear sign position
+ or.w %d1,%d0 # concat old sign, new exponent
+ mov.w %d0,FP_SCR0_EX(%a6) # insert new exponent
+ fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
+ rts
+
+#
+# operand is either single or double
+#
+fabs_not_ext:
+ cmpi.b %d0,&s_mode*0x10 # separate sgl/dbl prec
+ bne.b fabs_dbl
+
+#
+# operand is to be rounded to single precision
+#
+fabs_sgl:
+ mov.w SRC_EX(%a0),FP_SCR0_EX(%a6)
+ mov.l SRC_HI(%a0),FP_SCR0_HI(%a6)
+ mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
+ bsr.l scale_to_zero_src # calculate scale factor
+
+ cmpi.l %d0,&0x3fff-0x3f80 # will move in underflow?
+ bge.w fabs_sd_unfl # yes; go handle underflow
+ cmpi.l %d0,&0x3fff-0x407e # will move in overflow?
+ beq.w fabs_sd_may_ovfl # maybe; go check
+ blt.w fabs_sd_ovfl # yes; go handle overflow
+
+#
+# operand will NOT overflow or underflow when moved in to the fp reg file
+#
+fabs_sd_normal:
+ fmov.l &0x0,%fpsr # clear FPSR
+ fmov.l L_SCR3(%a6),%fpcr # set FPCR
+
+ fabs.x FP_SCR0(%a6),%fp0 # perform absolute
+
+ fmov.l %fpsr,%d1 # save FPSR
+ fmov.l &0x0,%fpcr # clear FPCR
+
+ or.l %d1,USER_FPSR(%a6) # save INEX2,N
+
+fabs_sd_normal_exit:
+ mov.l %d2,-(%sp) # save d2
+ fmovm.x &0x80,FP_SCR0(%a6) # store out result
+ mov.w FP_SCR0_EX(%a6),%d1 # load sgn,exp
+ mov.l %d1,%d2 # make a copy
+ andi.l &0x7fff,%d1 # strip sign
+ sub.l %d0,%d1 # add scale factor
+ andi.w &0x8000,%d2 # keep old sign
+ or.w %d1,%d2 # concat old sign,new exp
+ mov.w %d2,FP_SCR0_EX(%a6) # insert new exponent
+ mov.l (%sp)+,%d2 # restore d2
+ fmovm.x FP_SCR0(%a6),&0x80 # return result in fp0
+ rts
+
+#
+# operand is to be rounded to double precision
+#
+fabs_dbl:
+ mov.w SRC_EX(%a0),FP_SCR0_EX(%a6)
+ mov.l SRC_HI(%a0),FP_SCR0_HI(%a6)
+ mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
+ bsr.l scale_to_zero_src # calculate scale factor
+
+ cmpi.l %d0,&0x3fff-0x3c00 # will move in underflow?
+ bge.b fabs_sd_unfl # yes; go handle underflow
+ cmpi.l %d0,&0x3fff-0x43fe # will move in overflow?
+ beq.w fabs_sd_may_ovfl # maybe; go check
+ blt.w fabs_sd_ovfl # yes; go handle overflow
+	bra.w		fabs_sd_normal		# no; go handle normalized op
+
+#
+# operand WILL underflow when moved in to the fp register file
+#
+fabs_sd_unfl:
+ bset &unfl_bit,FPSR_EXCEPT(%a6) # set unfl exc bit
+
+ bclr &0x7,FP_SCR0_EX(%a6) # force absolute value
+
+# if underflow or inexact is enabled, go calculate EXOP first.
+ mov.b FPCR_ENABLE(%a6),%d1
+ andi.b &0x0b,%d1 # is UNFL or INEX enabled?
+ bne.b fabs_sd_unfl_ena # yes
+
+fabs_sd_unfl_dis:
+ lea FP_SCR0(%a6),%a0 # pass: result addr
+ mov.l L_SCR3(%a6),%d1 # pass: rnd prec,mode
+ bsr.l unf_res # calculate default result
+ or.b %d0,FPSR_CC(%a6) # set possible 'Z' ccode
+ fmovm.x FP_SCR0(%a6),&0x80 # return default result in fp0
+ rts
+
+#
+# operand will underflow AND underflow is enabled.
+# therefore, we must return the result rounded to extended precision.
+#
+fabs_sd_unfl_ena:
+ mov.l FP_SCR0_HI(%a6),FP_SCR1_HI(%a6)
+ mov.l FP_SCR0_LO(%a6),FP_SCR1_LO(%a6)
+ mov.w FP_SCR0_EX(%a6),%d1 # load current exponent
+
+ mov.l %d2,-(%sp) # save d2
+ mov.l %d1,%d2 # make a copy
+ andi.l &0x7fff,%d1 # strip sign
+ andi.w &0x8000,%d2 # keep old sign
+ sub.l %d0,%d1 # subtract scale factor
+ addi.l &0x6000,%d1 # add new bias
+ andi.w &0x7fff,%d1
+ or.w %d2,%d1 # concat new sign,new exp
+ mov.w %d1,FP_SCR1_EX(%a6) # insert new exp
+ fmovm.x FP_SCR1(%a6),&0x40 # return EXOP in fp1
+ mov.l (%sp)+,%d2 # restore d2
+ bra.b fabs_sd_unfl_dis
+
+#
+# operand WILL overflow.
+#
+fabs_sd_ovfl:
+ fmov.l &0x0,%fpsr # clear FPSR
+ fmov.l L_SCR3(%a6),%fpcr # set FPCR
+
+ fabs.x FP_SCR0(%a6),%fp0 # perform absolute
+
+ fmov.l &0x0,%fpcr # clear FPCR
+ fmov.l %fpsr,%d1 # save FPSR
+
+ or.l %d1,USER_FPSR(%a6) # save INEX2,N
+
+fabs_sd_ovfl_tst:
+ or.l &ovfl_inx_mask,USER_FPSR(%a6) # set ovfl/aovfl/ainex
+
+ mov.b FPCR_ENABLE(%a6),%d1
+ andi.b &0x13,%d1 # is OVFL or INEX enabled?
+ bne.b fabs_sd_ovfl_ena # yes
+
+#
+# OVFL is not enabled; therefore, we must create the default result by
+# calling ovf_res().
+#
+fabs_sd_ovfl_dis:
+ btst &neg_bit,FPSR_CC(%a6) # is result negative?
+ sne %d1 # set sign param accordingly
+ mov.l L_SCR3(%a6),%d0 # pass: prec,mode
+ bsr.l ovf_res # calculate default result
+ or.b %d0,FPSR_CC(%a6) # set INF,N if applicable
+ fmovm.x (%a0),&0x80 # return default result in fp0
+ rts
+
+#
+# OVFL is enabled.
+# the INEX2 bit has already been updated by the round to the correct precision.
+# now, round to extended(and don't alter the FPSR).
+#
+fabs_sd_ovfl_ena:
+ mov.l %d2,-(%sp) # save d2
+ mov.w FP_SCR0_EX(%a6),%d1 # fetch {sgn,exp}
+ mov.l %d1,%d2 # make a copy
+ andi.l &0x7fff,%d1 # strip sign
+ andi.w &0x8000,%d2 # keep old sign
+ sub.l %d0,%d1 # add scale factor
+ subi.l &0x6000,%d1 # subtract bias
+ andi.w &0x7fff,%d1
+ or.w %d2,%d1 # concat sign,exp
+ mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent
+ fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
+ mov.l (%sp)+,%d2 # restore d2
+ bra.b fabs_sd_ovfl_dis
+
+#
+# the move in MAY overflow. so...
+#
+fabs_sd_may_ovfl:
+ fmov.l &0x0,%fpsr # clear FPSR
+ fmov.l L_SCR3(%a6),%fpcr # set FPCR
+
+ fabs.x FP_SCR0(%a6),%fp0 # perform absolute
+
+ fmov.l %fpsr,%d1 # save status
+ fmov.l &0x0,%fpcr # clear FPCR
+
+ or.l %d1,USER_FPSR(%a6) # save INEX2,N
+
+ fabs.x %fp0,%fp1 # make a copy of result
+ fcmp.b %fp1,&0x2 # is |result| >= 2.b?
+ fbge.w fabs_sd_ovfl_tst # yes; overflow has occurred
+
+# no, it didn't overflow; we have correct result
+ bra.w fabs_sd_normal_exit
+
+##########################################################################
+
+#
+# input is not normalized; what is it?
+#
+fabs_not_norm:
+ cmpi.b %d1,&DENORM # weed out DENORM
+ beq.w fabs_denorm
+ cmpi.b %d1,&SNAN # weed out SNAN
+ beq.l res_snan_1op
+ cmpi.b %d1,&QNAN # weed out QNAN
+ beq.l res_qnan_1op
+
+ fabs.x SRC(%a0),%fp0 # force absolute value
+
+ cmpi.b %d1,&INF # weed out INF
+ beq.b fabs_inf
+fabs_zero:
+ mov.b &z_bmask,FPSR_CC(%a6) # set 'Z' ccode bit
+ rts
+fabs_inf:
+ mov.b &inf_bmask,FPSR_CC(%a6) # set 'I' ccode bit
+ rts
+
+#########################################################################
+# XDEF **************************************************************** #
+# fcmp(): fp compare op routine #
+# #
+# XREF **************************************************************** #
+# res_qnan() - return QNAN result #
+# res_snan() - return SNAN result #
+# #
+# INPUT *************************************************************** #
+# a0 = pointer to extended precision source operand #
+# a1 = pointer to extended precision destination operand #
+# d0 = round prec/mode #
+# #
+# OUTPUT ************************************************************** #
+# None #
+# #
+# ALGORITHM *********************************************************** #
+# Handle NANs and denorms as special cases. For everything else, #
+# just use the actual fcmp instruction to produce the correct condition #
+# codes. #
+# #
+#########################################################################
+
+ global fcmp
+fcmp:
+ clr.w %d1
+ mov.b DTAG(%a6),%d1
+ lsl.b &0x3,%d1
+ or.b STAG(%a6),%d1
+ bne.b fcmp_not_norm # optimize on non-norm input
+
+#
+# COMPARE FP OPs : NORMs, ZEROs, INFs, and "corrected" DENORMs
+#
+fcmp_norm:
+ fmovm.x DST(%a1),&0x80 # load dst op
+
+ fcmp.x %fp0,SRC(%a0) # do compare
+
+ fmov.l %fpsr,%d0 # save FPSR
+ rol.l &0x8,%d0 # extract ccode bits
+ mov.b %d0,FPSR_CC(%a6) # set ccode bits(no exc bits are set)
+
+ rts
+
+#
+# fcmp: inputs are not both normalized; what are they?
+#
+fcmp_not_norm:
+ mov.w (tbl_fcmp_op.b,%pc,%d1.w*2),%d1
+ jmp (tbl_fcmp_op.b,%pc,%d1.w*1)
+
+ swbeg &48
+tbl_fcmp_op:
+ short fcmp_norm - tbl_fcmp_op # NORM - NORM
+ short fcmp_norm - tbl_fcmp_op # NORM - ZERO
+ short fcmp_norm - tbl_fcmp_op # NORM - INF
+ short fcmp_res_qnan - tbl_fcmp_op # NORM - QNAN
+ short fcmp_nrm_dnrm - tbl_fcmp_op # NORM - DENORM
+ short fcmp_res_snan - tbl_fcmp_op # NORM - SNAN
+ short tbl_fcmp_op - tbl_fcmp_op #
+ short tbl_fcmp_op - tbl_fcmp_op #
+
+ short fcmp_norm - tbl_fcmp_op # ZERO - NORM
+ short fcmp_norm - tbl_fcmp_op # ZERO - ZERO
+ short fcmp_norm - tbl_fcmp_op # ZERO - INF
+ short fcmp_res_qnan - tbl_fcmp_op # ZERO - QNAN
+ short fcmp_dnrm_s - tbl_fcmp_op # ZERO - DENORM
+ short fcmp_res_snan - tbl_fcmp_op # ZERO - SNAN
+ short tbl_fcmp_op - tbl_fcmp_op #
+ short tbl_fcmp_op - tbl_fcmp_op #
+
+ short fcmp_norm - tbl_fcmp_op # INF - NORM
+ short fcmp_norm - tbl_fcmp_op # INF - ZERO
+ short fcmp_norm - tbl_fcmp_op # INF - INF
+ short fcmp_res_qnan - tbl_fcmp_op # INF - QNAN
+ short fcmp_dnrm_s - tbl_fcmp_op # INF - DENORM
+ short fcmp_res_snan - tbl_fcmp_op # INF - SNAN
+ short tbl_fcmp_op - tbl_fcmp_op #
+ short tbl_fcmp_op - tbl_fcmp_op #
+
+ short fcmp_res_qnan - tbl_fcmp_op # QNAN - NORM
+ short fcmp_res_qnan - tbl_fcmp_op # QNAN - ZERO
+ short fcmp_res_qnan - tbl_fcmp_op # QNAN - INF
+ short fcmp_res_qnan - tbl_fcmp_op # QNAN - QNAN
+ short fcmp_res_qnan - tbl_fcmp_op # QNAN - DENORM
+ short fcmp_res_snan - tbl_fcmp_op # QNAN - SNAN
+ short tbl_fcmp_op - tbl_fcmp_op #
+ short tbl_fcmp_op - tbl_fcmp_op #
+
+ short fcmp_dnrm_nrm - tbl_fcmp_op # DENORM - NORM
+ short fcmp_dnrm_d - tbl_fcmp_op # DENORM - ZERO
+ short fcmp_dnrm_d - tbl_fcmp_op # DENORM - INF
+ short fcmp_res_qnan - tbl_fcmp_op # DENORM - QNAN
+ short fcmp_dnrm_sd - tbl_fcmp_op # DENORM - DENORM
+ short fcmp_res_snan - tbl_fcmp_op # DENORM - SNAN
+ short tbl_fcmp_op - tbl_fcmp_op #
+ short tbl_fcmp_op - tbl_fcmp_op #
+
+ short fcmp_res_snan - tbl_fcmp_op # SNAN - NORM
+ short fcmp_res_snan - tbl_fcmp_op # SNAN - ZERO
+ short fcmp_res_snan - tbl_fcmp_op # SNAN - INF
+ short fcmp_res_snan - tbl_fcmp_op # SNAN - QNAN
+ short fcmp_res_snan - tbl_fcmp_op # SNAN - DENORM
+ short fcmp_res_snan - tbl_fcmp_op # SNAN - SNAN
+ short tbl_fcmp_op - tbl_fcmp_op #
+ short tbl_fcmp_op - tbl_fcmp_op #
+
+# unlike all other functions for QNAN and SNAN, fcmp does NOT set the
+# 'N' bit for a negative QNAN or SNAN input so we must squelch it here.
+fcmp_res_qnan:
+ bsr.l res_qnan
+ andi.b &0xf7,FPSR_CC(%a6)
+ rts
+fcmp_res_snan:
+ bsr.l res_snan
+ andi.b &0xf7,FPSR_CC(%a6)
+ rts
+
+#
+# DENORMs are a little more difficult.
+# If you have 2 DENORMs, then you can just force each j-bit to a one
+# and use the fcmp_norm routine.
+# If you have a DENORM and an INF or ZERO, just force the DENORM's j-bit to a one
+# and use the fcmp_norm routine.
+# If you have a DENORM and a NORM with opposite signs, then use fcmp_norm, also.
+# But with a DENORM and a NORM of the same sign, the neg bit is set if:
+# (1) signs are (+) and the DENORM is the dst, or
+# (2) signs are (-) and the DENORM is the src.
+#
+
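+# example for the same-sign cases: with dst = +DENORM and src = +NORM,
+# dst - src is negative, so 'N' must be set (case 1); with dst = -NORM
+# and src = -DENORM, dst - src is again negative, so 'N' is set
+# (case 2).  fcmp_nrm_dnrm/fcmp_dnrm_nrm below compute this directly
+# from the sign bytes.
+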
+fcmp_dnrm_s:
+ mov.w SRC_EX(%a0),FP_SCR0_EX(%a6)
+ mov.l SRC_HI(%a0),%d0
+ bset &31,%d0 # DENORM src; make into small norm
+ mov.l %d0,FP_SCR0_HI(%a6)
+ mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
+ lea FP_SCR0(%a6),%a0
+ bra.w fcmp_norm
+
+fcmp_dnrm_d:
+ mov.l DST_EX(%a1),FP_SCR0_EX(%a6)
+ mov.l DST_HI(%a1),%d0
+	bset		&31,%d0			# DENORM dst; make into small norm
+ mov.l %d0,FP_SCR0_HI(%a6)
+ mov.l DST_LO(%a1),FP_SCR0_LO(%a6)
+ lea FP_SCR0(%a6),%a1
+ bra.w fcmp_norm
+
+fcmp_dnrm_sd:
+ mov.w DST_EX(%a1),FP_SCR1_EX(%a6)
+ mov.w SRC_EX(%a0),FP_SCR0_EX(%a6)
+ mov.l DST_HI(%a1),%d0
+ bset &31,%d0 # DENORM dst; make into small norm
+ mov.l %d0,FP_SCR1_HI(%a6)
+ mov.l SRC_HI(%a0),%d0
+	bset		&31,%d0			# DENORM src; make into small norm
+ mov.l %d0,FP_SCR0_HI(%a6)
+ mov.l DST_LO(%a1),FP_SCR1_LO(%a6)
+ mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
+ lea FP_SCR1(%a6),%a1
+ lea FP_SCR0(%a6),%a0
+ bra.w fcmp_norm
+
+fcmp_nrm_dnrm:
+ mov.b SRC_EX(%a0),%d0 # determine if like signs
+ mov.b DST_EX(%a1),%d1
+ eor.b %d0,%d1
+ bmi.w fcmp_dnrm_s
+
+# signs are the same, so must determine the answer ourselves.
+ tst.b %d0 # is src op negative?
+ bmi.b fcmp_nrm_dnrm_m # yes
+ rts
+fcmp_nrm_dnrm_m:
+	mov.b		&neg_bmask,FPSR_CC(%a6) # set 'N' ccode bit
+ rts
+
+fcmp_dnrm_nrm:
+ mov.b SRC_EX(%a0),%d0 # determine if like signs
+ mov.b DST_EX(%a1),%d1
+ eor.b %d0,%d1
+ bmi.w fcmp_dnrm_d
+
+# signs are the same, so must determine the answer ourselves.
+ tst.b %d0 # is src op negative?
+ bpl.b fcmp_dnrm_nrm_m # no
+ rts
+fcmp_dnrm_nrm_m:
+	mov.b		&neg_bmask,FPSR_CC(%a6) # set 'N' ccode bit
+ rts
+
+#########################################################################
+# XDEF **************************************************************** #
+# fsglmul(): emulates the fsglmul instruction #
+# #
+# XREF **************************************************************** #
+# scale_to_zero_src() - scale src exponent to zero #
+# scale_to_zero_dst() - scale dst exponent to zero #
+# unf_res4() - return default underflow result for sglop #
+# ovf_res() - return default overflow result #
+# res_qnan() - return QNAN result #
+# res_snan() - return SNAN result #
+# #
+# INPUT *************************************************************** #
+# a0 = pointer to extended precision source operand #
+# a1 = pointer to extended precision destination operand #
+#	d0 = rnd prec,mode						#
+# #
+# OUTPUT ************************************************************** #
+# fp0 = result #
+# fp1 = EXOP (if exception occurred) #
+# #
+# ALGORITHM *********************************************************** #
+# Handle NANs, infinities, and zeroes as special cases. Divide #
+# norms/denorms into ext/sgl/dbl precision. #
+# For norms/denorms, scale the exponents such that a multiply #
+# instruction won't cause an exception. Use the regular fsglmul to #
+# compute a result. Check if the regular operands would have taken #
+# an exception. If so, return the default overflow/underflow result #
+# and return the EXOP if exceptions are enabled. Else, scale the #
+# result operand to the proper exponent. #
+# #
+#########################################################################
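+
+# note on the range checks below: after scaling, both operands have a
+# true exponent of zero, so the product's exponent is 0 or 1 before
+# unscaling.  a scale factor below 0x3fff-0x7ffe therefore guarantees
+# overflow and one above 0x3fff+0x0001 guarantees underflow; the
+# "equal" cases depend on rounding, so the multiply is performed and
+# the result checked.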
+
+ global fsglmul
+fsglmul:
+ mov.l %d0,L_SCR3(%a6) # store rnd info
+
+ clr.w %d1
+ mov.b DTAG(%a6),%d1
+ lsl.b &0x3,%d1
+ or.b STAG(%a6),%d1
+
+ bne.w fsglmul_not_norm # optimize on non-norm input
+
+fsglmul_norm:
+ mov.w DST_EX(%a1),FP_SCR1_EX(%a6)
+ mov.l DST_HI(%a1),FP_SCR1_HI(%a6)
+ mov.l DST_LO(%a1),FP_SCR1_LO(%a6)
+
+ mov.w SRC_EX(%a0),FP_SCR0_EX(%a6)
+ mov.l SRC_HI(%a0),FP_SCR0_HI(%a6)
+ mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
+
+ bsr.l scale_to_zero_src # scale exponent
+ mov.l %d0,-(%sp) # save scale factor 1
+
+ bsr.l scale_to_zero_dst # scale dst exponent
+
+ add.l (%sp)+,%d0 # SCALE_FACTOR = scale1 + scale2
+
+ cmpi.l %d0,&0x3fff-0x7ffe # would result ovfl?
+ beq.w fsglmul_may_ovfl # result may rnd to overflow
+ blt.w fsglmul_ovfl # result will overflow
+
+ cmpi.l %d0,&0x3fff+0x0001 # would result unfl?
+ beq.w fsglmul_may_unfl # result may rnd to no unfl
+ bgt.w fsglmul_unfl # result will underflow
+
+fsglmul_normal:
+ fmovm.x FP_SCR1(%a6),&0x80 # load dst op
+
+ fmov.l L_SCR3(%a6),%fpcr # set FPCR
+ fmov.l &0x0,%fpsr # clear FPSR
+
+ fsglmul.x FP_SCR0(%a6),%fp0 # execute sgl multiply
+
+ fmov.l %fpsr,%d1 # save status
+ fmov.l &0x0,%fpcr # clear FPCR
+
+ or.l %d1,USER_FPSR(%a6) # save INEX2,N
+
+fsglmul_normal_exit:
+ fmovm.x &0x80,FP_SCR0(%a6) # store out result
+ mov.l %d2,-(%sp) # save d2
+ mov.w FP_SCR0_EX(%a6),%d1 # load {sgn,exp}
+ mov.l %d1,%d2 # make a copy
+ andi.l &0x7fff,%d1 # strip sign
+ andi.w &0x8000,%d2 # keep old sign
+ sub.l %d0,%d1 # add scale factor
+ or.w %d2,%d1 # concat old sign,new exp
+ mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent
+ mov.l (%sp)+,%d2 # restore d2
+ fmovm.x FP_SCR0(%a6),&0x80 # return result in fp0
+ rts
+
+fsglmul_ovfl:
+ fmovm.x FP_SCR1(%a6),&0x80 # load dst op
+
+ fmov.l L_SCR3(%a6),%fpcr # set FPCR
+ fmov.l &0x0,%fpsr # clear FPSR
+
+ fsglmul.x FP_SCR0(%a6),%fp0 # execute sgl multiply
+
+ fmov.l %fpsr,%d1 # save status
+ fmov.l &0x0,%fpcr # clear FPCR
+
+ or.l %d1,USER_FPSR(%a6) # save INEX2,N
+
+fsglmul_ovfl_tst:
+
+# save setting this until now because this is where fsglmul_may_ovfl may jump in
+ or.l &ovfl_inx_mask, USER_FPSR(%a6) # set ovfl/aovfl/ainex
+
+ mov.b FPCR_ENABLE(%a6),%d1
+ andi.b &0x13,%d1 # is OVFL or INEX enabled?
+ bne.b fsglmul_ovfl_ena # yes
+
+fsglmul_ovfl_dis:
+ btst &neg_bit,FPSR_CC(%a6) # is result negative?
+ sne %d1 # set sign param accordingly
+ mov.l L_SCR3(%a6),%d0 # pass prec:rnd
+ andi.b &0x30,%d0 # force prec = ext
+ bsr.l ovf_res # calculate default result
+ or.b %d0,FPSR_CC(%a6) # set INF,N if applicable
+ fmovm.x (%a0),&0x80 # return default result in fp0
+ rts
+
+fsglmul_ovfl_ena:
+ fmovm.x &0x80,FP_SCR0(%a6) # move result to stack
+
+ mov.l %d2,-(%sp) # save d2
+ mov.w FP_SCR0_EX(%a6),%d1 # fetch {sgn,exp}
+ mov.l %d1,%d2 # make a copy
+ andi.l &0x7fff,%d1 # strip sign
+ sub.l %d0,%d1 # add scale factor
+ subi.l &0x6000,%d1 # subtract bias
+ andi.w &0x7fff,%d1
+ andi.w &0x8000,%d2 # keep old sign
+ or.w %d2,%d1 # concat old sign,new exp
+ mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent
+ mov.l (%sp)+,%d2 # restore d2
+ fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
+ bra.b fsglmul_ovfl_dis
+
+fsglmul_may_ovfl:
+ fmovm.x FP_SCR1(%a6),&0x80 # load dst op
+
+ fmov.l L_SCR3(%a6),%fpcr # set FPCR
+ fmov.l &0x0,%fpsr # clear FPSR
+
+ fsglmul.x FP_SCR0(%a6),%fp0 # execute sgl multiply
+
+ fmov.l %fpsr,%d1 # save status
+ fmov.l &0x0,%fpcr # clear FPCR
+
+ or.l %d1,USER_FPSR(%a6) # save INEX2,N
+
+ fabs.x %fp0,%fp1 # make a copy of result
+ fcmp.b %fp1,&0x2 # is |result| >= 2.b?
+ fbge.w fsglmul_ovfl_tst # yes; overflow has occurred
+
+# no, it didn't overflow; we have correct result
+ bra.w fsglmul_normal_exit
+
+fsglmul_unfl:
+ bset &unfl_bit,FPSR_EXCEPT(%a6) # set unfl exc bit
+
+ fmovm.x FP_SCR1(%a6),&0x80 # load dst op
+
+ fmov.l &rz_mode*0x10,%fpcr # set FPCR
+ fmov.l &0x0,%fpsr # clear FPSR
+
+ fsglmul.x FP_SCR0(%a6),%fp0 # execute sgl multiply
+
+ fmov.l %fpsr,%d1 # save status
+ fmov.l &0x0,%fpcr # clear FPCR
+
+ or.l %d1,USER_FPSR(%a6) # save INEX2,N
+
+ mov.b FPCR_ENABLE(%a6),%d1
+ andi.b &0x0b,%d1 # is UNFL or INEX enabled?
+ bne.b fsglmul_unfl_ena # yes
+
+fsglmul_unfl_dis:
+ fmovm.x &0x80,FP_SCR0(%a6) # store out result
+
+ lea FP_SCR0(%a6),%a0 # pass: result addr
+ mov.l L_SCR3(%a6),%d1 # pass: rnd prec,mode
+ bsr.l unf_res4 # calculate default result
+ or.b %d0,FPSR_CC(%a6) # 'Z' bit may have been set
+ fmovm.x FP_SCR0(%a6),&0x80 # return default result in fp0
+ rts
+
+#
+# UNFL is enabled.
+#
+fsglmul_unfl_ena:
+ fmovm.x FP_SCR1(%a6),&0x40 # load dst op
+
+ fmov.l L_SCR3(%a6),%fpcr # set FPCR
+ fmov.l &0x0,%fpsr # clear FPSR
+
+ fsglmul.x FP_SCR0(%a6),%fp1 # execute sgl multiply
+
+ fmov.l &0x0,%fpcr # clear FPCR
+
+ fmovm.x &0x40,FP_SCR0(%a6) # save result to stack
+ mov.l %d2,-(%sp) # save d2
+ mov.w FP_SCR0_EX(%a6),%d1 # fetch {sgn,exp}
+ mov.l %d1,%d2 # make a copy
+ andi.l &0x7fff,%d1 # strip sign
+ andi.w &0x8000,%d2 # keep old sign
+ sub.l %d0,%d1 # add scale factor
+ addi.l &0x6000,%d1 # add bias
+ andi.w &0x7fff,%d1
+ or.w %d2,%d1 # concat old sign,new exp
+ mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent
+ mov.l (%sp)+,%d2 # restore d2
+ fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
+ bra.w fsglmul_unfl_dis
+
+fsglmul_may_unfl:
+ fmovm.x FP_SCR1(%a6),&0x80 # load dst op
+
+ fmov.l L_SCR3(%a6),%fpcr # set FPCR
+ fmov.l &0x0,%fpsr # clear FPSR
+
+ fsglmul.x FP_SCR0(%a6),%fp0 # execute sgl multiply
+
+ fmov.l %fpsr,%d1 # save status
+ fmov.l &0x0,%fpcr # clear FPCR
+
+ or.l %d1,USER_FPSR(%a6) # save INEX2,N
+
+ fabs.x %fp0,%fp1 # make a copy of result
+ fcmp.b %fp1,&0x2 # is |result| > 2.b?
+ fbgt.w fsglmul_normal_exit # no; no underflow occurred
+ fblt.w fsglmul_unfl # yes; underflow occurred
+
+#
+# we still don't know if underflow occurred. result is ~ equal to 2. but,
+# we don't know if the result was an underflow that rounded up to a 2 or
+# a normalized number that rounded down to a 2. so, redo the entire operation
+# using RZ as the rounding mode to see what the pre-rounded result is.
+# this case should be relatively rare.
+#
+ fmovm.x FP_SCR1(%a6),&0x40 # load dst op into fp1
+
+ mov.l L_SCR3(%a6),%d1
+ andi.b &0xc0,%d1 # keep rnd prec
+ ori.b &rz_mode*0x10,%d1 # insert RZ
+
+ fmov.l %d1,%fpcr # set FPCR
+ fmov.l &0x0,%fpsr # clear FPSR
+
+ fsglmul.x FP_SCR0(%a6),%fp1 # execute sgl multiply
+
+ fmov.l &0x0,%fpcr # clear FPCR
+ fabs.x %fp1 # make absolute value
+ fcmp.b %fp1,&0x2 # is |result| < 2.b?
+ fbge.w fsglmul_normal_exit # no; no underflow occurred
+ bra.w fsglmul_unfl # yes, underflow occurred
+
+##############################################################################
+
+#
+# Single Precision Multiply: inputs are not both normalized; what are they?
+#
+fsglmul_not_norm:
+ mov.w (tbl_fsglmul_op.b,%pc,%d1.w*2),%d1
+ jmp (tbl_fsglmul_op.b,%pc,%d1.w*1)
+
+ swbeg &48
+tbl_fsglmul_op:
+ short fsglmul_norm - tbl_fsglmul_op # NORM x NORM
+ short fsglmul_zero - tbl_fsglmul_op # NORM x ZERO
+ short fsglmul_inf_src - tbl_fsglmul_op # NORM x INF
+ short fsglmul_res_qnan - tbl_fsglmul_op # NORM x QNAN
+ short fsglmul_norm - tbl_fsglmul_op # NORM x DENORM
+ short fsglmul_res_snan - tbl_fsglmul_op # NORM x SNAN
+ short tbl_fsglmul_op - tbl_fsglmul_op #
+ short tbl_fsglmul_op - tbl_fsglmul_op #
+
+ short fsglmul_zero - tbl_fsglmul_op # ZERO x NORM
+ short fsglmul_zero - tbl_fsglmul_op # ZERO x ZERO
+ short fsglmul_res_operr - tbl_fsglmul_op # ZERO x INF
+ short fsglmul_res_qnan - tbl_fsglmul_op # ZERO x QNAN
+ short fsglmul_zero - tbl_fsglmul_op # ZERO x DENORM
+ short fsglmul_res_snan - tbl_fsglmul_op # ZERO x SNAN
+ short tbl_fsglmul_op - tbl_fsglmul_op #
+ short tbl_fsglmul_op - tbl_fsglmul_op #
+
+ short fsglmul_inf_dst - tbl_fsglmul_op # INF x NORM
+ short fsglmul_res_operr - tbl_fsglmul_op # INF x ZERO
+ short fsglmul_inf_dst - tbl_fsglmul_op # INF x INF
+ short fsglmul_res_qnan - tbl_fsglmul_op # INF x QNAN
+ short fsglmul_inf_dst - tbl_fsglmul_op # INF x DENORM
+ short fsglmul_res_snan - tbl_fsglmul_op # INF x SNAN
+ short tbl_fsglmul_op - tbl_fsglmul_op #
+ short tbl_fsglmul_op - tbl_fsglmul_op #
+
+ short fsglmul_res_qnan - tbl_fsglmul_op # QNAN x NORM
+ short fsglmul_res_qnan - tbl_fsglmul_op # QNAN x ZERO
+ short fsglmul_res_qnan - tbl_fsglmul_op # QNAN x INF
+ short fsglmul_res_qnan - tbl_fsglmul_op # QNAN x QNAN
+ short fsglmul_res_qnan - tbl_fsglmul_op # QNAN x DENORM
+ short fsglmul_res_snan - tbl_fsglmul_op # QNAN x SNAN
+ short tbl_fsglmul_op - tbl_fsglmul_op #
+ short tbl_fsglmul_op - tbl_fsglmul_op #
+
+	short		fsglmul_norm		- tbl_fsglmul_op # DENORM x NORM
+	short		fsglmul_zero		- tbl_fsglmul_op # DENORM x ZERO
+	short		fsglmul_inf_src		- tbl_fsglmul_op # DENORM x INF
+	short		fsglmul_res_qnan	- tbl_fsglmul_op # DENORM x QNAN
+	short		fsglmul_norm		- tbl_fsglmul_op # DENORM x DENORM
+	short		fsglmul_res_snan	- tbl_fsglmul_op # DENORM x SNAN
+ short tbl_fsglmul_op - tbl_fsglmul_op #
+ short tbl_fsglmul_op - tbl_fsglmul_op #
+
+ short fsglmul_res_snan - tbl_fsglmul_op # SNAN x NORM
+ short fsglmul_res_snan - tbl_fsglmul_op # SNAN x ZERO
+ short fsglmul_res_snan - tbl_fsglmul_op # SNAN x INF
+ short fsglmul_res_snan - tbl_fsglmul_op # SNAN x QNAN
+ short fsglmul_res_snan - tbl_fsglmul_op # SNAN x DENORM
+ short fsglmul_res_snan - tbl_fsglmul_op # SNAN x SNAN
+ short tbl_fsglmul_op - tbl_fsglmul_op #
+ short tbl_fsglmul_op - tbl_fsglmul_op #
+
+fsglmul_res_operr:
+ bra.l res_operr
+fsglmul_res_snan:
+ bra.l res_snan
+fsglmul_res_qnan:
+ bra.l res_qnan
+fsglmul_zero:
+ bra.l fmul_zero
+fsglmul_inf_src:
+ bra.l fmul_inf_src
+fsglmul_inf_dst:
+ bra.l fmul_inf_dst
+
+#########################################################################
+# XDEF **************************************************************** #
+# fsgldiv(): emulates the fsgldiv instruction #
+# #
+# XREF **************************************************************** #
+# scale_to_zero_src() - scale src exponent to zero #
+# scale_to_zero_dst() - scale dst exponent to zero #
+# unf_res4() - return default underflow result for sglop #
+# ovf_res() - return default overflow result #
+# res_qnan() - return QNAN result #
+# res_snan() - return SNAN result #
+# #
+# INPUT *************************************************************** #
+# a0 = pointer to extended precision source operand #
+# a1 = pointer to extended precision destination operand #
+#	d0 = rnd prec,mode						#
+# #
+# OUTPUT ************************************************************** #
+# fp0 = result #
+# fp1 = EXOP (if exception occurred) #
+# #
+# ALGORITHM *********************************************************** #
+# Handle NANs, infinities, and zeroes as special cases. Divide #
+# norms/denorms into ext/sgl/dbl precision. #
+# For norms/denorms, scale the exponents such that a divide #
+# instruction won't cause an exception. Use the regular fsgldiv to #
+# compute a result. Check if the regular operands would have taken #
+# an exception. If so, return the default overflow/underflow result #
+# and return the EXOP if exceptions are enabled. Else, scale the #
+# result operand to the proper exponent. #
+# #
+#########################################################################
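+
+# note: unlike fdiv above, fsgldiv resolves its "may overflow" case
+# after the divide by unscaling the result exponent and comparing it
+# against 0x7fff directly (see fsgldiv_may_ovfl below).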
+
+ global fsgldiv
+fsgldiv:
+ mov.l %d0,L_SCR3(%a6) # store rnd info
+
+ clr.w %d1
+ mov.b DTAG(%a6),%d1
+ lsl.b &0x3,%d1
+ or.b STAG(%a6),%d1 # combine src tags
+
+ bne.w fsgldiv_not_norm # optimize on non-norm input
+
+#
+# DIVIDE: NORMs and DENORMs ONLY!
+#
+fsgldiv_norm:
+ mov.w DST_EX(%a1),FP_SCR1_EX(%a6)
+ mov.l DST_HI(%a1),FP_SCR1_HI(%a6)
+ mov.l DST_LO(%a1),FP_SCR1_LO(%a6)
+
+ mov.w SRC_EX(%a0),FP_SCR0_EX(%a6)
+ mov.l SRC_HI(%a0),FP_SCR0_HI(%a6)
+ mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
+
+ bsr.l scale_to_zero_src # calculate scale factor 1
+ mov.l %d0,-(%sp) # save scale factor 1
+
+ bsr.l scale_to_zero_dst # calculate scale factor 2
+
+ neg.l (%sp) # S.F. = scale2 - scale1
+ add.l %d0,(%sp)
+
+ mov.w 2+L_SCR3(%a6),%d1 # fetch precision,mode
+ lsr.b &0x6,%d1
+ mov.l (%sp)+,%d0
+ cmpi.l %d0,&0x3fff-0x7ffe # will result overflow?
+ ble.w fsgldiv_may_ovfl # maybe; go check
+
+ cmpi.l %d0,&0x3fff-0x0000 # will result underflow?
+ beq.w fsgldiv_may_unfl # maybe
+ bgt.w fsgldiv_unfl # yes; go handle underflow
+
+fsgldiv_normal:
+ fmovm.x FP_SCR1(%a6),&0x80 # load dst op
+
+ fmov.l L_SCR3(%a6),%fpcr # save FPCR
+ fmov.l &0x0,%fpsr # clear FPSR
+
+ fsgldiv.x FP_SCR0(%a6),%fp0 # perform sgl divide
+
+ fmov.l %fpsr,%d1 # save FPSR
+ fmov.l &0x0,%fpcr # clear FPCR
+
+ or.l %d1,USER_FPSR(%a6) # save INEX2,N
+
+fsgldiv_normal_exit:
+ fmovm.x &0x80,FP_SCR0(%a6) # store result on stack
+ mov.l %d2,-(%sp) # save d2
+ mov.w FP_SCR0_EX(%a6),%d1 # load {sgn,exp}
+ mov.l %d1,%d2 # make a copy
+ andi.l &0x7fff,%d1 # strip sign
+ andi.w &0x8000,%d2 # keep old sign
+ sub.l %d0,%d1 # subtract scale factor
+ or.w %d2,%d1 # concat old sign,new exp
+ mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent
+ mov.l (%sp)+,%d2 # restore d2
+ fmovm.x FP_SCR0(%a6),&0x80 # return result in fp0
+ rts
+
+fsgldiv_may_ovfl:
+ fmovm.x FP_SCR1(%a6),&0x80 # load dst op
+
+ fmov.l L_SCR3(%a6),%fpcr # set FPCR
+ fmov.l &0x0,%fpsr # set FPSR
+
+ fsgldiv.x FP_SCR0(%a6),%fp0 # execute divide
+
+ fmov.l %fpsr,%d1
+ fmov.l &0x0,%fpcr
+
+ or.l %d1,USER_FPSR(%a6) # save INEX,N
+
+ fmovm.x &0x01,-(%sp) # save result to stack
+ mov.w (%sp),%d1 # fetch new exponent
+ add.l &0xc,%sp # discard result from stack
+ andi.l &0x7fff,%d1 # strip sign
+ sub.l %d0,%d1 # subtract scale factor
+ cmp.l %d1,&0x7fff # did divide overflow?
+ blt.b fsgldiv_normal_exit
+
+fsgldiv_ovfl_tst:
+ or.w &ovfl_inx_mask,2+USER_FPSR(%a6) # set ovfl/aovfl/ainex
+
+ mov.b FPCR_ENABLE(%a6),%d1
+ andi.b &0x13,%d1 # is OVFL or INEX enabled?
+ bne.b fsgldiv_ovfl_ena # yes
+
+fsgldiv_ovfl_dis:
+ btst &neg_bit,FPSR_CC(%a6) # is result negative
+ sne %d1 # set sign param accordingly
+ mov.l L_SCR3(%a6),%d0 # pass prec:rnd
+ andi.b &0x30,%d0 # kill precision
+ bsr.l ovf_res # calculate default result
+ or.b %d0,FPSR_CC(%a6) # set INF if applicable
+ fmovm.x (%a0),&0x80 # return default result in fp0
+ rts
+
+fsgldiv_ovfl_ena:
+ fmovm.x &0x80,FP_SCR0(%a6) # move result to stack
+
+ mov.l %d2,-(%sp) # save d2
+ mov.w FP_SCR0_EX(%a6),%d1 # fetch {sgn,exp}
+ mov.l %d1,%d2 # make a copy
+ andi.l &0x7fff,%d1 # strip sign
+ andi.w &0x8000,%d2 # keep old sign
+ sub.l %d0,%d1 # subtract scale factor
+ subi.l &0x6000,%d1 # subtract new bias
+ andi.w &0x7fff,%d1 # clear ms bit
+ or.w %d2,%d1 # concat old sign,new exp
+ mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent
+ mov.l (%sp)+,%d2 # restore d2
+ fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
+ bra.b fsgldiv_ovfl_dis
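+#
+# note on the &0x6000 adjustments here and in fsgldiv_unfl_ena: the EXOP
+# is returned w/ a "wrapped" exponent, re-biased by 0x6000 (24576) so it
+# fits back into the extended-precision range: subtract 0x6000 after an
+# overflow, add 0x6000 after an underflow. for example, a true biased
+# exponent of 0x8123 wraps to 0x8123 - 0x6000 = 0x2123.
+#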
+
+fsgldiv_unfl:
+ bset &unfl_bit,FPSR_EXCEPT(%a6) # set unfl exc bit
+
+ fmovm.x FP_SCR1(%a6),&0x80 # load dst op
+
+ fmov.l &rz_mode*0x10,%fpcr # set FPCR
+ fmov.l &0x0,%fpsr # clear FPSR
+
+ fsgldiv.x FP_SCR0(%a6),%fp0 # execute sgl divide
+
+ fmov.l %fpsr,%d1 # save status
+ fmov.l &0x0,%fpcr # clear FPCR
+
+ or.l %d1,USER_FPSR(%a6) # save INEX2,N
+
+ mov.b FPCR_ENABLE(%a6),%d1
+ andi.b &0x0b,%d1 # is UNFL or INEX enabled?
+ bne.b fsgldiv_unfl_ena # yes
+
+fsgldiv_unfl_dis:
+ fmovm.x &0x80,FP_SCR0(%a6) # store out result
+
+ lea FP_SCR0(%a6),%a0 # pass: result addr
+ mov.l L_SCR3(%a6),%d1 # pass: rnd prec,mode
+ bsr.l unf_res4 # calculate default result
+ or.b %d0,FPSR_CC(%a6) # 'Z' bit may have been set
+ fmovm.x FP_SCR0(%a6),&0x80 # return default result in fp0
+ rts
+
+#
+# UNFL is enabled.
+#
+fsgldiv_unfl_ena:
+ fmovm.x FP_SCR1(%a6),&0x40 # load dst op
+
+ fmov.l L_SCR3(%a6),%fpcr # set FPCR
+ fmov.l &0x0,%fpsr # clear FPSR
+
+ fsgldiv.x FP_SCR0(%a6),%fp1 # execute sgl divide
+
+ fmov.l &0x0,%fpcr # clear FPCR
+
+ fmovm.x &0x40,FP_SCR0(%a6) # save result to stack
+ mov.l %d2,-(%sp) # save d2
+ mov.w FP_SCR0_EX(%a6),%d1 # fetch {sgn,exp}
+ mov.l %d1,%d2 # make a copy
+ andi.l &0x7fff,%d1 # strip sign
+ andi.w &0x8000,%d2 # keep old sign
+ sub.l %d0,%d1 # subtract scale factor
+ addi.l &0x6000,%d1 # add bias
+ andi.w &0x7fff,%d1 # clear top bit
+ or.w %d2,%d1 # concat old sign, new exp
+ mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent
+ mov.l (%sp)+,%d2 # restore d2
+ fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
+ bra.b fsgldiv_unfl_dis
+
+#
+# the divide operation MAY underflow:
+#
+fsgldiv_may_unfl:
+ fmovm.x FP_SCR1(%a6),&0x80 # load dst op
+
+ fmov.l L_SCR3(%a6),%fpcr # set FPCR
+ fmov.l &0x0,%fpsr # clear FPSR
+
+ fsgldiv.x FP_SCR0(%a6),%fp0 # execute sgl divide
+
+ fmov.l %fpsr,%d1 # save status
+ fmov.l &0x0,%fpcr # clear FPCR
+
+ or.l %d1,USER_FPSR(%a6) # save INEX2,N
+
+ fabs.x %fp0,%fp1 # make a copy of result
+ fcmp.b %fp1,&0x1 # is |result| < 1.b?
+ fbgt.w fsgldiv_normal_exit # no; no underflow occurred
+ fblt.w fsgldiv_unfl # yes; underflow occurred
+
+#
+# we still don't know if underflow occurred. result is ~ equal to 1. but,
+# we don't know if the result was an underflow that rounded up to a 1
+# or a normalized number that rounded down to a 1. so, redo the entire
+# operation using RZ as the rounding mode to see what the pre-rounded
+# result is. this case should be relatively rare.
+#
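+# the same test in self-contained C, as an illustrative sketch (the 1.0
+# bound stands in for the scaled underflow boundary used here):
+#
+#	#include <fenv.h>
+#	#include <math.h>
+#	/* redo the divide under round-to-zero: RZ can never round a
+#	   quotient below the boundary up onto it, so the RZ result
+#	   tells us which side the exact quotient was on */
+#	static int div_underflowed(double x, double y)
+#	{
+#		int old = fegetround();
+#		fesetround(FE_TOWARDZERO);
+#		double q = x / y;
+#		fesetround(old);
+#		return fabs(q) < 1.0;	/* 1 -> a true underflow */
+#	}
+#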
+ fmovm.x FP_SCR1(%a6),&0x40 # load dst op into %fp1
+
+ clr.l %d1 # clear scratch register
+ ori.b &rz_mode*0x10,%d1 # force RZ rnd mode
+
+ fmov.l %d1,%fpcr # set FPCR
+ fmov.l &0x0,%fpsr # clear FPSR
+
+ fsgldiv.x FP_SCR0(%a6),%fp1 # execute sgl divide
+
+ fmov.l &0x0,%fpcr # clear FPCR
+ fabs.x %fp1 # make absolute value
+ fcmp.b %fp1,&0x1 # is |result| < 1.b?
+ fbge.w fsgldiv_normal_exit # no; no underflow occurred
+ bra.w fsgldiv_unfl # yes; underflow occurred
+
+############################################################################
+
+#
+# Divide: inputs are not both normalized; what are they?
+#
+fsgldiv_not_norm:
+ mov.w (tbl_fsgldiv_op.b,%pc,%d1.w*2),%d1
+ jmp (tbl_fsgldiv_op.b,%pc,%d1.w*1)
+
+ swbeg &48
+tbl_fsgldiv_op:
+ short fsgldiv_norm - tbl_fsgldiv_op # NORM / NORM
+ short fsgldiv_inf_load - tbl_fsgldiv_op # NORM / ZERO
+ short fsgldiv_zero_load - tbl_fsgldiv_op # NORM / INF
+ short fsgldiv_res_qnan - tbl_fsgldiv_op # NORM / QNAN
+ short fsgldiv_norm - tbl_fsgldiv_op # NORM / DENORM
+ short fsgldiv_res_snan - tbl_fsgldiv_op # NORM / SNAN
+ short tbl_fsgldiv_op - tbl_fsgldiv_op #
+ short tbl_fsgldiv_op - tbl_fsgldiv_op #
+
+ short fsgldiv_zero_load - tbl_fsgldiv_op # ZERO / NORM
+ short fsgldiv_res_operr - tbl_fsgldiv_op # ZERO / ZERO
+ short fsgldiv_zero_load - tbl_fsgldiv_op # ZERO / INF
+ short fsgldiv_res_qnan - tbl_fsgldiv_op # ZERO / QNAN
+ short fsgldiv_zero_load - tbl_fsgldiv_op # ZERO / DENORM
+ short fsgldiv_res_snan - tbl_fsgldiv_op # ZERO / SNAN
+ short tbl_fsgldiv_op - tbl_fsgldiv_op #
+ short tbl_fsgldiv_op - tbl_fsgldiv_op #
+
+ short fsgldiv_inf_dst - tbl_fsgldiv_op # INF / NORM
+ short fsgldiv_inf_dst - tbl_fsgldiv_op # INF / ZERO
+ short fsgldiv_res_operr - tbl_fsgldiv_op # INF / INF
+ short fsgldiv_res_qnan - tbl_fsgldiv_op # INF / QNAN
+ short fsgldiv_inf_dst - tbl_fsgldiv_op # INF / DENORM
+ short fsgldiv_res_snan - tbl_fsgldiv_op # INF / SNAN
+ short tbl_fsgldiv_op - tbl_fsgldiv_op #
+ short tbl_fsgldiv_op - tbl_fsgldiv_op #
+
+ short fsgldiv_res_qnan - tbl_fsgldiv_op # QNAN / NORM
+ short fsgldiv_res_qnan - tbl_fsgldiv_op # QNAN / ZERO
+ short fsgldiv_res_qnan - tbl_fsgldiv_op # QNAN / INF
+ short fsgldiv_res_qnan - tbl_fsgldiv_op # QNAN / QNAN
+ short fsgldiv_res_qnan - tbl_fsgldiv_op # QNAN / DENORM
+ short fsgldiv_res_snan - tbl_fsgldiv_op # QNAN / SNAN
+ short tbl_fsgldiv_op - tbl_fsgldiv_op #
+ short tbl_fsgldiv_op - tbl_fsgldiv_op #
+
+ short fsgldiv_norm - tbl_fsgldiv_op # DENORM / NORM
+ short fsgldiv_inf_load - tbl_fsgldiv_op # DENORM / ZERO
+ short fsgldiv_zero_load - tbl_fsgldiv_op # DENORM / INF
+ short fsgldiv_res_qnan - tbl_fsgldiv_op # DENORM / QNAN
+ short fsgldiv_norm - tbl_fsgldiv_op # DENORM / DENORM
+ short fsgldiv_res_snan - tbl_fsgldiv_op # DENORM / SNAN
+ short tbl_fsgldiv_op - tbl_fsgldiv_op #
+ short tbl_fsgldiv_op - tbl_fsgldiv_op #
+
+ short fsgldiv_res_snan - tbl_fsgldiv_op # SNAN / NORM
+ short fsgldiv_res_snan - tbl_fsgldiv_op # SNAN / ZERO
+ short fsgldiv_res_snan - tbl_fsgldiv_op # SNAN / INF
+ short fsgldiv_res_snan - tbl_fsgldiv_op # SNAN / QNAN
+ short fsgldiv_res_snan - tbl_fsgldiv_op # SNAN / DENORM
+ short fsgldiv_res_snan - tbl_fsgldiv_op # SNAN / SNAN
+ short tbl_fsgldiv_op - tbl_fsgldiv_op #
+ short tbl_fsgldiv_op - tbl_fsgldiv_op #
+
+fsgldiv_res_qnan:
+ bra.l res_qnan
+fsgldiv_res_snan:
+ bra.l res_snan
+fsgldiv_res_operr:
+ bra.l res_operr
+fsgldiv_inf_load:
+ bra.l fdiv_inf_load
+fsgldiv_zero_load:
+ bra.l fdiv_zero_load
+fsgldiv_inf_dst:
+ bra.l fdiv_inf_dst
+
+#########################################################################
+# XDEF **************************************************************** #
+# fadd(): emulates the fadd instruction #
+# fsadd(): emulates the fsadd instruction #
+# fdadd(): emulates the fdadd instruction #
+# #
+# XREF **************************************************************** #
+# addsub_scaler2() - scale the operands so they won't take exc #
+# ovf_res() - return default overflow result #
+# unf_res() - return default underflow result #
+# res_qnan() - set QNAN result #
+# res_snan() - set SNAN result #
+# res_operr() - set OPERR result #
+# scale_to_zero_src() - set src operand exponent equal to zero #
+# scale_to_zero_dst() - set dst operand exponent equal to zero #
+# #
+# INPUT *************************************************************** #
+# a0 = pointer to extended precision source operand #
+# a1 = pointer to extended precision destination operand #
+# #
+# OUTPUT ************************************************************** #
+# fp0 = result #
+# fp1 = EXOP (if exception occurred) #
+# #
+# ALGORITHM *********************************************************** #
+# Handle NANs, infinities, and zeroes as special cases. Divide #
+# norms into extended, single, and double precision. #
+# Do addition after scaling exponents such that exception won't #
+# occur. Then, check result exponent to see if exception would have #
+# occurred. If so, return default result and maybe EXOP. Else, insert #
+# the correct result exponent and return. Set FPSR bits as appropriate. #
+# #
+#########################################################################
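+#
+# the flow below in outline (illustrative pseudo-C; biased_exp() is a
+# hypothetical accessor, the other names are the XREFs above):
+#
+#	sf = addsub_scaler2(src, dst);		/* common scale factor */
+#	res = dst + src;			/* cannot trap now */
+#	exp = biased_exp(res) - sf;		/* "true" result exponent */
+#	if (exp >= tbl_fadd_ovfl[prec])		/* tables are below */
+#		return default ovfl result (and EXOP if enabled);
+#	if (exp < tbl_fadd_unfl[prec])
+#		return default unfl result (and EXOP if enabled);
+#	if (exp == tbl_fadd_unfl[prec])
+#		check further (fadd_may_unfl);
+#	insert exp into res and return it in fp0;
+#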
+
+ global fsadd
+fsadd:
+ andi.b &0x30,%d0 # clear rnd prec
+ ori.b &s_mode*0x10,%d0 # insert sgl prec
+ bra.b fadd
+
+ global fdadd
+fdadd:
+ andi.b &0x30,%d0 # clear rnd prec
+ ori.b &d_mode*0x10,%d0 # insert dbl prec
+
+ global fadd
+fadd:
+ mov.l %d0,L_SCR3(%a6) # store rnd info
+
+ clr.w %d1
+ mov.b DTAG(%a6),%d1
+ lsl.b &0x3,%d1
+ or.b STAG(%a6),%d1 # combine src tags
+
+ bne.w fadd_not_norm # optimize on non-norm input
+
+#
+# ADD: norms and denorms
+#
+fadd_norm:
+ bsr.l addsub_scaler2 # scale exponents
+
+fadd_zero_entry:
+ fmovm.x FP_SCR1(%a6),&0x80 # load dst op
+
+ fmov.l &0x0,%fpsr # clear FPSR
+ fmov.l L_SCR3(%a6),%fpcr # set FPCR
+
+ fadd.x FP_SCR0(%a6),%fp0 # execute add
+
+ fmov.l &0x0,%fpcr # clear FPCR
+ fmov.l %fpsr,%d1 # fetch INEX2,N,Z
+
+ or.l %d1,USER_FPSR(%a6) # save exc and ccode bits
+
+ fbeq.w fadd_zero_exit # if result is zero, end now
+
+ mov.l %d2,-(%sp) # save d2
+
+ fmovm.x &0x01,-(%sp) # save result to stack
+
+ mov.w 2+L_SCR3(%a6),%d1
+ lsr.b &0x6,%d1
+
+ mov.w (%sp),%d2 # fetch new sign, exp
+ andi.l &0x7fff,%d2 # strip sign
+ sub.l %d0,%d2 # subtract scale factor
+
+ cmp.l %d2,(tbl_fadd_ovfl.b,%pc,%d1.w*4) # is it an overflow?
+ bge.b fadd_ovfl # yes
+
+ cmp.l %d2,(tbl_fadd_unfl.b,%pc,%d1.w*4) # is it an underflow?
+ blt.w fadd_unfl # yes
+ beq.w fadd_may_unfl # maybe; go find out
+
+fadd_normal:
+ mov.w (%sp),%d1
+ andi.w &0x8000,%d1 # keep sign
+ or.w %d2,%d1 # concat sign,new exp
+ mov.w %d1,(%sp) # insert new exponent
+
+ fmovm.x (%sp)+,&0x80 # return result in fp0
+
+ mov.l (%sp)+,%d2 # restore d2
+ rts
+
+fadd_zero_exit:
+# fmov.s &0x00000000,%fp0 # return zero in fp0
+ rts
+
+tbl_fadd_ovfl:
+ long 0x7fff # ext ovfl
+ long 0x407f # sgl ovfl
+ long 0x43ff # dbl ovfl
+
+tbl_fadd_unfl:
+ long 0x0000 # ext unfl
+ long 0x3f81 # sgl unfl
+ long 0x3c01 # dbl unfl
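+
+# (the table values are the extended-precision biased exponents at the
+# single/double limits: sgl ovfl = 0x3fff + 0x80 = 0x407f (Emax 127
+# exceeded) and sgl unfl = 0x3fff - 0x7e = 0x3f81 (Emin -126); dbl ovfl
+# = 0x3fff + 0x400 = 0x43ff (Emax 1023 exceeded) and dbl unfl =
+# 0x3fff - 0x3fe = 0x3c01 (Emin -1022).)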
+
+fadd_ovfl:
+ or.l &ovfl_inx_mask,USER_FPSR(%a6) # set ovfl/aovfl/ainex
+
+ mov.b FPCR_ENABLE(%a6),%d1
+ andi.b &0x13,%d1 # is OVFL or INEX enabled?
+ bne.b fadd_ovfl_ena # yes
+
+ add.l &0xc,%sp
+fadd_ovfl_dis:
+ btst &neg_bit,FPSR_CC(%a6) # is result negative?
+ sne %d1 # set sign param accordingly
+ mov.l L_SCR3(%a6),%d0 # pass prec:rnd
+ bsr.l ovf_res # calculate default result
+ or.b %d0,FPSR_CC(%a6) # set INF,N if applicable
+ fmovm.x (%a0),&0x80 # return default result in fp0
+ mov.l (%sp)+,%d2 # restore d2
+ rts
+
+fadd_ovfl_ena:
+ mov.b L_SCR3(%a6),%d1
+ andi.b &0xc0,%d1 # is precision extended?
+ bne.b fadd_ovfl_ena_sd # no; prec = sgl or dbl
+
+fadd_ovfl_ena_cont:
+ mov.w (%sp),%d1
+ andi.w &0x8000,%d1 # keep sign
+ subi.l &0x6000,%d2 # subtract new bias
+ andi.w &0x7fff,%d2
+ or.w %d2,%d1 # concat sign,new exp
+ mov.w %d1,(%sp) # insert new exponent
+
+ fmovm.x (%sp)+,&0x40 # return EXOP in fp1
+ bra.b fadd_ovfl_dis
+
+fadd_ovfl_ena_sd:
+ fmovm.x FP_SCR1(%a6),&0x80 # load dst op
+
+ mov.l L_SCR3(%a6),%d1
+ andi.b &0x30,%d1 # keep rnd mode
+ fmov.l %d1,%fpcr # set FPCR
+
+ fadd.x FP_SCR0(%a6),%fp0 # execute add
+
+ fmov.l &0x0,%fpcr # clear FPCR
+
+ add.l &0xc,%sp
+ fmovm.x &0x01,-(%sp)
+ bra.b fadd_ovfl_ena_cont
+
+fadd_unfl:
+ bset &unfl_bit,FPSR_EXCEPT(%a6) # set unfl exc bit
+
+ add.l &0xc,%sp
+
+ fmovm.x FP_SCR1(%a6),&0x80 # load dst op
+
+ fmov.l &rz_mode*0x10,%fpcr # set FPCR
+ fmov.l &0x0,%fpsr # clear FPSR
+
+ fadd.x FP_SCR0(%a6),%fp0 # execute add
+
+ fmov.l &0x0,%fpcr # clear FPCR
+ fmov.l %fpsr,%d1 # save status
+
+ or.l %d1,USER_FPSR(%a6) # save INEX,N
+
+ mov.b FPCR_ENABLE(%a6),%d1
+ andi.b &0x0b,%d1 # is UNFL or INEX enabled?
+ bne.b fadd_unfl_ena # yes
+
+fadd_unfl_dis:
+ fmovm.x &0x80,FP_SCR0(%a6) # store out result
+
+ lea FP_SCR0(%a6),%a0 # pass: result addr
+ mov.l L_SCR3(%a6),%d1 # pass: rnd prec,mode
+ bsr.l unf_res # calculate default result
+ or.b %d0,FPSR_CC(%a6) # 'Z' bit may have been set
+ fmovm.x FP_SCR0(%a6),&0x80 # return default result in fp0
+ mov.l (%sp)+,%d2 # restore d2
+ rts
+
+fadd_unfl_ena:
+ fmovm.x FP_SCR1(%a6),&0x40 # load dst op
+
+ mov.l L_SCR3(%a6),%d1
+ andi.b &0xc0,%d1 # is precision extended?
+ bne.b fadd_unfl_ena_sd # no; sgl or dbl
+
+ fmov.l L_SCR3(%a6),%fpcr # set FPCR
+
+fadd_unfl_ena_cont:
+ fmov.l &0x0,%fpsr # clear FPSR
+
+ fadd.x FP_SCR0(%a6),%fp1 # execute add
+
+ fmov.l &0x0,%fpcr # clear FPCR
+
+ fmovm.x &0x40,FP_SCR0(%a6) # save result to stack
+ mov.w FP_SCR0_EX(%a6),%d1 # fetch {sgn,exp}
+ mov.l %d1,%d2 # make a copy
+ andi.l &0x7fff,%d1 # strip sign
+ andi.w &0x8000,%d2 # keep old sign
+ sub.l %d0,%d1 # subtract scale factor
+ addi.l &0x6000,%d1 # add new bias
+ andi.w &0x7fff,%d1 # clear top bit
+ or.w %d2,%d1 # concat sign,new exp
+ mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent
+ fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
+ bra.w fadd_unfl_dis
+
+fadd_unfl_ena_sd:
+ mov.l L_SCR3(%a6),%d1
+ andi.b &0x30,%d1 # use only rnd mode
+ fmov.l %d1,%fpcr # set FPCR
+
+ bra.b fadd_unfl_ena_cont
+
+#
+# result is equal to the smallest normalized number in the selected precision
+# if the precision is extended, this result could not have come from an
+# underflow that rounded up.
+#
+fadd_may_unfl:
+ mov.l L_SCR3(%a6),%d1
+ andi.b &0xc0,%d1 # is precision extended?
+ beq.w fadd_normal # yes; no underflow occurred
+
+ mov.l 0x4(%sp),%d1 # extract hi(man)
+ cmpi.l %d1,&0x80000000 # is hi(man) = 0x80000000?
+ bne.w fadd_normal # no; no underflow occurred
+
+ tst.l 0x8(%sp) # is lo(man) = 0x0?
+ bne.w fadd_normal # no; no underflow occurred
+
+ btst &inex2_bit,FPSR_EXCEPT(%a6) # is INEX2 set?
+ beq.w fadd_normal # no; no underflow occurred
+
+#
+# ok, so now the result has an exponent equal to the smallest normalized
+# exponent for the selected precision. also, the mantissa is equal to
+# 0x8000000000000000 and this mantissa is the result of rounding non-zero
+# g,r,s.
+# now, we must determine whether the pre-rounded result was an underflow
+# rounded "up" or a normalized number rounded "down".
+# so, we do this by re-executing the add using RZ as the rounding mode and
+# seeing if the new result is smaller or equal to the current result.
+#
+ fmovm.x FP_SCR1(%a6),&0x40 # load dst op into fp1
+
+ mov.l L_SCR3(%a6),%d1
+ andi.b &0xc0,%d1 # keep rnd prec
+ ori.b &rz_mode*0x10,%d1 # insert rnd mode
+ fmov.l %d1,%fpcr # set FPCR
+ fmov.l &0x0,%fpsr # clear FPSR
+
+ fadd.x FP_SCR0(%a6),%fp1 # execute add
+
+ fmov.l &0x0,%fpcr # clear FPCR
+
+ fabs.x %fp0 # compare absolute values
+ fabs.x %fp1
+ fcmp.x %fp0,%fp1 # is first result > second?
+
+ fbgt.w fadd_unfl # yes; it's an underflow
+ bra.w fadd_normal # no; it's not an underflow
+
+##########################################################################
+
+#
+# Add: inputs are not both normalized; what are they?
+#
+fadd_not_norm:
+ mov.w (tbl_fadd_op.b,%pc,%d1.w*2),%d1
+ jmp (tbl_fadd_op.b,%pc,%d1.w*1)
+
+ swbeg &48
+tbl_fadd_op:
+ short fadd_norm - tbl_fadd_op # NORM + NORM
+ short fadd_zero_src - tbl_fadd_op # NORM + ZERO
+ short fadd_inf_src - tbl_fadd_op # NORM + INF
+ short fadd_res_qnan - tbl_fadd_op # NORM + QNAN
+ short fadd_norm - tbl_fadd_op # NORM + DENORM
+ short fadd_res_snan - tbl_fadd_op # NORM + SNAN
+ short tbl_fadd_op - tbl_fadd_op #
+ short tbl_fadd_op - tbl_fadd_op #
+
+ short fadd_zero_dst - tbl_fadd_op # ZERO + NORM
+ short fadd_zero_2 - tbl_fadd_op # ZERO + ZERO
+ short fadd_inf_src - tbl_fadd_op # ZERO + INF
+ short fadd_res_qnan - tbl_fadd_op # ZERO + QNAN
+ short fadd_zero_dst - tbl_fadd_op # ZERO + DENORM
+ short fadd_res_snan - tbl_fadd_op # ZERO + SNAN
+ short tbl_fadd_op - tbl_fadd_op #
+ short tbl_fadd_op - tbl_fadd_op #
+
+ short fadd_inf_dst - tbl_fadd_op # INF + NORM
+ short fadd_inf_dst - tbl_fadd_op # INF + ZERO
+ short fadd_inf_2 - tbl_fadd_op # INF + INF
+ short fadd_res_qnan - tbl_fadd_op # INF + QNAN
+ short fadd_inf_dst - tbl_fadd_op # INF + DENORM
+ short fadd_res_snan - tbl_fadd_op # INF + SNAN
+ short tbl_fadd_op - tbl_fadd_op #
+ short tbl_fadd_op - tbl_fadd_op #
+
+ short fadd_res_qnan - tbl_fadd_op # QNAN + NORM
+ short fadd_res_qnan - tbl_fadd_op # QNAN + ZERO
+ short fadd_res_qnan - tbl_fadd_op # QNAN + INF
+ short fadd_res_qnan - tbl_fadd_op # QNAN + QNAN
+ short fadd_res_qnan - tbl_fadd_op # QNAN + DENORM
+ short fadd_res_snan - tbl_fadd_op # QNAN + SNAN
+ short tbl_fadd_op - tbl_fadd_op #
+ short tbl_fadd_op - tbl_fadd_op #
+
+ short fadd_norm - tbl_fadd_op # DENORM + NORM
+ short fadd_zero_src - tbl_fadd_op # DENORM + ZERO
+ short fadd_inf_src - tbl_fadd_op # DENORM + INF
+ short fadd_res_qnan - tbl_fadd_op # DENORM + QNAN
+ short fadd_norm - tbl_fadd_op # DENORM + DENORM
+ short fadd_res_snan - tbl_fadd_op # DENORM + SNAN
+ short tbl_fadd_op - tbl_fadd_op #
+ short tbl_fadd_op - tbl_fadd_op #
+
+ short fadd_res_snan - tbl_fadd_op # SNAN + NORM
+ short fadd_res_snan - tbl_fadd_op # SNAN + ZERO
+ short fadd_res_snan - tbl_fadd_op # SNAN + INF
+ short fadd_res_snan - tbl_fadd_op # SNAN + QNAN
+ short fadd_res_snan - tbl_fadd_op # SNAN + DENORM
+ short fadd_res_snan - tbl_fadd_op # SNAN + SNAN
+ short tbl_fadd_op - tbl_fadd_op #
+ short tbl_fadd_op - tbl_fadd_op #
+
+fadd_res_qnan:
+ bra.l res_qnan
+fadd_res_snan:
+ bra.l res_snan
+
+#
+# both operands are ZEROes
+#
+fadd_zero_2:
+ mov.b SRC_EX(%a0),%d0 # are the signs opposite
+ mov.b DST_EX(%a1),%d1
+ eor.b %d0,%d1
+ bmi.w fadd_zero_2_chk_rm # weed out (-ZERO)+(+ZERO)
+
+# the signs are the same. so determine whether they are positive or negative
+# and return the appropriately signed zero.
+ tst.b %d0 # are ZEROes positive or negative?
+ bmi.b fadd_zero_rm # negative
+ fmov.s &0x00000000,%fp0 # return +ZERO
+ mov.b &z_bmask,FPSR_CC(%a6) # set Z
+ rts
+
+#
+# the ZEROes have opposite signs:
+# - therefore, we return +ZERO if the rounding mode is RN, RZ, or RP.
+# - -ZERO is returned in the case of RM.
+#
+fadd_zero_2_chk_rm:
+ mov.b 3+L_SCR3(%a6),%d1
+ andi.b &0x30,%d1 # extract rnd mode
+ cmpi.b %d1,&rm_mode*0x10 # is rnd mode == RM?
+ beq.b fadd_zero_rm # yes
+ fmov.s &0x00000000,%fp0 # return +ZERO
+ mov.b &z_bmask,FPSR_CC(%a6) # set Z
+ rts
+
+fadd_zero_rm:
+ fmov.s &0x80000000,%fp0 # return -ZERO
+ mov.b &neg_bmask+z_bmask,FPSR_CC(%a6) # set NEG/Z
+ rts
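+
+#
+# this is the IEEE 754 rule for exact-zero sums: opposite-signed zeroes
+# add to +0 in every rounding mode except RM, where the sum is -0. the
+# same rule can be checked w/ a short host-side C program (illustrative
+# sketch only):
+#
+#	#include <fenv.h>
+#	#include <math.h>
+#	int main(void)
+#	{
+#		volatile double a = 0.0, b = -0.0;
+#		fesetround(FE_DOWNWARD);	/* RM */
+#		return signbit(a + b);		/* exits w/ 1: sum is -0 */
+#	}
+#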
+
+#
+# one operand is a ZERO and the other is a DENORM or NORM. scale
+# the DENORM or NORM and jump to the regular fadd routine.
+#
+fadd_zero_dst:
+ mov.w SRC_EX(%a0),FP_SCR0_EX(%a6)
+ mov.l SRC_HI(%a0),FP_SCR0_HI(%a6)
+ mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
+ bsr.l scale_to_zero_src # scale the operand
+ clr.w FP_SCR1_EX(%a6)
+ clr.l FP_SCR1_HI(%a6)
+ clr.l FP_SCR1_LO(%a6)
+ bra.w fadd_zero_entry # go execute fadd
+
+fadd_zero_src:
+ mov.w DST_EX(%a1),FP_SCR1_EX(%a6)
+ mov.l DST_HI(%a1),FP_SCR1_HI(%a6)
+ mov.l DST_LO(%a1),FP_SCR1_LO(%a6)
+ bsr.l scale_to_zero_dst # scale the operand
+ clr.w FP_SCR0_EX(%a6)
+ clr.l FP_SCR0_HI(%a6)
+ clr.l FP_SCR0_LO(%a6)
+ bra.w fadd_zero_entry # go execute fadd
+
+#
+# both operands are INFs. an OPERR will result if the INFs have
+# different signs. else, an INF of the same sign is returned
+#
+fadd_inf_2:
+ mov.b SRC_EX(%a0),%d0 # exclusive or the signs
+ mov.b DST_EX(%a1),%d1
+ eor.b %d1,%d0
+ bmi.l res_operr # weed out (-INF)+(+INF)
+
+# ok, so it's not an OPERR. but, we do have to remember to return the
+# src INF since that's where the 881/882 gets the j-bit from...
+
+#
+# operands are INF and one of {ZERO, INF, DENORM, NORM}
+#
+fadd_inf_src:
+ fmovm.x SRC(%a0),&0x80 # return src INF
+ tst.b SRC_EX(%a0) # is INF positive?
+ bpl.b fadd_inf_done # yes; we're done
+ mov.b &neg_bmask+inf_bmask,FPSR_CC(%a6) # set INF/NEG
+ rts
+
+#
+# operands are INF and one of {ZERO, INF, DENORM, NORM}
+#
+fadd_inf_dst:
+ fmovm.x DST(%a1),&0x80 # return dst INF
+ tst.b DST_EX(%a1) # is INF positive?
+ bpl.b fadd_inf_done # yes; we're done
+ mov.b &neg_bmask+inf_bmask,FPSR_CC(%a6) # set INF/NEG
+ rts
+
+fadd_inf_done:
+ mov.b &inf_bmask,FPSR_CC(%a6) # set INF
+ rts
+
+#########################################################################
+# XDEF **************************************************************** #
+# fsub(): emulates the fsub instruction #
+# fssub(): emulates the fssub instruction #
+# fdsub(): emulates the fdsub instruction #
+# #
+# XREF **************************************************************** #
+# addsub_scaler2() - scale the operands so they won't take exc #
+# ovf_res() - return default overflow result #
+# unf_res() - return default underflow result #
+# res_qnan() - set QNAN result #
+# res_snan() - set SNAN result #
+# res_operr() - set OPERR result #
+# scale_to_zero_src() - set src operand exponent equal to zero #
+# scale_to_zero_dst() - set dst operand exponent equal to zero #
+# #
+# INPUT *************************************************************** #
+# a0 = pointer to extended precision source operand #
+# a1 = pointer to extended precision destination operand #
+# #
+# OUTPUT ************************************************************** #
+# fp0 = result #
+# fp1 = EXOP (if exception occurred) #
+# #
+# ALGORITHM *********************************************************** #
+# Handle NANs, infinities, and zeroes as special cases. Divide #
+# norms into extended, single, and double precision. #
+# Do subtraction after scaling exponents such that exception won't #
+# occur. Then, check result exponent to see if exception would have #
+# occurred. If so, return default result and maybe EXOP. Else, insert #
+# the correct result exponent and return. Set FPSR bits as appropriate. #
+# #
+#########################################################################
+
+ global fssub
+fssub:
+ andi.b &0x30,%d0 # clear rnd prec
+ ori.b &s_mode*0x10,%d0 # insert sgl prec
+ bra.b fsub
+
+ global fdsub
+fdsub:
+ andi.b &0x30,%d0 # clear rnd prec
+ ori.b &d_mode*0x10,%d0 # insert dbl prec
+
+ global fsub
+fsub:
+ mov.l %d0,L_SCR3(%a6) # store rnd info
+
+ clr.w %d1
+ mov.b DTAG(%a6),%d1
+ lsl.b &0x3,%d1
+ or.b STAG(%a6),%d1 # combine src tags
+
+ bne.w fsub_not_norm # optimize on non-norm input
+
+#
+# SUB: norms and denorms
+#
+fsub_norm:
+ bsr.l addsub_scaler2 # scale exponents
+
+fsub_zero_entry:
+ fmovm.x FP_SCR1(%a6),&0x80 # load dst op
+
+ fmov.l &0x0,%fpsr # clear FPSR
+ fmov.l L_SCR3(%a6),%fpcr # set FPCR
+
+ fsub.x FP_SCR0(%a6),%fp0 # execute subtract
+
+ fmov.l &0x0,%fpcr # clear FPCR
+ fmov.l %fpsr,%d1 # fetch INEX2, N, Z
+
+ or.l %d1,USER_FPSR(%a6) # save exc and ccode bits
+
+ fbeq.w fsub_zero_exit # if result zero, end now
+
+ mov.l %d2,-(%sp) # save d2
+
+ fmovm.x &0x01,-(%sp) # save result to stack
+
+ mov.w 2+L_SCR3(%a6),%d1
+ lsr.b &0x6,%d1
+
+ mov.w (%sp),%d2 # fetch new exponent
+ andi.l &0x7fff,%d2 # strip sign
+ sub.l %d0,%d2 # subtract scale factor
+
+ cmp.l %d2,(tbl_fsub_ovfl.b,%pc,%d1.w*4) # is it an overflow?
+ bge.b fsub_ovfl # yes
+
+ cmp.l %d2,(tbl_fsub_unfl.b,%pc,%d1.w*4) # is it an underflow?
+ blt.w fsub_unfl # yes
+ beq.w fsub_may_unfl # maybe; go find out
+
+fsub_normal:
+ mov.w (%sp),%d1
+ andi.w &0x8000,%d1 # keep sign
+ or.w %d2,%d1 # concat sign,new exp
+ mov.w %d1,(%sp) # insert new exponent
+
+ fmovm.x (%sp)+,&0x80 # return result in fp0
+
+ mov.l (%sp)+,%d2 # restore d2
+ rts
+
+fsub_zero_exit:
+# fmov.s &0x00000000,%fp0 # return zero in fp0
+ rts
+
+tbl_fsub_ovfl:
+ long 0x7fff # ext ovfl
+ long 0x407f # sgl ovfl
+ long 0x43ff # dbl ovfl
+
+tbl_fsub_unfl:
+ long 0x0000 # ext unfl
+ long 0x3f81 # sgl unfl
+ long 0x3c01 # dbl unfl
+
+fsub_ovfl:
+ or.l &ovfl_inx_mask,USER_FPSR(%a6) # set ovfl/aovfl/ainex
+
+ mov.b FPCR_ENABLE(%a6),%d1
+ andi.b &0x13,%d1 # is OVFL or INEX enabled?
+ bne.b fsub_ovfl_ena # yes
+
+ add.l &0xc,%sp
+fsub_ovfl_dis:
+ btst &neg_bit,FPSR_CC(%a6) # is result negative?
+ sne %d1 # set sign param accordingly
+ mov.l L_SCR3(%a6),%d0 # pass prec:rnd
+ bsr.l ovf_res # calculate default result
+ or.b %d0,FPSR_CC(%a6) # set INF,N if applicable
+ fmovm.x (%a0),&0x80 # return default result in fp0
+ mov.l (%sp)+,%d2 # restore d2
+ rts
+
+fsub_ovfl_ena:
+ mov.b L_SCR3(%a6),%d1
+ andi.b &0xc0,%d1 # is precision extended?
+ bne.b fsub_ovfl_ena_sd # no
+
+fsub_ovfl_ena_cont:
+ mov.w (%sp),%d1 # fetch {sgn,exp}
+ andi.w &0x8000,%d1 # keep sign
+ subi.l &0x6000,%d2 # subtract new bias
+ andi.w &0x7fff,%d2 # clear top bit
+ or.w %d2,%d1 # concat sign,exp
+ mov.w %d1,(%sp) # insert new exponent
+
+ fmovm.x (%sp)+,&0x40 # return EXOP in fp1
+ bra.b fsub_ovfl_dis
+
+fsub_ovfl_ena_sd:
+ fmovm.x FP_SCR1(%a6),&0x80 # load dst op
+
+ mov.l L_SCR3(%a6),%d1
+ andi.b &0x30,%d1 # clear rnd prec
+ fmov.l %d1,%fpcr # set FPCR
+
+ fsub.x FP_SCR0(%a6),%fp0 # execute subtract
+
+ fmov.l &0x0,%fpcr # clear FPCR
+
+ add.l &0xc,%sp
+ fmovm.x &0x01,-(%sp)
+ bra.b fsub_ovfl_ena_cont
+
+fsub_unfl:
+ bset &unfl_bit,FPSR_EXCEPT(%a6) # set unfl exc bit
+
+ add.l &0xc,%sp
+
+ fmovm.x FP_SCR1(%a6),&0x80 # load dst op
+
+ fmov.l &rz_mode*0x10,%fpcr # set FPCR
+ fmov.l &0x0,%fpsr # clear FPSR
+
+ fsub.x FP_SCR0(%a6),%fp0 # execute subtract
+
+ fmov.l &0x0,%fpcr # clear FPCR
+ fmov.l %fpsr,%d1 # save status
+
+ or.l %d1,USER_FPSR(%a6)
+
+ mov.b FPCR_ENABLE(%a6),%d1
+ andi.b &0x0b,%d1 # is UNFL or INEX enabled?
+ bne.b fsub_unfl_ena # yes
+
+fsub_unfl_dis:
+ fmovm.x &0x80,FP_SCR0(%a6) # store out result
+
+ lea FP_SCR0(%a6),%a0 # pass: result addr
+ mov.l L_SCR3(%a6),%d1 # pass: rnd prec,mode
+ bsr.l unf_res # calculate default result
+ or.b %d0,FPSR_CC(%a6) # 'Z' may have been set
+ fmovm.x FP_SCR0(%a6),&0x80 # return default result in fp0
+ mov.l (%sp)+,%d2 # restore d2
+ rts
+
+fsub_unfl_ena:
+ fmovm.x FP_SCR1(%a6),&0x40
+
+ mov.l L_SCR3(%a6),%d1
+ andi.b &0xc0,%d1 # is precision extended?
+ bne.b fsub_unfl_ena_sd # no
+
+ fmov.l L_SCR3(%a6),%fpcr # set FPCR
+
+fsub_unfl_ena_cont:
+ fmov.l &0x0,%fpsr # clear FPSR
+
+ fsub.x FP_SCR0(%a6),%fp1 # execute subtract
+
+ fmov.l &0x0,%fpcr # clear FPCR
+
+ fmovm.x &0x40,FP_SCR0(%a6) # store result to stack
+ mov.w FP_SCR0_EX(%a6),%d1 # fetch {sgn,exp}
+ mov.l %d1,%d2 # make a copy
+ andi.l &0x7fff,%d1 # strip sign
+ andi.w &0x8000,%d2 # keep old sign
+ sub.l %d0,%d1 # subtract scale factor
+ addi.l &0x6000,%d1 # add new bias
+ andi.w &0x7fff,%d1 # clear top bit
+ or.w %d2,%d1 # concat sgn,exp
+ mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent
+ fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
+ bra.w fsub_unfl_dis
+
+fsub_unfl_ena_sd:
+ mov.l L_SCR3(%a6),%d1
+ andi.b &0x30,%d1 # clear rnd prec
+ fmov.l %d1,%fpcr # set FPCR
+
+ bra.b fsub_unfl_ena_cont
+
+#
+# result is equal to the smallest normalized number in the selected precision
+# if the precision is extended, this result could not have come from an
+# underflow that rounded up.
+#
+fsub_may_unfl:
+ mov.l L_SCR3(%a6),%d1
+ andi.b &0xc0,%d1 # is precision extended?
+ beq.w fsub_normal # yes; no underflow occurred
+
+ mov.l 0x4(%sp),%d1
+ cmpi.l %d1,&0x80000000 # is hi(man) = 0x80000000?
+ bne.w fsub_normal # no; no underflow occurred
+
+ tst.l 0x8(%sp) # is lo(man) = 0x0?
+ bne.w fsub_normal # no; no underflow occurred
+
+ btst &inex2_bit,FPSR_EXCEPT(%a6) # is INEX2 set?
+ beq.w fsub_normal # no; no underflow occurred
+
+#
+# ok, so now the result has an exponent equal to the smallest normalized
+# exponent for the selected precision. also, the mantissa is equal to
+# 0x8000000000000000 and this mantissa is the result of rounding non-zero
+# g,r,s.
+# now, we must determine whether the pre-rounded result was an underflow
+# rounded "up" or a normalized number rounded "down".
+# so, we do this by re-executing the subtract using RZ as the rounding mode and
+# seeing if the new result is smaller or equal to the current result.
+#
+ fmovm.x FP_SCR1(%a6),&0x40 # load dst op into fp1
+
+ mov.l L_SCR3(%a6),%d1
+ andi.b &0xc0,%d1 # keep rnd prec
+ ori.b &rz_mode*0x10,%d1 # insert rnd mode
+ fmov.l %d1,%fpcr # set FPCR
+ fmov.l &0x0,%fpsr # clear FPSR
+
+ fsub.x FP_SCR0(%a6),%fp1 # execute subtract
+
+ fmov.l &0x0,%fpcr # clear FPCR
+
+ fabs.x %fp0 # compare absolute values
+ fabs.x %fp1
+ fcmp.x %fp0,%fp1 # is first result > second?
+
+ fbgt.w fsub_unfl # yes; it's an underflow
+ bra.w fsub_normal # no; it's not an underflow
+
+##########################################################################
+
+#
+# Sub: inputs are not both normalized; what are they?
+#
+fsub_not_norm:
+ mov.w (tbl_fsub_op.b,%pc,%d1.w*2),%d1
+ jmp (tbl_fsub_op.b,%pc,%d1.w*1)
+
+ swbeg &48
+tbl_fsub_op:
+ short fsub_norm - tbl_fsub_op # NORM - NORM
+ short fsub_zero_src - tbl_fsub_op # NORM - ZERO
+ short fsub_inf_src - tbl_fsub_op # NORM - INF
+ short fsub_res_qnan - tbl_fsub_op # NORM - QNAN
+ short fsub_norm - tbl_fsub_op # NORM - DENORM
+ short fsub_res_snan - tbl_fsub_op # NORM - SNAN
+ short tbl_fsub_op - tbl_fsub_op #
+ short tbl_fsub_op - tbl_fsub_op #
+
+ short fsub_zero_dst - tbl_fsub_op # ZERO - NORM
+ short fsub_zero_2 - tbl_fsub_op # ZERO - ZERO
+ short fsub_inf_src - tbl_fsub_op # ZERO - INF
+ short fsub_res_qnan - tbl_fsub_op # ZERO - QNAN
+ short fsub_zero_dst - tbl_fsub_op # ZERO - DENORM
+ short fsub_res_snan - tbl_fsub_op # ZERO - SNAN
+ short tbl_fsub_op - tbl_fsub_op #
+ short tbl_fsub_op - tbl_fsub_op #
+
+ short fsub_inf_dst - tbl_fsub_op # INF - NORM
+ short fsub_inf_dst - tbl_fsub_op # INF - ZERO
+ short fsub_inf_2 - tbl_fsub_op # INF - INF
+ short fsub_res_qnan - tbl_fsub_op # INF - QNAN
+ short fsub_inf_dst - tbl_fsub_op # INF - DENORM
+ short fsub_res_snan - tbl_fsub_op # INF - SNAN
+ short tbl_fsub_op - tbl_fsub_op #
+ short tbl_fsub_op - tbl_fsub_op #
+
+ short fsub_res_qnan - tbl_fsub_op # QNAN - NORM
+ short fsub_res_qnan - tbl_fsub_op # QNAN - ZERO
+ short fsub_res_qnan - tbl_fsub_op # QNAN - INF
+ short fsub_res_qnan - tbl_fsub_op # QNAN - QNAN
+ short fsub_res_qnan - tbl_fsub_op # QNAN - DENORM
+ short fsub_res_snan - tbl_fsub_op # QNAN - SNAN
+ short tbl_fsub_op - tbl_fsub_op #
+ short tbl_fsub_op - tbl_fsub_op #
+
+ short fsub_norm - tbl_fsub_op # DENORM - NORM
+ short fsub_zero_src - tbl_fsub_op # DENORM - ZERO
+ short fsub_inf_src - tbl_fsub_op # DENORM - INF
+ short fsub_res_qnan - tbl_fsub_op # DENORM - QNAN
+ short fsub_norm - tbl_fsub_op # DENORM - DENORM
+ short fsub_res_snan - tbl_fsub_op # DENORM - SNAN
+ short tbl_fsub_op - tbl_fsub_op #
+ short tbl_fsub_op - tbl_fsub_op #
+
+ short fsub_res_snan - tbl_fsub_op # SNAN - NORM
+ short fsub_res_snan - tbl_fsub_op # SNAN - ZERO
+ short fsub_res_snan - tbl_fsub_op # SNAN - INF
+ short fsub_res_snan - tbl_fsub_op # SNAN - QNAN
+ short fsub_res_snan - tbl_fsub_op # SNAN - DENORM
+ short fsub_res_snan - tbl_fsub_op # SNAN - SNAN
+ short tbl_fsub_op - tbl_fsub_op #
+ short tbl_fsub_op - tbl_fsub_op #
+
+fsub_res_qnan:
+ bra.l res_qnan
+fsub_res_snan:
+ bra.l res_snan
+
+#
+# both operands are ZEROes
+#
+fsub_zero_2:
+ mov.b SRC_EX(%a0),%d0
+ mov.b DST_EX(%a1),%d1
+ eor.b %d1,%d0
+ bpl.b fsub_zero_2_chk_rm
+
+# the signs are opposite, so, return a ZERO w/ the sign of the dst ZERO
+ tst.b %d1 # is dst negative?
+ bmi.b fsub_zero_2_rm # yes
+ fmov.s &0x00000000,%fp0 # no; return +ZERO
+ mov.b &z_bmask,FPSR_CC(%a6) # set Z
+ rts
+
+#
+# the ZEROes have the same signs:
+# - therefore, we return +ZERO if the rounding mode is RN, RZ, or RP
+# - -ZERO is returned in the case of RM.
+#
+fsub_zero_2_chk_rm:
+ mov.b 3+L_SCR3(%a6),%d1
+ andi.b &0x30,%d1 # extract rnd mode
+ cmpi.b %d1,&rm_mode*0x10 # is rnd mode = RM?
+ beq.b fsub_zero_2_rm # yes
+ fmov.s &0x00000000,%fp0 # no; return +ZERO
+ mov.b &z_bmask,FPSR_CC(%a6) # set Z
+ rts
+
+fsub_zero_2_rm:
+ fmov.s &0x80000000,%fp0 # return -ZERO
+ mov.b &z_bmask+neg_bmask,FPSR_CC(%a6) # set Z/NEG
+ rts
+
+#
+# one operand is a ZERO and the other is a DENORM or a NORM.
+# scale the DENORM or NORM and jump to the regular fsub routine.
+#
+fsub_zero_dst:
+ mov.w SRC_EX(%a0),FP_SCR0_EX(%a6)
+ mov.l SRC_HI(%a0),FP_SCR0_HI(%a6)
+ mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
+ bsr.l scale_to_zero_src # scale the operand
+ clr.w FP_SCR1_EX(%a6)
+ clr.l FP_SCR1_HI(%a6)
+ clr.l FP_SCR1_LO(%a6)
+ bra.w fsub_zero_entry # go execute fsub
+
+fsub_zero_src:
+ mov.w DST_EX(%a1),FP_SCR1_EX(%a6)
+ mov.l DST_HI(%a1),FP_SCR1_HI(%a6)
+ mov.l DST_LO(%a1),FP_SCR1_LO(%a6)
+ bsr.l scale_to_zero_dst # scale the operand
+ clr.w FP_SCR0_EX(%a6)
+ clr.l FP_SCR0_HI(%a6)
+ clr.l FP_SCR0_LO(%a6)
+ bra.w fsub_zero_entry # go execute fsub
+
+#
+# both operands are INFs. an OPERR will result if the INFs have the
+# same signs. else, an INF w/ the sign of the dst INF is returned
+#
+fsub_inf_2:
+ mov.b SRC_EX(%a0),%d0 # exclusive or the signs
+ mov.b DST_EX(%a1),%d1
+ eor.b %d1,%d0
+ bpl.l res_operr # weed out (+INF)-(+INF)
+
+# ok, so it's not an OPERR. but we do have to remember to return
+# the src INF since that's where the 881/882 gets the j-bit.
+
+fsub_inf_src:
+ fmovm.x SRC(%a0),&0x80 # return src INF
+ fneg.x %fp0 # invert sign
+ fbge.w fsub_inf_done # sign is now positive
+ mov.b &neg_bmask+inf_bmask,FPSR_CC(%a6) # set INF/NEG
+ rts
+
+fsub_inf_dst:
+ fmovm.x DST(%a1),&0x80 # return dst INF
+ tst.b DST_EX(%a1) # is INF negative?
+ bpl.b fsub_inf_done # no
+ mov.b &neg_bmask+inf_bmask,FPSR_CC(%a6) # set INF/NEG
+ rts
+
+fsub_inf_done:
+ mov.b &inf_bmask,FPSR_CC(%a6) # set INF
+ rts
+
+#########################################################################
+# XDEF **************************************************************** #
+# fsqrt(): emulates the fsqrt instruction #
+# fssqrt(): emulates the fssqrt instruction #
+# fdsqrt(): emulates the fdsqrt instruction #
+# #
+# XREF **************************************************************** #
+# scale_sqrt() - scale the source operand #
+# unf_res() - return default underflow result #
+# ovf_res() - return default overflow result #
+# res_qnan_1op() - return QNAN result #
+# res_snan_1op() - return SNAN result #
+# #
+# INPUT *************************************************************** #
+# a0 = pointer to extended precision source operand #
+# d0 = rnd prec,mode #
+# #
+# OUTPUT ************************************************************** #
+# fp0 = result #
+# fp1 = EXOP (if exception occurred) #
+# #
+# ALGORITHM *********************************************************** #
+# Handle NANs, infinities, and zeroes as special cases. Divide #
+# norms/denorms into ext/sgl/dbl precision. #
+# For norms/denorms, scale the exponents such that a sqrt #
+# instruction won't cause an exception. Use the regular fsqrt to #
+# compute a result. Check if the original operands would have taken #
+# an exception. If so, return the default overflow/underflow result #
+# and return the EXOP if exceptions are enabled. Else, scale the #
+# result operand to the proper exponent. #
+# #
+#########################################################################
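+#
+# the scaling step leans on the identity below (the exact odd/even
+# exponent handling lives in scale_sqrt):
+#
+#	sqrt(m * 2^e) = sqrt(m)  * 2^(e/2)		e even
+#	sqrt(m * 2^e) = sqrt(2m) * 2^((e-1)/2)		e odd
+#
+# so the exponent can be moved near zero before the fsqrt and the halved
+# remainder folded back in afterward.
+#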
+
+ global fssqrt
+fssqrt:
+ andi.b &0x30,%d0 # clear rnd prec
+ ori.b &s_mode*0x10,%d0 # insert sgl precision
+ bra.b fsqrt
+
+ global fdsqrt
+fdsqrt:
+ andi.b &0x30,%d0 # clear rnd prec
+ ori.b &d_mode*0x10,%d0 # insert dbl precision
+
+ global fsqrt
+fsqrt:
+ mov.l %d0,L_SCR3(%a6) # store rnd info
+ clr.w %d1
+ mov.b STAG(%a6),%d1
+ bne.w fsqrt_not_norm # optimize on non-norm input
+
+#
+# SQUARE ROOT: norms and denorms ONLY!
+#
+fsqrt_norm:
+ tst.b SRC_EX(%a0) # is operand negative?
+ bmi.l res_operr # yes
+
+ andi.b &0xc0,%d0 # is precision extended?
+ bne.b fsqrt_not_ext # no; go handle sgl or dbl
+
+ fmov.l L_SCR3(%a6),%fpcr # set FPCR
+ fmov.l &0x0,%fpsr # clear FPSR
+
+ fsqrt.x (%a0),%fp0 # execute square root
+
+ fmov.l %fpsr,%d1
+ or.l %d1,USER_FPSR(%a6) # set N,INEX
+
+ rts
+
+fsqrt_denorm:
+ tst.b SRC_EX(%a0) # is operand negative?
+ bmi.l res_operr # yes
+
+ andi.b &0xc0,%d0 # is precision extended?
+ bne.b fsqrt_not_ext # no; go handle sgl or dbl
+
+ mov.w SRC_EX(%a0),FP_SCR0_EX(%a6)
+ mov.l SRC_HI(%a0),FP_SCR0_HI(%a6)
+ mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
+
+ bsr.l scale_sqrt # calculate scale factor
+
+ bra.w fsqrt_sd_normal
+
+#
+# operand is either single or double
+#
+fsqrt_not_ext:
+ cmpi.b %d0,&s_mode*0x10 # separate sgl/dbl prec
+ bne.w fsqrt_dbl
+
+#
+# operand is to be rounded to single precision
+#
+fsqrt_sgl:
+ mov.w SRC_EX(%a0),FP_SCR0_EX(%a6)
+ mov.l SRC_HI(%a0),FP_SCR0_HI(%a6)
+ mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
+
+ bsr.l scale_sqrt # calculate scale factor
+
+ cmpi.l %d0,&0x3fff-0x3f81 # will move in underflow?
+ beq.w fsqrt_sd_may_unfl
+ bgt.w fsqrt_sd_unfl # yes; go handle underflow
+ cmpi.l %d0,&0x3fff-0x407f # will move in overflow?
+ beq.w fsqrt_sd_may_ovfl # maybe; go check
+ blt.w fsqrt_sd_ovfl # yes; go handle overflow
+
+#
+# operand will NOT overflow or underflow when moved into the fp reg file
+#
+fsqrt_sd_normal:
+ fmov.l &0x0,%fpsr # clear FPSR
+ fmov.l L_SCR3(%a6),%fpcr # set FPCR
+
+ fsqrt.x FP_SCR0(%a6),%fp0 # perform square root
+
+ fmov.l %fpsr,%d1 # save FPSR
+ fmov.l &0x0,%fpcr # clear FPCR
+
+ or.l %d1,USER_FPSR(%a6) # save INEX2,N
+
+fsqrt_sd_normal_exit:
+ mov.l %d2,-(%sp) # save d2
+ fmovm.x &0x80,FP_SCR0(%a6) # store out result
+ mov.w FP_SCR0_EX(%a6),%d1 # load sgn,exp
+ mov.l %d1,%d2 # make a copy
+ andi.l &0x7fff,%d1 # strip sign
+ sub.l %d0,%d1 # subtract scale factor
+ andi.w &0x8000,%d2 # keep old sign
+ or.w %d1,%d2 # concat old sign,new exp
+ mov.w %d2,FP_SCR0_EX(%a6) # insert new exponent
+ mov.l (%sp)+,%d2 # restore d2
+ fmovm.x FP_SCR0(%a6),&0x80 # return result in fp0
+ rts
+
+#
+# operand is to be rounded to double precision
+#
+fsqrt_dbl:
+ mov.w SRC_EX(%a0),FP_SCR0_EX(%a6)
+ mov.l SRC_HI(%a0),FP_SCR0_HI(%a6)
+ mov.l SRC_LO(%a0),FP_SCR0_LO(%a6)
+
+ bsr.l scale_sqrt # calculate scale factor
+
+ cmpi.l %d0,&0x3fff-0x3c01 # will move in underflow?
+ beq.w fsqrt_sd_may_unfl
+ bgt.b fsqrt_sd_unfl # yes; go handle underflow
+ cmpi.l %d0,&0x3fff-0x43ff # will move in overflow?
+ beq.w fsqrt_sd_may_ovfl # maybe; go check
+ blt.w fsqrt_sd_ovfl # yes; go handle overflow
+ bra.w fsqrt_sd_normal # no; go handle normalized op
+
+# we're on the line here and the distinguishing characteristic is whether
+# the exponent is 0x3fff or 0x3ffe. if it's 0x3ffe, then it's a safe number;
+# otherwise, fall through to underflow.
+fsqrt_sd_may_unfl:
+ btst &0x0,1+FP_SCR0_EX(%a6) # is exponent 0x3fff?
+ bne.w fsqrt_sd_normal # yes, so no underflow
+
+#
+# operand WILL underflow when moved into the fp register file
+#
+fsqrt_sd_unfl:
+ bset &unfl_bit,FPSR_EXCEPT(%a6) # set unfl exc bit
+
+ fmov.l &rz_mode*0x10,%fpcr # set FPCR
+ fmov.l &0x0,%fpsr # clear FPSR
+
+ fsqrt.x FP_SCR0(%a6),%fp0 # execute square root
+
+ fmov.l %fpsr,%d1 # save status
+ fmov.l &0x0,%fpcr # clear FPCR
+
+ or.l %d1,USER_FPSR(%a6) # save INEX2,N
+
+# if underflow or inexact is enabled, go calculate EXOP first.
+ mov.b FPCR_ENABLE(%a6),%d1
+ andi.b &0x0b,%d1 # is UNFL or INEX enabled?
+ bne.b fsqrt_sd_unfl_ena # yes
+
+fsqrt_sd_unfl_dis:
+ fmovm.x &0x80,FP_SCR0(%a6) # store out result
+
+ lea FP_SCR0(%a6),%a0 # pass: result addr
+ mov.l L_SCR3(%a6),%d1 # pass: rnd prec,mode
+ bsr.l unf_res # calculate default result
+ or.b %d0,FPSR_CC(%a6) # set possible 'Z' ccode
+ fmovm.x FP_SCR0(%a6),&0x80 # return default result in fp0
+ rts
+
+#
+# operand will underflow AND underflow is enabled.
+# therefore, we must return the result rounded to extended precision.
+#
+fsqrt_sd_unfl_ena:
+ mov.l FP_SCR0_HI(%a6),FP_SCR1_HI(%a6)
+ mov.l FP_SCR0_LO(%a6),FP_SCR1_LO(%a6)
+ mov.w FP_SCR0_EX(%a6),%d1 # load current exponent
+
+ mov.l %d2,-(%sp) # save d2
+ mov.l %d1,%d2 # make a copy
+ andi.l &0x7fff,%d1 # strip sign
+ andi.w &0x8000,%d2 # keep old sign
+ sub.l %d0,%d1 # subtract scale factor
+ addi.l &0x6000,%d1 # add new bias
+ andi.w &0x7fff,%d1
+ or.w %d2,%d1 # concat new sign,new exp
+ mov.w %d1,FP_SCR1_EX(%a6) # insert new exp
+ fmovm.x FP_SCR1(%a6),&0x40 # return EXOP in fp1
+ mov.l (%sp)+,%d2 # restore d2
+ bra.b fsqrt_sd_unfl_dis
+
+#
+# operand WILL overflow.
+#
+fsqrt_sd_ovfl:
+ fmov.l &0x0,%fpsr # clear FPSR
+ fmov.l L_SCR3(%a6),%fpcr # set FPCR
+
+ fsqrt.x FP_SCR0(%a6),%fp0 # perform square root
+
+ fmov.l &0x0,%fpcr # clear FPCR
+ fmov.l %fpsr,%d1 # save FPSR
+
+ or.l %d1,USER_FPSR(%a6) # save INEX2,N
+
+fsqrt_sd_ovfl_tst:
+ or.l &ovfl_inx_mask,USER_FPSR(%a6) # set ovfl/aovfl/ainex
+
+ mov.b FPCR_ENABLE(%a6),%d1
+ andi.b &0x13,%d1 # is OVFL or INEX enabled?
+ bne.b fsqrt_sd_ovfl_ena # yes
+
+#
+# OVFL is not enabled; therefore, we must create the default result by
+# calling ovf_res().
+#
+fsqrt_sd_ovfl_dis:
+ btst &neg_bit,FPSR_CC(%a6) # is result negative?
+ sne %d1 # set sign param accordingly
+ mov.l L_SCR3(%a6),%d0 # pass: prec,mode
+ bsr.l ovf_res # calculate default result
+ or.b %d0,FPSR_CC(%a6) # set INF,N if applicable
+ fmovm.x (%a0),&0x80 # return default result in fp0
+ rts
+
+#
+# OVFL is enabled.
+# the INEX2 bit has already been updated by the round to the correct precision.
+# now, round to extended (and don't alter the FPSR).
+#
+fsqrt_sd_ovfl_ena:
+ mov.l %d2,-(%sp) # save d2
+ mov.w FP_SCR0_EX(%a6),%d1 # fetch {sgn,exp}
+ mov.l %d1,%d2 # make a copy
+ andi.l &0x7fff,%d1 # strip sign
+ andi.w &0x8000,%d2 # keep old sign
+ sub.l %d0,%d1 # subtract scale factor
+ subi.l &0x6000,%d1 # subtract bias
+ andi.w &0x7fff,%d1
+ or.w %d2,%d1 # concat sign,exp
+ mov.w %d1,FP_SCR0_EX(%a6) # insert new exponent
+ fmovm.x FP_SCR0(%a6),&0x40 # return EXOP in fp1
+ mov.l (%sp)+,%d2 # restore d2
+ bra.b fsqrt_sd_ovfl_dis
+
+#
+# the move in MAY overflow. so...
+#
+fsqrt_sd_may_ovfl:
+ btst &0x0,1+FP_SCR0_EX(%a6) # is exponent 0x3fff?
+ bne.w fsqrt_sd_ovfl # yes, so overflow
+
+ fmov.l &0x0,%fpsr # clear FPSR
+ fmov.l L_SCR3(%a6),%fpcr # set FPCR
+
+ fsqrt.x FP_SCR0(%a6),%fp0 # perform square root
+
+ fmov.l %fpsr,%d1 # save status
+ fmov.l &0x0,%fpcr # clear FPCR
+
+ or.l %d1,USER_FPSR(%a6) # save INEX2,N
+
+ fmov.x %fp0,%fp1 # make a copy of result
+ fcmp.b %fp1,&0x1 # is |result| >= 1.b?
+ fbge.w fsqrt_sd_ovfl_tst # yes; overflow has occurred
+
+# no, it didn't overflow; we have correct result
+ bra.w fsqrt_sd_normal_exit
+
+##########################################################################
+
+#
+# input is not normalized; what is it?
+#
+fsqrt_not_norm:
+ cmpi.b %d1,&DENORM # weed out DENORM
+ beq.w fsqrt_denorm
+ cmpi.b %d1,&ZERO # weed out ZERO
+ beq.b fsqrt_zero
+ cmpi.b %d1,&INF # weed out INF
+ beq.b fsqrt_inf
+ cmpi.b %d1,&SNAN # weed out SNAN
+ beq.l res_snan_1op
+ bra.l res_qnan_1op
+
+#
+# fsqrt(+0) = +0
+# fsqrt(-0) = -0
+# fsqrt(+INF) = +INF
+# fsqrt(-INF) = OPERR
+#
+fsqrt_zero:
+ tst.b SRC_EX(%a0) # is ZERO positive or negative?
+ bmi.b fsqrt_zero_m # negative
+fsqrt_zero_p:
+ fmov.s &0x00000000,%fp0 # return +ZERO
+ mov.b &z_bmask,FPSR_CC(%a6) # set 'Z' ccode bit
+ rts
+fsqrt_zero_m:
+ fmov.s &0x80000000,%fp0 # return -ZERO
+ mov.b &z_bmask+neg_bmask,FPSR_CC(%a6) # set 'Z','N' ccode bits
+ rts
+
+fsqrt_inf:
+ tst.b SRC_EX(%a0) # is INF positive or negative?
+ bmi.l res_operr # negative
+fsqrt_inf_p:
+ fmovm.x SRC(%a0),&0x80 # return +INF in fp0
+ mov.b &inf_bmask,FPSR_CC(%a6) # set 'I' ccode bit
+ rts
+
+#########################################################################
+# XDEF **************************************************************** #
+# fetch_dreg(): fetch register according to index in d1 #
+# #
+# XREF **************************************************************** #
+# None #
+# #
+# INPUT *************************************************************** #
+# d1 = index of register to fetch from #
+# #
+# OUTPUT ************************************************************** #
+# d0 = value of register fetched #
+# #
+# ALGORITHM *********************************************************** #
+# According to the index value in d1 which can range from zero #
+# to fifteen, load the corresponding register file value (where #
+# address register indexes start at 8). D0/D1/A0/A1/A6/A7 are on the #
+# stack. The rest should still be in their original places. #
+# #
+#########################################################################
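+#
+# the offset-table dispatch used by this and the following routines is
+# the assembly analogue of an array of handlers indexed by register
+# number; a C sketch (illustrative; the names mirror the labels below):
+#
+#	typedef unsigned long (*fetch_fn)(void);
+#	static fetch_fn tbl_fdreg[16] =
+#		{ fdreg0, fdreg1, /* ... */ fdregf };
+#	unsigned long fetch_dreg(unsigned int idx)
+#	{
+#		return tbl_fdreg[idx]();    /* 0-7 = d0-d7, 8-15 = a0-a7 */
+#	}
+#
+# the assembly stores 16-bit offsets from the table base instead of full
+# pointers, which keeps the table position-independent.
+#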
+
+# this routine leaves d1 intact for subsequent store_dreg calls.
+ global fetch_dreg
+fetch_dreg:
+ mov.w (tbl_fdreg.b,%pc,%d1.w*2),%d0
+ jmp (tbl_fdreg.b,%pc,%d0.w*1)
+
+tbl_fdreg:
+ short fdreg0 - tbl_fdreg
+ short fdreg1 - tbl_fdreg
+ short fdreg2 - tbl_fdreg
+ short fdreg3 - tbl_fdreg
+ short fdreg4 - tbl_fdreg
+ short fdreg5 - tbl_fdreg
+ short fdreg6 - tbl_fdreg
+ short fdreg7 - tbl_fdreg
+ short fdreg8 - tbl_fdreg
+ short fdreg9 - tbl_fdreg
+ short fdrega - tbl_fdreg
+ short fdregb - tbl_fdreg
+ short fdregc - tbl_fdreg
+ short fdregd - tbl_fdreg
+ short fdrege - tbl_fdreg
+ short fdregf - tbl_fdreg
+
+fdreg0:
+ mov.l EXC_DREGS+0x0(%a6),%d0
+ rts
+fdreg1:
+ mov.l EXC_DREGS+0x4(%a6),%d0
+ rts
+fdreg2:
+ mov.l %d2,%d0
+ rts
+fdreg3:
+ mov.l %d3,%d0
+ rts
+fdreg4:
+ mov.l %d4,%d0
+ rts
+fdreg5:
+ mov.l %d5,%d0
+ rts
+fdreg6:
+ mov.l %d6,%d0
+ rts
+fdreg7:
+ mov.l %d7,%d0
+ rts
+fdreg8:
+ mov.l EXC_DREGS+0x8(%a6),%d0
+ rts
+fdreg9:
+ mov.l EXC_DREGS+0xc(%a6),%d0
+ rts
+fdrega:
+ mov.l %a2,%d0
+ rts
+fdregb:
+ mov.l %a3,%d0
+ rts
+fdregc:
+ mov.l %a4,%d0
+ rts
+fdregd:
+ mov.l %a5,%d0
+ rts
+fdrege:
+ mov.l (%a6),%d0
+ rts
+fdregf:
+ mov.l EXC_A7(%a6),%d0
+ rts
+
+#########################################################################
+# XDEF **************************************************************** #
+# store_dreg_l(): store longword to data register specified by d1 #
+# #
+# XREF **************************************************************** #
+# None #
+# #
+# INPUT *************************************************************** #
+# d0 = longword value to store #
+# d1 = index of register to store to #
+# #
+# OUTPUT ************************************************************** #
+# (data register is updated) #
+# #
+# ALGORITHM *********************************************************** #
+# According to the index value in d1, store the longword value #
+# in d0 to the corresponding data register. D0/D1 are on the stack #
+# while the rest are in their initial places. #
+# #
+#########################################################################
+
+ global store_dreg_l
+store_dreg_l:
+ mov.w (tbl_sdregl.b,%pc,%d1.w*2),%d1
+ jmp (tbl_sdregl.b,%pc,%d1.w*1)
+
+tbl_sdregl:
+ short sdregl0 - tbl_sdregl
+ short sdregl1 - tbl_sdregl
+ short sdregl2 - tbl_sdregl
+ short sdregl3 - tbl_sdregl
+ short sdregl4 - tbl_sdregl
+ short sdregl5 - tbl_sdregl
+ short sdregl6 - tbl_sdregl
+ short sdregl7 - tbl_sdregl
+
+sdregl0:
+ mov.l %d0,EXC_DREGS+0x0(%a6)
+ rts
+sdregl1:
+ mov.l %d0,EXC_DREGS+0x4(%a6)
+ rts
+sdregl2:
+ mov.l %d0,%d2
+ rts
+sdregl3:
+ mov.l %d0,%d3
+ rts
+sdregl4:
+ mov.l %d0,%d4
+ rts
+sdregl5:
+ mov.l %d0,%d5
+ rts
+sdregl6:
+ mov.l %d0,%d6
+ rts
+sdregl7:
+ mov.l %d0,%d7
+ rts
+
+#########################################################################
+# XDEF **************************************************************** #
+# store_dreg_w(): store word to data register specified by d1 #
+# #
+# XREF **************************************************************** #
+# None #
+# #
+# INPUT *************************************************************** #
+# d0 = word value to store #
+# d1 = index of register to store to #
+# #
+# OUTPUT ************************************************************** #
+# (data register is updated) #
+# #
+# ALGORITHM *********************************************************** #
+# According to the index value in d1, store the word value #
+# in d0 to the corresponding data register. D0/D1 are on the stack #
+# while the rest are in their initial places. #
+# #
+#########################################################################
+
+ global store_dreg_w
+store_dreg_w:
+ mov.w (tbl_sdregw.b,%pc,%d1.w*2),%d1
+ jmp (tbl_sdregw.b,%pc,%d1.w*1)
+
+tbl_sdregw:
+ short sdregw0 - tbl_sdregw
+ short sdregw1 - tbl_sdregw
+ short sdregw2 - tbl_sdregw
+ short sdregw3 - tbl_sdregw
+ short sdregw4 - tbl_sdregw
+ short sdregw5 - tbl_sdregw
+ short sdregw6 - tbl_sdregw
+ short sdregw7 - tbl_sdregw
+
+sdregw0:
+ mov.w %d0,2+EXC_DREGS+0x0(%a6)
+ rts
+sdregw1:
+ mov.w %d0,2+EXC_DREGS+0x4(%a6)
+ rts
+sdregw2:
+ mov.w %d0,%d2
+ rts
+sdregw3:
+ mov.w %d0,%d3
+ rts
+sdregw4:
+ mov.w %d0,%d4
+ rts
+sdregw5:
+ mov.w %d0,%d5
+ rts
+sdregw6:
+ mov.w %d0,%d6
+ rts
+sdregw7:
+ mov.w %d0,%d7
+ rts
+
+#########################################################################
+# XDEF **************************************************************** #
+# store_dreg_b(): store byte to data register specified by d1 #
+# #
+# XREF **************************************************************** #
+# None #
+# #
+# INPUT *************************************************************** #
+# d0 = byte value to store #
+# d1 = index of register to store to #
+# #
+# OUTPUT ************************************************************** #
+# (data register is updated) #
+# #
+# ALGORITHM *********************************************************** #
+# According to the index value in d1, store the byte value #
+# in d0 to the corresponding data register. D0/D1 are on the stack #
+# while the rest are in their initial places. #
+# #
+#########################################################################
+
+ global store_dreg_b
+store_dreg_b:
+ mov.w (tbl_sdregb.b,%pc,%d1.w*2),%d1
+ jmp (tbl_sdregb.b,%pc,%d1.w*1)
+
+tbl_sdregb:
+ short sdregb0 - tbl_sdregb
+ short sdregb1 - tbl_sdregb
+ short sdregb2 - tbl_sdregb
+ short sdregb3 - tbl_sdregb
+ short sdregb4 - tbl_sdregb
+ short sdregb5 - tbl_sdregb
+ short sdregb6 - tbl_sdregb
+ short sdregb7 - tbl_sdregb
+
+sdregb0:
+ mov.b %d0,3+EXC_DREGS+0x0(%a6)
+ rts
+sdregb1:
+ mov.b %d0,3+EXC_DREGS+0x4(%a6)
+ rts
+sdregb2:
+ mov.b %d0,%d2
+ rts
+sdregb3:
+ mov.b %d0,%d3
+ rts
+sdregb4:
+ mov.b %d0,%d4
+ rts
+sdregb5:
+ mov.b %d0,%d5
+ rts
+sdregb6:
+ mov.b %d0,%d6
+ rts
+sdregb7:
+ mov.b %d0,%d7
+ rts
+
+#########################################################################
+# XDEF **************************************************************** #
+# inc_areg(): increment an address register by the value in d0 #
+# #
+# XREF **************************************************************** #
+# None #
+# #
+# INPUT *************************************************************** #
+# d0 = amount to increment by #
+# d1 = index of address register to increment #
+# #
+# OUTPUT ************************************************************** #
+# (address register is updated) #
+# #
+# ALGORITHM *********************************************************** #
+# Typically used for an instruction w/ a post-increment <ea>, #
+# this routine adds the increment value in d0 to the address register #
+# specified by d1. A0/A1/A6/A7 reside on the stack. The rest reside #
+# in their original places. #
+# For a7, if the increment amount is one, then we have to #
+# increment by two. For any a7 update, set the mia7_flag so that if #
+# an access error exception occurs later in emulation, this address #
+# register update can be undone. #
+# #
+#########################################################################
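+#
+# the a7 rule in C terms (illustrative sketch): a byte-sized
+# post-increment of the stack pointer still moves it by two so that a7
+# stays word-aligned:
+#
+#	unsigned long inc = d0;
+#	if (reg == 7) {
+#		spcond_flg = mia7_flg;	/* so the update can be undone */
+#		if (inc == 1)
+#			inc = 2;
+#	}
+#	areg[reg] += inc;
+#
+# dec_areg below applies the mirror-image rule w/ mda7_flg.
+#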
+
+ global inc_areg
+inc_areg:
+ mov.w (tbl_iareg.b,%pc,%d1.w*2),%d1
+ jmp (tbl_iareg.b,%pc,%d1.w*1)
+
+tbl_iareg:
+ short iareg0 - tbl_iareg
+ short iareg1 - tbl_iareg
+ short iareg2 - tbl_iareg
+ short iareg3 - tbl_iareg
+ short iareg4 - tbl_iareg
+ short iareg5 - tbl_iareg
+ short iareg6 - tbl_iareg
+ short iareg7 - tbl_iareg
+
+iareg0: add.l %d0,EXC_DREGS+0x8(%a6)
+ rts
+iareg1: add.l %d0,EXC_DREGS+0xc(%a6)
+ rts
+iareg2: add.l %d0,%a2
+ rts
+iareg3: add.l %d0,%a3
+ rts
+iareg4: add.l %d0,%a4
+ rts
+iareg5: add.l %d0,%a5
+ rts
+iareg6: add.l %d0,(%a6)
+ rts
+iareg7: mov.b &mia7_flg,SPCOND_FLG(%a6)
+ cmpi.b %d0,&0x1
+ beq.b iareg7b
+ add.l %d0,EXC_A7(%a6)
+ rts
+iareg7b:
+ addq.l &0x2,EXC_A7(%a6)
+ rts
+
+#########################################################################
+# XDEF **************************************************************** #
+# dec_areg(): decrement an address register by the value in d0 #
+# #
+# XREF **************************************************************** #
+# None #
+# #
+# INPUT *************************************************************** #
+# d0 = amount to decrement by #
+# d1 = index of address register to decrement #
+# #
+# OUTPUT ************************************************************** #
+# (address register is updated) #
+# #
+# ALGORITHM *********************************************************** #
+# Typically used for an instruction w/ a pre-decrement <ea>, #
+# this routine subtracts the decrement value in d0 from the address register #
+# specified by d1. A0/A1/A6/A7 reside on the stack. The rest reside #
+# in their original places. #
+# For a7, if the decrement amount is one, then we have to #
+# decrement by two. For any a7 update, set the mda7_flag so that if #
+# an access error exception occurs later in emulation, this address #
+# register update can be undone. #
+# #
+#########################################################################
+
+ global dec_areg
+dec_areg:
+ mov.w (tbl_dareg.b,%pc,%d1.w*2),%d1
+ jmp (tbl_dareg.b,%pc,%d1.w*1)
+
+tbl_dareg:
+ short dareg0 - tbl_dareg
+ short dareg1 - tbl_dareg
+ short dareg2 - tbl_dareg
+ short dareg3 - tbl_dareg
+ short dareg4 - tbl_dareg
+ short dareg5 - tbl_dareg
+ short dareg6 - tbl_dareg
+ short dareg7 - tbl_dareg
+
+dareg0: sub.l %d0,EXC_DREGS+0x8(%a6)
+ rts
+dareg1: sub.l %d0,EXC_DREGS+0xc(%a6)
+ rts
+dareg2: sub.l %d0,%a2
+ rts
+dareg3: sub.l %d0,%a3
+ rts
+dareg4: sub.l %d0,%a4
+ rts
+dareg5: sub.l %d0,%a5
+ rts
+dareg6: sub.l %d0,(%a6)
+ rts
+dareg7: mov.b &mda7_flg,SPCOND_FLG(%a6)
+ cmpi.b %d0,&0x1
+ beq.b dareg7b
+ sub.l %d0,EXC_A7(%a6)
+ rts
+dareg7b:
+ subq.l &0x2,EXC_A7(%a6)
+ rts
+
+##############################################################################
+
+#########################################################################
+# XDEF **************************************************************** #
+# load_fpn1(): load FP register value into FP_SRC(a6). #
+# #
+# XREF **************************************************************** #
+# None #
+# #
+# INPUT *************************************************************** #
+# d0 = index of FP register to load #
+# #
+# OUTPUT ************************************************************** #
+# FP_SRC(a6) = value loaded from FP register file #
+# #
+# ALGORITHM *********************************************************** #
+# Using the index in d0, load FP_SRC(a6) with a number from the #
+# FP register file. #
+# #
+#########################################################################
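+#
+# note: in the static fmovm.x masks used below, each bit selects one
+# register, fp0 at bit 7 down through fp7 at bit 0 (so &0x20 = fp2 and
+# &0x01 = fp7). fp0/fp1 are instead copied from their saved images in
+# the exception frame (EXC_FP0/EXC_FP1) since the package uses those two
+# registers as scratch.
+#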
+
+ global load_fpn1
+load_fpn1:
+ mov.w (tbl_load_fpn1.b,%pc,%d0.w*2), %d0
+ jmp (tbl_load_fpn1.b,%pc,%d0.w*1)
+
+tbl_load_fpn1:
+ short load_fpn1_0 - tbl_load_fpn1
+ short load_fpn1_1 - tbl_load_fpn1
+ short load_fpn1_2 - tbl_load_fpn1
+ short load_fpn1_3 - tbl_load_fpn1
+ short load_fpn1_4 - tbl_load_fpn1
+ short load_fpn1_5 - tbl_load_fpn1
+ short load_fpn1_6 - tbl_load_fpn1
+ short load_fpn1_7 - tbl_load_fpn1
+
+load_fpn1_0:
+ mov.l 0+EXC_FP0(%a6), 0+FP_SRC(%a6)
+ mov.l 4+EXC_FP0(%a6), 4+FP_SRC(%a6)
+ mov.l 8+EXC_FP0(%a6), 8+FP_SRC(%a6)
+ lea FP_SRC(%a6), %a0
+ rts
+load_fpn1_1:
+ mov.l 0+EXC_FP1(%a6), 0+FP_SRC(%a6)
+ mov.l 4+EXC_FP1(%a6), 4+FP_SRC(%a6)
+ mov.l 8+EXC_FP1(%a6), 8+FP_SRC(%a6)
+ lea FP_SRC(%a6), %a0
+ rts
+load_fpn1_2:
+ fmovm.x &0x20, FP_SRC(%a6)
+ lea FP_SRC(%a6), %a0
+ rts
+load_fpn1_3:
+ fmovm.x &0x10, FP_SRC(%a6)
+ lea FP_SRC(%a6), %a0
+ rts
+load_fpn1_4:
+ fmovm.x &0x08, FP_SRC(%a6)
+ lea FP_SRC(%a6), %a0
+ rts
+load_fpn1_5:
+ fmovm.x &0x04, FP_SRC(%a6)
+ lea FP_SRC(%a6), %a0
+ rts
+load_fpn1_6:
+ fmovm.x &0x02, FP_SRC(%a6)
+ lea FP_SRC(%a6), %a0
+ rts
+load_fpn1_7:
+ fmovm.x &0x01, FP_SRC(%a6)
+ lea FP_SRC(%a6), %a0
+ rts
+
+#############################################################################
+
+#########################################################################
+# XDEF **************************************************************** #
+# load_fpn2(): load FP register value into FP_DST(a6). #
+# #
+# XREF **************************************************************** #
+# None #
+# #
+# INPUT *************************************************************** #
+# d0 = index of FP register to load #
+# #
+# OUTPUT ************************************************************** #
+# FP_DST(a6) = value loaded from FP register file #
+# #
+# ALGORITHM *********************************************************** #
+# Using the index in d0, load FP_DST(a6) with a number from the #
+# FP register file. #
+# #
+#########################################################################
+
+ global load_fpn2
+load_fpn2:
+ mov.w (tbl_load_fpn2.b,%pc,%d0.w*2), %d0
+ jmp (tbl_load_fpn2.b,%pc,%d0.w*1)
+
+tbl_load_fpn2:
+ short load_fpn2_0 - tbl_load_fpn2
+ short load_fpn2_1 - tbl_load_fpn2
+ short load_fpn2_2 - tbl_load_fpn2
+ short load_fpn2_3 - tbl_load_fpn2
+ short load_fpn2_4 - tbl_load_fpn2
+ short load_fpn2_5 - tbl_load_fpn2
+ short load_fpn2_6 - tbl_load_fpn2
+ short load_fpn2_7 - tbl_load_fpn2
+
+load_fpn2_0:
+ mov.l 0+EXC_FP0(%a6), 0+FP_DST(%a6)
+ mov.l 4+EXC_FP0(%a6), 4+FP_DST(%a6)
+ mov.l 8+EXC_FP0(%a6), 8+FP_DST(%a6)
+ lea FP_DST(%a6), %a0
+ rts
+load_fpn2_1:
+ mov.l 0+EXC_FP1(%a6), 0+FP_DST(%a6)
+ mov.l 4+EXC_FP1(%a6), 4+FP_DST(%a6)
+ mov.l 8+EXC_FP1(%a6), 8+FP_DST(%a6)
+ lea FP_DST(%a6), %a0
+ rts
+load_fpn2_2:
+ fmovm.x &0x20, FP_DST(%a6)
+ lea FP_DST(%a6), %a0
+ rts
+load_fpn2_3:
+ fmovm.x &0x10, FP_DST(%a6)
+ lea FP_DST(%a6), %a0
+ rts
+load_fpn2_4:
+ fmovm.x &0x08, FP_DST(%a6)
+ lea FP_DST(%a6), %a0
+ rts
+load_fpn2_5:
+ fmovm.x &0x04, FP_DST(%a6)
+ lea FP_DST(%a6), %a0
+ rts
+load_fpn2_6:
+ fmovm.x &0x02, FP_DST(%a6)
+ lea FP_DST(%a6), %a0
+ rts
+load_fpn2_7:
+ fmovm.x &0x01, FP_DST(%a6)
+ lea FP_DST(%a6), %a0
+ rts
+
+#############################################################################
+
+#########################################################################
+# XDEF **************************************************************** #
+# store_fpreg(): store an fp value to the fpreg designated d0. #
+# #
+# XREF **************************************************************** #
+# None #
+# #
+# INPUT *************************************************************** #
+# fp0 = extended precision value to store #
+# d0 = index of floating-point register #
+# #
+# OUTPUT ************************************************************** #
+# None #
+# #
+# ALGORITHM *********************************************************** #
+# Store the value in fp0 to the FP register designated by the #
+# value in d0. The FP number can be DENORM or SNAN so we have to be #
+# careful that we don't take an exception here. #
+# #
+#########################################################################
+
+ global store_fpreg
+store_fpreg:
+ mov.w (tbl_store_fpreg.b,%pc,%d0.w*2), %d0
+ jmp (tbl_store_fpreg.b,%pc,%d0.w*1)
+
+tbl_store_fpreg:
+ short store_fpreg_0 - tbl_store_fpreg
+ short store_fpreg_1 - tbl_store_fpreg
+ short store_fpreg_2 - tbl_store_fpreg
+ short store_fpreg_3 - tbl_store_fpreg
+ short store_fpreg_4 - tbl_store_fpreg
+ short store_fpreg_5 - tbl_store_fpreg
+ short store_fpreg_6 - tbl_store_fpreg
+ short store_fpreg_7 - tbl_store_fpreg
+
+store_fpreg_0:
+ fmovm.x &0x80, EXC_FP0(%a6)
+ rts
+store_fpreg_1:
+ fmovm.x &0x80, EXC_FP1(%a6)
+ rts
+store_fpreg_2:
+ fmovm.x &0x01, -(%sp)
+ fmovm.x (%sp)+, &0x20
+ rts
+store_fpreg_3:
+ fmovm.x &0x01, -(%sp)
+ fmovm.x (%sp)+, &0x10
+ rts
+store_fpreg_4:
+ fmovm.x &0x01, -(%sp)
+ fmovm.x (%sp)+, &0x08
+ rts
+store_fpreg_5:
+ fmovm.x &0x01, -(%sp)
+ fmovm.x (%sp)+, &0x04
+ rts
+store_fpreg_6:
+ fmovm.x &0x01, -(%sp)
+ fmovm.x (%sp)+, &0x02
+ rts
+store_fpreg_7:
+ fmovm.x &0x01, -(%sp)
+ fmovm.x (%sp)+, &0x01
+ rts
+
+#########################################################################
+# XDEF **************************************************************** #
+# get_packed(): fetch a packed operand from memory and then #
+# convert it to a floating-point binary number. #
+# #
+# XREF **************************************************************** #
+# _dcalc_ea() - calculate the correct <ea> #
+# _mem_read() - fetch the packed operand from memory #
+# facc_in_x() - the fetch failed so jump to special exit code #
+# decbin() - convert packed to binary extended precision #
+# #
+# INPUT *************************************************************** #
+# None #
+# #
+# OUTPUT ************************************************************** #
+# If no failure on _mem_read(): #
+# FP_SRC(a6) = packed operand now as a binary FP number #
+# #
+# ALGORITHM *********************************************************** #
+#	Get the correct <ea>, which is the value on the exception stack	#
+# frame w/ maybe a correction factor if the <ea> is -(an) or (an)+. #
+# Then, fetch the operand from memory. If the fetch fails, exit #
+# through facc_in_x(). #
+#	If the packed operand is a ZERO, NAN, or INF, convert it to	#
+# its binary representation here. Else, call decbin() which will #
+# convert the packed value to an extended precision binary value. #
+# #
+#########################################################################
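+
+# For orientation, a C sketch of the special-case tests below
+# (illustration only, not part of the build; 'struct packed' is a
+# hypothetical stand-in for the three words at FP_SRC):
+#
+#	#include <stdint.h>
+#	#include <stdio.h>
+#
+#	struct packed { uint32_t w0, w1, w2; };
+#
+#	/* INF/NAN if bits 30..16 of word 0 are all ones; ZERO if the
+#	 * integer digit (low nibble of word 0) and both mantissa
+#	 * words are zero; otherwise hand off to decbin() */
+#	static const char *classify(const struct packed *p)
+#	{
+#		if (((p->w0 >> 16) & 0x7fff) == 0x7fff)
+#			return "inf/nan";
+#		if ((p->w0 & 0xf) == 0 && p->w1 == 0 && p->w2 == 0)
+#			return "zero";
+#		return "normal";
+#	}
+#
+#	int main(void)
+#	{
+#		struct packed inf = { 0x7fff0000, 0, 0 };
+#		printf("%s\n", classify(&inf));	/* inf/nan */
+#		return 0;
+#	}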
+
+# the stacked <ea> for packed is correct except for -(An).
+# the base reg must be updated for both -(An) and (An)+.
+ global get_packed
+get_packed:
+ mov.l &0xc,%d0 # packed is 12 bytes
+ bsr.l _dcalc_ea # fetch <ea>; correct An
+
+ lea FP_SRC(%a6),%a1 # pass: ptr to super dst
+ mov.l &0xc,%d0 # pass: 12 bytes
+ bsr.l _dmem_read # read packed operand
+
+ tst.l %d1 # did dfetch fail?
+ bne.l facc_in_x # yes
+
+# The packed operand is an INF or a NAN if the exponent field is all ones.
+ bfextu FP_SRC(%a6){&1:&15},%d0 # get exp
+ cmpi.w %d0,&0x7fff # INF or NAN?
+ bne.b gp_try_zero # no
+ rts # operand is an INF or NAN
+
+# The packed operand is a zero if the mantissa is all zero, else it's
+# a normal packed op.
+gp_try_zero:
+ mov.b 3+FP_SRC(%a6),%d0 # get byte 4
+ andi.b &0x0f,%d0 # clear all but last nybble
+ bne.b gp_not_spec # not a zero
+ tst.l FP_SRC_HI(%a6) # is lw 2 zero?
+ bne.b gp_not_spec # not a zero
+ tst.l FP_SRC_LO(%a6) # is lw 3 zero?
+ bne.b gp_not_spec # not a zero
+ rts # operand is a ZERO
+gp_not_spec:
+ lea FP_SRC(%a6),%a0 # pass: ptr to packed op
+ bsr.l decbin # convert to extended
+ fmovm.x &0x80,FP_SRC(%a6) # make this the srcop
+ rts
+
+#########################################################################
+# decbin(): Converts normalized packed bcd value pointed to by register #
+# a0 to extended-precision value in fp0. #
+# #
+# INPUT *************************************************************** #
+# a0 = pointer to normalized packed bcd value #
+# #
+# OUTPUT ************************************************************** #
+# fp0 = exact fp representation of the packed bcd value. #
+# #
+# ALGORITHM *********************************************************** #
+#	The input is expected to be a normal bcd value (i.e. non-	#
+#	exceptional; all inf, zero, and NaN operands are dispatched	#
+#	without entering this routine) in 68881/882 format at (a0).	#
+# #
+# A1. Convert the bcd exponent to binary by successive adds and #
+# muls. Set the sign according to SE. Subtract 16 to compensate #
+# for the mantissa which is to be interpreted as 17 integer #
+# digits, rather than 1 integer and 16 fraction digits. #
+# Note: this operation can never overflow. #
+# #
+# A2. Convert the bcd mantissa to binary by successive #
+# adds and muls in FP0. Set the sign according to SM. #
+# The mantissa digits will be converted with the decimal point #
+# assumed following the least-significant digit. #
+# Note: this operation can never overflow. #
+# #
+# A3. Count the number of leading/trailing zeros in the #
+# bcd string. If SE is positive, count the leading zeros; #
+# if negative, count the trailing zeros. Set the adjusted #
+# exponent equal to the exponent from A1 and the zero count #
+#	     added if SE = 1 and subtracted if SE = 0. Scale the	#
+#	     mantissa the equivalent of forcing in the bcd value:	#
+#									#
+#	     SE = 0	a non-zero digit in the integer position	#
+#	     SE = 1	a non-zero digit in Mant0, lsd of the fraction	#
+# #
+#	     this will ensure that any value, regardless of its	#
+#	     representation (e.g. 0.1E2, 1E1, 10E0, 100E-1), is	#
+# consistently. #
+# #
+# A4. Calculate the factor 10^exp in FP1 using a table of #
+# 10^(2^n) values. To reduce the error in forming factors #
+# greater than 10^27, a directed rounding scheme is used with #
+# tables rounded to RN, RM, and RP, according to the table #
+# in the comments of the pwrten section. #
+# #
+# A5. Form the final binary number by scaling the mantissa by #
+# the exponent factor. This is done by multiplying the #
+# mantissa in FP0 by the factor in FP1 if the adjusted #
+# exponent sign is positive, and dividing FP0 by FP1 if #
+# it is negative. #
+# #
+# Clean up and return. Check if the final mul or div was inexact. #
+# If so, set INEX1 in USER_FPSR. #
+# #
+#########################################################################
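+
+# Before the details, a rough C model of steps A1-A5 using double and
+# libm in place of extended precision and the directed-rounding tables
+# (illustration only, not part of the build; double cannot hold all 17
+# digits, so this is approximate where decbin is exact):
+#
+#	#include <math.h>
+#	#include <stdint.h>
+#	#include <stdio.h>
+#
+#	static double decbin_sketch(uint32_t w0, uint32_t w1, uint32_t w2)
+#	{
+#		long exp = 0;
+#		double mant;
+#		int i;
+#
+#		for (i = 0; i < 3; i++)		/* A1: 3 bcd exp digits */
+#			exp = exp * 10 + ((w0 >> (24 - 4 * i)) & 0xf);
+#		if ((w0 >> 30) & 1)		/* SE */
+#			exp = -exp;
+#		exp -= 16;	/* mantissa read as 17 integer digits */
+#
+#		mant = (double)(w0 & 0xf);	/* A2: d16, then 16 more */
+#		for (i = 0; i < 8; i++)
+#			mant = mant * 10.0 + ((w1 >> (28 - 4 * i)) & 0xf);
+#		for (i = 0; i < 8; i++)
+#			mant = mant * 10.0 + ((w2 >> (28 - 4 * i)) & 0xf);
+#		if ((w0 >> 31) & 1)		/* SM */
+#			mant = -mant;
+#
+#		return mant * pow(10.0, (double)exp);	/* A4/A5 */
+#	}
+#
+#	int main(void)
+#	{
+#		/* +1.0000000000000000E+1 packs to 0x00010001,0,0 */
+#		printf("%g\n", decbin_sketch(0x00010001, 0, 0)); /* 10 */
+#		return 0;
+#	}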
+
+#
+# PTENRN, PTENRM, and PTENRP are arrays of powers of 10 rounded
+# to nearest, minus, and plus, respectively. The tables include
+# 10**{1,2,4,8,16,32,64,128,256,512,1024,2048,4096}. No rounding
+# is required until the power is greater than 27; however, all
+# tables include the first 5 entries for ease of indexing.
+#
+RTABLE:
+ byte 0,0,0,0
+ byte 2,3,2,3
+ byte 2,3,3,2
+ byte 3,2,2,3
+
+ set FNIBS,7
+ set FSTRT,0
+
+ set ESTRT,4
+ set EDIGITS,2
+
+ global decbin
+decbin:
+ mov.l 0x0(%a0),FP_SCR0_EX(%a6) # make a copy of input
+ mov.l 0x4(%a0),FP_SCR0_HI(%a6) # so we don't alter it
+ mov.l 0x8(%a0),FP_SCR0_LO(%a6)
+
+ lea FP_SCR0(%a6),%a0
+
+ movm.l &0x3c00,-(%sp) # save d2-d5
+ fmovm.x &0x1,-(%sp) # save fp1
+#
+# Calculate exponent:
+# 1. Copy bcd value in memory for use as a working copy.
+# 2. Calculate absolute value of exponent in d1 by mul and add.
+# 3. Correct for exponent sign.
+# 4. Subtract 16 to compensate for interpreting the mant as all integer digits.
+# (i.e., all digits assumed left of the decimal point.)
+#
+# Register usage:
+#
+# calc_e:
+# (*) d0: temp digit storage
+# (*) d1: accumulator for binary exponent
+# (*) d2: digit count
+# (*) d3: offset pointer
+# ( ) d4: first word of bcd
+# ( ) a0: pointer to working bcd value
+# ( ) a6: pointer to original bcd value
+# (*) FP_SCR1: working copy of original bcd value
+# (*) L_SCR1: copy of original exponent word
+#
+calc_e:
+ mov.l &EDIGITS,%d2 # # of nibbles (digits) in fraction part
+ mov.l &ESTRT,%d3 # counter to pick up digits
+ mov.l (%a0),%d4 # get first word of bcd
+ clr.l %d1 # zero d1 for accumulator
+e_gd:
+ mulu.l &0xa,%d1 # mul partial product by one digit place
+ bfextu %d4{%d3:&4},%d0 # get the digit and zero extend into d0
+ add.l %d0,%d1 # d1 = d1 + d0
+ addq.b &4,%d3 # advance d3 to the next digit
+	dbf.w		%d2,e_gd	# loop until all 3 digits are done
+ btst &30,%d4 # get SE
+ beq.b e_pos # don't negate if pos
+ neg.l %d1 # negate before subtracting
+e_pos:
+ sub.l &16,%d1 # sub to compensate for shift of mant
+ bge.b e_save # if still pos, do not neg
+ neg.l %d1 # now negative, make pos and set SE
+ or.l &0x40000000,%d4 # set SE in d4,
+ or.l &0x40000000,(%a0) # and in working bcd
+e_save:
+ mov.l %d1,-(%sp) # save exp on stack
+#
+#
+# Calculate mantissa:
+# 1. Calculate absolute value of mantissa in fp0 by mul and add.
+# 2. Correct for mantissa sign.
+# (i.e., all digits assumed left of the decimal point.)
+#
+# Register usage:
+#
+# calc_m:
+# (*) d0: temp digit storage
+# (*) d1: lword counter
+# (*) d2: digit count
+# (*) d3: offset pointer
+# ( ) d4: words 2 and 3 of bcd
+# ( ) a0: pointer to working bcd value
+# ( ) a6: pointer to original bcd value
+# (*) fp0: mantissa accumulator
+# ( ) FP_SCR1: working copy of original bcd value
+# ( ) L_SCR1: copy of original exponent word
+#
+calc_m:
+ mov.l &1,%d1 # word counter, init to 1
+ fmov.s &0x00000000,%fp0 # accumulator
+#
+#
+# Since the packed number has a long word between the first & second parts,
+# get the integer digit then skip down & get the rest of the
+# mantissa. We will unroll the loop once.
+#
+ bfextu (%a0){&28:&4},%d0 # integer part is ls digit in long word
+ fadd.b %d0,%fp0 # add digit to sum in fp0
+#
+#
+# Get the rest of the mantissa.
+#
+loadlw:
+	mov.l		(%a0,%d1.L*4),%d4 # load mantissa longword into d4
+ mov.l &FSTRT,%d3 # counter to pick up digits
+ mov.l &FNIBS,%d2 # reset number of digits per a0 ptr
+md2b:
+ fmul.s &0x41200000,%fp0 # fp0 = fp0 * 10
+ bfextu %d4{%d3:&4},%d0 # get the digit and zero extend
+ fadd.b %d0,%fp0 # fp0 = fp0 + digit
+#
+#
+# If all the digits (8) in that long word have been converted (d2=0),
+# then inc d1 (=2) to point to the next long word and reset d3 to 0
+# to initialize the digit offset, and set d2 to 7 for the digit count;
+# else continue with this long word.
+#
+ addq.b &4,%d3 # advance d3 to the next digit
+ dbf.w %d2,md2b # check for last digit in this lw
+nextlw:
+ addq.l &1,%d1 # inc lw pointer in mantissa
+ cmp.l %d1,&2 # test for last lw
+ ble.b loadlw # if not, get last one
+#
+# Check the sign of the mant and make the value in fp0 the same sign.
+#
+m_sign:
+ btst &31,(%a0) # test sign of the mantissa
+ beq.b ap_st_z # if clear, go to append/strip zeros
+ fneg.x %fp0 # if set, negate fp0
+#
+# Append/strip zeros:
+#
+# For adjusted exponents which have an absolute value greater than 27*,
+# this routine calculates the amount needed to normalize the mantissa
+# for the adjusted exponent. That number is subtracted from the exp
+# if the exp was positive, and added if it was negative. The purpose
+# of this is to reduce the value of the exponent and the possibility
+# of error in calculation of pwrten.
+#
+# 1. Branch on the sign of the adjusted exponent.
+# 2p.(positive exp)
+#	2. Check M16 and the digits in lwords 2 and 3 in descending order.
+# 3. Add one for each zero encountered until a non-zero digit.
+# 4. Subtract the count from the exp.
+# 5. Check if the exp has crossed zero in #3 above; make the exp abs
+# and set SE.
+# 6. Multiply the mantissa by 10**count.
+# 2n.(negative exp)
+#	2. Check the digits in lwords 3 and 2 in descending order.
+# 3. Add one for each zero encountered until a non-zero digit.
+# 4. Add the count to the exp.
+# 5. Check if the exp has crossed zero in #3 above; clear SE.
+# 6. Divide the mantissa by 10**count.
+#
+# *Why 27? If the adjusted exponent is within -28 < expA < 28, then
+# any adjustment due to append/strip zeros will drive the resultant
+# exponent towards zero. Since all pwrten constants with a power
+# of 27 or less are exact, there is no need to use this routine to
+# attempt to lessen the resultant exponent.
+#
+# Register usage:
+#
+# ap_st_z:
+# (*) d0: temp digit storage
+# (*) d1: zero count
+# (*) d2: digit count
+# (*) d3: offset pointer
+# ( ) d4: first word of bcd
+# (*) d5: lword counter
+# ( ) a0: pointer to working bcd value
+# ( ) FP_SCR1: working copy of original bcd value
+# ( ) L_SCR1: copy of original exponent word
+#
+#
+# First check the absolute value of the exponent to see if this
+# routine is necessary. If so, then check the sign of the exponent
+# and do append (+) or strip (-) zeros accordingly.
+# This section handles a positive adjusted exponent.
+#
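+
+# A C sketch of the positive-exponent side of this section: count the
+# leading zero digits and fold them into the exponent (illustration
+# only, not part of the build; the matching 10^count scaling of the
+# mantissa happens in ap_p_fm):
+#
+#	#include <stdint.h>
+#	#include <stdio.h>
+#
+#	static long strip_sketch(uint32_t w0, uint32_t w1, uint32_t w2,
+#				 long exp)
+#	{
+#		uint32_t lw[2] = { w1, w2 };
+#		long count = 1;		/* counts M16's zero */
+#		int i, j;
+#
+#		if ((w0 & 0xf) != 0)	/* M16 non-zero: nothing to do */
+#			return exp;
+#		for (i = 0; i < 2; i++) {
+#			for (j = 0; j < 8; j++) {
+#				if (((lw[i] >> (28 - 4 * j)) & 0xf) != 0)
+#					return exp - count;
+#				count++;
+#			}
+#		}
+#		return exp - count;
+#	}
+#
+#	int main(void)
+#	{
+#		/* M16 = 0, then digits 0,0,1,...: strip 3, 30 -> 27 */
+#		printf("%ld\n", strip_sketch(0, 0x00123456, 0, 30));
+#		return 0;
+#	}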
+ap_st_z:
+ mov.l (%sp),%d1 # load expA for range test
+	cmp.l		%d1,&27		# compare abs(expA) with 27
+ ble.w pwrten # if abs(expA) <28, skip ap/st zeros
+ btst &30,(%a0) # check sign of exp
+ bne.b ap_st_n # if neg, go to neg side
+ clr.l %d1 # zero count reg
+ mov.l (%a0),%d4 # load lword 1 to d4
+ bfextu %d4{&28:&4},%d0 # get M16 in d0
+ bne.b ap_p_fx # if M16 is non-zero, go fix exp
+ addq.l &1,%d1 # inc zero count
+ mov.l &1,%d5 # init lword counter
+ mov.l (%a0,%d5.L*4),%d4 # get lword 2 to d4
+	bne.b		ap_p_cl		# if lw 2 is non-zero, check digits
+	addq.l		&8,%d1		# else lw 2 is zero; inc count by 8
+ addq.l &1,%d5 # inc lword counter
+ mov.l (%a0,%d5.L*4),%d4 # get lword 3 to d4
+ap_p_cl:
+ clr.l %d3 # init offset reg
+ mov.l &7,%d2 # init digit counter
+ap_p_gd:
+ bfextu %d4{%d3:&4},%d0 # get digit
+ bne.b ap_p_fx # if non-zero, go to fix exp
+ addq.l &4,%d3 # point to next digit
+ addq.l &1,%d1 # inc digit counter
+ dbf.w %d2,ap_p_gd # get next digit
+ap_p_fx:
+	mov.l		%d1,%d0		# copy counter to d0
+ mov.l (%sp),%d1 # get adjusted exp from memory
+ sub.l %d0,%d1 # subtract count from exp
+ bge.b ap_p_fm # if still pos, go to pwrten
+	neg.l		%d1		# now it's neg; get abs
+ mov.l (%a0),%d4 # load lword 1 to d4
+ or.l &0x40000000,%d4 # and set SE in d4
+ or.l &0x40000000,(%a0) # and in memory
+#
+# Calculate the mantissa multiplier to compensate for the stripping of
+# zeros from the mantissa.
+#
+ap_p_fm:
+ lea.l PTENRN(%pc),%a1 # get address of power-of-ten table
+ clr.l %d3 # init table index
+ fmov.s &0x3f800000,%fp1 # init fp1 to 1
+ mov.l &3,%d2 # init d2 to count bits in counter
+ap_p_el:
+ asr.l &1,%d0 # shift lsb into carry
+	bcc.b		ap_p_en		# if zero, skip the mul
+ fmul.x (%a1,%d3),%fp1 # mul by 10**(d3_bit_no)
+ap_p_en:
+ add.l &12,%d3 # inc d3 to next rtable entry
+ tst.l %d0 # check if d0 is zero
+ bne.b ap_p_el # if not, get next bit
+ fmul.x %fp1,%fp0 # mul mantissa by 10**(no_bits_shifted)
+ bra.b pwrten # go calc pwrten
+#
+# This section handles a negative adjusted exponent.
+#
+ap_st_n:
+ clr.l %d1 # clr counter
+ mov.l &2,%d5 # set up d5 to point to lword 3
+ mov.l (%a0,%d5.L*4),%d4 # get lword 3
+ bne.b ap_n_cl # if not zero, check digits
+ sub.l &1,%d5 # dec d5 to point to lword 2
+ addq.l &8,%d1 # inc counter by 8
+ mov.l (%a0,%d5.L*4),%d4 # get lword 2
+ap_n_cl:
+ mov.l &28,%d3 # point to last digit
+ mov.l &7,%d2 # init digit counter
+ap_n_gd:
+ bfextu %d4{%d3:&4},%d0 # get digit
+ bne.b ap_n_fx # if non-zero, go to exp fix
+ subq.l &4,%d3 # point to previous digit
+ addq.l &1,%d1 # inc digit counter
+ dbf.w %d2,ap_n_gd # get next digit
+ap_n_fx:
+ mov.l %d1,%d0 # copy counter to d0
+ mov.l (%sp),%d1 # get adjusted exp from memory
+ sub.l %d0,%d1 # subtract count from exp
+ bgt.b ap_n_fm # if still pos, go fix mantissa
+ neg.l %d1 # take abs of exp and clr SE
+ mov.l (%a0),%d4 # load lword 1 to d4
+ and.l &0xbfffffff,%d4 # and clr SE in d4
+ and.l &0xbfffffff,(%a0) # and in memory
+#
+# Calculate the mantissa multiplier to compensate for the appending of
+# zeros to the mantissa.
+#
+ap_n_fm:
+ lea.l PTENRN(%pc),%a1 # get address of power-of-ten table
+ clr.l %d3 # init table index
+ fmov.s &0x3f800000,%fp1 # init fp1 to 1
+ mov.l &3,%d2 # init d2 to count bits in counter
+ap_n_el:
+ asr.l &1,%d0 # shift lsb into carry
+	bcc.b		ap_n_en		# if zero, skip the mul
+ fmul.x (%a1,%d3),%fp1 # mul by 10**(d3_bit_no)
+ap_n_en:
+ add.l &12,%d3 # inc d3 to next rtable entry
+ tst.l %d0 # check if d0 is zero
+ bne.b ap_n_el # if not, get next bit
+ fdiv.x %fp1,%fp0 # div mantissa by 10**(no_bits_shifted)
+#
+#
+# Calculate power-of-ten factor from adjusted and shifted exponent.
+#
+# Register usage:
+#
+# pwrten:
+# (*) d0: temp
+# ( ) d1: exponent
+# (*) d2: {FPCR[6:5],SM,SE} as index in RTABLE; temp
+# (*) d3: FPCR work copy
+# ( ) d4: first word of bcd
+# (*) a1: RTABLE pointer
+# calc_p:
+# (*) d0: temp
+# ( ) d1: exponent
+# (*) d3: PWRTxx table index
+# ( ) a0: pointer to working copy of bcd
+# (*) a1: PWRTxx pointer
+# (*) fp1: power-of-ten accumulator
+#
+# Pwrten calculates the exponent factor in the selected rounding mode
+# according to the following table:
+#
+# Sign of Mant Sign of Exp Rounding Mode PWRTEN Rounding Mode
+#
+# ANY ANY RN RN
+#
+# + + RP RP
+# - + RP RM
+# + - RP RM
+# - - RP RP
+#
+# + + RM RM
+# - + RM RP
+# + - RM RP
+# - - RM RM
+#
+# + + RZ RM
+# - + RZ RM
+# + - RZ RP
+# - - RZ RP
+#
+#
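+
+# The e_loop idiom below (and its twins ap_p_el/ap_n_el above) builds
+# 10^e by multiplying the table entries 10^(2^n) selected by the set
+# bits of e. A minimal C sketch (illustration only, not part of the
+# build; repeated squaring stands in for stepping the PTENxx table):
+#
+#	#include <stdio.h>
+#
+#	static double pwrten_sketch(unsigned e)
+#	{
+#		double factor = 1.0, p = 10.0;	/* 10^1, 10^2, 10^4, ... */
+#
+#		while (e) {
+#			if (e & 1)
+#				factor *= p;
+#			p *= p;
+#			e >>= 1;
+#		}
+#		return factor;
+#	}
+#
+#	int main(void)
+#	{
+#		printf("%g\n", pwrten_sketch(27));	/* 1e+27 */
+#		return 0;
+#	}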
+pwrten:
+ mov.l USER_FPCR(%a6),%d3 # get user's FPCR
+ bfextu %d3{&26:&2},%d2 # isolate rounding mode bits
+ mov.l (%a0),%d4 # reload 1st bcd word to d4
+ asl.l &2,%d2 # format d2 to be
+ bfextu %d4{&0:&2},%d0 # {FPCR[6],FPCR[5],SM,SE}
+ add.l %d0,%d2 # in d2 as index into RTABLE
+ lea.l RTABLE(%pc),%a1 # load rtable base
+ mov.b (%a1,%d2),%d0 # load new rounding bits from table
+ clr.l %d3 # clear d3 to force no exc and extended
+ bfins %d0,%d3{&26:&2} # stuff new rounding bits in FPCR
+ fmov.l %d3,%fpcr # write new FPCR
+ asr.l &1,%d0 # write correct PTENxx table
+ bcc.b not_rp # to a1
+ lea.l PTENRP(%pc),%a1 # it is RP
+ bra.b calc_p # go to init section
+not_rp:
+ asr.l &1,%d0 # keep checking
+ bcc.b not_rm
+ lea.l PTENRM(%pc),%a1 # it is RM
+ bra.b calc_p # go to init section
+not_rm:
+ lea.l PTENRN(%pc),%a1 # it is RN
+calc_p:
+ mov.l %d1,%d0 # copy exp to d0;use d0
+ bpl.b no_neg # if exp is negative,
+ neg.l %d0 # invert it
+ or.l &0x40000000,(%a0) # and set SE bit
+no_neg:
+ clr.l %d3 # table index
+ fmov.s &0x3f800000,%fp1 # init fp1 to 1
+e_loop:
+ asr.l &1,%d0 # shift next bit into carry
+ bcc.b e_next # if zero, skip the mul
+ fmul.x (%a1,%d3),%fp1 # mul by 10**(d3_bit_no)
+e_next:
+ add.l &12,%d3 # inc d3 to next rtable entry
+ tst.l %d0 # check if d0 is zero
+ bne.b e_loop # not zero, continue shifting
+#
+#
+# Check the sign of the adjusted exp and make the value in fp0 the
+# same sign. If the exp was pos then multiply fp1*fp0;
+# else divide fp0/fp1.
+#
+# Register Usage:
+# norm:
+# ( ) a0: pointer to working bcd value
+# (*) fp0: mantissa accumulator
+# ( ) fp1: scaling factor - 10**(abs(exp))
+#
+pnorm:
+ btst &30,(%a0) # test the sign of the exponent
+ beq.b mul # if clear, go to multiply
+div:
+ fdiv.x %fp1,%fp0 # exp is negative, so divide mant by exp
+ bra.b end_dec
+mul:
+ fmul.x %fp1,%fp0 # exp is positive, so multiply by exp
+#
+#
+# Clean up and return with result in fp0.
+#
+# If the final mul/div in decbin incurred an inex exception,
+# it will be inex2, but will be reported as inex1 by get_op.
+#
+end_dec:
+ fmov.l %fpsr,%d0 # get status register
+ bclr &inex2_bit+8,%d0 # test for inex2 and clear it
+ beq.b no_exc # skip this if no exc
+ ori.w &inx1a_mask,2+USER_FPSR(%a6) # set INEX1/AINEX
+no_exc:
+ add.l &0x4,%sp # clear 1 lw param
+ fmovm.x (%sp)+,&0x40 # restore fp1
+ movm.l (%sp)+,&0x3c # restore d2-d5
+ fmov.l &0x0,%fpcr
+ fmov.l &0x0,%fpsr
+ rts
+
+#########################################################################
+# bindec(): Converts an input in extended precision format to bcd format#
+# #
+# INPUT *************************************************************** #
+# a0 = pointer to the input extended precision value in memory. #
+# the input may be either normalized, unnormalized, or #
+# denormalized. #
+# d0 = contains the k-factor sign-extended to 32-bits. #
+# #
+# OUTPUT ************************************************************** #
+# FP_SCR0(a6) = bcd format result on the stack. #
+# #
+# ALGORITHM *********************************************************** #
+# #
+# A1. Set RM and size ext; Set SIGMA = sign of input. #
+# The k-factor is saved for use in d7. Clear the #
+# BINDEC_FLG for separating normalized/denormalized #
+# input. If input is unnormalized or denormalized, #
+# normalize it. #
+# #
+# A2. Set X = abs(input). #
+# #
+# A3. Compute ILOG. #
+# ILOG is the log base 10 of the input value. It is #
+# approximated by adding e + 0.f when the original #
+# value is viewed as 2^^e * 1.f in extended precision. #
+# This value is stored in d6. #
+# #
+# A4. Clr INEX bit. #
+# The operation in A3 above may have set INEX2. #
+# #
+# A5. Set ICTR = 0; #
+# ICTR is a flag used in A13. It must be set before the #
+# loop entry A6. #
+# #
+# A6. Calculate LEN. #
+# LEN is the number of digits to be displayed. The #
+# k-factor can dictate either the total number of digits, #
+# if it is a positive number, or the number of digits #
+# after the decimal point which are to be included as #
+# significant. See the 68882 manual for examples. #
+# If LEN is computed to be greater than 17, set OPERR in #
+# USER_FPSR. LEN is stored in d4. #
+# #
+# A7. Calculate SCALE. #
+# SCALE is equal to 10^ISCALE, where ISCALE is the number #
+#	of decimal places needed to ensure LEN integer digits	#
+# in the output before conversion to bcd. LAMBDA is the #
+# sign of ISCALE, used in A9. Fp1 contains #
+# 10^^(abs(ISCALE)) using a rounding mode which is a #
+# function of the original rounding mode and the signs #
+# of ISCALE and X. A table is given in the code. #
+# #
+# A8. Clr INEX; Force RZ. #
+# The operation in A3 above may have set INEX2. #
+#	RZ mode is forced for the scaling operation to ensure	#
+# only one rounding error. The grs bits are collected in #
+# the INEX flag for use in A10. #
+# #
+# A9. Scale X -> Y. #
+# The mantissa is scaled to the desired number of #
+# significant digits. The excess digits are collected #
+# in INEX2. #
+# #
+# A10. Or in INEX. #
+# If INEX is set, round error occurred. This is #
+# compensated for by 'or-ing' in the INEX2 flag to #
+# the lsb of Y. #
+# #
+# A11. Restore original FPCR; set size ext. #
+# Perform FINT operation in the user's rounding mode. #
+# Keep the size to extended. #
+# #
+# A12. Calculate YINT = FINT(Y) according to user's rounding #
+# mode. The FPSP routine sintd0 is used. The output #
+# is in fp0. #
+# #
+# A13. Check for LEN digits. #
+# If the int operation results in more than LEN digits, #
+# or less than LEN -1 digits, adjust ILOG and repeat from #
+# A6. This test occurs only on the first pass. If the #
+# result is exactly 10^LEN, decrement ILOG and divide #
+# the mantissa by 10. #
+# #
+# A14. Convert the mantissa to bcd. #
+# The binstr routine is used to convert the LEN digit #
+# mantissa to bcd in memory. The input to binstr is #
+# to be a fraction; i.e. (mantissa)/10^LEN and adjusted #
+# such that the decimal point is to the left of bit 63. #
+# The bcd digits are stored in the correct position in #
+# the final string area in memory. #
+# #
+# A15. Convert the exponent to bcd. #
+# As in A14 above, the exp is converted to bcd and the #
+# digits are stored in the final string. #
+# Test the length of the final exponent string. If the #
+# length is 4, set operr. #
+# #
+# A16. Write sign bits to final string. #
+# #
+#########################################################################
+
+set BINDEC_FLG, EXC_TEMP # DENORM flag
+
+# Constants in extended precision
+PLOG2:
+ long 0x3FFD0000,0x9A209A84,0xFBCFF798,0x00000000
+PLOG2UP1:
+ long 0x3FFD0000,0x9A209A84,0xFBCFF799,0x00000000
+
+# Constants in single precision
+FONE:
+ long 0x3F800000,0x00000000,0x00000000,0x00000000
+FTWO:
+ long 0x40000000,0x00000000,0x00000000,0x00000000
+FTEN:
+ long 0x41200000,0x00000000,0x00000000,0x00000000
+F4933:
+ long 0x459A2800,0x00000000,0x00000000,0x00000000
+
+RBDTBL:
+ byte 0,0,0,0
+ byte 3,3,2,2
+ byte 3,2,2,3
+ byte 2,3,3,2
+
+# Implementation Notes:
+#
+# The registers are used as follows:
+#
+# d0: scratch; LEN input to binstr
+# d1: scratch
+# d2: upper 32-bits of mantissa for binstr
+# d3: scratch;lower 32-bits of mantissa for binstr
+# d4: LEN
+# d5: LAMBDA/ICTR
+# d6: ILOG
+# d7: k-factor
+# a0: ptr for original operand/final result
+# a1: scratch pointer
+# a2: pointer to FP_X; abs(original value) in ext
+# fp0: scratch
+# fp1: scratch
+# fp2: scratch
+# F_SCR1:
+# F_SCR2:
+# L_SCR1:
+# L_SCR2:
+
+ global bindec
+bindec:
+ movm.l &0x3f20,-(%sp) # {%d2-%d7/%a2}
+ fmovm.x &0x7,-(%sp) # {%fp0-%fp2}
+
+# A1. Set RM and size ext. Set SIGMA = sign input;
+# The k-factor is saved for use in d7. Clear BINDEC_FLG for
+# separating normalized/denormalized input. If the input
+# is a denormalized number, set the BINDEC_FLG memory word
+# to signal denorm. If the input is unnormalized, normalize
+# the input and test for denormalized result.
+#
+ fmov.l &rm_mode*0x10,%fpcr # set RM and ext
+ mov.l (%a0),L_SCR2(%a6) # save exponent for sign check
+ mov.l %d0,%d7 # move k-factor to d7
+
+ clr.b BINDEC_FLG(%a6) # clr norm/denorm flag
+ cmpi.b STAG(%a6),&DENORM # is input a DENORM?
+ bne.w A2_str # no; input is a NORM
+
+#
+# Normalize the denorm
+#
+un_de_norm:
+ mov.w (%a0),%d0
+ and.w &0x7fff,%d0 # strip sign of normalized exp
+ mov.l 4(%a0),%d1
+ mov.l 8(%a0),%d2
+norm_loop:
+ sub.w &1,%d0
+ lsl.l &1,%d2
+ roxl.l &1,%d1
+ tst.l %d1
+ bge.b norm_loop
+#
+# Test if the normalized input is denormalized
+#
+ tst.w %d0
+ bgt.b pos_exp # if greater than zero, it is a norm
+ st BINDEC_FLG(%a6) # set flag for denorm
+pos_exp:
+ and.w &0x7fff,%d0 # strip sign of normalized exp
+ mov.w %d0,(%a0)
+ mov.l %d1,4(%a0)
+ mov.l %d2,8(%a0)
+
+# A2. Set X = abs(input).
+#
+A2_str:
+ mov.l (%a0),FP_SCR1(%a6) # move input to work space
+ mov.l 4(%a0),FP_SCR1+4(%a6) # move input to work space
+ mov.l 8(%a0),FP_SCR1+8(%a6) # move input to work space
+ and.l &0x7fffffff,FP_SCR1(%a6) # create abs(X)
+
+# A3. Compute ILOG.
+# ILOG is the log base 10 of the input value. It is approx-
+# imated by adding e + 0.f when the original value is viewed
+# as 2^^e * 1.f in extended precision. This value is stored
+# in d6.
+#
+# Register usage:
+# Input/Output
+# d0: k-factor/exponent
+# d2: x/x
+# d3: x/x
+# d4: x/x
+# d5: x/x
+# d6: x/ILOG
+# d7: k-factor/Unchanged
+# a0: ptr for original operand/final result
+# a1: x/x
+# a2: x/x
+# fp0: x/float(ILOG)
+# fp1: x/x
+# fp2: x/x
+# F_SCR1:x/x
+# F_SCR2:Abs(X)/Abs(X) with $3fff exponent
+# L_SCR1:x/x
+# L_SCR2:first word of X packed/Unchanged
+
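+
+# A C sketch of the approximation above: split x into 2^e * 1.f and
+# scale (e + 0.f) by log10(2) (illustration only, not part of the
+# build; plain truncation stands in for the PLOG2/PLOG2UP1 pair that
+# keeps the rounding direction safe for negative values):
+#
+#	#include <math.h>
+#	#include <stdio.h>
+#
+#	static long ilog_sketch(double x)
+#	{
+#		int e;
+#		double f = frexp(x, &e) * 2.0;	/* x = f * 2^(e-1) */
+#
+#		return (long)((e - 1 + (f - 1.0)) * log10(2.0));
+#	}
+#
+#	int main(void)
+#	{
+#		printf("%ld %ld\n", ilog_sketch(9.5),
+#		       ilog_sketch(1234.0));	/* 0 3 */
+#		return 0;
+#	}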
+ tst.b BINDEC_FLG(%a6) # check for denorm
+ beq.b A3_cont # if clr, continue with norm
+ mov.l &-4933,%d6 # force ILOG = -4933
+ bra.b A4_str
+A3_cont:
+ mov.w FP_SCR1(%a6),%d0 # move exp to d0
+ mov.w &0x3fff,FP_SCR1(%a6) # replace exponent with 0x3fff
+ fmov.x FP_SCR1(%a6),%fp0 # now fp0 has 1.f
+ sub.w &0x3fff,%d0 # strip off bias
+ fadd.w %d0,%fp0 # add in exp
+ fsub.s FONE(%pc),%fp0 # subtract off 1.0
+ fbge.w pos_res # if pos, branch
+ fmul.x PLOG2UP1(%pc),%fp0 # if neg, mul by LOG2UP1
+ fmov.l %fp0,%d6 # put ILOG in d6 as a lword
+ bra.b A4_str # go move out ILOG
+pos_res:
+ fmul.x PLOG2(%pc),%fp0 # if pos, mul by LOG2
+ fmov.l %fp0,%d6 # put ILOG in d6 as a lword
+
+
+# A4. Clr INEX bit.
+# The operation in A3 above may have set INEX2.
+
+A4_str:
+ fmov.l &0,%fpsr # zero all of fpsr - nothing needed
+
+
+# A5. Set ICTR = 0;
+# ICTR is a flag used in A13. It must be set before the
+# loop entry A6. The lower word of d5 is used for ICTR.
+
+ clr.w %d5 # clear ICTR
+
+# A6. Calculate LEN.
+# LEN is the number of digits to be displayed. The k-factor
+# can dictate either the total number of digits, if it is
+# a positive number, or the number of digits after the
+# original decimal point which are to be included as
+# significant. See the 68882 manual for examples.
+# If LEN is computed to be greater than 17, set OPERR in
+# USER_FPSR. LEN is stored in d4.
+#
+# Register usage:
+# Input/Output
+# d0: exponent/Unchanged
+# d2: x/x/scratch
+# d3: x/x
+# d4: exc picture/LEN
+# d5: ICTR/Unchanged
+# d6: ILOG/Unchanged
+# d7: k-factor/Unchanged
+# a0: ptr for original operand/final result
+# a1: x/x
+# a2: x/x
+# fp0: float(ILOG)/Unchanged
+# fp1: x/x
+# fp2: x/x
+# F_SCR1:x/x
+# F_SCR2:Abs(X) with $3fff exponent/Unchanged
+# L_SCR1:x/x
+# L_SCR2:first word of X packed/Unchanged
+
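+
+# A C sketch of the LEN computation below (illustration only, not
+# part of the build; the OPERR side effect is noted, not modeled):
+#
+#	#include <stdio.h>
+#
+#	static int len_sketch(int k, long ilog)
+#	{
+#		long len = (k > 0) ? k : ilog + 1 - k;
+#
+#		if (len < 1)
+#			len = 1;
+#		if (len > 17)
+#			len = 17;	/* and OPERR is raised if k > 0 */
+#		return (int)len;
+#	}
+#
+#	int main(void)
+#	{
+#		/* k = 4: four digits; k = -2 on 123.456: 3 + 2 digits */
+#		printf("%d %d\n", len_sketch(4, 2), len_sketch(-2, 2));
+#		return 0;
+#	}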
+A6_str:
+ tst.l %d7 # branch on sign of k
+ ble.b k_neg # if k <= 0, LEN = ILOG + 1 - k
+ mov.l %d7,%d4 # if k > 0, LEN = k
+ bra.b len_ck # skip to LEN check
+k_neg:
+ mov.l %d6,%d4 # first load ILOG to d4
+ sub.l %d7,%d4 # subtract off k
+ addq.l &1,%d4 # add in the 1
+len_ck:
+ tst.l %d4 # LEN check: branch on sign of LEN
+ ble.b LEN_ng # if neg, set LEN = 1
+ cmp.l %d4,&17 # test if LEN > 17
+ ble.b A7_str # if not, forget it
+ mov.l &17,%d4 # set max LEN = 17
+ tst.l %d7 # if negative, never set OPERR
+ ble.b A7_str # if positive, continue
+ or.l &opaop_mask,USER_FPSR(%a6) # set OPERR & AIOP in USER_FPSR
+ bra.b A7_str # finished here
+LEN_ng:
+ mov.l &1,%d4 # min LEN is 1
+
+
+# A7. Calculate SCALE.
+# SCALE is equal to 10^ISCALE, where ISCALE is the number
+# of decimal places needed to ensure LEN integer digits
+# in the output before conversion to bcd. LAMBDA is the sign
+# of ISCALE, used in A9. Fp1 contains 10^^(abs(ISCALE)) using
+# the rounding mode as given in the following table (see
+# Coonen, p. 7.23 as ref.; however, the SCALE variable is
+# of opposite sign in bindec.sa from Coonen).
+#
+# Initial USE
+# FPCR[6:5] LAMBDA SIGN(X) FPCR[6:5]
+# ----------------------------------------------
+# RN 00 0 0 00/0 RN
+# RN 00 0 1 00/0 RN
+# RN 00 1 0 00/0 RN
+# RN 00 1 1 00/0 RN
+# RZ 01 0 0 11/3 RP
+# RZ 01 0 1 11/3 RP
+# RZ 01 1 0 10/2 RM
+# RZ 01 1 1 10/2 RM
+# RM 10 0 0 11/3 RP
+# RM 10 0 1 10/2 RM
+# RM 10 1 0 10/2 RM
+# RM 10 1 1 11/3 RP
+# RP 11 0 0 10/2 RM
+# RP 11 0 1 11/3 RP
+# RP 11 1 0 11/3 RP
+# RP 11 1 1 10/2 RM
+#
+# Register usage:
+# Input/Output
+# d0: exponent/scratch - final is 0
+# d2: x/0 or 24 for A9
+# d3: x/scratch - offset ptr into PTENRM array
+# d4: LEN/Unchanged
+# d5: 0/ICTR:LAMBDA
+# d6: ILOG/ILOG or k if ((k<=0)&(ILOG<k))
+# d7: k-factor/Unchanged
+# a0: ptr for original operand/final result
+# a1: x/ptr to PTENRM array
+# a2: x/x
+# fp0: float(ILOG)/Unchanged
+# fp1: x/10^ISCALE
+# fp2: x/x
+# F_SCR1:x/x
+# F_SCR2:Abs(X) with $3fff exponent/Unchanged
+# L_SCR1:x/x
+# L_SCR2:first word of X packed/Unchanged
+
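+
+# The net effect of A7 and the scaling in A9, as a C sketch
+# (illustration only, not part of the build; libm stands in for the
+# PTENxx tables and the rounding-mode selection above):
+#
+#	#include <math.h>
+#	#include <stdio.h>
+#
+#	static double scale_sketch(double x, long ilog, int len)
+#	{
+#		long iscale = ilog + 1 - len;	/* LAMBDA = sign of this */
+#
+#		return x / pow(10.0, (double)iscale);
+#	}
+#
+#	int main(void)
+#	{
+#		/* 123.456 (ILOG = 2) with LEN = 5 -> 12345.6 for FINT */
+#		printf("%g\n", scale_sketch(123.456, 2, 5));
+#		return 0;
+#	}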
+A7_str:
+ tst.l %d7 # test sign of k
+ bgt.b k_pos # if pos and > 0, skip this
+ cmp.l %d7,%d6 # test k - ILOG
+ blt.b k_pos # if ILOG >= k, skip this
+ mov.l %d7,%d6 # if ((k<0) & (ILOG < k)) ILOG = k
+k_pos:
+ mov.l %d6,%d0 # calc ILOG + 1 - LEN in d0
+ addq.l &1,%d0 # add the 1
+ sub.l %d4,%d0 # sub off LEN
+ swap %d5 # use upper word of d5 for LAMBDA
+ clr.w %d5 # set it zero initially
+ clr.w %d2 # set up d2 for very small case
+ tst.l %d0 # test sign of ISCALE
+ bge.b iscale # if pos, skip next inst
+ addq.w &1,%d5 # if neg, set LAMBDA true
+ cmp.l %d0,&0xffffecd4 # test iscale <= -4908
+ bgt.b no_inf # if false, skip rest
+ add.l &24,%d0 # add in 24 to iscale
+ mov.l &24,%d2 # put 24 in d2 for A9
+no_inf:
+ neg.l %d0 # and take abs of ISCALE
+iscale:
+ fmov.s FONE(%pc),%fp1 # init fp1 to 1
+ bfextu USER_FPCR(%a6){&26:&2},%d1 # get initial rmode bits
+ lsl.w &1,%d1 # put them in bits 2:1
+ add.w %d5,%d1 # add in LAMBDA
+ lsl.w &1,%d1 # put them in bits 3:1
+ tst.l L_SCR2(%a6) # test sign of original x
+ bge.b x_pos # if pos, don't set bit 0
+ addq.l &1,%d1 # if neg, set bit 0
+x_pos:
+ lea.l RBDTBL(%pc),%a2 # load rbdtbl base
+ mov.b (%a2,%d1),%d3 # load d3 with new rmode
+ lsl.l &4,%d3 # put bits in proper position
+ fmov.l %d3,%fpcr # load bits into fpu
+ lsr.l &4,%d3 # put bits in proper position
+ tst.b %d3 # decode new rmode for pten table
+ bne.b not_rn # if zero, it is RN
+ lea.l PTENRN(%pc),%a1 # load a1 with RN table base
+ bra.b rmode # exit decode
+not_rn:
+ lsr.b &1,%d3 # get lsb in carry
+ bcc.b not_rp2 # if carry clear, it is RM
+ lea.l PTENRP(%pc),%a1 # load a1 with RP table base
+ bra.b rmode # exit decode
+not_rp2:
+ lea.l PTENRM(%pc),%a1 # load a1 with RM table base
+rmode:
+ clr.l %d3 # clr table index
+e_loop2:
+ lsr.l &1,%d0 # shift next bit into carry
+ bcc.b e_next2 # if zero, skip the mul
+ fmul.x (%a1,%d3),%fp1 # mul by 10**(d3_bit_no)
+e_next2:
+ add.l &12,%d3 # inc d3 to next pwrten table entry
+ tst.l %d0 # test if ISCALE is zero
+ bne.b e_loop2 # if not, loop
+
+# A8. Clr INEX; Force RZ.
+# The operation in A3 above may have set INEX2.
+# RZ mode is forced for the scaling operation to ensure
+# only one rounding error. The grs bits are collected in
+# the INEX flag for use in A10.
+#
+# Register usage:
+# Input/Output
+
+ fmov.l &0,%fpsr # clr INEX
+ fmov.l &rz_mode*0x10,%fpcr # set RZ rounding mode
+
+# A9. Scale X -> Y.
+# The mantissa is scaled to the desired number of significant
+# digits. The excess digits are collected in INEX2. If scaling by
+# mul, check d2 for an excess power-of-ten value. If non-zero,
+# the iscale value would have caused the pwrten calculation
+# to overflow. Only a negative iscale can cause this, so
+# multiply by 10^(d2), which is now only allowed to be 24,
+# with a multiply by 10^8 and 10^16, which is exact since
+# 10^24 is exact. If the input was denormalized, we must
+# create a busy stack frame with the mul command and the
+# two operands, and allow the fpu to complete the multiply.
+#
+# Register usage:
+# Input/Output
+# d0: FPCR with RZ mode/Unchanged
+# d2: 0 or 24/unchanged
+# d3: x/x
+# d4: LEN/Unchanged
+# d5: ICTR:LAMBDA
+# d6: ILOG/Unchanged
+# d7: k-factor/Unchanged
+# a0: ptr for original operand/final result
+# a1: ptr to PTENRM array/Unchanged
+# a2: x/x
+# fp0: float(ILOG)/X adjusted for SCALE (Y)
+# fp1: 10^ISCALE/Unchanged
+# fp2: x/x
+# F_SCR1:x/x
+# F_SCR2:Abs(X) with $3fff exponent/Unchanged
+# L_SCR1:x/x
+# L_SCR2:first word of X packed/Unchanged
+
+A9_str:
+ fmov.x (%a0),%fp0 # load X from memory
+ fabs.x %fp0 # use abs(X)
+ tst.w %d5 # LAMBDA is in lower word of d5
+ bne.b sc_mul # if neg (LAMBDA = 1), scale by mul
+ fdiv.x %fp1,%fp0 # calculate X / SCALE -> Y to fp0
+ bra.w A10_st # branch to A10
+
+sc_mul:
+ tst.b BINDEC_FLG(%a6) # check for denorm
+ beq.w A9_norm # if norm, continue with mul
+
+# for DENORM, we must calculate:
+# fp0 = input_op * 10^ISCALE * 10^24
+# since the input operand is a DENORM, we can't multiply it directly.
+# so, we do the multiplication of the exponents and mantissas separately.
+# in this way, we avoid underflow on intermediate stages of the
+# multiplication and guarantee a result without exception.
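+#
+# The same underflow-dodging idea in C, for reference (illustration
+# only, not part of the build; frexp/ldexp play the role of the raw
+# 80-bit exponent-field arithmetic done below):
+#
+#	#include <math.h>
+#	#include <stdio.h>
+#
+#	static double denorm_mul_sketch(double tiny, double scale)
+#	{
+#		int e1, e2;
+#		double m1 = frexp(tiny, &e1);	/* tiny  = m1 * 2^e1 */
+#		double m2 = frexp(scale, &e2);	/* scale = m2 * 2^e2 */
+#
+#		/* multiply mantissas, add exponents once at the end */
+#		return ldexp(m1 * m2, e1 + e2);
+#	}
+#
+#	int main(void)
+#	{
+#		printf("%g\n", denorm_mul_sketch(ldexp(1.0, -1060),
+#						 ldexp(1.0, 1060)));	/* 1 */
+#		return 0;
+#	}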
+ fmovm.x &0x2,-(%sp) # save 10^ISCALE to stack
+
+ mov.w (%sp),%d3 # grab exponent
+ andi.w &0x7fff,%d3 # clear sign
+ ori.w &0x8000,(%a0) # make DENORM exp negative
+ add.w (%a0),%d3 # add DENORM exp to 10^ISCALE exp
+ subi.w &0x3fff,%d3 # subtract BIAS
+ add.w 36(%a1),%d3
+ subi.w &0x3fff,%d3 # subtract BIAS
+ add.w 48(%a1),%d3
+ subi.w &0x3fff,%d3 # subtract BIAS
+
+	bmi.w		sc_mul_err	# if result is DENORM, punt!!!
+
+ andi.w &0x8000,(%sp) # keep sign
+ or.w %d3,(%sp) # insert new exponent
+ andi.w &0x7fff,(%a0) # clear sign bit on DENORM again
+ mov.l 0x8(%a0),-(%sp) # put input op mantissa on stk
+ mov.l 0x4(%a0),-(%sp)
+ mov.l &0x3fff0000,-(%sp) # force exp to zero
+ fmovm.x (%sp)+,&0x80 # load normalized DENORM into fp0
+ fmul.x (%sp)+,%fp0
+
+# fmul.x 36(%a1),%fp0 # multiply fp0 by 10^8
+# fmul.x 48(%a1),%fp0 # multiply fp0 by 10^16
+ mov.l 36+8(%a1),-(%sp) # get 10^8 mantissa
+ mov.l 36+4(%a1),-(%sp)
+ mov.l &0x3fff0000,-(%sp) # force exp to zero
+ mov.l 48+8(%a1),-(%sp) # get 10^16 mantissa
+ mov.l 48+4(%a1),-(%sp)
+	mov.l		&0x3fff0000,-(%sp) # force exp to zero
+ fmul.x (%sp)+,%fp0 # multiply fp0 by 10^8
+ fmul.x (%sp)+,%fp0 # multiply fp0 by 10^16
+ bra.b A10_st
+
+sc_mul_err:
+ bra.b sc_mul_err
+
+A9_norm:
+ tst.w %d2 # test for small exp case
+ beq.b A9_con # if zero, continue as normal
+ fmul.x 36(%a1),%fp0 # multiply fp0 by 10^8
+ fmul.x 48(%a1),%fp0 # multiply fp0 by 10^16
+A9_con:
+ fmul.x %fp1,%fp0 # calculate X * SCALE -> Y to fp0
+
+# A10. Or in INEX.
+# If INEX is set, round error occurred. This is compensated
+# for by 'or-ing' in the INEX2 flag to the lsb of Y.
+#
+# Register usage:
+# Input/Output
+# d0: FPCR with RZ mode/FPSR with INEX2 isolated
+# d2: x/x
+# d3: x/x
+# d4: LEN/Unchanged
+# d5: ICTR:LAMBDA
+# d6: ILOG/Unchanged
+# d7: k-factor/Unchanged
+# a0: ptr for original operand/final result
+# a1: ptr to PTENxx array/Unchanged
+# a2: x/ptr to FP_SCR1(a6)
+# fp0: Y/Y with lsb adjusted
+# fp1: 10^ISCALE/Unchanged
+# fp2: x/x
+
+A10_st:
+ fmov.l %fpsr,%d0 # get FPSR
+ fmov.x %fp0,FP_SCR1(%a6) # move Y to memory
+ lea.l FP_SCR1(%a6),%a2 # load a2 with ptr to FP_SCR1
+ btst &9,%d0 # check if INEX2 set
+ beq.b A11_st # if clear, skip rest
+ or.l &1,8(%a2) # or in 1 to lsb of mantissa
+ fmov.x FP_SCR1(%a6),%fp0 # write adjusted Y back to fpu
+
+
+# A11. Restore original FPCR; set size ext.
+# Perform FINT operation in the user's rounding mode. Keep
+# the size to extended. The sintdo entry point in the sint
+# routine expects the FPCR value to be in USER_FPCR for
+# mode and precision. The original FPCR is saved in L_SCR1.
+
+A11_st:
+ mov.l USER_FPCR(%a6),L_SCR1(%a6) # save it for later
+ and.l &0x00000030,USER_FPCR(%a6) # set size to ext,
+# ;block exceptions
+
+
+# A12. Calculate YINT = FINT(Y) according to user's rounding mode.
+# The FPSP routine sintd0 is used. The output is in fp0.
+#
+# Register usage:
+# Input/Output
+# d0: FPSR with AINEX cleared/FPCR with size set to ext
+# d2: x/x/scratch
+# d3: x/x
+# d4: LEN/Unchanged
+# d5: ICTR:LAMBDA/Unchanged
+# d6: ILOG/Unchanged
+# d7: k-factor/Unchanged
+# a0: ptr for original operand/src ptr for sintdo
+# a1: ptr to PTENxx array/Unchanged
+# a2: ptr to FP_SCR1(a6)/Unchanged
+# a6: temp pointer to FP_SCR1(a6) - orig value saved and restored
+# fp0: Y/YINT
+# fp1: 10^ISCALE/Unchanged
+# fp2: x/x
+# F_SCR1:x/x
+# F_SCR2:Y adjusted for inex/Y with original exponent
+# L_SCR1:x/original USER_FPCR
+# L_SCR2:first word of X packed/Unchanged
+
+A12_st:
+ movm.l &0xc0c0,-(%sp) # save regs used by sintd0 {%d0-%d1/%a0-%a1}
+ mov.l L_SCR1(%a6),-(%sp)
+ mov.l L_SCR2(%a6),-(%sp)
+
+ lea.l FP_SCR1(%a6),%a0 # a0 is ptr to FP_SCR1(a6)
+ fmov.x %fp0,(%a0) # move Y to memory at FP_SCR1(a6)
+ tst.l L_SCR2(%a6) # test sign of original operand
+ bge.b do_fint12 # if pos, use Y
+ or.l &0x80000000,(%a0) # if neg, use -Y
+do_fint12:
+ mov.l USER_FPSR(%a6),-(%sp)
+# bsr sintdo # sint routine returns int in fp0
+
+ fmov.l USER_FPCR(%a6),%fpcr
+ fmov.l &0x0,%fpsr # clear the AEXC bits!!!
+## mov.l USER_FPCR(%a6),%d0 # ext prec/keep rnd mode
+## andi.l &0x00000030,%d0
+## fmov.l %d0,%fpcr
+ fint.x FP_SCR1(%a6),%fp0 # do fint()
+ fmov.l %fpsr,%d0
+ or.w %d0,FPSR_EXCEPT(%a6)
+## fmov.l &0x0,%fpcr
+## fmov.l %fpsr,%d0 # don't keep ccodes
+## or.w %d0,FPSR_EXCEPT(%a6)
+
+ mov.b (%sp),USER_FPSR(%a6)
+ add.l &4,%sp
+
+ mov.l (%sp)+,L_SCR2(%a6)
+ mov.l (%sp)+,L_SCR1(%a6)
+ movm.l (%sp)+,&0x303 # restore regs used by sint {%d0-%d1/%a0-%a1}
+
+ mov.l L_SCR2(%a6),FP_SCR1(%a6) # restore original exponent
+ mov.l L_SCR1(%a6),USER_FPCR(%a6) # restore user's FPCR
+
+# A13. Check for LEN digits.
+# If the int operation results in more than LEN digits,
+# or less than LEN -1 digits, adjust ILOG and repeat from
+# A6. This test occurs only on the first pass. If the
+# result is exactly 10^LEN, decrement ILOG and divide
+# the mantissa by 10. The calculation of 10^LEN cannot
+# be inexact, since all powers of ten up to 10^27 are exact
+# in extended precision, so the use of a previous power-of-ten
+# table will introduce no error.
+#
+#
+# Register usage:
+# Input/Output
+# d0: FPCR with size set to ext/scratch final = 0
+# d2: x/x
+# d3: x/scratch final = x
+# d4: LEN/LEN adjusted
+# d5: ICTR:LAMBDA/LAMBDA:ICTR
+# d6: ILOG/ILOG adjusted
+# d7: k-factor/Unchanged
+# a0: pointer into memory for packed bcd string formation
+# a1: ptr to PTENxx array/Unchanged
+# a2: ptr to FP_SCR1(a6)/Unchanged
+# fp0: int portion of Y/abs(YINT) adjusted
+# fp1: 10^ISCALE/Unchanged
+# fp2: x/10^LEN
+# F_SCR1:x/x
+# F_SCR2:Y with original exponent/Unchanged
+# L_SCR1:original USER_FPCR/Unchanged
+# L_SCR2:first word of X packed/Unchanged
+
+A13_st:
+ swap %d5 # put ICTR in lower word of d5
+ tst.w %d5 # check if ICTR = 0
+ bne not_zr # if non-zero, go to second test
+#
+# Compute 10^(LEN-1)
+#
+ fmov.s FONE(%pc),%fp2 # init fp2 to 1.0
+ mov.l %d4,%d0 # put LEN in d0
+ subq.l &1,%d0 # d0 = LEN -1
+ clr.l %d3 # clr table index
+l_loop:
+ lsr.l &1,%d0 # shift next bit into carry
+ bcc.b l_next # if zero, skip the mul
+ fmul.x (%a1,%d3),%fp2 # mul by 10**(d3_bit_no)
+l_next:
+ add.l &12,%d3 # inc d3 to next pwrten table entry
+ tst.l %d0 # test if LEN is zero
+ bne.b l_loop # if not, loop
+#
+# 10^LEN-1 is computed for this test and A14. If the input was
+# denormalized, check only the case in which YINT > 10^LEN.
+#
+ tst.b BINDEC_FLG(%a6) # check if input was norm
+ beq.b A13_con # if norm, continue with checking
+ fabs.x %fp0 # take abs of YINT
+ bra test_2
+#
+# Compare abs(YINT) to 10^(LEN-1) and 10^LEN
+#
+A13_con:
+ fabs.x %fp0 # take abs of YINT
+ fcmp.x %fp0,%fp2 # compare abs(YINT) with 10^(LEN-1)
+ fbge.w test_2 # if greater, do next test
+ subq.l &1,%d6 # subtract 1 from ILOG
+ mov.w &1,%d5 # set ICTR
+ fmov.l &rm_mode*0x10,%fpcr # set rmode to RM
+ fmul.s FTEN(%pc),%fp2 # compute 10^LEN
+ bra.w A6_str # return to A6 and recompute YINT
+test_2:
+ fmul.s FTEN(%pc),%fp2 # compute 10^LEN
+ fcmp.x %fp0,%fp2 # compare abs(YINT) with 10^LEN
+ fblt.w A14_st # if less, all is ok, go to A14
+ fbgt.w fix_ex # if greater, fix and redo
+ fdiv.s FTEN(%pc),%fp0 # if equal, divide by 10
+ addq.l &1,%d6 # and inc ILOG
+ bra.b A14_st # and continue elsewhere
+fix_ex:
+ addq.l &1,%d6 # increment ILOG by 1
+ mov.w &1,%d5 # set ICTR
+ fmov.l &rm_mode*0x10,%fpcr # set rmode to RM
+ bra.w A6_str # return to A6 and recompute YINT
+#
+# Since ICTR <> 0, we have already been through one adjustment,
+# and shouldn't have another; this is to check if abs(YINT) = 10^LEN
+# 10^LEN is again computed using whatever table is in a1 since the
+# value calculated cannot be inexact.
+#
+not_zr:
+ fmov.s FONE(%pc),%fp2 # init fp2 to 1.0
+ mov.l %d4,%d0 # put LEN in d0
+ clr.l %d3 # clr table index
+z_loop:
+ lsr.l &1,%d0 # shift next bit into carry
+ bcc.b z_next # if zero, skip the mul
+ fmul.x (%a1,%d3),%fp2 # mul by 10**(d3_bit_no)
+z_next:
+ add.l &12,%d3 # inc d3 to next pwrten table entry
+ tst.l %d0 # test if LEN is zero
+ bne.b z_loop # if not, loop
+ fabs.x %fp0 # get abs(YINT)
+ fcmp.x %fp0,%fp2 # check if abs(YINT) = 10^LEN
+ fbneq.w A14_st # if not, skip this
+ fdiv.s FTEN(%pc),%fp0 # divide abs(YINT) by 10
+ addq.l &1,%d6 # and inc ILOG by 1
+ addq.l &1,%d4 # and inc LEN
+	fmul.s		FTEN(%pc),%fp2	# LEN was inc'd, so recompute 10^LEN
+
+# A14. Convert the mantissa to bcd.
+# The binstr routine is used to convert the LEN digit
+# mantissa to bcd in memory. The input to binstr is
+# to be a fraction; i.e. (mantissa)/10^LEN and adjusted
+# such that the decimal point is to the left of bit 63.
+# The bcd digits are stored in the correct position in
+# the final string area in memory.
+#
+#
+# Register usage:
+# Input/Output
+# d0: x/LEN call to binstr - final is 0
+# d1: x/0
+# d2: x/ms 32-bits of mant of abs(YINT)
+# d3: x/ls 32-bits of mant of abs(YINT)
+# d4: LEN/Unchanged
+# d5: ICTR:LAMBDA/LAMBDA:ICTR
+# d6: ILOG
+# d7: k-factor/Unchanged
+# a0: pointer into memory for packed bcd string formation
+# /ptr to first mantissa byte in result string
+# a1: ptr to PTENxx array/Unchanged
+# a2: ptr to FP_SCR1(a6)/Unchanged
+# fp0: int portion of Y/abs(YINT) adjusted
+# fp1: 10^ISCALE/Unchanged
+# fp2: 10^LEN/Unchanged
+# F_SCR1:x/Work area for final result
+# F_SCR2:Y with original exponent/Unchanged
+# L_SCR1:original USER_FPCR/Unchanged
+# L_SCR2:first word of X packed/Unchanged
+
+A14_st:
+ fmov.l &rz_mode*0x10,%fpcr # force rz for conversion
+ fdiv.x %fp2,%fp0 # divide abs(YINT) by 10^LEN
+ lea.l FP_SCR0(%a6),%a0
+ fmov.x %fp0,(%a0) # move abs(YINT)/10^LEN to memory
+ mov.l 4(%a0),%d2 # move 2nd word of FP_RES to d2
+ mov.l 8(%a0),%d3 # move 3rd word of FP_RES to d3
+ clr.l 4(%a0) # zero word 2 of FP_RES
+ clr.l 8(%a0) # zero word 3 of FP_RES
+ mov.l (%a0),%d0 # move exponent to d0
+ swap %d0 # put exponent in lower word
+ beq.b no_sft # if zero, don't shift
+ sub.l &0x3ffd,%d0 # sub bias less 2 to make fract
+ tst.l %d0 # check if > 1
+ bgt.b no_sft # if so, don't shift
+ neg.l %d0 # make exp positive
+m_loop:
+ lsr.l &1,%d2 # shift d2:d3 right, add 0s
+ roxr.l &1,%d3 # the number of places
+ dbf.w %d0,m_loop # given in d0
+no_sft:
+ tst.l %d2 # check for mantissa of zero
+ bne.b no_zr # if not, go on
+ tst.l %d3 # continue zero check
+ beq.b zer_m # if zero, go directly to binstr
+no_zr:
+ clr.l %d1 # put zero in d1 for addx
+ add.l &0x00000080,%d3 # inc at bit 7
+ addx.l %d1,%d2 # continue inc
+ and.l &0xffffff80,%d3 # strip off lsb not used by 882
+zer_m:
+ mov.l %d4,%d0 # put LEN in d0 for binstr call
+ addq.l &3,%a0 # a0 points to M16 byte in result
+ bsr binstr # call binstr to convert mant
+
+
+# A15. Convert the exponent to bcd.
+# As in A14 above, the exp is converted to bcd and the
+# digits are stored in the final string.
+#
+# Digits are stored in L_SCR1(a6) on return from binstr as:
+#
+# 32 16 15 0
+# -----------------------------------------
+# | 0 | e3 | e2 | e1 | e4 | X | X | X |
+# -----------------------------------------
+#
+# And are moved into their proper places in FP_SCR0. If digit e4
+# is non-zero, OPERR is signaled. In all cases, all 4 digits are
+# written as specified in the 881/882 manual for packed decimal.
+#
+# Register usage:
+# Input/Output
+# d0: x/LEN call to binstr - final is 0
+# d1: x/scratch (0);shift count for final exponent packing
+# d2: x/ms 32-bits of exp fraction/scratch
+# d3: x/ls 32-bits of exp fraction
+# d4: LEN/Unchanged
+# d5: ICTR:LAMBDA/LAMBDA:ICTR
+# d6: ILOG
+# d7: k-factor/Unchanged
+# a0: ptr to result string/ptr to L_SCR1(a6)
+# a1: ptr to PTENxx array/Unchanged
+# a2: ptr to FP_SCR1(a6)/Unchanged
+# fp0: abs(YINT) adjusted/float(ILOG)
+# fp1: 10^ISCALE/Unchanged
+# fp2: 10^LEN/Unchanged
+# F_SCR1:Work area for final result/BCD result
+# F_SCR2:Y with original exponent/ILOG/10^4
+# L_SCR1:original USER_FPCR/Exponent digits on return from binstr
+# L_SCR2:first word of X packed/Unchanged
+
+A15_st:
+ tst.b BINDEC_FLG(%a6) # check for denorm
+ beq.b not_denorm
+ ftest.x %fp0 # test for zero
+ fbeq.w den_zero # if zero, use k-factor or 4933
+ fmov.l %d6,%fp0 # float ILOG
+ fabs.x %fp0 # get abs of ILOG
+ bra.b convrt
+den_zero:
+ tst.l %d7 # check sign of the k-factor
+ blt.b use_ilog # if negative, use ILOG
+ fmov.s F4933(%pc),%fp0 # force exponent to 4933
+ bra.b convrt # do it
+use_ilog:
+ fmov.l %d6,%fp0 # float ILOG
+ fabs.x %fp0 # get abs of ILOG
+ bra.b convrt
+not_denorm:
+ ftest.x %fp0 # test for zero
+ fbneq.w not_zero # if zero, force exponent
+ fmov.s FONE(%pc),%fp0 # force exponent to 1
+ bra.b convrt # do it
+not_zero:
+ fmov.l %d6,%fp0 # float ILOG
+ fabs.x %fp0 # get abs of ILOG
+convrt:
+ fdiv.x 24(%a1),%fp0 # compute ILOG/10^4
+ fmov.x %fp0,FP_SCR1(%a6) # store fp0 in memory
+ mov.l 4(%a2),%d2 # move word 2 to d2
+ mov.l 8(%a2),%d3 # move word 3 to d3
+ mov.w (%a2),%d0 # move exp to d0
+ beq.b x_loop_fin # if zero, skip the shift
+ sub.w &0x3ffd,%d0 # subtract off bias
+ neg.w %d0 # make exp positive
+x_loop:
+ lsr.l &1,%d2 # shift d2:d3 right
+ roxr.l &1,%d3 # the number of places
+ dbf.w %d0,x_loop # given in d0
+x_loop_fin:
+ clr.l %d1 # put zero in d1 for addx
+	add.l		&0x00000080,%d3	# inc at bit 7
+ addx.l %d1,%d2 # continue inc
+ and.l &0xffffff80,%d3 # strip off lsb not used by 882
+ mov.l &4,%d0 # put 4 in d0 for binstr call
+ lea.l L_SCR1(%a6),%a0 # a0 is ptr to L_SCR1 for exp digits
+ bsr binstr # call binstr to convert exp
+ mov.l L_SCR1(%a6),%d0 # load L_SCR1 lword to d0
+ mov.l &12,%d1 # use d1 for shift count
+ lsr.l %d1,%d0 # shift d0 right by 12
+ bfins %d0,FP_SCR0(%a6){&4:&12} # put e3:e2:e1 in FP_SCR0
+ lsr.l %d1,%d0 # shift d0 right by 12
+ bfins %d0,FP_SCR0(%a6){&16:&4} # put e4 in FP_SCR0
+ tst.b %d0 # check if e4 is zero
+ beq.b A16_st # if zero, skip rest
+ or.l &opaop_mask,USER_FPSR(%a6) # set OPERR & AIOP in USER_FPSR
+
+
+# A16. Write sign bits to final string.
+# Sigma is bit 31 of initial value; RHO is bit 31 of d6 (ILOG).
+#
+# Register usage:
+# Input/Output
+# d0: x/scratch - final is x
+# d2: x/x
+# d3: x/x
+# d4: LEN/Unchanged
+# d5: ICTR:LAMBDA/LAMBDA:ICTR
+# d6: ILOG/ILOG adjusted
+# d7: k-factor/Unchanged
+# a0: ptr to L_SCR1(a6)/Unchanged
+# a1: ptr to PTENxx array/Unchanged
+# a2: ptr to FP_SCR1(a6)/Unchanged
+# fp0: float(ILOG)/Unchanged
+# fp1: 10^ISCALE/Unchanged
+# fp2: 10^LEN/Unchanged
+# F_SCR1:BCD result with correct signs
+# F_SCR2:ILOG/10^4
+# L_SCR1:Exponent digits on return from binstr
+# L_SCR2:first word of X packed/Unchanged
+
+A16_st:
+ clr.l %d0 # clr d0 for collection of signs
+ and.b &0x0f,FP_SCR0(%a6) # clear first nibble of FP_SCR0
+ tst.l L_SCR2(%a6) # check sign of original mantissa
+ bge.b mant_p # if pos, don't set SM
+ mov.l &2,%d0 # move 2 in to d0 for SM
+mant_p:
+ tst.l %d6 # check sign of ILOG
+ bge.b wr_sgn # if pos, don't set SE
+ addq.l &1,%d0 # set bit 0 in d0 for SE
+wr_sgn:
+ bfins %d0,FP_SCR0(%a6){&0:&2} # insert SM and SE into FP_SCR0
+
+# Clean up and restore all registers used.
+
+ fmov.l &0,%fpsr # clear possible inex2/ainex bits
+ fmovm.x (%sp)+,&0xe0 # {%fp0-%fp2}
+ movm.l (%sp)+,&0x4fc # {%d2-%d7/%a2}
+ rts
+
+ global PTENRN
+PTENRN:
+ long 0x40020000,0xA0000000,0x00000000 # 10 ^ 1
+ long 0x40050000,0xC8000000,0x00000000 # 10 ^ 2
+ long 0x400C0000,0x9C400000,0x00000000 # 10 ^ 4
+ long 0x40190000,0xBEBC2000,0x00000000 # 10 ^ 8
+ long 0x40340000,0x8E1BC9BF,0x04000000 # 10 ^ 16
+ long 0x40690000,0x9DC5ADA8,0x2B70B59E # 10 ^ 32
+ long 0x40D30000,0xC2781F49,0xFFCFA6D5 # 10 ^ 64
+ long 0x41A80000,0x93BA47C9,0x80E98CE0 # 10 ^ 128
+ long 0x43510000,0xAA7EEBFB,0x9DF9DE8E # 10 ^ 256
+ long 0x46A30000,0xE319A0AE,0xA60E91C7 # 10 ^ 512
+ long 0x4D480000,0xC9767586,0x81750C17 # 10 ^ 1024
+ long 0x5A920000,0x9E8B3B5D,0xC53D5DE5 # 10 ^ 2048
+ long 0x75250000,0xC4605202,0x8A20979B # 10 ^ 4096
+
+ global PTENRP
+PTENRP:
+ long 0x40020000,0xA0000000,0x00000000 # 10 ^ 1
+ long 0x40050000,0xC8000000,0x00000000 # 10 ^ 2
+ long 0x400C0000,0x9C400000,0x00000000 # 10 ^ 4
+ long 0x40190000,0xBEBC2000,0x00000000 # 10 ^ 8
+ long 0x40340000,0x8E1BC9BF,0x04000000 # 10 ^ 16
+ long 0x40690000,0x9DC5ADA8,0x2B70B59E # 10 ^ 32
+ long 0x40D30000,0xC2781F49,0xFFCFA6D6 # 10 ^ 64
+ long 0x41A80000,0x93BA47C9,0x80E98CE0 # 10 ^ 128
+ long 0x43510000,0xAA7EEBFB,0x9DF9DE8E # 10 ^ 256
+ long 0x46A30000,0xE319A0AE,0xA60E91C7 # 10 ^ 512
+ long 0x4D480000,0xC9767586,0x81750C18 # 10 ^ 1024
+ long 0x5A920000,0x9E8B3B5D,0xC53D5DE5 # 10 ^ 2048
+ long 0x75250000,0xC4605202,0x8A20979B # 10 ^ 4096
+
+ global PTENRM
+PTENRM:
+ long 0x40020000,0xA0000000,0x00000000 # 10 ^ 1
+ long 0x40050000,0xC8000000,0x00000000 # 10 ^ 2
+ long 0x400C0000,0x9C400000,0x00000000 # 10 ^ 4
+ long 0x40190000,0xBEBC2000,0x00000000 # 10 ^ 8
+ long 0x40340000,0x8E1BC9BF,0x04000000 # 10 ^ 16
+ long 0x40690000,0x9DC5ADA8,0x2B70B59D # 10 ^ 32
+ long 0x40D30000,0xC2781F49,0xFFCFA6D5 # 10 ^ 64
+ long 0x41A80000,0x93BA47C9,0x80E98CDF # 10 ^ 128
+ long 0x43510000,0xAA7EEBFB,0x9DF9DE8D # 10 ^ 256
+ long 0x46A30000,0xE319A0AE,0xA60E91C6 # 10 ^ 512
+ long 0x4D480000,0xC9767586,0x81750C17 # 10 ^ 1024
+ long 0x5A920000,0x9E8B3B5D,0xC53D5DE4 # 10 ^ 2048
+ long 0x75250000,0xC4605202,0x8A20979A # 10 ^ 4096
+
+#########################################################################
+# binstr(): Converts a 64-bit binary integer to bcd. #
+# #
+# INPUT *************************************************************** #
+# d2:d3 = 64-bit binary integer #
+# d0 = desired length (LEN) #
+# a0 = pointer to start in memory for bcd characters #
+# (This pointer must point to byte 4 of the first #
+# lword of the packed decimal memory string.) #
+# #
+# OUTPUT ************************************************************** #
+# a0 = pointer to LEN bcd digits representing the 64-bit integer. #
+# #
+# ALGORITHM *********************************************************** #
+# The 64-bit binary is assumed to have a decimal point before #
+# bit 63. The fraction is multiplied by 10 using a mul by 2 #
+# shift and a mul by 8 shift. The bits shifted out of the #
+# msb form a decimal digit. This process is iterated until #
+# LEN digits are formed. #
+# #
+# A1. Init d7 to 1. D7 is the byte digit counter, and if 1, the #
+# digit formed will be assumed the least significant. This is #
+# to force the first byte formed to have a 0 in the upper 4 bits. #
+# #
+# A2. Beginning of the loop: #
+# Copy the fraction in d2:d3 to d4:d5. #
+# #
+# A3. Multiply the fraction in d2:d3 by 8 using bit-field #
+# extracts and shifts. The three msbs from d2 will go into d1. #
+# #
+# A4. Multiply the fraction in d4:d5 by 2 using shifts. The msb #
+# will be collected by the carry. #
+# #
+# A5. Add using the carry the 64-bit quantities in d2:d3 and d4:d5 #
+# into d2:d3. D1 will contain the bcd digit formed. #
+# #
+# A6. Test d7. If zero, the digit formed is the ms digit. If non- #
+# zero, it is the ls digit. Put the digit in its place in the #
+# upper word of d0. If it is the ls digit, write the word #
+# from d0 to memory. #
+# #
+# A7. Decrement d6 (LEN counter) and repeat the loop until zero. #
+# #
+#########################################################################
+
+# Implementation Notes:
+#
+# The registers are used as follows:
+#
+# d0: LEN counter
+# d1: temp used to form the digit
+# d2: upper 32-bits of fraction for mul by 8
+# d3: lower 32-bits of fraction for mul by 8
+# d4: upper 32-bits of fraction for mul by 2
+# d5: lower 32-bits of fraction for mul by 2
+# d6: temp for bit-field extracts
+# d7: byte digit formation word; digit count {0,1}
+# a0: pointer into memory for packed bcd string formation
+#
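+# A minimal C sketch of the loop below, with a uint64_t standing in for
+# the d2:d3 / d4:d5 register pairs (binstr_c and its parameter names are
+# illustrative only):
+#
+#	#include <stdint.h>
+#
+#	void binstr_c(uint64_t frac, int len, uint8_t *bcd)
+#	{
+#		int pending = 1; /* like d7a: pretend a ms nibble is done, */
+#		unsigned byte = 0; /* forcing 0 into the first upper nibble */
+#		for (int i = 0; i < len; i++) {
+#			/* frac*10 = frac*8 + frac*2; the bits shifted out
+#			 * of the top form the next decimal digit (0-9) */
+#			unsigned dig = (unsigned)(frac >> 61); /* 3 msbs, x8 */
+#			uint64_t m8 = frac << 3;
+#			dig += (unsigned)(frac >> 63); /* msb, x2 */
+#			uint64_t m2 = frac << 1;
+#			frac = m8 + m2;
+#			dig += (frac < m8); /* carry out of the add */
+#			if (pending) { /* ls nibble: complete the byte */
+#				*bcd++ = (byte << 4) | dig;
+#				pending = 0;
+#			} else { /* ms nibble: hold it */
+#				byte = dig;
+#				pending = 1;
+#			}
+#		}
+#		if (pending) /* odd digit left over: flush it */
+#			*bcd = byte << 4;
+#	}
+#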
+
+ global binstr
+binstr:
+ movm.l &0xff00,-(%sp) # {%d0-%d7}
+
+#
+# A1: Init d7
+#
+ mov.l &1,%d7 # init d7 for second digit
+ subq.l &1,%d0 # dbf would otherwise make LEN+1 passes
+#
+# A2. Copy d2:d3 to d4:d5. Start loop.
+#
+loop:
+ mov.l %d2,%d4 # copy the fraction before muls
+ mov.l %d3,%d5 # to d4:d5
+#
+# A3. Multiply d2:d3 by 8; extract msbs into d1.
+#
+ bfextu %d2{&0:&3},%d1 # copy 3 msbs of d2 into d1
+ asl.l &3,%d2 # shift d2 left by 3 places
+ bfextu %d3{&0:&3},%d6 # copy 3 msbs of d3 into d6
+ asl.l &3,%d3 # shift d3 left by 3 places
+ or.l %d6,%d2 # or in msbs from d3 into d2
+#
+# A4. Multiply d4:d5 by 2; add carry out to d1.
+#
+ asl.l &1,%d5 # mul d5 by 2
+ roxl.l &1,%d4 # mul d4 by 2
+ swap %d6 # put 0 in d6 lower word
+ addx.w %d6,%d1 # add in extend from mul by 2
+#
+# A5. Add mul by 8 to mul by 2. D1 contains the digit formed.
+#
+ add.l %d5,%d3 # add lower 32 bits
+ nop # ERRATA FIX #13 (Rev. 1.2 6/6/90)
+ addx.l %d4,%d2 # add with extend upper 32 bits
+ nop # ERRATA FIX #13 (Rev. 1.2 6/6/90)
+ addx.w %d6,%d1 # add in extend from add to d1
+ swap %d6 # with d6 = 0; put 0 in upper word
+#
+# A6. Test d7 and branch.
+#
+ tst.w %d7 # is a digit already pending in d7b?
+ beq.b first_d # no: save this digit & loop again
+sec_d:
+ swap %d7 # bring first digit to word d7b
+ asl.w &4,%d7 # first digit in upper 4 bits d7b
+ add.w %d1,%d7 # add in ls digit to d7b
+ mov.b %d7,(%a0)+ # store d7b byte in memory
+ swap %d7 # put LEN counter in word d7a
+ clr.w %d7 # set d7a to signal no digits done
+ dbf.w %d0,loop # do loop some more!
+ bra.b end_bstr # finished, so exit
+first_d:
+ swap %d7 # put digit word in d7b
+ mov.w %d1,%d7 # put new digit in d7b
+ swap %d7 # put LEN counter in word d7a
+ addq.w &1,%d7 # set d7a to signal first digit done
+ dbf.w %d0,loop # do loop some more!
+ swap %d7 # put last digit in string
+ lsl.w &4,%d7 # move it to upper 4 bits
+ mov.b %d7,(%a0)+ # store it in memory string
+#
+# Clean up and return; the bcd string has been stored to memory via a0.
+#
+end_bstr:
+ movm.l (%sp)+,&0xff # {%d0-%d7}
+ rts
+
+#########################################################################
+# XDEF **************************************************************** #
+# facc_in_b(): dmem_read_byte failed #
+# facc_in_w(): dmem_read_word failed #
+# facc_in_l(): dmem_read_long failed #
+# facc_in_d(): dmem_read of dbl prec failed #
+# facc_in_x(): dmem_read of ext prec failed #
+# #
+# facc_out_b(): dmem_write_byte failed #
+# facc_out_w(): dmem_write_word failed #
+# facc_out_l(): dmem_write_long failed #
+# facc_out_d(): dmem_write of dbl prec failed #
+# facc_out_x(): dmem_write of ext prec failed #
+# #
+# XREF **************************************************************** #
+# _real_access() - exit through access error handler #
+# #
+# INPUT *************************************************************** #
+# None #
+# #
+# OUTPUT ************************************************************** #
+# None #
+# #
+# ALGORITHM *********************************************************** #
+# Flow jumps here when an FP data fetch call gets an error #
+# result. This means the operating system wants an access error frame #
+# made out of the current exception stack frame. #
+# So, we first call restore() which makes sure that any updated #
+# -(an)+ register gets returned to its pre-exception value and then #
+# we change the stack to an access error stack frame. #
+# #
+#########################################################################
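+
+# The FSLW words stored below follow a simple pattern (the authoritative
+# bit definitions are in the M68060 User's Manual): bit 8 is set for a
+# read, bit 7 for a write, and bits 6-5 select the transfer size
+# (01 = byte, 10 = word, 00 = long, 11 = double/extended). facc_finish
+# later moves this word into the upper half of the longword FSLW it
+# places on the access error frame.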
+
+facc_in_b:
+ movq.l &0x1,%d0 # one byte
+ bsr.w restore # fix An
+
+ mov.w &0x0121,EXC_VOFF(%a6) # set FSLW
+ bra.w facc_finish
+
+facc_in_w:
+ movq.l &0x2,%d0 # two bytes
+ bsr.w restore # fix An
+
+ mov.w &0x0141,EXC_VOFF(%a6) # set FSLW
+ bra.b facc_finish
+
+facc_in_l:
+ movq.l &0x4,%d0 # four bytes
+ bsr.w restore # fix An
+
+ mov.w &0x0101,EXC_VOFF(%a6) # set FSLW
+ bra.b facc_finish
+
+facc_in_d:
+ movq.l &0x8,%d0 # eight bytes
+ bsr.w restore # fix An
+
+ mov.w &0x0161,EXC_VOFF(%a6) # set FSLW
+ bra.b facc_finish
+
+facc_in_x:
+ movq.l &0xc,%d0 # twelve bytes
+ bsr.w restore # fix An
+
+ mov.w &0x0161,EXC_VOFF(%a6) # set FSLW
+ bra.b facc_finish
+
+################################################################
+
+facc_out_b:
+ movq.l &0x1,%d0 # one byte
+ bsr.w restore # restore An
+
+ mov.w &0x00a1,EXC_VOFF(%a6) # set FSLW
+ bra.b facc_finish
+
+facc_out_w:
+ movq.l &0x2,%d0 # two bytes
+ bsr.w restore # restore An
+
+ mov.w &0x00c1,EXC_VOFF(%a6) # set FSLW
+ bra.b facc_finish
+
+facc_out_l:
+ movq.l &0x4,%d0 # four bytes
+ bsr.w restore # restore An
+
+ mov.w &0x0081,EXC_VOFF(%a6) # set FSLW
+ bra.b facc_finish
+
+facc_out_d:
+ movq.l &0x8,%d0 # eight bytes
+ bsr.w restore # restore An
+
+ mov.w &0x00e1,EXC_VOFF(%a6) # set FSLW
+ bra.b facc_finish
+
+facc_out_x:
+ movq.l &0xc,%d0 # twelve bytes
+ bsr.w restore # restore An
+
+ mov.w &0x00e1,EXC_VOFF(%a6) # set FSLW
+
+# here's where we actually create the access error frame from the
+# current exception stack frame.
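+#
+# the 12-byte exception frame is grown and rewritten in place into the
+# 16-byte access error frame (offsets within each frame; %sp moves down
+# 4 bytes; layout derived from the moves below):
+#
+#	before			after
+#	0x0: SR			0x0: SR
+#	0x2: PC			0x2: PC (from USER_FPIAR)
+#	0x6: format/voff	0x6: voff = 0x4008 (access error)
+#	0x8: EA			0x8: EA
+#				0xc: FSLW = (EXC_VOFF word << 16) | 0x1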
+facc_finish:
+ mov.l USER_FPIAR(%a6),EXC_PC(%a6) # store current PC
+
+ fmovm.x EXC_FPREGS(%a6),&0xc0 # restore fp0-fp1
+ fmovm.l USER_FPCR(%a6),%fpcr,%fpsr,%fpiar # restore ctrl regs
+ movm.l EXC_DREGS(%a6),&0x0303 # restore d0-d1/a0-a1
+
+ unlk %a6
+
+ mov.l (%sp),-(%sp) # store SR, hi(PC)
+ mov.l 0x8(%sp),0x4(%sp) # store lo(PC)
+ mov.l 0xc(%sp),0x8(%sp) # store EA
+ mov.l &0x00000001,0xc(%sp) # store FSLW lo word (bit 0 set)
+ mov.w 0x6(%sp),0xc(%sp) # insert prepared FSLW hi word
+ mov.w &0x4008,0x6(%sp) # store voff
+
+ btst &0x5,(%sp) # supervisor or user mode?
+ beq.b facc_out2 # user
+ bset &0x2,0xd(%sp) # set supervisor TM bit
+
+facc_out2:
+ bra.l _real_access
+
+##################################################################
+
+# if the effective addressing mode was predecrement or postincrement,
+# the emulation has already changed An to its correct post-instruction
+# value. but since we're exiting to the access error handler, An must
+# be returned to its pre-instruction value. we do that here.
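+#
+# a minimal C sketch of the fixup (restore_c and its parameters are
+# illustrative; the mode field is bits 5-3 of the opword, as usual for
+# 68k effective addresses). a7 additionally needs the USP special-casing
+# done in ri_a7 below.
+#
+#	#include <stdint.h>
+#
+#	void restore_c(uint16_t opword, uint32_t *an, uint32_t nbytes)
+#	{
+#		unsigned mode = opword & 0x38;	/* mode field, bits 5-3 */
+#		if (mode == 0x18)		/* (An)+: undo the increment */
+#			*an -= nbytes;
+#		else if (mode == 0x20)		/* -(An): undo the decrement */
+#			*an += nbytes;
+#	}
+#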
+restore:
+ mov.b EXC_OPWORD+0x1(%a6),%d1
+ andi.b &0x38,%d1 # extract opmode
+ cmpi.b %d1,&0x18 # postinc?
+ beq.w rest_inc
+ cmpi.b %d1,&0x20 # predec?
+ beq.w rest_dec
+ rts
+
+rest_inc:
+ mov.b EXC_OPWORD+0x1(%a6),%d1
+ andi.w &0x0007,%d1 # extract An register number
+
+ mov.w (tbl_rest_inc.b,%pc,%d1.w*2),%d1
+ jmp (tbl_rest_inc.b,%pc,%d1.w*1)
+
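+# tbl_rest_inc holds signed word offsets from the start of the table to
+# each per-register fixup; the register number from the opword indexes
+# the table, and the fetched offset drives the pc-relative jmp above.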
+tbl_rest_inc:
+ short ri_a0 - tbl_rest_inc
+ short ri_a1 - tbl_rest_inc
+ short ri_a2 - tbl_rest_inc
+ short ri_a3 - tbl_rest_inc
+ short ri_a4 - tbl_rest_inc
+ short ri_a5 - tbl_rest_inc
+ short ri_a6 - tbl_rest_inc
+ short ri_a7 - tbl_rest_inc
+
+ri_a0:
+ sub.l %d0,EXC_DREGS+0x8(%a6) # fix stacked a0
+ rts
+ri_a1:
+ sub.l %d0,EXC_DREGS+0xc(%a6) # fix stacked a1
+ rts
+ri_a2:
+ sub.l %d0,%a2 # fix a2
+ rts
+ri_a3:
+ sub.l %d0,%a3 # fix a3
+ rts
+ri_a4:
+ sub.l %d0,%a4 # fix a4
+ rts
+ri_a5:
+ sub.l %d0,%a5 # fix a5
+ rts
+ri_a6:
+ sub.l %d0,(%a6) # fix stacked a6
+ rts
+# if it's an fmove out instruction, we don't have to fix a7
+# because we hadn't changed it yet. if it's an opclass two
+# instruction (data moved in) and the exception occurred in
+# supervisor mode, then a7 wasn't updated either. if it was
+# user mode, restore the correct a7, which currently sits in the USP.
+ri_a7:
+ cmpi.b EXC_VOFF(%a6),&0x30 # move in or out?
+ bne.b ri_a7_done # out
+
+ btst &0x5,EXC_SR(%a6) # user or supervisor?
+ bne.b ri_a7_done # supervisor
+ movc %usp,%a0 # restore USP
+ sub.l %d0,%a0
+ movc %a0,%usp
+ri_a7_done:
+ rts
+
+# need to invert adjustment value if the <ea> was predec
+rest_dec:
+ neg.l %d0
+ bra.b rest_inc