/*
* arch/sh/kernel/cpu/sh5/entry.S
*
* Copyright (C) 2000, 2001 Paolo Alberelli
* Copyright (C) 2004 - 2008 Paul Mundt
* Copyright (C) 2003, 2004 Richard Curnow
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/sys.h>
#include <cpu/registers.h>
#include <asm/processor.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
/*
* SR fields.
*/
#define SR_ASID_MASK 0x00ff0000
#define SR_FD_MASK 0x00008000
#define SR_SS 0x08000000
#define SR_BL 0x10000000
#define SR_MD 0x40000000
/*
* Event code.
*/
#define EVENT_INTERRUPT 0
#define EVENT_FAULT_TLB 1
#define EVENT_FAULT_NOT_TLB 2
#define EVENT_DEBUG 3
/* EXPEVT values */
#define RESET_CAUSE 0x20
#define DEBUGSS_CAUSE 0x980
/*
* Frame layout. Quad index.
*/
#define FRAME_T(x) FRAME_TBASE+(x*8)
#define FRAME_R(x) FRAME_RBASE+(x*8)
#define FRAME_S(x) FRAME_SBASE+(x*8)
#define FSPC 0
#define FSSR 1
#define FSYSCALL_ID 2
/* Arrange the save frame to be a multiple of 32 bytes long */
#define FRAME_SBASE 0
#define FRAME_RBASE (FRAME_SBASE+(3*8)) /* SYSCALL_ID - SSR - SPC */
#define FRAME_TBASE (FRAME_RBASE+(63*8)) /* r0 - r62 */
#define FRAME_PBASE (FRAME_TBASE+(8*8)) /* tr0 - tr7 */
#define FRAME_SIZE (FRAME_PBASE+(2*8)) /* pad0-pad1 */
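/* 3 + 63 + 8 + 2 quads = 608 bytes = 19 * 32, i.e. a multiple of 32 as required */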
#define FP_FRAME_SIZE FP_FRAME_BASE+(33*8) /* dr0 - dr31 + fpscr */
#define FP_FRAME_BASE 0
#define SAVED_R2 0*8
#define SAVED_R3 1*8
#define SAVED_R4 2*8
#define SAVED_R5 3*8
#define SAVED_R18 4*8
#define SAVED_R6 5*8
#define SAVED_TR0 6*8
/* These are the registers saved in the TLB path that aren't saved in the first
level of the normal one. */
#define TLB_SAVED_R25 7*8
#define TLB_SAVED_TR1 8*8
#define TLB_SAVED_TR2 9*8
#define TLB_SAVED_TR3 10*8
#define TLB_SAVED_TR4 11*8
/* Save R0/R1 : the PT-migrating compiler currently dishonours -ffixed-r0 and -ffixed-r1,
causing breakage otherwise. */
#define TLB_SAVED_R0 12*8
#define TLB_SAVED_R1 13*8
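/* CLI/STI mask and unmask interrupts by setting or clearing the SR.IMASK field (SR bits 7:4). Note that both macros clobber r6. */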
#define CLI() \
getcon SR, r6; \
ori r6, 0xf0, r6; \
putcon r6, SR;
#define STI() \
getcon SR, r6; \
andi r6, ~0xf0, r6; \
putcon r6, SR;
#ifdef CONFIG_PREEMPT
# define preempt_stop() CLI()
#else
# define preempt_stop()
# define resume_kernel restore_all
#endif
.section .data, "aw"
#define FAST_TLBMISS_STACK_CACHELINES 4
#define FAST_TLBMISS_STACK_QUADWORDS (4*FAST_TLBMISS_STACK_CACHELINES)
/* Register back-up area for all exceptions */
.balign 32
/* Allow for 16 quadwords to be pushed by fast tlbmiss handling
* register saves etc. */
.fill FAST_TLBMISS_STACK_QUADWORDS, 8, 0x0
/* This is 32 byte aligned by construction */
/* Register back-up area for all exceptions */
reg_save_area:
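/* 14 quads: the seven SAVED_* slots plus the seven TLB_SAVED_* slots defined above */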
.quad 0
.quad 0
.quad 0
.quad 0
.quad 0
.quad 0
.quad 0
.quad 0
.quad 0
.quad 0
.quad 0
.quad 0
.quad 0
.quad 0
/* Save area for RESVEC exceptions. We cannot use reg_save_area because of
* reentrancy. Note this area may be accessed via physical address.
* Align so this fits a whole single cache line, for ease of purging.
*/
.balign 32,0,32
resvec_save_area:
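/* 5 quads: r0, r1, SPC, SSR and tr0, at offsets 0/8/16/24/32 */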
.quad 0
.quad 0
.quad 0
.quad 0
.quad 0
.balign 32,0,32
/* Jump table of 3rd level handlers */
trap_jtable:
.long do_exception_error /* 0x000 */
.long do_exception_error /* 0x020 */
#ifdef CONFIG_MMU
.long tlb_miss_load /* 0x040 */
.long tlb_miss_store /* 0x060 */
#else
.long do_exception_error
.long do_exception_error
#endif
! ARTIFICIAL pseudo-EXPEVT setting
.long do_debug_interrupt /* 0x080 */
#ifdef CONFIG_MMU
.long tlb_miss_load /* 0x0A0 */
.long tlb_miss_store /* 0x0C0 */
#else
.long do_exception_error
.long do_exception_error
#endif
.long do_address_error_load /* 0x0E0 */
.long do_address_error_store /* 0x100 */
#ifdef CONFIG_SH_FPU
.long do_fpu_error /* 0x120 */
#else
.long do_exception_error /* 0x120 */
#endif
.long do_exception_error /* 0x140 */
.long system_call /* 0x160 */
.long do_reserved_inst /* 0x180 */
.long do_illegal_slot_inst /* 0x1A0 */
.long do_exception_error /* 0x1C0 - NMI */
.long do_exception_error /* 0x1E0 */
.rept 15
.long do_IRQ /* 0x200 - 0x3C0 */
.endr
.long do_exception_error /* 0x3E0 */
.rept 32
.long do_IRQ /* 0x400 - 0x7E0 */
.endr
.long fpu_error_or_IRQA /* 0x800 */
.long fpu_error_or_IRQB /* 0x820 */
.long do_IRQ /* 0x840 */
.long do_IRQ /* 0x860 */
.rept 6
.long do_exception_error /* 0x880 - 0x920 */
.endr
.long do_software_break_point /* 0x940 */
.long do_exception_error /* 0x960 */
.long do_single_step /* 0x980 */
.rept 3
.long do_exception_error /* 0x9A0 - 0x9E0 */
.endr
.long do_IRQ /* 0xA00 */
.long do_IRQ /* 0xA20 */
#ifdef CONFIG_MMU
.long itlb_miss_or_IRQ /* 0xA40 */
#else
.long do_IRQ
#endif
.long do_IRQ /* 0xA60 */
.long do_IRQ /* 0xA80 */
#ifdef CONFIG_MMU
.long itlb_miss_or_IRQ /* 0xAA0 */
#else
.long do_IRQ
#endif
.long do_exception_error /* 0xAC0 */
.long do_address_error_exec /* 0xAE0 */
.rept 8
.long do_exception_error /* 0xB00 - 0xBE0 */
.endr
.rept 18
.long do_IRQ /* 0xC00 - 0xE20 */
.endr
.section .text64, "ax"
/*
* --- Exception/Interrupt/Event Handling Section
*/
/*
* VBR and RESVEC blocks.
*
* First level handler for VBR-based exceptions.
*
* To avoid waste of space, align to the maximum text block size.
* This is assumed to be at most 128 bytes or 32 instructions.
* DO NOT EXCEED 32 instructions on the first level handlers!
*
* Also note that RESVEC is contained within the VBR block
* where the room left (1KB - TEXT_SIZE) allows placing
* the RESVEC block (at most 512B + TEXT_SIZE).
*
* The same constraints apply to the first (and only) level handlers
* for RESVEC-based exceptions.
*
* Where the fault/interrupt is handled (not_a_tlb_miss, tlb_miss
* and interrupt) we are very tight on register space until we have
* saved onto the stack frame, which is done in handle_exception().
*
*/
#define TEXT_SIZE 128
#define BLOCK_SIZE 1664 /* Dynamic check, 13*128 */
.balign TEXT_SIZE
LVBR_block:
.space 256, 0 /* Power-on class handler, */
/* not required here */
not_a_tlb_miss:
synco /* TAKum03020 (but probably a good idea anyway.) */
/* Save original stack pointer into KCR1 */
putcon SP, KCR1
/* Save other original registers into reg_save_area */
movi reg_save_area, SP
st.q SP, SAVED_R2, r2
st.q SP, SAVED_R3, r3
st.q SP, SAVED_R4, r4
st.q SP, SAVED_R5, r5
st.q SP, SAVED_R6, r6
st.q SP, SAVED_R18, r18
gettr tr0, r3
st.q SP, SAVED_TR0, r3
/* Set args for Non-debug, Not a TLB miss class handler */
getcon EXPEVT, r2
movi ret_from_exception, r3
ori r3, 1, r3
movi EVENT_FAULT_NOT_TLB, r4
or SP, ZERO, r5
getcon KCR1, SP
pta handle_exception, tr0
blink tr0, ZERO
.balign 256
! VBR+0x200
nop
.balign 256
! VBR+0x300
nop
.balign 256
/*
* Instead of the natural .balign 1024, place RESVEC here,
* respecting the final 1KB alignment.
*/
.balign TEXT_SIZE
/*
* Instead of '.space 1024-TEXT_SIZE' place the RESVEC
* block making sure the final alignment is correct.
*/
#ifdef CONFIG_MMU
tlb_miss:
synco /* TAKum03020 (but probably a good idea anyway.) */
putcon SP, KCR1
movi reg_save_area, SP
/* SP is guaranteed 32-byte aligned. */
st.q SP, TLB_SAVED_R0 , r0
st.q SP, TLB_SAVED_R1 , r1
st.q SP, SAVED_R2 , r2
st.q SP, SAVED_R3 , r3
st.q SP, SAVED_R4 , r4
st.q SP, SAVED_R5 , r5
st.q SP, SAVED_R6 , r6
st.q SP, SAVED_R18, r18
/* Save R25 for safety; as/ld may want to use it to make the call into
* the code in mm/tlbmiss.c */
st.q SP, TLB_SAVED_R25, r25
gettr tr0, r2
gettr tr1, r3
gettr tr2, r4
gettr tr3, r5
gettr tr4, r18
st.q SP, SAVED_TR0 , r2
st.q SP, TLB_SAVED_TR1 , r3
st.q SP, TLB_SAVED_TR2 , r4
st.q SP, TLB_SAVED_TR3 , r5
st.q SP, TLB_SAVED_TR4 , r18
pt do_fast_page_fault, tr0
getcon SSR, r2
getcon EXPEVT, r3
getcon TEA, r4
shlri r2, 30, r2
andi r2, 1, r2 /* r2 = SSR.MD */
blink tr0, LINK
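/* On return, r2 holds the result of do_fast_page_fault(): zero means the
fault was not fixed on the fast path (see the branch below). */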
pt fixup_to_invoke_general_handler, tr1
/* If the fast path handler fixed the fault, just drop through quickly
to the restore code right away to return to the excepting context.
*/
beqi/u r2, 0, tr1
fast_tlb_miss_restore:
ld.q SP, SAVED_TR0, r2
ld.q SP, TLB_SAVED_TR1, r3
ld.q SP, TLB_SAVED_TR2, r4
ld.q SP, TLB_SAVED_TR3, r5
ld.q SP, TLB_SAVED_TR4, r18
ptabs r2, tr0
ptabs r3, tr1
ptabs r4, tr2
ptabs r5, tr3
ptabs r18, tr4
ld.q SP, TLB_SAVED_R0, r0
ld.q SP, TLB_SAVED_R1, r1
ld.q SP, SAVED_R2, r2
ld.q SP, SAVED_R3, r3
ld.q SP, SAVED_R4, r4
ld.q SP, SAVED_R5, r5
ld.q SP, SAVED_R6, r6
ld.q SP, SAVED_R18, r18
ld.q SP, TLB_SAVED_R25, r25
getcon KCR1, SP
rte
nop /* for safety, in case the code is run on sh5-101 cut1.x */
fixup_to_invoke_general_handler:
/* OK, new method. Restore stuff that's not expected to get saved into
the 'first-level' reg save area, then just fall through to setting
up the registers and calling the second-level handler. */
/* 2nd level expects r2,3,4,5,6,18,tr0 to be saved. So we must restore
r25,tr1-4 and save r6 to get into the right state. */
ld.q SP, TLB_SAVED_TR1, r3
ld.q SP, TLB_SAVED_TR2, r4
ld.q SP, TLB_SAVED_TR3, r5
ld.q SP, TLB_SAVED_TR4, r18
ld.q SP, TLB_SAVED_R25, r25
ld.q SP, TLB_SAVED_R0, r0
ld.q SP, TLB_SAVED_R1, r1
ptabs/u r3, tr1
ptabs/u r4, tr2
ptabs/u r5, tr3
ptabs/u r18, tr4
/* Set args for Non-debug, TLB miss class handler */
getcon EXPEVT, r2
movi ret_from_exception, r3
ori r3, 1, r3
movi EVENT_FAULT_TLB, r4
or SP, ZERO, r5
getcon KCR1, SP
pta handle_exception, tr0
blink tr0, ZERO
#else /* CONFIG_MMU */
.balign 256
#endif
/* NB TAKE GREAT CARE HERE TO ENSURE THAT THE INTERRUPT CODE
DOES END UP AT VBR+0x600 */
nop
nop
nop
nop
nop
nop
.balign 256
/* VBR + 0x600 */
interrupt:
synco /* TAKum03020 (but probably a good idea anyway.) */
/* Save original stack pointer into KCR1 */
putcon SP, KCR1
/* Save other original registers into reg_save_area */
movi reg_save_area, SP
st.q SP, SAVED_R2, r2
st.q SP, SAVED_R3, r3
st.q SP, SAVED_R4, r4
st.q SP, SAVED_R5, r5
st.q SP, SAVED_R6, r6
st.q SP, SAVED_R18, r18
gettr tr0, r3
st.q SP, SAVED_TR0, r3
/* Set args for interrupt class handler */
getcon INTEVT, r2
movi ret_from_irq, r3
ori r3, 1, r3
movi EVENT_INTERRUPT, r4
or SP, ZERO, r5
getcon KCR1, SP
pta handle_exception, tr0
blink tr0, ZERO
.balign TEXT_SIZE /* let's waste the bare minimum */
LVBR_block_end: /* Marker. Used for total checking */
.balign 256
LRESVEC_block:
/* Panic handler. Called with MMU off. Possible causes/actions:
* - Reset: Jump to program start.
* - Single Step: Turn off Single Step & return.
* - Others: Call panic handler, passing PC as arg.
* (this may need to be extended...)
*/
reset_or_panic:
synco /* TAKum03020 (but probably a good idea anyway.) */
putcon SP, DCR
/* First save r0-1 and tr0, as we need to use these */
movi resvec_save_area-CONFIG_PAGE_OFFSET, SP
st.q SP, 0, r0
st.q SP, 8, r1
gettr tr0, r0
st.q SP, 32, r0
/* Check cause */
getcon EXPEVT, r0
movi RESET_CAUSE, r1
sub r1, r0, r1 /* r1=0 if reset */
movi _stext-CONFIG_PAGE_OFFSET, r0
ori r0, 1, r0
ptabs r0, tr0
beqi r1, 0, tr0 /* Jump to start address if reset */
getcon EXPEVT, r0
movi DEBUGSS_CAUSE, r1
sub r1, r0, r1 /* r1=0 if single step */
pta single_step_panic, tr0
beqi r1, 0, tr0 /* jump if single step */
/* Now jump to where we save the registers. */
movi panic_stash_regs-CONFIG_PAGE_OFFSET, r1
ptabs r1, tr0
blink tr0, r63
single_step_panic:
/* We are in a handler with Single Step set. We need to resume the
* handler, by turning on MMU & turning off Single Step. */
getcon SSR, r0
movi SR_MMU, r1
or r0, r1, r0
movi ~SR_SS, r1
and r0, r1, r0
putcon r0, SSR
/* Restore EXPEVT, as the rte won't do this */
getcon PEXPEVT, r0
putcon r0, EXPEVT
/* Restore regs */
ld.q SP, 32, r0
ptabs r0, tr0
ld.q SP, 0, r0
ld.q SP, 8, r1
getcon DCR, SP
synco
rte
.balign 256
debug_exception:
synco /* TAKum03020 (but probably a good idea anyway.) */
/*
* Single step/software_break_point first level handler.
* Called with MMU off, so the first thing we do is enable it
* by doing an rte with appropriate SSR.
*/
putcon SP, DCR
/* Save SSR & SPC, together with R0 & R1, as we need to use 2 regs. */
movi resvec_save_area-CONFIG_PAGE_OFFSET, SP
/* With the MMU off, we are bypassing the cache, so purge any
* data that will be made stale by the following stores.
*/
ocbp SP, 0
synco
st.q SP, 0, r0
st.q SP, 8, r1
getcon SPC, r0
st.q SP, 16, r0
getcon SSR, r0
st.q SP, 24, r0
/* Enable MMU, block exceptions, set priv mode, disable single step */
movi SR_MMU | SR_BL | SR_MD, r1
or r0, r1, r0
movi ~SR_SS, r1
and r0, r1, r0
putcon r0, SSR
/* Force control to debug_exception_2 when rte is executed */
movi debug_exception_2, r0
ori r0, 1, r0 /* force SHmedia, just in case */
putcon r0, SPC
getcon DCR, SP
synco
rte
debug_exception_2:
/* Restore saved regs */
putcon SP, KCR1
movi resvec_save_area, SP
ld.q SP, 24, r0
putcon r0, SSR
ld.q SP, 16, r0
putcon r0, SPC
ld.q SP, 0, r0
ld.q SP, 8, r1
/* Save other original registers into reg_save_area */
movi reg_save_area, SP
st.q SP, SAVED_R2, r2
st.q SP, SAVED_R3, r3
st.q SP, SAVED_R4, r4
st.q SP, SAVED_R5, r5
st.q SP, SAVED_R6, r6
st.q SP, SAVED_R18, r18
gettr tr0, r3
st.q SP, SAVED_TR0, r3
/* Set args for debug class handler */
getcon EXPEVT, r2
movi ret_from_exception, r3
ori r3, 1, r3
movi EVENT_DEBUG, r4
or SP, ZERO, r5
getcon KCR1, SP
pta handle_exception, tr0
blink tr0, ZERO
.balign 256
debug_interrupt:
/* !!! WE COME HERE IN REAL MODE !!! */
/* Hook-up debug interrupt to allow various debugging options to be
* hooked into its handler. */
/* Save original stack pointer into KCR1 */
synco
putcon SP, KCR1
movi resvec_save_area-CONFIG_PAGE_OFFSET, SP
ocbp SP, 0
ocbp SP, 32
synco
/* Save the other original registers into resvec_save_area via real addresses */
st.q SP, SAVED_R2, r2
st.q SP, SAVED_R3, r3
st.q SP, SAVED_R4, r4
st.q SP, SAVED_R5, r5
st.q SP, SAVED_R6, r6
st.q SP, SAVED_R18, r18
gettr tr0, r3
st.q SP, SAVED_TR0, r3
/* move (spc,ssr)->(pspc,pssr). The rte will shift
them back again, so that they look like the originals
as far as the real handler code is concerned. */
getcon spc, r6
putcon r6, pspc
getcon ssr, r6
putcon r6, pssr
! construct useful SR for handle_exception
movi 3, r6
shlli r6, 30, r6
getcon sr, r18
or r18, r6, r6
putcon r6, ssr
! SSR is now the current SR with the MD and MMU bits set
! i.e. the rte will switch back to priv mode and put
! the mmu back on
! construct spc
movi handle_exception, r18
ori r18, 1, r18 ! for safety (do we need this?)
putcon r18, spc
/* Set args for Non-debug, Not a TLB miss class handler */
! EXPEVT==0x80 is unused, so 'steal' this value to put the
! debug interrupt handler in the vectoring table
movi 0x80, r2
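/* 0x80 >> 5 = entry 4 of trap_jtable, i.e. do_debug_interrupt */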
movi ret_from_exception, r3
ori r3, 1, r3
movi EVENT_FAULT_NOT_TLB, r4
or SP, ZERO, r5
movi CONFIG_PAGE_OFFSET, r6
add r6, r5, r5
getcon KCR1, SP
synco ! for safety
rte ! -> handle_exception, switch back to priv mode again
LRESVEC_block_end: /* Marker. Unused. */
.balign TEXT_SIZE
/*
* Second level handler for VBR-based exceptions. Pre-handler.
* In common to all stack-frame sensitive handlers.
*
* Inputs:
* (KCR0) Current [current task union]
* (KCR1) Original SP
* (r2) INTEVT/EXPEVT
* (r3) appropriate return address
* (r4) Event (0 = interrupt, 1 = TLB miss fault, 2 = Not TLB miss fault, 3=debug)
* (r5) Pointer to reg_save_area
* (SP) Original SP
*
* Available registers:
* (r6)
* (r18)
* (tr0)
*
*/
handle_exception:
/* Common 2nd level handler. */
/* First thing we need an appropriate stack pointer */
getcon SSR, r6
shlri r6, 30, r6
andi r6, 1, r6
pta stack_ok, tr0
bne r6, ZERO, tr0 /* Original stack pointer is fine */
/* Set stack pointer for user fault */
getcon KCR0, SP
movi THREAD_SIZE, r6 /* Point to the end */
add SP, r6, SP
stack_ok:
/* DEBUG : check for underflow/overflow of the kernel stack */
pta no_underflow, tr0
getcon KCR0, r6
movi 1024, r18
add r6, r18, r6
bge SP, r6, tr0 ! OK if SP is at least 1k above the bottom of the stack; else danger zone
/* Just panic to cause a crash. */
bad_sp:
ld.b r63, 0, r6
nop
no_underflow:
pta bad_sp, tr0
getcon kcr0, r6
movi THREAD_SIZE, r18
add r18, r6, r6
bgt SP, r6, tr0 ! sp above the stack
/* Make some room for the BASIC frame. */
movi -(FRAME_SIZE), r6
add SP, r6, SP
/* Could do this with no stalling if we had another spare register, but the
code below will be OK. */
ld.q r5, SAVED_R2, r6
ld.q r5, SAVED_R3, r18
st.q SP, FRAME_R(2), r6
ld.q r5, SAVED_R4, r6
st.q SP, FRAME_R(3), r18
ld.q r5, SAVED_R5, r18
st.q SP, FRAME_R(4), r6
ld.q r5, SAVED_R6, r6
st.q SP, FRAME_R(5), r18
ld.q r5, SAVED_R18, r18
st.q SP, FRAME_R(6), r6
ld.q r5, SAVED_TR0, r6
st.q SP, FRAME_R(18), r18
st.q SP, FRAME_T(0), r6
/* Keep old SP around */
getcon KCR1, r6
/* Save the rest of the general purpose registers */
st.q SP, FRAME_R(0), r0
st.q SP, FRAME_R(1), r1
st.q SP, FRAME_R(7), r7
st.q SP, FRAME_R(8), r8
st.q SP, FRAME_R(9), r9
st.q SP, FRAME_R(10), r10
st.q SP, FRAME_R(11), r11
st.q SP, FRAME_R(12), r12
st.q SP, FRAME_R(13), r13
st.q SP, FRAME_R(14), r14
/* SP is somewhere else */
st.q SP, FRAME_R(15), r6
st.q SP, FRAME_R(16), r16
st.q SP, FRAME_R(17), r17
/* r18 is saved earlier. */
st.q SP, FRAME_R(19), r19
st.q SP, FRAME_R(20), r20
st.q SP, FRAME_R(21), r21
st.q SP, FRAME_R(22), r22
st.q SP, FRAME_R(23), r23
st.q SP, FRAME_R(24), r24
st.q SP, FRAME_R(25), r25
st.q SP, FRAME_R(26), r26
st.q SP, FRAME_R(27), r27
st.q SP, FRAME_R(28), r28
st.q SP, FRAME_R(29), r29
st.q SP, FRAME_R(30), r30
st.q SP, FRAME_R(31), r31
st.q SP, FRAME_R(32), r32
st.q SP, FRAME_R(33), r33
st.q SP, FRAME_R(34), r34
st.q SP, FRAME_R(35), r35
st.q SP, FRAME_R(36), r36
st.q SP, FRAME_R(37), r37
st.q SP, FRAME_R(38), r38
st.q SP, FRAME_R(39), r39
st.q SP, FRAME_R(40), r40
st.q SP, FRAME_R(41), r41
st.q SP, FRAME_R(42), r42
st.q SP, FRAME_R(43), r43
st.q SP, FRAME_R(44), r44
st.q SP, FRAME_R(45), r45
st.q SP, FRAME_R(46), r46
st.q SP, FRAME_R(47), r47
st.q SP, FRAME_R(48), r48
st.q SP, FRAME_R(49), r49
st.q SP, FRAME_R(50), r50
st.q SP, FRAME_R(51), r51
st.q SP, FRAME_R(52), r52
st.q SP, FRAME_R(53), r53
st.q SP, FRAME_R(54), r54
st.q SP, FRAME_R(55), r55
st.q SP, FRAME_R(56), r56
st.q SP, FRAME_R(57), r57
st.q SP, FRAME_R(58), r58
st.q SP, FRAME_R(59), r59
st.q SP, FRAME_R(60), r60
st.q SP, FRAME_R(61), r61
st.q SP, FRAME_R(62), r62
/*
* Save the S* registers.
*/
getcon SSR, r61
st.q SP, FRAME_S(FSSR), r61
getcon SPC, r62
st.q SP, FRAME_S(FSPC), r62
movi -1, r62 /* Reset syscall_nr */
st.q SP, FRAME_S(FSYSCALL_ID), r62
/* Save the rest of the target registers */
gettr tr1, r6
st.q SP, FRAME_T(1), r6
gettr tr2, r6
st.q SP, FRAME_T(2), r6
gettr tr3, r6
st.q SP, FRAME_T(3), r6
gettr tr4, r6
st.q SP, FRAME_T(4), r6
gettr tr5, r6
st.q SP, FRAME_T(5), r6
gettr tr6, r6
st.q SP, FRAME_T(6), r6
gettr tr7, r6
st.q SP, FRAME_T(7), r6
! setup FP so that unwinder can wind back through nested kernel mode
! exceptions
add SP, ZERO, r14
#ifdef CONFIG_POOR_MANS_STRACE
/* We've pushed all the registers now, so only r2-r4 hold anything
* useful. Move them into callee save registers */
or r2, ZERO, r28
or r3, ZERO, r29
or r4, ZERO, r30
/* Preserve r2 as the event code */
movi evt_debug, r3
ori r3, 1, r3
ptabs r3, tr0
or SP, ZERO, r6
getcon TRA, r5
blink tr0, LINK
or r28, ZERO, r2
or r29, ZERO, r3
or r30, ZERO, r4
#endif
/* For syscall and debug race condition, get TRA now */
getcon TRA, r5
/* We are in a safe position to turn SR.BL off, but set IMASK=0xf
* Also set FD, to catch FPU usage in the kernel.
*
* benedict.gaster@superh.com 29/07/2002
*
* On all SH5-101 revisions it is unsafe to raise the IMASK and at the
* same time change BL from 1->0, as any pending interrupt of a level
* higher than the previous value of IMASK will leak through and be
* taken unexpectedly.
*
* To avoid this we raise the IMASK and then issue another PUTCON to
* enable interrupts.
*/
getcon SR, r6
movi SR_IMASK | SR_FD, r7
or r6, r7, r6
putcon r6, SR
movi SR_UNBLOCK_EXC, r7
and r6, r7, r6
putcon r6, SR
/* Now call the appropriate 3rd level handler */
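/* r2 = INTEVT/EXPEVT, a multiple of 0x20: >>3 turns it into a byte
offset into the .long jump table, and the further >>2 after the load
leaves the plain entry number that the third level handler sees in r2. */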
or r3, ZERO, LINK
movi trap_jtable, r3
shlri r2, 3, r2
ldx.l r2, r3, r3
shlri r2, 2, r2
ptabs r3, tr0
or SP, ZERO, r3
blink tr0, ZERO
/*
* Second level handler for VBR-based exceptions. Post-handlers.
*
* Post-handlers for interrupts (ret_from_irq), exceptions
* (ret_from_exception) and common reentrance doors (restore_all
* to get back to the original context, ret_from_syscall loop to
* check kernel exiting).
*
* ret_with_reschedule and work_notifysig are inner labels of
* the ret_from_syscall loop.
*
* In common to all stack-frame sensitive handlers.
*
* Inputs:
* (SP) struct pt_regs *, original registers' frame pointer (basic)
*
*/
.global ret_from_irq
ret_from_irq:
#ifdef CONFIG_POOR_MANS_STRACE
pta evt_debug_ret_from_irq, tr0
ori SP, 0, r2
blink tr0, LINK
#endif
ld.q SP, FRAME_S(FSSR), r6
shlri r6, 30, r6
andi r6, 1, r6
pta resume_kernel, tr0
bne r6, ZERO, tr0 /* no further checks */
STI()
pta ret_with_reschedule, tr0
blink tr0, ZERO /* Do not check softirqs */
.global ret_from_exception
ret_from_exception:
preempt_stop()
#ifdef CONFIG_POOR_MANS_STRACE
pta evt_debug_ret_from_exc, tr0
ori SP, 0, r2
blink tr0, LINK
#endif
ld.q SP, FRAME_S(FSSR), r6
shlri r6, 30, r6
andi r6, 1, r6
pta resume_kernel, tr0
bne r6, ZERO, tr0 /* no further checks */
/* Check softirqs */
#ifdef CONFIG_PREEMPT
pta ret_from_syscall, tr0
blink tr0, ZERO
resume_kernel:
CLI()
pta restore_all, tr0
getcon KCR0, r6
ld.l r6, TI_PRE_COUNT, r7
beq/u r7, ZERO, tr0
need_resched:
ld.l r6, TI_FLAGS, r7
movi (1 << TIF_NEED_RESCHED), r8
and r8, r7, r8
bne r8, ZERO, tr0
getcon SR, r7
andi r7, 0xf0, r7
bne r7, ZERO, tr0
movi preempt_schedule_irq, r7
ori r7, 1, r7
ptabs r7, tr1
blink tr1, LINK
pta need_resched, tr1
blink tr1, ZERO
#endif
.global ret_from_syscall
ret_from_syscall:
ret_with_reschedule:
getcon KCR0, r6 ! r6 contains current_thread_info
ld.l r6, TI_FLAGS, r7 ! r7 contains current_thread_info->flags
movi _TIF_NEED_RESCHED, r8
and r8, r7, r8
pta work_resched, tr0
bne r8, ZERO, tr0
pta restore_all, tr1
movi (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK), r8
and r8, r7, r8
pta work_notifysig, tr0
bne r8, ZERO, tr0
blink tr1, ZERO
work_resched:
pta ret_from_syscall, tr0
gettr tr0, LINK
movi schedule, r6
ptabs r6, tr0
blink tr0, ZERO /* Call schedule(), return on top */
work_notifysig:
gettr tr1, LINK
movi do_notify_resume, r6
ptabs r6, tr0
or SP, ZERO, r2
or r7, ZERO, r3
blink tr0, LINK /* Call do_notify_resume(regs, current_thread_info->flags), return here */
restore_all:
/* Do prefetches */
ld.q SP, FRAME_T(0), r6
ld.q SP, FRAME_T(1), r7
ld.q SP, FRAME_T(2), r8
ld.q SP, FRAME_T(3), r9
ptabs r6, tr0
ptabs r7, tr1
ptabs r8, tr2
ptabs r9, tr3
ld.q SP, FRAME_T(4), r6
ld.q SP, FRAME_T(5), r7
ld.q SP, FRAME_T(6), r8
ld.q SP, FRAME_T(7), r9
ptabs r6, tr4
ptabs r7, tr5
ptabs r8, tr6
ptabs r9, tr7
ld.q SP, FRAME_R(0), r0
ld.q SP, FRAME_R(1), r1
ld.q SP, FRAME_R(2), r2
ld.q SP, FRAME_R(3), r3
ld.q SP, FRAME_R(4), r4
ld.q SP, FRAME_R(5), r5
ld.q SP, FRAME_R(6), r6
ld.q SP, FRAME_R(7), r7
ld.q SP, FRAME_R(8), r8
ld.q SP, FRAME_R(9), r9
ld.q SP, FRAME_R(10), r10
ld.q SP, FRAME_R(11), r11
ld.q SP, FRAME_R(12), r12
ld.q SP, FRAME_R(13), r13
ld.q SP, FRAME_R(14), r14
ld.q SP, FRAME_R(16), r16
ld.q SP, FRAME_R(17), r17
ld.q SP, FRAME_R(18), r18
ld.q SP, FRAME_R(19), r19
ld.q SP, FRAME_R(20), r20
ld.q SP, FRAME_R(21), r21
ld.q SP, FRAME_R(22), r22
ld.q SP, FRAME_R(23), r23
ld.q SP, FRAME_R(24), r24
ld.q SP, FRAME_R(25), r25
ld.q SP, FRAME_R(26), r26
ld.q SP, FRAME_R(27), r27
ld.q SP, FRAME_R(28), r28
ld.q SP, FRAME_R(29), r29
ld.q SP, FRAME_R(30), r30
ld.q SP, FRAME_R(31), r31
ld.q SP, FRAME_R(32), r32
ld.q SP, FRAME_R(33), r33
ld.q SP, FRAME_R(34), r34
ld.q SP, FRAME_R(35), r35
ld.q SP, FRAME_R(36), r36
ld.q SP, FRAME_R(37), r37
ld.q SP, FRAME_R(38), r38
ld.q SP, FRAME_R(39), r39
ld.q SP, FRAME_R(40), r40
ld.q SP, FRAME_R(41), r41
ld.q SP, FRAME_R(42), r42
ld.q SP, FRAME_R(43), r43
ld.q SP, FRAME_R(44), r44
ld.q SP, FRAME_R(45), r45
ld.q SP, FRAME_R(46), r46
ld.q SP, FRAME_R(47), r47
ld.q SP, FRAME_R(48), r48
ld.q SP, FRAME_R(49), r49
ld.q SP, FRAME_R(50), r50
ld.q SP, FRAME_R(51), r51
ld.q SP, FRAME_R(52), r52
ld.q SP, FRAME_R(53), r53
ld.q SP, FRAME_R(54), r54
ld.q SP, FRAME_R(55), r55
ld.q SP, FRAME_R(56), r56
ld.q SP, FRAME_R(57), r57
ld.q SP, FRAME_R(58), r58
getcon SR, r59
movi SR_BLOCK_EXC, r60
or r59, r60, r59
putcon r59, SR /* SR.BL = 1, keep nesting out */
ld.q SP, FRAME_S(FSSR), r61
ld.q SP, FRAME_S(FSPC), r62
movi SR_ASID_MASK, r60
and r59, r60, r59
andc r61, r60, r61 /* Clear out older ASID */
or r59, r61, r61 /* Retain current ASID */
putcon r61, SSR
putcon r62, SPC
/* Ignore FSYSCALL_ID */
ld.q SP, FRAME_R(59), r59
ld.q SP, FRAME_R(60), r60
ld.q SP, FRAME_R(61), r61
ld.q SP, FRAME_R(62), r62
/* Last touch */
ld.q SP, FRAME_R(15), SP
rte
nop
/*
* Third level handlers for VBR-based exceptions. Adapting args to
* and/or deflecting to fourth level handlers.
*
* Fourth level handlers interface.
* Most are C-coded handlers directly pointed by the trap_jtable.
* (Third = Fourth level)
* Inputs:
* (r2) fault/interrupt code, entry number (e.g. NMI = 14,
* IRL0-3 (0000) = 16, RTLBMISS = 2, SYSCALL = 11, etc ...)
* (r3) struct pt_regs *, original registers' frame pointer
* (r4) Event (0 = interrupt, 1 = TLB miss fault, 2 = Not TLB miss fault)
* (r5) TRA control register (for syscall/debug benefit only)
* (LINK) return address
* (SP) = r3
*
* Kernel TLB fault handlers will get a slightly different interface.
* (r2) struct pt_regs *, original registers' frame pointer
* (r3) writeaccess, whether it's a store fault as opposed to load fault
* (r4) execaccess, whether it's a ITLB fault as opposed to DTLB fault
* (r5) Effective Address of fault
* (LINK) return address
* (SP) = r2
*
* fpu_error_or_IRQ? is a helper to deflect to the right cause.
*
*/
#ifdef CONFIG_MMU
tlb_miss_load:
or SP, ZERO, r2
or ZERO, ZERO, r3 /* Read */
or ZERO, ZERO, r4 /* Data */
getcon TEA, r5
pta call_do_page_fault, tr0
beq ZERO, ZERO, tr0
tlb_miss_store:
or SP, ZERO, r2
movi 1, r3 /* Write */
or ZERO, ZERO, r4 /* Data */
getcon TEA, r5
pta call_do_page_fault, tr0
beq ZERO, ZERO, tr0
itlb_miss_or_IRQ:
pta its_IRQ, tr0
beqi/u r4, EVENT_INTERRUPT, tr0
or SP, ZERO, r2
or ZERO, ZERO, r3 /* Read */
movi 1, r4 /* Text */
getcon TEA, r5
/* Fall through */
call_do_page_fault:
movi do_page_fault, r6
ptabs r6, tr0
blink tr0, ZERO
#endif /* CONFIG_MMU */
fpu_error_or_IRQA:
pta its_IRQ, tr0
beqi/l r4, EVENT_INTERRUPT, tr0
#ifdef CONFIG_SH_FPU
movi do_fpu_state_restore, r6
#else
movi do_exception_error, r6
#endif
ptabs r6, tr0
blink tr0, ZERO
fpu_error_or_IRQB:
pta its_IRQ, tr0
beqi/l r4, EVENT_INTERRUPT, tr0
#ifdef CONFIG_SH_FPU
movi do_fpu_state_restore, r6
#else
movi do_exception_error, r6
#endif
ptabs r6, tr0
blink tr0, ZERO
its_IRQ:
movi do_IRQ, r6
ptabs r6, tr0
blink tr0, ZERO
/*
* system_call/unknown_trap third level handler:
*
* Inputs:
* (r2) fault/interrupt code, entry number (TRAP = 11)
* (r3) struct pt_regs *, original registers' frame pointer
* (r4) Not used. Event (0=interrupt, 1=TLB miss fault, 2=Not TLB miss fault)
* (r5) TRA Control Reg (0x00xyzzzz: x=1 SYSCALL, y = #args, z=nr)
* (SP) = r3
* (LINK) return address: ret_from_exception
* (*r3) Syscall parms: SC#, arg0, arg1, ..., arg5 in order (Saved r2/r7)
*
* Outputs:
* (*r3) Syscall reply (Saved r2)
* (LINK) In case of syscall only it can be scrapped.
* Common second level post handler will be ret_from_syscall.
* Common (non-trace) exit point to that is syscall_ret (saving
* result to r2). Common bad exit point is syscall_bad (returning
* -ENOSYS in r2).
*
*/
unknown_trap:
/* Unknown Trap or User Trace */
movi do_unknown_trapa, r6
ptabs r6, tr0
ld.q r3, FRAME_R(9), r2 /* r2 = #arg << 16 | syscall # */
andi r2, 0x1ff, r2 /* r2 = syscall # */
blink tr0, LINK
pta syscall_ret, tr0
blink tr0, ZERO
/* New syscall implementation */
system_call:
pta unknown_trap, tr0
or r5, ZERO, r4 /* TRA (=r5) -> r4 */
shlri r4, 20, r4
bnei r4, 1, tr0 /* unknown_trap if not 0x1yzzzz */
/* It's a system call */
st.q r3, FRAME_S(FSYSCALL_ID), r5 /* ID (0x1yzzzz) -> stack */
andi r5, 0x1ff, r5 /* syscall # -> r5 */
STI()
pta syscall_allowed, tr0
movi NR_syscalls - 1, r4 /* Last valid */
bgeu/l r4, r5, tr0
syscall_bad:
/* Return ENOSYS ! */
movi -(ENOSYS), r2 /* Fall-through */
.global syscall_ret
syscall_ret:
st.q SP, FRAME_R(9), r2 /* Expecting SP back to BASIC frame */
#ifdef CONFIG_POOR_MANS_STRACE
/* nothing useful in registers at this point */
movi evt_debug2, r5
ori r5, 1, r5
ptabs r5, tr0
ld.q SP, FRAME_R(9), r2
or SP, ZERO, r3
blink tr0, LINK
#endif
ld.q SP, FRAME_S(FSPC), r2
addi r2, 4, r2 /* Move PC, being pre-execution event */
st.q SP, FRAME_S(FSPC), r2
pta ret_from_syscall, tr0
blink tr0, ZERO
/* A different return path for ret_from_fork, because we now need
* to call schedule_tail with the later kernels. Since prev is
* loaded into r2 by switch_to(), we can just call it straight away.
*/
.global ret_from_fork
ret_from_fork:
movi schedule_tail,r5
ori r5, 1, r5
ptabs r5, tr0
blink tr0, LINK
#ifdef CONFIG_POOR_MANS_STRACE
/* nothing useful in registers at this point */
movi evt_debug2, r5
ori r5, 1, r5
ptabs r5, tr0
ld.q SP, FRAME_R(9), r2
or SP, ZERO, r3
blink tr0, LINK
#endif
ld.q SP, FRAME_S(FSPC), r2
addi r2, 4, r2 /* Move PC, being pre-execution event */
st.q SP, FRAME_S(FSPC), r2
pta ret_from_syscall, tr0
blink tr0, ZERO
syscall_allowed:
/* Use LINK to deflect the exit point, default is syscall_ret */
pta syscall_ret, tr0
gettr tr0, LINK
pta syscall_notrace, tr0
getcon KCR0, r2
ld.l r2, TI_FLAGS, r4
movi _TIF_WORK_SYSCALL_MASK, r6
and r6, r4, r6
beq/l r6, ZERO, tr0
/* Trace it by calling syscall_trace before and after */
movi do_syscall_trace_enter, r4
or SP, ZERO, r2
ptabs r4, tr0
blink tr0, LINK
/* Save the retval */
st.q SP, FRAME_R(2), r2
/* Reload syscall number as r5 is trashed by do_syscall_trace_enter */
ld.q SP, FRAME_S(FSYSCALL_ID), r5
andi r5, 0x1ff, r5
pta syscall_ret_trace, tr0
gettr tr0, LINK
syscall_notrace:
/* Now point to the appropriate 4th level syscall handler */
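/* sys_call_table entries are .long, hence the << 2 index scaling below */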
movi sys_call_table, r4
shlli r5, 2, r5
ldx.l r4, r5, r5
ptabs r5, tr0
/* Prepare original args */
ld.q SP, FRAME_R(2), r2
ld.q SP, FRAME_R(3), r3
ld.q SP, FRAME_R(4), r4
ld.q SP, FRAME_R(5), r5
ld.q SP, FRAME_R(6), r6
ld.q SP, FRAME_R(7), r7
/* And now the trick for those syscalls requiring regs * ! */
or SP, ZERO, r8
/* Call it */
blink tr0, ZERO /* LINK is already properly set */
syscall_ret_trace:
/* We get back here only if under trace */
st.q SP, FRAME_R(9), r2 /* Save return value */
movi do_syscall_trace_leave, LINK
or SP, ZERO, r2
ptabs LINK, tr0
blink tr0, LINK
/* This needs to be done after any syscall tracing */
ld.q SP, FRAME_S(FSPC), r2
addi r2, 4, r2 /* Move PC, being pre-execution event */
st.q SP, FRAME_S(FSPC), r2
pta ret_from_syscall, tr0
blink tr0, ZERO /* Resume normal return sequence */
/*
* --- Switch to running under a particular ASID and return the previous ASID value
* --- The caller is assumed to have done a cli before calling this.
*
* Input r2 : new ASID
* Output r2 : old ASID
*/
.global switch_and_save_asid
switch_and_save_asid:
getcon sr, r0
movi 255, r4
shlli r4, 16, r4 /* r4 = mask to select ASID */
and r0, r4, r3 /* r3 = shifted old ASID */
andi r2, 255, r2 /* mask down new ASID */
shlli r2, 16, r2 /* align new ASID against SR.ASID */
andc r0, r4, r0 /* efface old ASID from SR */
or r0, r2, r0 /* insert the new ASID */
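/* Install the new SR via SSR/SPC + rte so the ASID switch takes effect
cleanly before execution resumes at 1: below */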
putcon r0, ssr
movi 1f, r0
putcon r0, spc
rte
nop
1:
ptabs LINK, tr0
shlri r3, 16, r2 /* r2 = old ASID */
blink tr0, r63
.global route_to_panic_handler
route_to_panic_handler:
/* Switch to real mode, goto panic_handler, don't return. Useful for
last-chance debugging, e.g. if no output wants to go to the console.
*/
movi panic_handler - CONFIG_PAGE_OFFSET, r1
ptabs r1, tr0
pta 1f, tr1
gettr tr1, r0
putcon r0, spc
getcon sr, r0
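/* Clear SR.MMU (bit 31) in the saved SR so the rte below resumes at 1: in real mode */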
movi 1, r1
shlli r1, 31, r1
andc r0, r1, r0
putcon r0, ssr
rte
nop
1: /* Now in real mode */
blink tr0, r63
nop
.global peek_real_address_q
peek_real_address_q:
/* Two args:
r2 : real mode address to peek
r2(out) : result quadword
This is provided as a cheapskate way of manipulating device
registers for debugging (to avoid the need to onchip_remap the debug
module, and to avoid the need to onchip_remap the watchpoint
controller in a way that identity maps sufficient bits to avoid the
SH5-101 cut2 silicon defect).
This code is not performance critical
*/
add.l r2, r63, r2 /* sign extend address */
getcon sr, r0 /* r0 = saved original SR */
movi 1, r1
shlli r1, 28, r1
or r0, r1, r1 /* r0 with block bit set */
putcon r1, sr /* now in critical section */
movi 1, r36
shlli r36, 31, r36
andc r1, r36, r1 /* turn sr.mmu off in real mode section */
putcon r1, ssr
movi .peek0 - CONFIG_PAGE_OFFSET, r36 /* real mode target address */
movi 1f, r37 /* virtual mode return addr */
putcon r36, spc
synco
rte
nop
.peek0: /* come here in real mode, don't touch caches!!
still in critical section (sr.bl==1) */
putcon r0, ssr
putcon r37, spc
/* Here's the actual peek. If the address is bad, all bets are off as to
* what will happen (handlers invoked in real mode = bad news) */
ld.q r2, 0, r2
synco
rte /* Back to virtual mode */
nop
1:
ptabs LINK, tr0
blink tr0, r63
.global poke_real_address_q
poke_real_address_q:
/* Two args:
r2 : real mode address to poke
r3 : quadword value to write.
This is provided as a cheapskate way of manipulating device
registers for debugging (to avoid the need to onchip_remap the debug
module, and to avoid the need to onchip_remap the watchpoint
controller in a way that identity maps sufficient bits to avoid the
SH5-101 cut2 silicon defect).
This code is not performance critical
*/
add.l r2, r63, r2 /* sign extend address */
getcon sr, r0 /* r0 = saved original SR */
movi 1, r1
shlli r1, 28, r1
or r0, r1, r1 /* r0 with block bit set */
putcon r1, sr /* now in critical section */
movi 1, r36
shlli r36, 31, r36
andc r1, r36, r1 /* turn sr.mmu off in real mode section */
putcon r1, ssr
movi .poke0-CONFIG_PAGE_OFFSET, r36 /* real mode target address */
movi 1f, r37 /* virtual mode return addr */
putcon r36, spc
synco
rte
nop
.poke0: /* come here in real mode, don't touch caches!!
still in critical section (sr.bl==1) */
putcon r0, ssr
putcon r37, spc
/* Here's the actual poke. If the address is bad, all bets are off as to
* what will happen (handlers invoked in real mode = bad news) */
st.q r2, 0, r3
synco
rte /* Back to virtual mode */
nop
1:
ptabs LINK, tr0
blink tr0, r63
#ifdef CONFIG_MMU
/*
* --- User Access Handling Section
*/
/*
* User Access support. It has all moved into non-inlined assembler
* functions here.
*
* __kernel_size_t __copy_user(void *__to, const void *__from,
* __kernel_size_t __n)
*
* Inputs:
* (r2) target address
* (r3) source address
* (r4) size in bytes
*
* Outputs:
* (*r2) target data
* (r2) non-copied bytes
*
* If a fault occurs on the user pointer, bail out early and return the
* number of bytes not copied in r2.
* Strategy : for large blocks, call a real memcpy function which can
* move >1 byte at a time using unaligned ld/st instructions, and can
* manipulate the cache using prefetch + alloco to improve the speed
* further. If a fault occurs in that function, just revert to the
* byte-by-byte approach used for small blocks; this is rare so the
* performance hit for that case does not matter.
*
* For small blocks it's not worth the overhead of setting up and calling
* the memcpy routine; do the copy a byte at a time.
*
*/
.global __copy_user
__copy_user:
pta __copy_user_byte_by_byte, tr1
movi 16, r0 ! this value is a best guess, should tune it by benchmarking
bge/u r0, r4, tr1
pta copy_user_memcpy, tr0
addi SP, -32, SP
/* Save arguments in case we have to fix-up unhandled page fault */
st.q SP, 0, r2
st.q SP, 8, r3
st.q SP, 16, r4
st.q SP, 24, r35 ! r35 is callee-save
/* Save LINK in a register to reduce RTS time later (otherwise
ld SP,*,LINK;ptabs LINK;trn;blink trn,r63 becomes a critical path) */
ori LINK, 0, r35
blink tr0, LINK
/* Copy completed normally if we get back here */
ptabs r35, tr0
ld.q SP, 24, r35
/* don't restore r2-r4, pointless */
/* set result=r2 to zero as the copy must have succeeded. */
or r63, r63, r2
addi SP, 32, SP
blink tr0, r63 ! RTS
.global __copy_user_fixup
__copy_user_fixup:
/* Restore stack frame */
ori r35, 0, LINK
ld.q SP, 24, r35
ld.q SP, 16, r4
ld.q SP, 8, r3
ld.q SP, 0, r2
addi SP, 32, SP
/* Fall through to original code, in the 'same' state we entered with */
/* The slow byte-by-byte method is used if the fast copy traps due to a bad
user address. In that rare case, the speed drop can be tolerated. */
__copy_user_byte_by_byte:
pta ___copy_user_exit, tr1
pta ___copy_user1, tr0
beq/u r4, r63, tr1 /* early exit for zero length copy */
sub r2, r3, r0
addi r0, -1, r0
___copy_user1:
ld.b r3, 0, r5 /* Fault address 1 */
/* Could rewrite this to use just 1 add, but the second comes 'free'
due to load latency */
addi r3, 1, r3
addi r4, -1, r4 /* No real fixup required */
___copy_user2:
stx.b r3, r0, r5 /* Fault address 2 */
bne r4, ZERO, tr0
___copy_user_exit:
or r4, ZERO, r2
ptabs LINK, tr0
blink tr0, ZERO
/*
* __kernel_size_t __clear_user(void *addr, __kernel_size_t size)
*
* Inputs:
* (r2) target address
* (r3) size in bytes
*
* Outputs:
* (*r2) zero-ed target data
* (r2) non-zero-ed bytes
*/
.global __clear_user
__clear_user:
pta ___clear_user_exit, tr1
pta ___clear_user1, tr0
beq/u r3, r63, tr1
___clear_user1:
st.b r2, 0, ZERO /* Fault address */
addi r2, 1, r2
addi r3, -1, r3 /* No real fixup required */
bne r3, ZERO, tr0
___clear_user_exit:
or r3, ZERO, r2
ptabs LINK, tr0
blink tr0, ZERO
#endif /* CONFIG_MMU */
/*
* int __strncpy_from_user(unsigned long __dest, unsigned long __src,
* int __count)
*
* Inputs:
* (r2) target address
* (r3) source address
* (r4) maximum size in bytes
*
* Outputs:
* (*r2) copied data
* (r2) -EFAULT (in case of faulting)
* number of copied bytes (otherwise)
*/
.global __strncpy_from_user
__strncpy_from_user:
pta ___strncpy_from_user1, tr0
pta ___strncpy_from_user_done, tr1
or r4, ZERO, r5 /* r5 = original count */
beq/u r4, r63, tr1 /* early exit if r4==0 */
movi -(EFAULT), r6 /* r6 = reply, no real fixup */
or ZERO, ZERO, r7 /* r7 = data, clear top byte of data */
___strncpy_from_user1:
ld.b r3, 0, r7 /* Fault address: only in reading */
st.b r2, 0, r7
addi r2, 1, r2
addi r3, 1, r3
beq/u ZERO, r7, tr1
addi r4, -1, r4 /* return real number of copied bytes */
bne/l ZERO, r4, tr0
___strncpy_from_user_done:
sub r5, r4, r6 /* If done, return copied */
___strncpy_from_user_exit:
or r6, ZERO, r2
ptabs LINK, tr0
blink tr0, ZERO
/*
* extern long __strnlen_user(const char *__s, long __n)
*
* Inputs:
* (r2) source address
* (r3) source size in bytes
*
* Outputs:
* (r2) -EFAULT (in case of faulting)
* string length (otherwise)
*/
.global __strnlen_user
__strnlen_user:
pta ___strnlen_user_set_reply, tr0
pta ___strnlen_user1, tr1
or ZERO, ZERO, r5 /* r5 = counter */
movi -(EFAULT), r6 /* r6 = reply, no real fixup */
or ZERO, ZERO, r7 /* r7 = data, clear top byte of data */
beq r3, ZERO, tr0
___strnlen_user1:
ldx.b r2, r5, r7 /* Fault address: only in reading */
addi r3, -1, r3 /* No real fixup */
addi r5, 1, r5
beq r3, ZERO, tr0
bne r7, ZERO, tr1
! The line below used to be active. It led to a junk byte lying between each pair
! of entries in the argv & envp structures in memory. Whilst the program saw the right data
! via the argv and envp arguments to main, it meant the 'flat' representation visible through
! /proc/$pid/cmdline was corrupt, causing trouble with ps, for example.
! addi r5, 1, r5 /* Include '\0' */
___strnlen_user_set_reply:
or r5, ZERO, r6 /* If done, return counter */
___strnlen_user_exit:
or r6, ZERO, r2
ptabs LINK, tr0
blink tr0, ZERO
/*
* extern long __get_user_asm_?(void *val, long addr)
*
* Inputs:
* (r2) dest address
* (r3) source address (in User Space)
*
* Outputs:
* (r2) -EFAULT (faulting)
* 0 (not faulting)
*/
.global __get_user_asm_b
__get_user_asm_b:
or r2, ZERO, r4
movi -(EFAULT), r2 /* r2 = reply, no real fixup */
___get_user_asm_b1:
ld.b r3, 0, r5 /* r5 = data */
st.b r4, 0, r5
or ZERO, ZERO, r2
___get_user_asm_b_exit:
ptabs LINK, tr0
blink tr0, ZERO
.global __get_user_asm_w
__get_user_asm_w:
or r2, ZERO, r4
movi -(EFAULT), r2 /* r2 = reply, no real fixup */
___get_user_asm_w1:
ld.w r3, 0, r5 /* r5 = data */
st.w r4, 0, r5
or ZERO, ZERO, r2
___get_user_asm_w_exit:
ptabs LINK, tr0
blink tr0, ZERO
.global __get_user_asm_l
__get_user_asm_l:
or r2, ZERO, r4
movi -(EFAULT), r2 /* r2 = reply, no real fixup */
___get_user_asm_l1:
ld.l r3, 0, r5 /* r5 = data */
st.l r4, 0, r5
or ZERO, ZERO, r2
___get_user_asm_l_exit:
ptabs LINK, tr0
blink tr0, ZERO
.global __get_user_asm_q
__get_user_asm_q:
or r2, ZERO, r4
movi -(EFAULT), r2 /* r2 = reply, no real fixup */
___get_user_asm_q1:
ld.q r3, 0, r5 /* r5 = data */
st.q r4, 0, r5
or ZERO, ZERO, r2
___get_user_asm_q_exit:
ptabs LINK, tr0
blink tr0, ZERO
/*
* extern long __put_user_asm_?(void *pval, long addr)
*
* Inputs:
* (r2) kernel pointer to value
* (r3) dest address (in User Space)
*
* Outputs:
* (r2) -EFAULT (faulting)
* 0 (not faulting)
*/
.global __put_user_asm_b
__put_user_asm_b:
ld.b r2, 0, r4 /* r4 = data */
movi -(EFAULT), r2 /* r2 = reply, no real fixup */
___put_user_asm_b1:
st.b r3, 0, r4
or ZERO, ZERO, r2
___put_user_asm_b_exit:
ptabs LINK, tr0
blink tr0, ZERO
.global __put_user_asm_w
__put_user_asm_w:
ld.w r2, 0, r4 /* r4 = data */
movi -(EFAULT), r2 /* r2 = reply, no real fixup */
___put_user_asm_w1:
st.w r3, 0, r4
or ZERO, ZERO, r2
___put_user_asm_w_exit:
ptabs LINK, tr0
blink tr0, ZERO
.global __put_user_asm_l
__put_user_asm_l:
ld.l r2, 0, r4 /* r4 = data */
movi -(EFAULT), r2 /* r2 = reply, no real fixup */
___put_user_asm_l1:
st.l r3, 0, r4
or ZERO, ZERO, r2
___put_user_asm_l_exit:
ptabs LINK, tr0
blink tr0, ZERO
.global __put_user_asm_q
__put_user_asm_q:
ld.q r2, 0, r4 /* r4 = data */
movi -(EFAULT), r2 /* r2 = reply, no real fixup */
___put_user_asm_q1:
st.q r3, 0, r4
or ZERO, ZERO, r2
___put_user_asm_q_exit:
ptabs LINK, tr0
blink tr0, ZERO
panic_stash_regs:
/* The idea is : when we get an unhandled panic, we dump the registers
to a known memory location, then just sit in a tight loop.
This allows the human to look at the memory region through the GDB
session (assuming the debug module's SHwy initiator isn't locked up
or anything), to hopefully analyze the cause of the panic. */
/* On entry, former r15 (SP) is in DCR
former r0 is at resvec_save_area + 0
former r1 is at resvec_save_area + 8
former tr0 is at resvec_save_area + 32
DCR is the only register whose value is lost altogether.
*/
movi 0xffffffff80000000, r0 ! phy of dump area
ld.q SP, 0x000, r1 ! former r0
st.q r0, 0x000, r1
ld.q SP, 0x008, r1 ! former r1
st.q r0, 0x008, r1
st.q r0, 0x010, r2
st.q r0, 0x018, r3
st.q r0, 0x020, r4
st.q r0, 0x028, r5
st.q r0, 0x030, r6
st.q r0, 0x038, r7
st.q r0, 0x040, r8
st.q r0, 0x048, r9
st.q r0, 0x050, r10
st.q r0, 0x058, r11
st.q r0, 0x060, r12
st.q r0, 0x068, r13
st.q r0, 0x070, r14
getcon dcr, r14
st.q r0, 0x078, r14
st.q r0, 0x080, r16
st.q r0, 0x088, r17
st.q r0, 0x090, r18
st.q r0, 0x098, r19
st.q r0, 0x0a0, r20
st.q r0, 0x0a8, r21
st.q r0, 0x0b0, r22
st.q r0, 0x0b8, r23
st.q r0, 0x0c0, r24
st.q r0, 0x0c8, r25
st.q r0, 0x0d0, r26
st.q r0, 0x0d8, r27
st.q r0, 0x0e0, r28
st.q r0, 0x0e8, r29
st.q r0, 0x0f0, r30
st.q r0, 0x0f8, r31
st.q r0, 0x100, r32
st.q r0, 0x108, r33
st.q r0, 0x110, r34
st.q r0, 0x118, r35
st.q r0, 0x120, r36
st.q r0, 0x128, r37
st.q r0, 0x130, r38
st.q r0, 0x138, r39
st.q r0, 0x140, r40
st.q r0, 0x148, r41
st.q r0, 0x150, r42
st.q r0, 0x158, r43
st.q r0, 0x160, r44
st.q r0, 0x168, r45
st.q r0, 0x170, r46
st.q r0, 0x178, r47
st.q r0, 0x180, r48
st.q r0, 0x188, r49
st.q r0, 0x190, r50
st.q r0, 0x198, r51
st.q r0, 0x1a0, r52
st.q r0, 0x1a8, r53
st.q r0, 0x1b0, r54
st.q r0, 0x1b8, r55
st.q r0, 0x1c0, r56
st.q r0, 0x1c8, r57
st.q r0, 0x1d0, r58
st.q r0, 0x1d8, r59
st.q r0, 0x1e0, r60
st.q r0, 0x1e8, r61
st.q r0, 0x1f0, r62
st.q r0, 0x1f8, r63 ! bogus, but for consistency's sake...
ld.q SP, 0x020, r1 ! former tr0
st.q r0, 0x200, r1
gettr tr1, r1
st.q r0, 0x208, r1
gettr tr2, r1
st.q r0, 0x210, r1
gettr tr3, r1
st.q r0, 0x218, r1
gettr tr4, r1
st.q r0, 0x220, r1
gettr tr5, r1
st.q r0, 0x228, r1
gettr tr6, r1
st.q r0, 0x230, r1
gettr tr7, r1
st.q r0, 0x238, r1
getcon sr, r1
getcon ssr, r2
getcon pssr, r3
getcon spc, r4
getcon pspc, r5
getcon intevt, r6
getcon expevt, r7
getcon pexpevt, r8
getcon tra, r9
getcon tea, r10
getcon kcr0, r11
getcon kcr1, r12
getcon vbr, r13
getcon resvec, r14
st.q r0, 0x240, r1
st.q r0, 0x248, r2
st.q r0, 0x250, r3
st.q r0, 0x258, r4
st.q r0, 0x260, r5
st.q r0, 0x268, r6
st.q r0, 0x270, r7
st.q r0, 0x278, r8
st.q r0, 0x280, r9
st.q r0, 0x288, r10
st.q r0, 0x290, r11
st.q r0, 0x298, r12
st.q r0, 0x2a0, r13
st.q r0, 0x2a8, r14
getcon SPC,r2
getcon SSR,r3
getcon EXPEVT,r4
/* Prepare to jump to C - physical address */
movi panic_handler-CONFIG_PAGE_OFFSET, r1
ori r1, 1, r1
ptabs r1, tr0
getcon DCR, SP
blink tr0, ZERO
nop
nop
nop
nop
/*
* --- Signal Handling Section
*/
/*
* extern long long _sa_default_rt_restorer
* extern long long _sa_default_restorer
*
* or, better,
*
* extern void _sa_default_rt_restorer(void)
* extern void _sa_default_restorer(void)
*
* Code prototypes to do a sys_rt_sigreturn() or sys_sigreturn()
* from user space. Copied into user space by signal management.
* Both must be quad aligned and 2 quad long (4 instructions).
*
*/
.balign 8
.global sa_default_rt_restorer
sa_default_rt_restorer:
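/* movi/shori leave (0x10 << 16) | __NR_rt_sigreturn in r9, i.e. the
0x1yzzzz trap code that system_call expects; sa_default_restorer
below does the same with __NR_sigreturn. */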
movi 0x10, r9
shori __NR_rt_sigreturn, r9
trapa r9
nop
.balign 8
.global sa_default_restorer
sa_default_restorer:
movi 0x10, r9
shori __NR_sigreturn, r9
trapa r9
nop
/*
* --- __ex_table Section
*/
/*
* User Access Exception Table.
*/
.section __ex_table, "a"
.global asm_uaccess_start /* Just a marker */
asm_uaccess_start:
#ifdef CONFIG_MMU
.long ___copy_user1, ___copy_user_exit
.long ___copy_user2, ___copy_user_exit
.long ___clear_user1, ___clear_user_exit
#endif
.long ___strncpy_from_user1, ___strncpy_from_user_exit
.long ___strnlen_user1, ___strnlen_user_exit
.long ___get_user_asm_b1, ___get_user_asm_b_exit
.long ___get_user_asm_w1, ___get_user_asm_w_exit
.long ___get_user_asm_l1, ___get_user_asm_l_exit
.long ___get_user_asm_q1, ___get_user_asm_q_exit
.long ___put_user_asm_b1, ___put_user_asm_b_exit
.long ___put_user_asm_w1, ___put_user_asm_w_exit
.long ___put_user_asm_l1, ___put_user_asm_l_exit
.long ___put_user_asm_q1, ___put_user_asm_q_exit
.global asm_uaccess_end /* Just a marker */
asm_uaccess_end:
/*
* --- .init.text Section
*/
__INIT
/*
* void trap_init (void)
*
*/
.global trap_init
trap_init:
addi SP, -24, SP /* Room to save r28/r29/r30 */
st.q SP, 0, r28
st.q SP, 8, r29
st.q SP, 16, r30
/* Set VBR and RESVEC */
movi LVBR_block, r19
andi r19, -4, r19 /* reset MMUOFF + reserved */
/* For RESVEC exceptions we force the MMU off, which means we need the
physical address. */
movi LRESVEC_block-CONFIG_PAGE_OFFSET, r20
andi r20, -4, r20 /* reset reserved */
ori r20, 1, r20 /* set MMUOFF */
putcon r19, VBR
putcon r20, RESVEC
/* Sanity check */
movi LVBR_block_end, r21
andi r21, -4, r21
movi BLOCK_SIZE, r29 /* r29 = expected size */
or r19, ZERO, r30
add r19, r29, r19
/*
* Ugly, but better loop forever now than crash afterwards.
* We should print a message, but if we touch LVBR or
* LRESVEC blocks we should not be surprised if we get stuck
* in trap_init().
*/
pta trap_init_loop, tr1
gettr tr1, r28 /* r28 = trap_init_loop */
sub r21, r30, r30 /* r30 = actual size */
/*
* VBR/RESVEC handlers overlap by being bigger than
* allowed. Very bad. Just loop forever.
* (r28) panic/loop address
* (r29) expected size
* (r30) actual size
*/
trap_init_loop:
bne r19, r21, tr1
/* Now that exception vectors are set up reset SR.BL */
getcon SR, r22
movi SR_UNBLOCK_EXC, r23
and r22, r23, r22
putcon r22, SR
addi SP, 24, SP
ptabs LINK, tr0
blink tr0, ZERO