/*
 * Parisc tlb and cache flushing support
 * Copyright (C) 2000 Hewlett-Packard (John Marvin)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/*
 * NOTE: fdc,fic, and pdc instructions that use base register modification
 *	should only use index and base registers that are not shadowed,
 *	so that the fast path emulation in the non access miss handler
 *	can be used.
 */

/*
 * On a wide (64-bit) kernel the conditional completers on addib/cmpb/andcm
 * need the ",*" 64-bit condition forms; on a narrow kernel the plain 32-bit
 * conditions are used.  These macros let the code below spell a condition
 * once (e.g. "ADDIB>") and get the correct completer for either width.
 */
#ifdef __LP64__
#define ADDIB	addib,*
#define CMPB	cmpb,*
#define ANDCM	andcm,*

	.level	2.0w
#else
#define ADDIB	addib,
#define CMPB	cmpb,
#define ANDCM	andcm

	.level	2.0
#endif

#include <asm/assembly.h>
#include <asm/psw.h>
#include <asm/pgtable.h>
#include <asm/cache.h>

	.text
	.align	128

	.export flush_tlb_all_local,code

/*
 * flush_tlb_all_local: purge this CPU's entire instruction and data TLB.
 *
 * The TLB geometry (space base/stride/count, offset base/stride/count,
 * and the per-stride LOOP factor) is read from the firmware-probed
 * cache_info structure.
 *
 * Clobbers: %r1, %r19-%r22, %r28, %r29, %r31, %arg0-%arg3, %sr1.
 */
flush_tlb_all_local:
	.proc
	.callinfo NO_CALLS
	.entry

	/*
	 * The pitlbe and pdtlbe instructions should only be used to
	 * flush the entire tlb. Also, there needs to be no intervening
	 * tlb operations, e.g. tlb misses, so the operation needs
	 * to happen in real mode with all interruptions disabled.
	 */

	/*
	 * Once again, we do the rfi dance ... some day we need examine
	 * all of our uses of this type of code and see what can be
	 * consolidated.
	 */

	rsm	PSW_SM_I,%r19		/* save I-bit state; relied upon translation! */
	nop				/* 7 nops: let the rsm take effect while */
	nop				/* translation is still on */
	nop
	nop
	nop
	nop
	nop

	rsm	PSW_SM_Q,%r0		/* Turn off Q bit to load iia queue */
	ldil	L%REAL_MODE_PSW, %r1
	ldo	R%REAL_MODE_PSW(%r1), %r1
	mtctl	%r1, %cr22		/* IPSW to be installed by the rfi */
	mtctl	%r0, %cr17		/* IIASQ head: space 0 (real mode) */
	mtctl	%r0, %cr17		/* IIASQ tail */
	ldil	L%PA(1f),%r1
	ldo	R%PA(1f)(%r1),%r1
	mtctl	%r1, %cr18		/* IIAOQ head: physical address of 1f */
	ldo	4(%r1), %r1
	mtctl	%r1, %cr18		/* IIAOQ tail: 1f + 4 */
	rfi				/* resume in real mode at 1f */
	nop

1:      ldil	L%PA(cache_info),%r1
	ldo	R%PA(cache_info)(%r1),%r1

	/* Flush Instruction Tlb */

	LDREG	ITLB_SID_BASE(%r1),%r20
	LDREG	ITLB_SID_STRIDE(%r1),%r21
	LDREG	ITLB_SID_COUNT(%r1),%r22
	LDREG	ITLB_OFF_BASE(%r1),%arg0
	LDREG	ITLB_OFF_STRIDE(%r1),%arg1
	LDREG	ITLB_OFF_COUNT(%r1),%arg2
	LDREG	ITLB_LOOP(%r1),%arg3

	ADDIB=	-1,%arg3,fitoneloop	/* Preadjust and test */
	movb,<,n %arg3,%r31,fitdone	/* If loop < 0, skip */
	copy	%arg0,%r28		/* Init base addr */

fitmanyloop:				/* Loop if LOOP >= 2 */
	mtsp	%r20,%sr1
	add	%r21,%r20,%r20		/* increment space */
	copy	%arg2,%r29		/* Init middle loop count */

fitmanymiddle:				/* Loop if LOOP >= 2 */
	ADDIB>	-1,%r31,fitmanymiddle	/* Adjusted inner loop decr */
	pitlbe	0(%sr1,%r28)		/* purge one ITLB entry (delay slot) */
	pitlbe,m %arg1(%sr1,%r28)	/* Last pitlbe and addr adjust */
	ADDIB>	-1,%r29,fitmanymiddle	/* Middle loop decr */
	copy	%arg3,%r31		/* Re-init inner loop count */

	movb,tr	%arg0,%r28,fitmanyloop	/* Re-init base addr */
	ADDIB<=,n -1,%r22,fitdone	/* Outer loop count decr */

fitoneloop:				/* Loop if LOOP = 1 */
	mtsp	%r20,%sr1
	copy	%arg0,%r28		/* init base addr */
	copy	%arg2,%r29		/* init middle loop count */

fitonemiddle:				/* Loop if LOOP = 1 */
	ADDIB>	-1,%r29,fitonemiddle	/* Middle loop count decr */
	pitlbe,m %arg1(%sr1,%r28)	/* pitlbe for one loop (delay slot) */

	ADDIB>	-1,%r22,fitoneloop	/* Outer loop count decr */
	add	%r21,%r20,%r20		/* increment space */

fitdone:

	/* Flush Data Tlb */

	LDREG	DTLB_SID_BASE(%r1),%r20
	LDREG	DTLB_SID_STRIDE(%r1),%r21
	LDREG	DTLB_SID_COUNT(%r1),%r22
	LDREG	DTLB_OFF_BASE(%r1),%arg0
	LDREG	DTLB_OFF_STRIDE(%r1),%arg1
	LDREG	DTLB_OFF_COUNT(%r1),%arg2
	LDREG	DTLB_LOOP(%r1),%arg3

	ADDIB=	-1,%arg3,fdtoneloop	/* Preadjust and test */
	movb,<,n %arg3,%r31,fdtdone	/* If loop < 0, skip */
	copy	%arg0,%r28		/* Init base addr */

fdtmanyloop:				/* Loop if LOOP >= 2 */
	mtsp	%r20,%sr1
	add	%r21,%r20,%r20		/* increment space */
	copy	%arg2,%r29		/* Init middle loop count */

fdtmanymiddle:				/* Loop if LOOP >= 2 */
	ADDIB>	-1,%r31,fdtmanymiddle	/* Adjusted inner loop decr */
	pdtlbe	0(%sr1,%r28)		/* purge one DTLB entry (delay slot) */
	pdtlbe,m %arg1(%sr1,%r28)	/* Last pdtlbe and addr adjust */
	ADDIB>	-1,%r29,fdtmanymiddle	/* Middle loop decr */
	copy	%arg3,%r31		/* Re-init inner loop count */

	movb,tr	%arg0,%r28,fdtmanyloop	/* Re-init base addr */
	ADDIB<=,n -1,%r22,fdtdone	/* Outer loop count decr */

fdtoneloop:				/* Loop if LOOP = 1 */
	mtsp	%r20,%sr1
	copy	%arg0,%r28		/* init base addr */
	copy	%arg2,%r29		/* init middle loop count */

fdtonemiddle:				/* Loop if LOOP = 1 */
	ADDIB>	-1,%r29,fdtonemiddle	/* Middle loop count decr */
	pdtlbe,m %arg1(%sr1,%r28)	/* pdtlbe for one loop (delay slot) */

	ADDIB>	-1,%r22,fdtoneloop	/* Outer loop count decr */
	add	%r21,%r20,%r20		/* increment space */

fdtdone:

	/* Switch back to virtual mode */

	rsm	PSW_SM_Q,%r0		/* clear Q bit to load iia queue */
	ldil	L%KERNEL_PSW, %r1
	ldo	R%KERNEL_PSW(%r1), %r1
	or	%r1,%r19,%r1		/* Set I bit if set on entry */
	mtctl	%r1, %cr22		/* IPSW = kernel PSW (+ saved I bit) */
	mtctl	%r0, %cr17		/* IIASQ head */
	mtctl	%r0, %cr17		/* IIASQ tail */
	ldil	L%(2f), %r1
	ldo	R%(2f)(%r1), %r1
	mtctl	%r1, %cr18		/* IIAOQ head: virtual address of 2f */
	ldo	4(%r1), %r1
	mtctl	%r1, %cr18		/* IIAOQ tail: 2f + 4 */
	rfi				/* back to virtual mode at 2f */
	nop

2:      bv	%r0(%r2)
	nop
	.exit

	.procend

	.export flush_instruction_cache_local,code
	.import cache_info,data

/*
 * flush_instruction_cache_local: flush this CPU's entire instruction
 * cache using fice over the geometry recorded in cache_info.
 *
 * Clobbers: %r1, %r31, %arg0-%arg3, %sr1.
 */
flush_instruction_cache_local:
	.proc
	.callinfo NO_CALLS
	.entry

	mtsp	%r0,%sr1		/* fice addresses are in space 0 */
	ldil	L%cache_info,%r1
	ldo	R%cache_info(%r1),%r1

	/* Flush Instruction Cache */

	LDREG	ICACHE_BASE(%r1),%arg0
	LDREG	ICACHE_STRIDE(%r1),%arg1
	LDREG	ICACHE_COUNT(%r1),%arg2
	LDREG	ICACHE_LOOP(%r1),%arg3
	ADDIB=	-1,%arg3,fioneloop	/* Preadjust and test */
	movb,<,n %arg3,%r31,fisync	/* If loop < 0, do sync */

fimanyloop:				/* Loop if LOOP >= 2 */
	ADDIB>	-1,%r31,fimanyloop	/* Adjusted inner loop decr */
	fice	0(%sr1,%arg0)		/* flush one entry (delay slot) */
	fice,m	%arg1(%sr1,%arg0)	/* Last fice and addr adjust */
	movb,tr	%arg3,%r31,fimanyloop	/* Re-init inner loop count */
	ADDIB<=,n -1,%arg2,fisync	/* Outer loop decr */

fioneloop:				/* Loop if LOOP = 1 */
	ADDIB>	-1,%arg2,fioneloop	/* Outer loop count decr */
	fice,m	%arg1(%sr1,%arg0)	/* Fice for one loop (delay slot) */

fisync:
	sync				/* wait for the flushes to complete */
	bv	%r0(%r2)
	nop
	.exit

	.procend

	.export flush_data_cache_local,code
	.import cache_info,data

/*
 * flush_data_cache_local: flush this CPU's entire data cache using fdce
 * over the geometry recorded in cache_info.  Runs with interrupts
 * disabled (saved in %r22, restored before return).
 *
 * Clobbers: %r1, %r22, %r31, %arg0-%arg3, %sr1.
 */
flush_data_cache_local:
	.proc
	.callinfo NO_CALLS
	.entry

	mtsp	%r0,%sr1		/* fdce addresses are in space 0 */
	ldil	L%cache_info,%r1
	ldo	R%cache_info(%r1),%r1

	/* Flush Data Cache */

	LDREG	DCACHE_BASE(%r1),%arg0
	LDREG	DCACHE_STRIDE(%r1),%arg1
	LDREG	DCACHE_COUNT(%r1),%arg2
	LDREG	DCACHE_LOOP(%r1),%arg3
	rsm	PSW_SM_I,%r22		/* disable interrupts, save old I bit */
	ADDIB=	-1,%arg3,fdoneloop	/* Preadjust and test */
	movb,<,n %arg3,%r31,fdsync	/* If loop < 0, do sync */

fdmanyloop:				/* Loop if LOOP >= 2 */
	ADDIB>	-1,%r31,fdmanyloop	/* Adjusted inner loop decr */
	fdce	0(%sr1,%arg0)		/* flush one entry (delay slot) */
	fdce,m	%arg1(%sr1,%arg0)	/* Last fdce and addr adjust */
	movb,tr	%arg3,%r31,fdmanyloop	/* Re-init inner loop count */
	ADDIB<=,n -1,%arg2,fdsync	/* Outer loop decr */

fdoneloop:				/* Loop if LOOP = 1 */
	ADDIB>	-1,%arg2,fdoneloop	/* Outer loop count decr */
	fdce,m	%arg1(%sr1,%arg0)	/* Fdce for one loop (delay slot) */

fdsync:
	syncdma				/* drain DMA before declaring done */
	sync
	mtsm	%r22			/* restore interrupt state */
	bv	%r0(%r2)
	nop
	.exit

	.procend

	.export copy_user_page_asm,code
/*
 * copy_user_page_asm: copy one 4096-byte page.
 *   %r26 = destination, %r25 = source.
 * 64 iterations of 64 bytes each (16 word load/store pairs).
 * Clobbers: %r1, %r19-%r22, %r25, %r26.
 */
copy_user_page_asm:
	.proc
	.callinfo NO_CALLS
	.entry

	ldi	64,%r1			/* 64 iterations x 64 bytes = 4k page */

	/*
	 * This loop is optimized for PCXL/PCXL2 ldw/ldw and stw/stw
	 * bundles (very restricted rules for bundling). It probably
	 * does OK on PCXU and better, but we could do better with
	 * ldd/std instructions. Note that until (if) we start saving
	 * the full 64 bit register values on interrupt, we can't
	 * use ldd/std on a 32 bit kernel.
	 */

1:
	ldw	0(%r25),%r19
	ldw	4(%r25),%r20
	ldw	8(%r25),%r21
	ldw	12(%r25),%r22
	stw	%r19,0(%r26)
	stw	%r20,4(%r26)
	stw	%r21,8(%r26)
	stw	%r22,12(%r26)
	ldw	16(%r25),%r19
	ldw	20(%r25),%r20
	ldw	24(%r25),%r21
	ldw	28(%r25),%r22
	stw	%r19,16(%r26)
	stw	%r20,20(%r26)
	stw	%r21,24(%r26)
	stw	%r22,28(%r26)
	ldw	32(%r25),%r19
	ldw	36(%r25),%r20
	ldw	40(%r25),%r21
	ldw	44(%r25),%r22
	stw	%r19,32(%r26)
	stw	%r20,36(%r26)
	stw	%r21,40(%r26)
	stw	%r22,44(%r26)
	ldw	48(%r25),%r19
	ldw	52(%r25),%r20
	ldw	56(%r25),%r21
	ldw	60(%r25),%r22
	stw	%r19,48(%r26)
	stw	%r20,52(%r26)
	stw	%r21,56(%r26)
	stw	%r22,60(%r26)
	ldo	64(%r26),%r26		/* advance dest */
	ADDIB>	-1,%r1,1b
	ldo	64(%r25),%r25		/* advance source (delay slot) */

	bv	%r0(%r2)
	nop
	.exit

	.procend

/* Deliberate build break if the temp alias region moves above 2 Gb:
 * the depw/depd address forming below assumes it fits in the low range. */
#if (TMPALIAS_MAP_START >= 0x80000000UL)
Warning TMPALIAS_MAP_START changed. If > 2 Gb, code in pacache.S is bogus
#endif

/*
 * NOTE: Code in clear_user_page has a hard coded dependency on the
 *	maximum alias boundary being 4 Mb. We've been assured by the
 *	parisc chip designers that there will not ever be a parisc
 *	chip with a larger alias boundary (Never say never :-) ).
 *
 *	Subtle: the dtlb miss handlers support the temp alias region by
 *	"knowing" that if a dtlb miss happens within the temp alias
 *	region it must have occurred while in clear_user_page. Since
 *	this routine makes use of processor local translations, we
 *	don't want to insert them into the kernel page table. Instead,
 *	we load up some general registers (they need to be registers
 *	which aren't shadowed) with the physical page numbers (preshifted
 *	for tlb insertion) needed to insert the translations. When we
 *	miss on the translation, the dtlb miss handler inserts the
 *	translation into the tlb using these values:
 *
 *	%r26 physical page (shifted for tlb insert) of "to" translation
 *	%r23 physical page (shifted for tlb insert) of "from" translation
 */


	.export clear_user_page_asm,code

/*
 * clear_user_page_asm: zero one 4096-byte page through a temporary
 * alias mapping.
 *   %r26 = physical address of page, %r25 = user virtual address
 *   (used to pick the alias color).
 * Clobbers: %r1, %r26, %r28.
 */
clear_user_page_asm:
	.proc
	.callinfo NO_CALLS
	.entry

	tophys_r1 %r26			/* %r26 = physical addr of the page */

	ldil	L%(TMPALIAS_MAP_START),%r28
#ifdef __LP64__
	extrd,u	%r26,56,32,%r26		/* convert phys addr to tlb insert format */
	depd	%r25,63,22,%r28		/* Form aliased virtual address 'to' */
	depdi	0,63,12,%r28		/* Clear any offset bits */
#else
	extrw,u	%r26,24,25,%r26		/* convert phys addr to tlb insert format */
	depw	%r25,31,22,%r28		/* Form aliased virtual address 'to' */
	depwi	0,31,12,%r28		/* Clear any offset bits */
#endif

	/* Purge any old translation */

	pdtlb	0(%r28)

	ldi	64,%r1			/* 64 iterations x 64 bytes = 4k page */

1:
	stw	%r0,0(%r28)
	stw	%r0,4(%r28)
	stw	%r0,8(%r28)
	stw	%r0,12(%r28)
	stw	%r0,16(%r28)
	stw	%r0,20(%r28)
	stw	%r0,24(%r28)
	stw	%r0,28(%r28)
	stw	%r0,32(%r28)
	stw	%r0,36(%r28)
	stw	%r0,40(%r28)
	stw	%r0,44(%r28)
	stw	%r0,48(%r28)
	stw	%r0,52(%r28)
	stw	%r0,56(%r28)
	stw	%r0,60(%r28)
	ADDIB>	-1,%r1,1b
	ldo	64(%r28),%r28		/* advance (delay slot) */

	bv	%r0(%r2)
	nop
	.exit

	.procend

	.export flush_kernel_dcache_page

/*
 * flush_kernel_dcache_page: write back and invalidate (fdc) one page.
 *   %r26 = kernel virtual page address.
 * 16 fdc per iteration (15 + 1 in the branch delay slot); %r25 is
 * computed as page end minus one stride so the delay-slot fdc covers
 * the final line.  Clobbers: %r1, %r23, %r25, %r26.
 */
flush_kernel_dcache_page:
	.proc
	.callinfo NO_CALLS
	.entry

	ldil	L%dcache_stride,%r1
	ldw	R%dcache_stride(%r1),%r23	/* %r23 = dcache line stride */

#ifdef __LP64__
	depdi,z	1,63-PAGE_SHIFT,1,%r25		/* %r25 = PAGE_SIZE */
#else
	depwi,z	1,31-PAGE_SHIFT,1,%r25
#endif
	add	%r26,%r25,%r25			/* %r25 = end of page */
	sub	%r25,%r23,%r25			/* back off one stride */


1:      fdc,m	%r23(%r26)
	fdc,m	%r23(%r26)
	fdc,m	%r23(%r26)
	fdc,m	%r23(%r26)
	fdc,m	%r23(%r26)
	fdc,m	%r23(%r26)
	fdc,m	%r23(%r26)
	fdc,m	%r23(%r26)
	fdc,m	%r23(%r26)
	fdc,m	%r23(%r26)
	fdc,m	%r23(%r26)
	fdc,m	%r23(%r26)
	fdc,m	%r23(%r26)
	fdc,m	%r23(%r26)
	fdc,m	%r23(%r26)
	CMPB<<	%r26,%r25,1b
	fdc,m	%r23(%r26)		/* 16th fdc in the delay slot */

	sync
	bv	%r0(%r2)
	nop
	.exit

	.procend

	.export purge_kernel_dcache_page

/*
 * purge_kernel_dcache_page: invalidate-without-writeback (pdc) one page.
 *   %r26 = kernel virtual page address.
 * Same unrolled structure as flush_kernel_dcache_page, but pdc discards
 * dirty data instead of writing it back.  Clobbers: %r1, %r23, %r25, %r26.
 */
purge_kernel_dcache_page:
	.proc
	.callinfo NO_CALLS
	.entry

	ldil	L%dcache_stride,%r1
	ldw	R%dcache_stride(%r1),%r23	/* %r23 = dcache line stride */

#ifdef __LP64__
	depdi,z	1,63-PAGE_SHIFT,1,%r25		/* %r25 = PAGE_SIZE */
#else
	depwi,z	1,31-PAGE_SHIFT,1,%r25
#endif
	add	%r26,%r25,%r25			/* %r25 = end of page */
	sub	%r25,%r23,%r25			/* back off one stride */

1:      pdc,m	%r23(%r26)
	pdc,m	%r23(%r26)
	pdc,m	%r23(%r26)
	pdc,m	%r23(%r26)
	pdc,m	%r23(%r26)
	pdc,m	%r23(%r26)
	pdc,m	%r23(%r26)
	pdc,m	%r23(%r26)
	pdc,m	%r23(%r26)
	pdc,m	%r23(%r26)
	pdc,m	%r23(%r26)
	pdc,m	%r23(%r26)
	pdc,m	%r23(%r26)
	pdc,m	%r23(%r26)
	pdc,m	%r23(%r26)
	CMPB<<	%r26,%r25,1b
	pdc,m	%r23(%r26)		/* 16th pdc in the delay slot */

	sync
	bv	%r0(%r2)
	nop
	.exit

	.procend


	.export flush_user_dcache_range_asm

/*
 * flush_user_dcache_range_asm: fdc a user-space range via %sr3.
 *   %r26 = start, %r25 = end (exclusive).
 * Start is aligned down to a cache-line boundary; the fdc sits in the
 * ,n-nullified delay slot, so it executes only while %r26 < %r25.
 * Clobbers: %r1, %r21, %r23, %r26.
 */
flush_user_dcache_range_asm:
	.proc
	.callinfo NO_CALLS
	.entry

	ldil	L%dcache_stride,%r1
	ldw	R%dcache_stride(%r1),%r23
	ldo	-1(%r23),%r21		/* stride-1 = alignment mask */
	ANDCM	%r26,%r21,%r26		/* align start down to a line */

1:      CMPB<<,n %r26,%r25,1b
	fdc,m	%r23(%sr3,%r26)		/* flush + advance (delay slot) */

	sync
	bv	%r0(%r2)
	nop
	.exit

	.procend

	.export flush_kernel_dcache_range_asm

/*
 * flush_kernel_dcache_range_asm: fdc a kernel-space range.
 *   %r26 = start, %r25 = end (exclusive).
 * Same shape as the user variant, but through the current (kernel)
 * space and with a syncdma to drain DMA afterwards.
 * Clobbers: %r1, %r21, %r23, %r26.
 */
flush_kernel_dcache_range_asm:
	.proc
	.callinfo NO_CALLS
	.entry

	ldil	L%dcache_stride,%r1
	ldw	R%dcache_stride(%r1),%r23
	ldo	-1(%r23),%r21		/* stride-1 = alignment mask */
	ANDCM	%r26,%r21,%r26		/* align start down to a line */

1:      CMPB<<,n %r26,%r25,1b
	fdc,m	%r23(%r26)		/* flush + advance (delay slot) */

	sync
	syncdma
	bv	%r0(%r2)
	nop
	.exit

	.procend

	.export flush_user_icache_range_asm

/*
 * flush_user_icache_range_asm: fic a user-space range via %sr3.
 *   %r26 = start, %r25 = end (exclusive).
 * The fic sits in the ,n-nullified delay slot, so it executes only
 * while %r26 < %r25.  Clobbers: %r1, %r21, %r23, %r26.
 */
flush_user_icache_range_asm:
	.proc
	.callinfo NO_CALLS
	.entry

	ldil	L%icache_stride,%r1
	ldw	R%icache_stride(%r1),%r23
	ldo	-1(%r23),%r21		/* stride-1 = alignment mask */
	ANDCM	%r26,%r21,%r26		/* align start down to a line */

1:      CMPB<<,n %r26,%r25,1b
	fic,m	%r23(%sr3,%r26)		/* flush + advance (delay slot) */

	sync
	bv	%r0(%r2)
	nop
	.exit

	.procend

	.export flush_kernel_icache_page

/*
 * flush_kernel_icache_page: fic one page of the instruction cache.
 *   %r26 = kernel virtual page address.
 * 16 fic per iteration (15 + 1 in the branch delay slot); %r25 holds
 * page end minus one stride so the delay-slot fic covers the last line.
 * Clobbers: %r1, %r23, %r25, %r26.
 */
flush_kernel_icache_page:
	.proc
	.callinfo NO_CALLS
	.entry

	ldil	L%icache_stride,%r1
	ldw	R%icache_stride(%r1),%r23	/* %r23 = icache line stride */

#ifdef __LP64__
	depdi,z	1,63-PAGE_SHIFT,1,%r25		/* %r25 = PAGE_SIZE */
#else
	depwi,z	1,31-PAGE_SHIFT,1,%r25
#endif
	add	%r26,%r25,%r25			/* %r25 = end of page */
	sub	%r25,%r23,%r25			/* back off one stride */


1:      fic,m	%r23(%r26)
	fic,m	%r23(%r26)
	fic,m	%r23(%r26)
	fic,m	%r23(%r26)
	fic,m	%r23(%r26)
	fic,m	%r23(%r26)
	fic,m	%r23(%r26)
	fic,m	%r23(%r26)
	fic,m	%r23(%r26)
	fic,m	%r23(%r26)
	fic,m	%r23(%r26)
	fic,m	%r23(%r26)
	fic,m	%r23(%r26)
	fic,m	%r23(%r26)
	fic,m	%r23(%r26)
	CMPB<<	%r26,%r25,1b
	fic,m	%r23(%r26)		/* 16th fic in the delay slot */

	sync
	bv	%r0(%r2)
	nop
	.exit

	.procend

	.export flush_kernel_icache_range_asm

/*
 * flush_kernel_icache_range_asm: fic a kernel-space range.
 *   %r26 = start, %r25 = end (exclusive).
 * Clobbers: %r1, %r21, %r23, %r26.
 */
flush_kernel_icache_range_asm:
	.proc
	.callinfo NO_CALLS
	.entry

	ldil	L%icache_stride,%r1
	ldw	R%icache_stride(%r1),%r23
	ldo	-1(%r23),%r21		/* stride-1 = alignment mask */
	ANDCM	%r26,%r21,%r26		/* align start down to a line */

1:      CMPB<<,n %r26,%r25,1b
	fic,m	%r23(%r26)		/* flush + advance (delay slot) */

	sync
	bv	%r0(%r2)
	nop
	.exit

	.procend

	.align	128

	.export disable_sr_hashing_asm,code

/*
 * disable_sr_hashing_asm: turn off space-register hashing in the CPU's
 * diagnose registers.
 *   %r26 = processor type selector (SRHASH_PCXST / SRHASH_PCXL /
 *	    SRHASH_PA20); any other value is a no-op.
 * Must run in real mode with interrupts off, hence the rfi dance.
 * The mfdiag/mtdiag instructions are hand-encoded as .word because the
 * assembler does not accept the diagnose opcodes.
 * Clobbers: %r1, %r28.
 */
disable_sr_hashing_asm:
	.proc
	.callinfo NO_CALLS
	.entry

	/* Switch to real mode */

	ssm	0,%r0			/* relied upon translation! */
	nop				/* 7 nops while translation is still on */
	nop
	nop
	nop
	nop
	nop
	nop

	rsm	(PSW_SM_Q|PSW_SM_I),%r0	/* disable Q&I to load the iia queue */
	ldil	L%REAL_MODE_PSW, %r1
	ldo	R%REAL_MODE_PSW(%r1), %r1
	mtctl	%r1, %cr22		/* IPSW to be installed by the rfi */
	mtctl	%r0, %cr17		/* IIASQ head: space 0 (real mode) */
	mtctl	%r0, %cr17		/* IIASQ tail */
	ldil	L%PA(1f),%r1
	ldo	R%PA(1f)(%r1),%r1
	mtctl	%r1, %cr18		/* IIAOQ head: physical address of 1f */
	ldo	4(%r1), %r1
	mtctl	%r1, %cr18		/* IIAOQ tail: 1f + 4 */
	rfi				/* resume in real mode at 1f */
	nop

1:      cmpib,=,n SRHASH_PCXST,%r26,srdis_pcxs
	cmpib,=,n SRHASH_PCXL,%r26,srdis_pcxl
	cmpib,=,n SRHASH_PA20,%r26,srdis_pa20
	b,n	srdis_done		/* unknown type: nothing to do */

srdis_pcxs:

	/* Disable Space Register Hashing for PCXS,PCXT,PCXT' */

	.word	0x141c1a00		/* mfdiag %dr0,%r28 */
	.word	0x141c1a00		/* must issue twice */
	depwi	0,18,1,%r28		/* Clear DHE (dcache hash enable) */
	depwi	0,20,1,%r28		/* Clear IHE (icache hash enable) */
	.word	0x141c1600		/* mtdiag %r28,%dr0 */
	.word	0x141c1600		/* must issue twice */
	b,n	srdis_done

srdis_pcxl:

	/* Disable Space Register Hashing for PCXL */

	.word	0x141c0600		/* mfdiag %dr0,%r28 */
	depwi	0,28,2,%r28		/* Clear DHASH_EN & IHASH_EN */
	.word	0x141c0240		/* mtdiag %r28,%dr0 */
	b,n	srdis_done

srdis_pa20:

	/* Disable Space Register Hashing for PCXU,PCXU+,PCXW,PCXW+ */

	.word	0x144008bc		/* mfdiag %dr2,%r28 */
	depdi	0,54,1,%r28		/* clear DIAG_SPHASH_ENAB (bit 54) */
	.word	0x145c1840		/* mtdiag %r28,%dr2 */

srdis_done:

	/* Switch back to virtual mode */

	rsm	PSW_SM_Q,%r0		/* clear Q bit to load iia queue */
	ldil	L%KERNEL_PSW, %r1
	ldo	R%KERNEL_PSW(%r1), %r1
	mtctl	%r1, %cr22		/* IPSW = kernel PSW */
	mtctl	%r0, %cr17		/* IIASQ head */
	mtctl	%r0, %cr17		/* IIASQ tail */
	ldil	L%(2f), %r1
	ldo	R%(2f)(%r1), %r1
	mtctl	%r1, %cr18		/* IIAOQ head: virtual address of 2f */
	ldo	4(%r1), %r1
	mtctl	%r1, %cr18		/* IIAOQ tail: 2f + 4 */
	rfi				/* back to virtual mode at 2f */
	nop

2:      bv	%r0(%r2)
	nop
	.exit

	.procend

	.end