1/* 2 * Copyright (c) 1999 Apple Computer, Inc. All rights reserved. 3 * 4 * @APPLE_LICENSE_HEADER_START@ 5 * 6 * This file contains Original Code and/or Modifications of Original Code 7 * as defined in and that are subject to the Apple Public Source License 8 * Version 2.0 (the 'License'). You may not use this file except in 9 * compliance with the License. Please obtain a copy of the License at 10 * http://www.opensource.apple.com/apsl/ and read it before using this 11 * file. 12 * 13 * The Original Code and all software distributed under the License are 14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER 15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, 16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. 18 * Please see the License for the specific language governing rights and 19 * limitations under the License. 20 * 21 * @APPLE_LICENSE_HEADER_END@ 22 */ 23#ifndef RLD 24#include <stdio.h> 25#include <limits.h> 26#endif /* RLD */ 27#include <mach-o/fat.h> 28#include <stuff/best_arch.h> 29 30#ifndef RLD 31/* 32 * cpusubtype_findbestarch() is passed a cputype and cpusubtype and a set of 33 * fat_arch structs and selects the best one that matches (if any) and returns 34 * a pointer to that fat_arch struct (or NULL). The fat_arch structs must be 35 * in the host byte sex and correct such that the fat_archs really points to 36 * enough memory for nfat_arch structs. It is possible that this routine could 37 * fail if new cputypes or cpusubtypes are added and an old version of this 38 * routine is used. But if there is an exact match between the cputype and 39 * cpusubtype and one of the fat_arch structs this routine will always succeed. 
 */
__private_extern__
struct fat_arch *
cpusubtype_findbestarch(
cpu_type_t cputype,
cpu_subtype_t cpusubtype,
struct fat_arch *fat_archs,
uint32_t nfat_archs)
{
    uint32_t i;
    long lowest_family, lowest_model, lowest_index;

	/*
	 * Look for the first exact match (the capability bits in
	 * CPU_SUBTYPE_MASK are ignored when comparing subtypes).
	 */
	for(i = 0; i < nfat_archs; i++){
	    if(fat_archs[i].cputype == cputype &&
	       (fat_archs[i].cpusubtype & ~CPU_SUBTYPE_MASK) ==
	       (cpusubtype & ~CPU_SUBTYPE_MASK))
		return(fat_archs + i);
	}

	/*
	 * An exact match was not found so find the next best match which is
	 * cputype dependent.  The order of the loops below encodes the
	 * preference ranking, and several cases deliberately fall through.
	 */
	switch(cputype){

	/* 64-bit architectures */

	case CPU_TYPE_POWERPC64:
	    /*
	     * An exact match was not found.  So for all the PowerPC64 subtypes
	     * pick the subtype from the following order starting from a subtype
	     * that will work (contains 64-bit instructions or altivec if
	     * needed):
	     *	970 (currently only the one 64-bit subtype)
	     * For an unknown subtype pick only the ALL type if it exists.
	     */
	    switch(cpusubtype & ~CPU_SUBTYPE_MASK){
	    case CPU_SUBTYPE_POWERPC_ALL:
		/*
		 * The CPU_SUBTYPE_POWERPC_ALL is only used by the development
		 * environment tools when building a generic ALL type binary.
		 * In the case of a non-exact match we pick the most current
		 * processor.
		 */
	    case CPU_SUBTYPE_POWERPC_970:
		for(i = 0; i < nfat_archs; i++){
		    if(fat_archs[i].cputype != cputype)
			continue;
		    if((fat_archs[i].cpusubtype & ~CPU_SUBTYPE_MASK) ==
		       CPU_SUBTYPE_POWERPC_970)
			return(fat_archs + i);
		}
		/* FALLTHROUGH: settle for the ALL subtype if present */
	    default:
		for(i = 0; i < nfat_archs; i++){
		    if(fat_archs[i].cputype != cputype)
			continue;
		    if((fat_archs[i].cpusubtype & ~CPU_SUBTYPE_MASK) ==
		       CPU_SUBTYPE_POWERPC_ALL)
			return(fat_archs + i);
		}
	    }
	    break;

	case CPU_TYPE_X86_64:
	    /*
	     * We have no subtypes for x86-64, so treat all cases the same here
	     * (CPU_SUBTYPE_X86_64_ALL has the same value as
	     * CPU_SUBTYPE_I386_ALL).
	     */
	    switch(cpusubtype & ~CPU_SUBTYPE_MASK){
	    default:
		for(i = 0; i < nfat_archs; i++){
		    if(fat_archs[i].cputype != cputype)
			continue;
		    if((fat_archs[i].cpusubtype & ~CPU_SUBTYPE_MASK) ==
		       CPU_SUBTYPE_I386_ALL)
			return(fat_archs + i);
		}
	    }
	    break;

	/* 32-bit architectures */

	case CPU_TYPE_I386:
	    switch(cpusubtype & ~CPU_SUBTYPE_MASK){
	    default:
		/*
		 * Intel cpusubtypes after the pentium (same as 586) are handled
		 * such that they require an exact match or they can use the
		 * pentium.  If that is not found call into the loop for the
		 * earlier subtypes.
		 */
		for(i = 0; i < nfat_archs; i++){
		    if(fat_archs[i].cputype != cputype)
			continue;
		    if((fat_archs[i].cpusubtype & ~CPU_SUBTYPE_MASK) ==
		       CPU_SUBTYPE_PENT)
			return(fat_archs + i);
		}
		/* FALLTHROUGH */
	    case CPU_SUBTYPE_PENT:
	    case CPU_SUBTYPE_486SX:
		/*
		 * Since an exact match was not found look for the i486 else
		 * break into the loop to look for the i386_ALL.
		 */
		for(i = 0; i < nfat_archs; i++){
		    if(fat_archs[i].cputype != cputype)
			continue;
		    if((fat_archs[i].cpusubtype & ~CPU_SUBTYPE_MASK) ==
		       CPU_SUBTYPE_486)
			return(fat_archs + i);
		}
		break;
	    case CPU_SUBTYPE_I386_ALL:
	    /* case CPU_SUBTYPE_I386: same as above */
	    case CPU_SUBTYPE_486:
		break;
	    }
	    for(i = 0; i < nfat_archs; i++){
		if(fat_archs[i].cputype != cputype)
		    continue;
		if((fat_archs[i].cpusubtype & ~CPU_SUBTYPE_MASK) ==
		   CPU_SUBTYPE_I386_ALL)
		    return(fat_archs + i);
	    }

	    /*
	     * A match failed, promote as little as possible.
	     */
	    for(i = 0; i < nfat_archs; i++){
		if(fat_archs[i].cputype != cputype)
		    continue;
		if((fat_archs[i].cpusubtype & ~CPU_SUBTYPE_MASK) ==
		   CPU_SUBTYPE_486)
		    return(fat_archs + i);
	    }
	    for(i = 0; i < nfat_archs; i++){
		if(fat_archs[i].cputype != cputype)
		    continue;
		if((fat_archs[i].cpusubtype & ~CPU_SUBTYPE_MASK) ==
		   CPU_SUBTYPE_486SX)
		    return(fat_archs + i);
	    }
	    for(i = 0; i < nfat_archs; i++){
		if(fat_archs[i].cputype != cputype)
		    continue;
		if((fat_archs[i].cpusubtype & ~CPU_SUBTYPE_MASK) ==
		   CPU_SUBTYPE_586)
		    return(fat_archs + i);
	    }
	    /*
	     * Now look for the lowest family and in that the lowest model.
	     */
	    lowest_family = CPU_SUBTYPE_INTEL_FAMILY_MAX + 1;
	    for(i = 0; i < nfat_archs; i++){
		if(fat_archs[i].cputype != cputype)
		    continue;
		if(CPU_SUBTYPE_INTEL_FAMILY(fat_archs[i].cpusubtype &
					    ~CPU_SUBTYPE_MASK) <
		   lowest_family)
		    lowest_family = CPU_SUBTYPE_INTEL_FAMILY(
				fat_archs[i].cpusubtype & ~CPU_SUBTYPE_MASK);
	    }
	    /* if no intel cputypes found return NULL */
	    if(lowest_family == CPU_SUBTYPE_INTEL_FAMILY_MAX + 1)
		return(NULL);
	    lowest_model = LONG_MAX;
	    lowest_index = -1;
	    for(i = 0; i < nfat_archs; i++){
		if(fat_archs[i].cputype != cputype)
		    continue;
		if(CPU_SUBTYPE_INTEL_FAMILY(fat_archs[i].cpusubtype &
					    ~CPU_SUBTYPE_MASK) ==
		   lowest_family){
		    if(CPU_SUBTYPE_INTEL_MODEL(fat_archs[i].cpusubtype &
					       ~CPU_SUBTYPE_MASK) <
		       lowest_model){
			lowest_model = CPU_SUBTYPE_INTEL_MODEL(
				fat_archs[i].cpusubtype & ~CPU_SUBTYPE_MASK);
		        lowest_index = i;
		    }
		}
	    }
	    /*
	     * lowest_index can not still be -1 here: the lowest_family scan
	     * above found at least one arch with this cputype, and that same
	     * arch sets lowest_model/lowest_index in the loop above.
	     */
	    return(fat_archs + lowest_index);
	case CPU_TYPE_MC680x0:
	    for(i = 0; i < nfat_archs; i++){
		if(fat_archs[i].cputype != cputype)
		    continue;
		if((fat_archs[i].cpusubtype & ~CPU_SUBTYPE_MASK) ==
		   CPU_SUBTYPE_MC680x0_ALL)
		    return(fat_archs + i);
	    }
	    /*
	     * Try to promote if starting from CPU_SUBTYPE_MC680x0_ALL and
	     * favor the CPU_SUBTYPE_MC68040 over the CPU_SUBTYPE_MC68030_ONLY.
	     */
	    if((cpusubtype & ~CPU_SUBTYPE_MASK) == CPU_SUBTYPE_MC680x0_ALL){
		for(i = 0; i < nfat_archs; i++){
		    if(fat_archs[i].cputype != cputype)
			continue;
		    if((fat_archs[i].cpusubtype & ~CPU_SUBTYPE_MASK) ==
		       CPU_SUBTYPE_MC68040)
			return(fat_archs + i);
		}
		for(i = 0; i < nfat_archs; i++){
		    if(fat_archs[i].cputype != cputype)
			continue;
		    if((fat_archs[i].cpusubtype & ~CPU_SUBTYPE_MASK) ==
		       CPU_SUBTYPE_MC68030_ONLY)
			return(fat_archs + i);
		}
	    }
	    break;
	case CPU_TYPE_POWERPC:
	    /*
	     * An exact match was not found.  So for all the PowerPC subtypes
	     * pick the subtype from the following order starting from a subtype
	     * that will work (contains 64-bit instructions or altivec if
	     * needed):
	     *	970, 7450, 7400, 750, 604e, 604, 603ev, 603e, 603, ALL
	     * Note the 601 is NOT in the list above.  It is only picked via
	     * an exact match.  For an unknown subtype pick only the ALL type if
	     * it exists.
	     */
	    switch(cpusubtype & ~CPU_SUBTYPE_MASK){
	    case CPU_SUBTYPE_POWERPC_ALL:
		/*
		 * The CPU_SUBTYPE_POWERPC_ALL is only used by the development
		 * environment tools when building a generic ALL type binary.
		 * In the case of a non-exact match we pick the most current
		 * processor.
		 */
	    case CPU_SUBTYPE_POWERPC_970:
		for(i = 0; i < nfat_archs; i++){
		    if(fat_archs[i].cputype != cputype)
			continue;
		    if((fat_archs[i].cpusubtype & ~CPU_SUBTYPE_MASK) ==
		       CPU_SUBTYPE_POWERPC_970)
			return(fat_archs + i);
		}
		/* FALLTHROUGH */
	    case CPU_SUBTYPE_POWERPC_7450:
	    case CPU_SUBTYPE_POWERPC_7400:
		for(i = 0; i < nfat_archs; i++){
		    if(fat_archs[i].cputype != cputype)
			continue;
		    if((fat_archs[i].cpusubtype & ~CPU_SUBTYPE_MASK) ==
		       CPU_SUBTYPE_POWERPC_7450)
			return(fat_archs + i);
		}
		for(i = 0; i < nfat_archs; i++){
		    if(fat_archs[i].cputype != cputype)
			continue;
		    if((fat_archs[i].cpusubtype & ~CPU_SUBTYPE_MASK) ==
		       CPU_SUBTYPE_POWERPC_7400)
			return(fat_archs + i);
		}
		/* FALLTHROUGH */
	    case CPU_SUBTYPE_POWERPC_750:
	    case CPU_SUBTYPE_POWERPC_604e:
	    case CPU_SUBTYPE_POWERPC_604:
	    case CPU_SUBTYPE_POWERPC_603ev:
	    case CPU_SUBTYPE_POWERPC_603e:
	    case CPU_SUBTYPE_POWERPC_603:
		for(i = 0; i < nfat_archs; i++){
		    if(fat_archs[i].cputype != cputype)
			continue;
		    if((fat_archs[i].cpusubtype & ~CPU_SUBTYPE_MASK) ==
		       CPU_SUBTYPE_POWERPC_750)
			return(fat_archs + i);
		}
		for(i = 0; i < nfat_archs; i++){
		    if(fat_archs[i].cputype != cputype)
			continue;
		    if((fat_archs[i].cpusubtype & ~CPU_SUBTYPE_MASK) ==
		       CPU_SUBTYPE_POWERPC_604e)
			return(fat_archs + i);
		}
		for(i = 0; i < nfat_archs; i++){
		    if(fat_archs[i].cputype != cputype)
			continue;
		    if((fat_archs[i].cpusubtype & ~CPU_SUBTYPE_MASK)
		       == CPU_SUBTYPE_POWERPC_604)
			return(fat_archs + i);
		}
		for(i = 0; i < nfat_archs; i++){
		    if(fat_archs[i].cputype != cputype)
			continue;
		    if((fat_archs[i].cpusubtype & ~CPU_SUBTYPE_MASK) ==
		       CPU_SUBTYPE_POWERPC_603ev)
			return(fat_archs + i);
		}
		for(i = 0; i < nfat_archs; i++){
		    if(fat_archs[i].cputype != cputype)
			continue;
		    if((fat_archs[i].cpusubtype & ~CPU_SUBTYPE_MASK) ==
		       CPU_SUBTYPE_POWERPC_603e)
			return(fat_archs + i);
		}
		for(i = 0; i < nfat_archs; i++){
		    if(fat_archs[i].cputype != cputype)
			continue;
		    if((fat_archs[i].cpusubtype & ~CPU_SUBTYPE_MASK) ==
		       CPU_SUBTYPE_POWERPC_603)
			return(fat_archs + i);
		}
		/* FALLTHROUGH */
	    default:
		for(i = 0; i < nfat_archs; i++){
		    if(fat_archs[i].cputype != cputype)
			continue;
		    if((fat_archs[i].cpusubtype & ~CPU_SUBTYPE_MASK) ==
		       CPU_SUBTYPE_POWERPC_ALL)
			return(fat_archs + i);
		}
	    }
	    break;
	case CPU_TYPE_VEO:
	    /*
	     * An exact match was not found.  So for the VEO subtypes if VEO1
	     * or VEO3 is wanted then VEO2 can be used.  If VEO4 is wanted then
	     * either VEO2 or (preferably) VEO3 can be used.  But if VEO2 is
	     * wanted only VEO2 can be used.  Any unknown values don't match.
	     */
	    switch(cpusubtype & ~CPU_SUBTYPE_MASK){
	    case CPU_SUBTYPE_VEO_1:
	    case CPU_SUBTYPE_VEO_3:
		for(i = 0; i < nfat_archs; i++){
		    if(fat_archs[i].cputype != cputype)
			continue;
		    if((fat_archs[i].cpusubtype & ~CPU_SUBTYPE_MASK) ==
		       CPU_SUBTYPE_VEO_2)
			return(fat_archs + i);
		}
		/* FALLTHROUGH */
	    case CPU_SUBTYPE_VEO_4:
		for(i = 0; i < nfat_archs; i++){
		    if(fat_archs[i].cputype != cputype)
			continue;
		    if((fat_archs[i].cpusubtype & ~CPU_SUBTYPE_MASK) ==
		       CPU_SUBTYPE_VEO_3)
			return(fat_archs + i);
		}
		for(i = 0; i < nfat_archs; i++){
		    if(fat_archs[i].cputype != cputype)
			continue;
		    if((fat_archs[i].cpusubtype & ~CPU_SUBTYPE_MASK) ==
		       CPU_SUBTYPE_VEO_2)
			return(fat_archs + i);
		}
	    }
	    break;
	case CPU_TYPE_MC88000:
	    for(i = 0; i < nfat_archs; i++){
		if(fat_archs[i].cputype != cputype)
		    continue;
		if((fat_archs[i].cpusubtype & ~CPU_SUBTYPE_MASK) ==
		   CPU_SUBTYPE_MC88000_ALL)
		    return(fat_archs + i);
	    }
	    break;
	case CPU_TYPE_I860:
	    for(i = 0; i < nfat_archs; i++){
		if(fat_archs[i].cputype != cputype)
		    continue;
		if((fat_archs[i].cpusubtype & ~CPU_SUBTYPE_MASK) ==
		   CPU_SUBTYPE_I860_ALL)
		    return(fat_archs + i);
	    }
	    break;
	case CPU_TYPE_HPPA:
	    for(i = 0; i < nfat_archs; i++){
		if(fat_archs[i].cputype != cputype)
		    continue;
		if((fat_archs[i].cpusubtype & ~CPU_SUBTYPE_MASK) ==
		   CPU_SUBTYPE_HPPA_ALL)
		    return(fat_archs + i);
	    }
	    break;
	case CPU_TYPE_SPARC:
	    for(i = 0; i < nfat_archs; i++){
		if(fat_archs[i].cputype != cputype)
		    continue;
		if((fat_archs[i].cpusubtype & ~CPU_SUBTYPE_MASK) ==
		   CPU_SUBTYPE_SPARC_ALL)
		    return(fat_archs + i);
	    }
	    break;
	case CPU_TYPE_ARM:
	    /*
	     * If it weren't for xscale, we could have a simple
	     * hierarchy like ppc.  However, xscale has instructions
	     * which aren't present on v5 or v6.  Here's the acceptable
	     * fat slices for each ARM subtype, for most to least
	     * preferred:
	     *	v4t: v4t, ALL
	     *	v5: v5, v4t, ALL
	     *	xscale: xscale, v4t, ALL
	     *	v6: v7, v6, v5, v4t, ALL
	     *	ALL: v6, v5, xscale, v4t, ALL
	     * NOTE(review): the code below also promotes through the newer
	     * v7k/v7s/v7f/v7 chain and falls back to v6m/v7m/v7em, which the
	     * table above predates — confirm the table against the code.
	     * Also note these ARM compares use the raw cpusubtype values
	     * (capability bits included), unlike the other cputypes above.
	     */
	    if(cpusubtype == CPU_SUBTYPE_ARM_ALL ||
	       cpusubtype == CPU_SUBTYPE_ARM_V7K){
		for(i = 0; i < nfat_archs; i++){
		    if(fat_archs[i].cputype == cputype &&
		       fat_archs[i].cpusubtype == CPU_SUBTYPE_ARM_V7S)
			return(fat_archs + i);
		}
	    }
	    if(cpusubtype == CPU_SUBTYPE_ARM_ALL ||
	       cpusubtype == CPU_SUBTYPE_ARM_V7S){
		for(i = 0; i < nfat_archs; i++){
		    if(fat_archs[i].cputype == cputype &&
		       fat_archs[i].cpusubtype == CPU_SUBTYPE_ARM_V7F)
			return(fat_archs + i);
		}
	    }
	    if(cpusubtype == CPU_SUBTYPE_ARM_ALL ||
	       cpusubtype == CPU_SUBTYPE_ARM_V7F){
		for(i = 0; i < nfat_archs; i++){
		    if(fat_archs[i].cputype == cputype &&
		       fat_archs[i].cpusubtype == CPU_SUBTYPE_ARM_V7)
			return(fat_archs + i);
		}
	    }
	    if(cpusubtype == CPU_SUBTYPE_ARM_ALL ||
	       cpusubtype == CPU_SUBTYPE_ARM_V7 ||
	       cpusubtype == CPU_SUBTYPE_ARM_V6){
		for(i = 0; i < nfat_archs; i++){
		    if(fat_archs[i].cputype == cputype &&
		       fat_archs[i].cpusubtype == CPU_SUBTYPE_ARM_V6)
			return(fat_archs + i);
		}
	    }
	    if(cpusubtype == CPU_SUBTYPE_ARM_ALL ||
	       cpusubtype == CPU_SUBTYPE_ARM_V6 ||
	       cpusubtype == CPU_SUBTYPE_ARM_V5TEJ){
		for(i = 0; i < nfat_archs; i++){
		    if(fat_archs[i].cputype == cputype &&
		       fat_archs[i].cpusubtype == CPU_SUBTYPE_ARM_V5TEJ)
			return(fat_archs + i);
		}
	    }
	    if(cpusubtype == CPU_SUBTYPE_ARM_ALL ||
	       cpusubtype == CPU_SUBTYPE_ARM_XSCALE){
		for(i = 0; i < nfat_archs; i++){
		    if(fat_archs[i].cputype == cputype &&
		       fat_archs[i].cpusubtype == CPU_SUBTYPE_ARM_XSCALE)
			return(fat_archs + i);
		}
	    }
	    for(i = 0; i < nfat_archs; i++){
		if(fat_archs[i].cputype == cputype &&
		   fat_archs[i].cpusubtype == CPU_SUBTYPE_ARM_V4T)
		    return(fat_archs + i);
	    }
	    for(i = 0; i < nfat_archs; i++){
		if(fat_archs[i].cputype == cputype &&
		   fat_archs[i].cpusubtype == CPU_SUBTYPE_ARM_V6M)
		    return(fat_archs + i);
	    }
	    for(i = 0; i < nfat_archs; i++){
		if(fat_archs[i].cputype == cputype &&
		   fat_archs[i].cpusubtype == CPU_SUBTYPE_ARM_V7M)
		    return(fat_archs + i);
	    }
	    for(i = 0; i < nfat_archs; i++){
		if(fat_archs[i].cputype == cputype &&
		   fat_archs[i].cpusubtype == CPU_SUBTYPE_ARM_V7EM)
		    return(fat_archs + i);
	    }
	    for(i = 0; i < nfat_archs; i++){
		if(fat_archs[i].cputype == cputype &&
		   fat_archs[i].cpusubtype == CPU_SUBTYPE_ARM_ALL)
		    return(fat_archs + i);
	    }
	    /* FALLTHROUGH: nothing usable for ARM, give up */

	default:
	    return(NULL);
	}
	return(NULL);
}
#endif /* RLD */

/*
 * cpusubtype_combine() returns the resulting cpusubtype when combining two
 * different cpusubtypes for the specified cputype.  If the two cpusubtypes
 * can't be combined (the specific subtypes are mutually exclusive) -1 is
 * returned indicating it is an error to combine them.  This can also fail and
 * return -1 if new cputypes or cpusubtypes are added and an old version of
 * this routine is used.  But if the cpusubtypes are the same they can always
 * be combined and this routine will return the cpusubtype passed in.
529 */ 530__private_extern__ 531cpu_subtype_t 532cpusubtype_combine( 533cpu_type_t cputype, 534cpu_subtype_t cpusubtype1, 535cpu_subtype_t cpusubtype2) 536{ 537 /* 538 * We now combine any i386 or x86_64 subtype to the ALL subtype. 539 */ 540 if(cputype == CPU_TYPE_I386 || cputype == CPU_TYPE_X86_64) 541 return(CPU_SUBTYPE_I386_ALL); 542 543 if((cpusubtype1 & ~CPU_SUBTYPE_MASK) == 544 (cpusubtype2 & ~CPU_SUBTYPE_MASK)) 545 return(cpusubtype1); 546 547 switch(cputype){ 548 case CPU_TYPE_MC680x0: 549 if((cpusubtype1 & ~CPU_SUBTYPE_MASK) != CPU_SUBTYPE_MC680x0_ALL && 550 (cpusubtype1 & ~CPU_SUBTYPE_MASK) != CPU_SUBTYPE_MC68030_ONLY && 551 (cpusubtype1 & ~CPU_SUBTYPE_MASK) != CPU_SUBTYPE_MC68040) 552 return((cpu_subtype_t)-1); 553 if((cpusubtype2 & ~CPU_SUBTYPE_MASK) != CPU_SUBTYPE_MC680x0_ALL && 554 (cpusubtype2 & ~CPU_SUBTYPE_MASK) != CPU_SUBTYPE_MC68030_ONLY && 555 (cpusubtype2 & ~CPU_SUBTYPE_MASK) != CPU_SUBTYPE_MC68040) 556 return((cpu_subtype_t)-1); 557 558 if((cpusubtype1 & ~CPU_SUBTYPE_MASK) == CPU_SUBTYPE_MC68030_ONLY && 559 (cpusubtype2 & ~CPU_SUBTYPE_MASK) == CPU_SUBTYPE_MC68040) 560 return((cpu_subtype_t)-1); 561 if((cpusubtype1 & ~CPU_SUBTYPE_MASK) == CPU_SUBTYPE_MC68040 && 562 (cpusubtype2 & ~CPU_SUBTYPE_MASK) == CPU_SUBTYPE_MC68030_ONLY) 563 return((cpu_subtype_t)-1); 564 565 if((cpusubtype1 & ~CPU_SUBTYPE_MASK) == CPU_SUBTYPE_MC68030_ONLY || 566 (cpusubtype2 & ~CPU_SUBTYPE_MASK) == CPU_SUBTYPE_MC68030_ONLY) 567 return(CPU_SUBTYPE_MC68030_ONLY); 568 569 if((cpusubtype1 & ~CPU_SUBTYPE_MASK) == CPU_SUBTYPE_MC68040 || 570 (cpusubtype2 & ~CPU_SUBTYPE_MASK) == CPU_SUBTYPE_MC68040) 571 return(CPU_SUBTYPE_MC68040); 572 break; /* logically can't get here */ 573 574 case CPU_TYPE_POWERPC: 575 /* 576 * Combining with the ALL type becomes the other type. Combining 577 * anything with the 601 becomes 601. All other non exact matches 578 * combine to the higher value subtype. 
579 */ 580 if((cpusubtype1 & ~CPU_SUBTYPE_MASK) == CPU_SUBTYPE_POWERPC_ALL) 581 return(cpusubtype2); 582 if((cpusubtype2 & ~CPU_SUBTYPE_MASK) == CPU_SUBTYPE_POWERPC_ALL) 583 return(cpusubtype1); 584 585 if((cpusubtype1 & ~CPU_SUBTYPE_MASK) == CPU_SUBTYPE_POWERPC_601 || 586 (cpusubtype2 & ~CPU_SUBTYPE_MASK) == CPU_SUBTYPE_POWERPC_601) 587 return(CPU_SUBTYPE_POWERPC_601); 588 589 if((cpusubtype1 & ~CPU_SUBTYPE_MASK) > 590 (cpusubtype2 & ~CPU_SUBTYPE_MASK)) 591 return(cpusubtype1); 592 else 593 return(cpusubtype2); 594 break; /* logically can't get here */ 595 596 case CPU_TYPE_POWERPC64: 597 /* 598 * Combining with the ALL type becomes the other type. All other 599 * non exact matches combine to the higher value subtype. 600 */ 601 if((cpusubtype1 & ~CPU_SUBTYPE_MASK) == CPU_SUBTYPE_POWERPC_ALL) 602 return(cpusubtype2); 603 if((cpusubtype2 & ~CPU_SUBTYPE_MASK) == CPU_SUBTYPE_POWERPC_ALL) 604 return(cpusubtype1); 605 606 if((cpusubtype1 & ~CPU_SUBTYPE_MASK) > 607 (cpusubtype2 & ~CPU_SUBTYPE_MASK)) 608 return(cpusubtype1); 609 else 610 return(cpusubtype2); 611 break; /* logically can't get here */ 612 613 case CPU_TYPE_VEO: 614 /* 615 * Combining VEO1 with VEO2 returns VEO1. Any unknown values don't 616 * combine. 617 */ 618 if((cpusubtype1 & ~CPU_SUBTYPE_MASK) == CPU_SUBTYPE_VEO_1 && 619 (cpusubtype2 & ~CPU_SUBTYPE_MASK) == CPU_SUBTYPE_VEO_2) 620 return(CPU_SUBTYPE_VEO_1); 621 if((cpusubtype1 & ~CPU_SUBTYPE_MASK) == CPU_SUBTYPE_VEO_2 && 622 (cpusubtype2 & ~CPU_SUBTYPE_MASK) == CPU_SUBTYPE_VEO_1) 623 return(CPU_SUBTYPE_VEO_1); 624 /* 625 * Combining VEO3 with VEO2 returns VEO3. Any unknown values don't 626 * combine. 
627 */ 628 if((cpusubtype1 & ~CPU_SUBTYPE_MASK) == CPU_SUBTYPE_VEO_3 && 629 (cpusubtype2 & ~CPU_SUBTYPE_MASK) == CPU_SUBTYPE_VEO_2) 630 return(CPU_SUBTYPE_VEO_3); 631 if((cpusubtype1 & ~CPU_SUBTYPE_MASK) == CPU_SUBTYPE_VEO_2 && 632 (cpusubtype2 & ~CPU_SUBTYPE_MASK) == CPU_SUBTYPE_VEO_3) 633 return(CPU_SUBTYPE_VEO_3); 634 /* 635 * Combining VEO4 with VEO2 or VEO3 returns VEO4. Any unknown 636 * values don't combine. 637 */ 638 if((cpusubtype1 & ~CPU_SUBTYPE_MASK) == CPU_SUBTYPE_VEO_4 && 639 ((cpusubtype2 & ~CPU_SUBTYPE_MASK) == CPU_SUBTYPE_VEO_2 640 || (cpusubtype2 & ~CPU_SUBTYPE_MASK) == CPU_SUBTYPE_VEO_3)) 641 return(CPU_SUBTYPE_VEO_4); 642 if(((cpusubtype1 & ~CPU_SUBTYPE_MASK) == CPU_SUBTYPE_VEO_2 643 || (cpusubtype1 & ~CPU_SUBTYPE_MASK) == CPU_SUBTYPE_VEO_3) && 644 (cpusubtype2 & ~CPU_SUBTYPE_MASK) == CPU_SUBTYPE_VEO_4) 645 return(CPU_SUBTYPE_VEO_4); 646 return((cpu_subtype_t)-1); 647 break; /* logically can't get here */ 648 649 case CPU_TYPE_MC88000: 650 if((cpusubtype1 & ~CPU_SUBTYPE_MASK) != CPU_SUBTYPE_MC88000_ALL && 651 (cpusubtype1 & ~CPU_SUBTYPE_MASK) != CPU_SUBTYPE_MC88110) 652 return((cpu_subtype_t)-1); 653 if((cpusubtype2 & ~CPU_SUBTYPE_MASK) != CPU_SUBTYPE_MC88000_ALL && 654 (cpusubtype2 & ~CPU_SUBTYPE_MASK) != CPU_SUBTYPE_MC88110) 655 return((cpu_subtype_t)-1); 656 657 if((cpusubtype1 & ~CPU_SUBTYPE_MASK) == CPU_SUBTYPE_MC88110 || 658 (cpusubtype2 & ~CPU_SUBTYPE_MASK) == CPU_SUBTYPE_MC88110) 659 return(CPU_SUBTYPE_MC88110); 660 661 break; /* logically can't get here */ 662 663 case CPU_TYPE_I860: 664 if((cpusubtype1 & ~CPU_SUBTYPE_MASK) != CPU_SUBTYPE_I860_ALL && 665 (cpusubtype1 & ~CPU_SUBTYPE_MASK) != CPU_SUBTYPE_I860_860) 666 return((cpu_subtype_t)-1); 667 if((cpusubtype2 & ~CPU_SUBTYPE_MASK) != CPU_SUBTYPE_I860_ALL && 668 (cpusubtype2 & ~CPU_SUBTYPE_MASK) != CPU_SUBTYPE_I860_860) 669 return((cpu_subtype_t)-1); 670 671 if((cpusubtype1 & ~CPU_SUBTYPE_MASK) == CPU_SUBTYPE_I860_860 || 672 (cpusubtype2 & ~CPU_SUBTYPE_MASK) == CPU_SUBTYPE_I860_860) 
673 return(CPU_SUBTYPE_I860_860); 674 break; /* logically can't get here */ 675 676 case CPU_TYPE_HPPA: 677 if((cpusubtype1 & ~CPU_SUBTYPE_MASK) != CPU_SUBTYPE_HPPA_ALL && 678 (cpusubtype1 & ~CPU_SUBTYPE_MASK) != CPU_SUBTYPE_HPPA_7100LC) 679 return((cpu_subtype_t)-1); 680 if((cpusubtype2 & ~CPU_SUBTYPE_MASK) != CPU_SUBTYPE_HPPA_ALL && 681 (cpusubtype2 & ~CPU_SUBTYPE_MASK) != CPU_SUBTYPE_HPPA_7100LC) 682 return((cpu_subtype_t)-1); 683 684 return(CPU_SUBTYPE_HPPA_7100LC); 685 break; /* logically can't get here */ 686 687 case CPU_TYPE_SPARC: 688 if((cpusubtype1 & ~CPU_SUBTYPE_MASK) != CPU_SUBTYPE_SPARC_ALL) 689 return((cpu_subtype_t)-1); 690 if((cpusubtype2 & ~CPU_SUBTYPE_MASK) != CPU_SUBTYPE_SPARC_ALL) 691 return((cpu_subtype_t)-1); 692 break; /* logically can't get here */ 693 694 case CPU_TYPE_ARM: 695 /* 696 * Combinability matrix for ARM: 697 * V4T V5 XSCALE V6 V7 ALL 698 * ~~~ ~~ ~~~~~~ ~~ ~~ ~~~ 699 * V4T V4T V5 XSCALE V6 V7 ALL 700 * V5 V5 V5 -- V6 V7 ALL 701 * XSCALE XSCALE -- XSCALE -- -- ALL 702 * V6 V6 V6 -- V6 V7 ALL 703 * V7 V7 V7 -- V7 V7 ALL 704 * ALL ALL ALL ALL ALL ALL ALL 705 */ 706 if((cpusubtype1 & ~CPU_SUBTYPE_MASK) == CPU_SUBTYPE_ARM_ALL) 707 return(cpusubtype2); 708 if((cpusubtype2 & ~CPU_SUBTYPE_MASK) == CPU_SUBTYPE_ARM_ALL) 709 return(cpusubtype1); 710 switch((cpusubtype1 & ~CPU_SUBTYPE_MASK)){ 711 case CPU_SUBTYPE_ARM_V7: 712 switch((cpusubtype2 & ~CPU_SUBTYPE_MASK)){ 713 case CPU_SUBTYPE_ARM_XSCALE: 714 return((cpu_subtype_t)-1); 715 default: 716 return(CPU_SUBTYPE_ARM_V7); 717 } 718 case CPU_SUBTYPE_ARM_V6: 719 switch((cpusubtype2 & ~CPU_SUBTYPE_MASK)){ 720 case CPU_SUBTYPE_ARM_XSCALE: 721 return((cpu_subtype_t)-1); 722 default: 723 return(CPU_SUBTYPE_ARM_V6); 724 } 725 case CPU_SUBTYPE_ARM_XSCALE: 726 switch((cpusubtype2 & ~CPU_SUBTYPE_MASK)){ 727 case CPU_SUBTYPE_ARM_V7: 728 case CPU_SUBTYPE_ARM_V6: 729 case CPU_SUBTYPE_ARM_V5TEJ: 730 return((cpu_subtype_t)-1); 731 default: 732 return(CPU_SUBTYPE_ARM_XSCALE); 733 } 734 case 
CPU_SUBTYPE_ARM_V5TEJ: 735 switch((cpusubtype2 & ~CPU_SUBTYPE_MASK)){ 736 case CPU_SUBTYPE_ARM_XSCALE: 737 return((cpu_subtype_t)-1); 738 case CPU_SUBTYPE_ARM_V7: 739 return(CPU_SUBTYPE_ARM_V7); 740 case CPU_SUBTYPE_ARM_V6: 741 return(CPU_SUBTYPE_ARM_V6); 742 default: 743 return(CPU_SUBTYPE_ARM_V5TEJ); 744 } 745 case CPU_SUBTYPE_ARM_V4T: 746 return((cpusubtype2 & ~CPU_SUBTYPE_MASK)); 747 default: 748 return((cpu_subtype_t)-1); 749 } 750 751 default: 752 return((cpu_subtype_t)-1); 753 } 754 return((cpu_subtype_t)-1); /* logically can't get here */ 755} 756 757#ifndef RLD 758/* 759 * cpusubtype_execute() returns TRUE if the exec_cpusubtype can be used for 760 * execution on the host_cpusubtype for the specified cputype (this routine is 761 * used by the dynamic linker and should match the kernel's exec(2) code). If 762 * the exec_cpusubtype can't be run on the host_cpusubtype FALSE is returned 763 * indicating it can't be run on that cpu. This can also return FALSE and 764 * if new cputypes or cpusubtypes are added and an old version of this routine 765 * is used. But if the cpusubtypes are the same they can always be executed 766 * and this routine will return TRUE. And ALL subtypes are always allowed to be 767 * executed on unknown host_cpusubtype's. 
 */
__private_extern__
enum bool
cpusubtype_execute(
cpu_type_t host_cputype,
cpu_subtype_t host_cpusubtype, /* can NOT be the ALL type */
cpu_subtype_t exec_cpusubtype) /* can be the ALL type */
{
	/*
	 * An exact subtype match (ignoring the capability bits in
	 * CPU_SUBTYPE_MASK) is always executable.
	 */
	if((host_cpusubtype & ~CPU_SUBTYPE_MASK) ==
	   (exec_cpusubtype & ~CPU_SUBTYPE_MASK))
	    return(TRUE);

	switch(host_cputype){
	case CPU_TYPE_POWERPC:
	    /*
	     * The 970 has 64-bit and altivec instructions
	     * The 7450 and 7400 have altivec instructions
	     * The 601 has Power instructions (can only execute on a 601)
	     * other known subtypes can execute anywhere
	     * unknown hosts will only be allowed to execute the ALL subtype
	     */
	    switch(host_cpusubtype & ~CPU_SUBTYPE_MASK){
	    case CPU_SUBTYPE_POWERPC_970:
		switch(exec_cpusubtype & ~CPU_SUBTYPE_MASK){
		case CPU_SUBTYPE_POWERPC_970:
		case CPU_SUBTYPE_POWERPC_7450:
		case CPU_SUBTYPE_POWERPC_7400:
		case CPU_SUBTYPE_POWERPC_750:
		case CPU_SUBTYPE_POWERPC_620:
		case CPU_SUBTYPE_POWERPC_604e:
		case CPU_SUBTYPE_POWERPC_604:
		case CPU_SUBTYPE_POWERPC_603ev:
		case CPU_SUBTYPE_POWERPC_603e:
		case CPU_SUBTYPE_POWERPC_603:
		case CPU_SUBTYPE_POWERPC_602:
		case CPU_SUBTYPE_POWERPC_ALL:
		    return(TRUE);
		case CPU_SUBTYPE_POWERPC_601: /* Power instructions, 601 only */
		default:
		    return(FALSE);
		}
		break; /* logically can't get here */

	    case CPU_SUBTYPE_POWERPC_7450:
	    case CPU_SUBTYPE_POWERPC_7400:
		switch(exec_cpusubtype & ~CPU_SUBTYPE_MASK){
		case CPU_SUBTYPE_POWERPC_7450:
		case CPU_SUBTYPE_POWERPC_7400:
		case CPU_SUBTYPE_POWERPC_750:
		case CPU_SUBTYPE_POWERPC_620:
		case CPU_SUBTYPE_POWERPC_604e:
		case CPU_SUBTYPE_POWERPC_604:
		case CPU_SUBTYPE_POWERPC_603ev:
		case CPU_SUBTYPE_POWERPC_603e:
		case CPU_SUBTYPE_POWERPC_603:
		case CPU_SUBTYPE_POWERPC_602:
		case CPU_SUBTYPE_POWERPC_ALL:
		    return(TRUE);
		case CPU_SUBTYPE_POWERPC_970: /* needs 64-bit instructions */
		case CPU_SUBTYPE_POWERPC_601:
		default:
		    return(FALSE);
		}
		break; /* logically can't get here */

	    case CPU_SUBTYPE_POWERPC_750:
	    case CPU_SUBTYPE_POWERPC_620:
	    case CPU_SUBTYPE_POWERPC_604e:
	    case CPU_SUBTYPE_POWERPC_604:
	    case CPU_SUBTYPE_POWERPC_603ev:
	    case CPU_SUBTYPE_POWERPC_603e:
	    case CPU_SUBTYPE_POWERPC_603:
	    case CPU_SUBTYPE_POWERPC_602:
		switch(exec_cpusubtype & ~CPU_SUBTYPE_MASK){
		case CPU_SUBTYPE_POWERPC_750:
		case CPU_SUBTYPE_POWERPC_620:
		case CPU_SUBTYPE_POWERPC_604e:
		case CPU_SUBTYPE_POWERPC_604:
		case CPU_SUBTYPE_POWERPC_603ev:
		case CPU_SUBTYPE_POWERPC_603e:
		case CPU_SUBTYPE_POWERPC_603:
		case CPU_SUBTYPE_POWERPC_602:
		case CPU_SUBTYPE_POWERPC_ALL:
		    return(TRUE);
		case CPU_SUBTYPE_POWERPC_970:  /* needs 64-bit/altivec */
		case CPU_SUBTYPE_POWERPC_7450: /* needs altivec */
		case CPU_SUBTYPE_POWERPC_7400: /* needs altivec */
		case CPU_SUBTYPE_POWERPC_601:  /* needs Power instructions */
		default:
		    return(FALSE);
		}
		break; /* logically can't get here */

	    case CPU_SUBTYPE_POWERPC_601:
		switch(exec_cpusubtype & ~CPU_SUBTYPE_MASK){
		case CPU_SUBTYPE_POWERPC_750:
		case CPU_SUBTYPE_POWERPC_620:
		case CPU_SUBTYPE_POWERPC_604e:
		case CPU_SUBTYPE_POWERPC_604:
		case CPU_SUBTYPE_POWERPC_603ev:
		case CPU_SUBTYPE_POWERPC_603e:
		case CPU_SUBTYPE_POWERPC_603:
		case CPU_SUBTYPE_POWERPC_602:
		case CPU_SUBTYPE_POWERPC_601:
		case CPU_SUBTYPE_POWERPC_ALL:
		    return(TRUE);
		case CPU_SUBTYPE_POWERPC_970:
		case CPU_SUBTYPE_POWERPC_7450:
		case CPU_SUBTYPE_POWERPC_7400:
		default:
		    return(FALSE);
		}
		break; /* logically can't get here */

	    default: /* unknown host */
		switch(exec_cpusubtype & ~CPU_SUBTYPE_MASK){
		case CPU_SUBTYPE_POWERPC_ALL:
		    return(TRUE);
		default:
		    return(FALSE);
		}
		break; /* logically can't get here */
	    }
	    break; /* logically can't get here */

	case CPU_TYPE_I386:
	    /*
	     * On i386 if it is any known subtype it is allowed to execute on
	     * any host (even unknown hosts).  And the binary is expected to
	     * have code to avoid instructions that will not execute on the
	     * host cpu.
	     */
	    switch(exec_cpusubtype & ~CPU_SUBTYPE_MASK){
	    case CPU_SUBTYPE_I386_ALL: /* same as CPU_SUBTYPE_386 */
	    case CPU_SUBTYPE_486:
	    case CPU_SUBTYPE_486SX:
	    case CPU_SUBTYPE_586: /* same as CPU_SUBTYPE_PENT */
	    case CPU_SUBTYPE_PENTPRO:
	    case CPU_SUBTYPE_PENTII_M3:
	    case CPU_SUBTYPE_PENTII_M5:
	    case CPU_SUBTYPE_PENTIUM_4:
		return(TRUE);
	    default:
		return(FALSE);
	    }
	    break; /* logically can't get here */

	case CPU_TYPE_MC680x0:
	    switch(host_cpusubtype & ~CPU_SUBTYPE_MASK){
	    case CPU_SUBTYPE_MC68040:
		switch(exec_cpusubtype & ~CPU_SUBTYPE_MASK){
		case CPU_SUBTYPE_MC68040:
		case CPU_SUBTYPE_MC680x0_ALL: /* same as CPU_SUBTYPE_MC68030 */
		    return(TRUE);
		case CPU_SUBTYPE_MC68030_ONLY:
		default:
		    return(FALSE);
		}
		break; /* logically can't get here */

	    case CPU_SUBTYPE_MC68030:
		switch(exec_cpusubtype & ~CPU_SUBTYPE_MASK){
		case CPU_SUBTYPE_MC680x0_ALL: /* same as CPU_SUBTYPE_MC68030 */
		case CPU_SUBTYPE_MC68030_ONLY:
		    return(TRUE);
		case CPU_SUBTYPE_MC68040:
		default:
		    return(FALSE);
		}
		break; /* logically can't get here */

	    default: /* unknown host */
		switch(exec_cpusubtype & ~CPU_SUBTYPE_MASK){
		case CPU_SUBTYPE_MC680x0_ALL: /* same as CPU_SUBTYPE_MC68030 */
		    return(TRUE);
		default:
		    return(FALSE);
		}
		break; /* logically can't get here */
	    }
	    break; /* logically can't get here */

	case CPU_TYPE_MC88000:
	    switch(host_cpusubtype & ~CPU_SUBTYPE_MASK){
	    case CPU_SUBTYPE_MC88110:
		switch(exec_cpusubtype & ~CPU_SUBTYPE_MASK){
		case CPU_SUBTYPE_MC88110:
		case CPU_SUBTYPE_MC88000_ALL:
		    return(TRUE);
		default:
		    return(FALSE);
		}
		break; /* logically can't get here */

	    default: /* unknown host */
		switch(exec_cpusubtype & ~CPU_SUBTYPE_MASK){
		case CPU_SUBTYPE_MC88000_ALL:
		    return(TRUE);
		default:
		    return(FALSE);
		}
		break; /* logically can't get here */
	    }
	    break; /* logically can't get here */

	case CPU_TYPE_HPPA:
	    switch(host_cpusubtype & ~CPU_SUBTYPE_MASK){
	    case CPU_SUBTYPE_HPPA_7100LC:
		switch(exec_cpusubtype & ~CPU_SUBTYPE_MASK){
		case CPU_SUBTYPE_HPPA_ALL: /* same as CPU_SUBTYPE_HPPA_7100 */
		case CPU_SUBTYPE_HPPA_7100LC:
		    return(TRUE);
		default:
		    return(FALSE);
		}
		break; /* logically can't get here */

	    case CPU_SUBTYPE_HPPA_7100:
		switch(exec_cpusubtype & ~CPU_SUBTYPE_MASK){
		case CPU_SUBTYPE_HPPA_ALL: /* same as CPU_SUBTYPE_HPPA_7100 */
		    return(TRUE);
		case CPU_SUBTYPE_HPPA_7100LC:
		default:
		    return(FALSE);
		}
		break; /* logically can't get here */

	    default: /* unknown host */
		switch(exec_cpusubtype & ~CPU_SUBTYPE_MASK){
		case CPU_SUBTYPE_HPPA_ALL: /* same as CPU_SUBTYPE_HPPA_7100 */
		    return(TRUE);
		default:
		    return(FALSE);
		}
		break; /* logically can't get here */
	    }
	    break; /* logically can't get here */

	case CPU_TYPE_SPARC:
	    /*
	     * For Sparc we only have the ALL subtype defined.
	     */
	    switch(exec_cpusubtype & ~CPU_SUBTYPE_MASK){
	    case CPU_SUBTYPE_SPARC_ALL:
		return(TRUE);
	    default:
		return(FALSE);
	    }
	    break; /* logically can't get here */

	case CPU_TYPE_ARM:
	    /*
	     * NOTE(review): unlike the other cputypes above, the ARM cases
	     * compare the raw subtype values without masking off
	     * CPU_SUBTYPE_MASK — confirm this matches the kernel's exec(2)
	     * behavior.
	     */
	    switch (host_cpusubtype){
	    case CPU_SUBTYPE_ARM_V6:
		switch(exec_cpusubtype){
		case CPU_SUBTYPE_ARM_ALL:
		case CPU_SUBTYPE_ARM_V4T:
		case CPU_SUBTYPE_ARM_V5TEJ:
		case CPU_SUBTYPE_ARM_V6:
		    return(TRUE);
		default:
		    return(FALSE);
		}
		break; /* logically can't get here */

	    case CPU_SUBTYPE_ARM_V5TEJ:
		switch(exec_cpusubtype){
		case CPU_SUBTYPE_ARM_ALL:
		case CPU_SUBTYPE_ARM_V5TEJ:
		case CPU_SUBTYPE_ARM_V4T:
		    return(TRUE);
		default:
		    return(FALSE);
		}
		break; /* logically can't get here */

	    case CPU_SUBTYPE_ARM_XSCALE:
		switch(exec_cpusubtype){
		case CPU_SUBTYPE_ARM_ALL:
		case CPU_SUBTYPE_ARM_XSCALE:
		case CPU_SUBTYPE_ARM_V4T:
		    return(TRUE);
		default:
		    return(FALSE);
		}
		break; /* logically can't get here */

	    case CPU_SUBTYPE_ARM_V4T:
		switch(exec_cpusubtype){
		case CPU_SUBTYPE_ARM_ALL:
		case CPU_SUBTYPE_ARM_V4T:
		    return(TRUE);
		default:
		    return(FALSE);
		}
		break; /* logically can't get here */

	    default: /* unknown host: only the ALL subtype may execute */
		switch (exec_cpusubtype){
		case CPU_SUBTYPE_ARM_ALL:
		    return(TRUE);
		default:
		    return(FALSE);
		}
		break; /* logically can't get here */
	    }
	    break; /* logically can't get here */

	case CPU_TYPE_VEO:  /* not used with the dynamic linker */
	case CPU_TYPE_I860: /* not used with the dynamic linker */
	default:
	    return(FALSE);
	}
	return(FALSE); /* logically can't get here */
}
#endif /* RLD */