Compare commits
1321 Commits
1.1.3...1.7.0-0.91
| SHA1 | Author | Date | |
|---|---|---|---|
| 0de6c6b8f9 | |||
| 5ffad78b87 | |||
| 542418b1fc | |||
| b95a2fcfab | |||
| 1b11496f26 | |||
| 7c0e624b13 | |||
| 0b66bab992 | |||
| 63ed4e7af0 | |||
| d7cf39883f | |||
| 40f8091fab | |||
| a20e1acf01 | |||
| b3d7bbda56 | |||
| 9a60997ea0 | |||
| 4b66373813 | |||
| b44b11ace7 | |||
| ebc91cea0e | |||
| 58106d791a | |||
| 56b51d4f97 | |||
| bafe540d86 | |||
| d78a0fd05d | |||
| 999bc91b4f | |||
| b3bd2ea9b3 | |||
| d3d9e2400d | |||
| 199407b2a1 | |||
| 5973d66e2d | |||
| d7ef74659b | |||
| ac86affecc | |||
| 2026cf8dad | |||
| 1d135492c3 | |||
| 1cfc5ca71f | |||
| 7ee533d620 | |||
| 28334c7a29 | |||
| 697e9386b3 | |||
| 0e787b731e | |||
| 612f364e6a | |||
| ceee4c379f | |||
| 36c981bc34 | |||
| fd941dad44 | |||
| 5f5b9f94d1 | |||
| 3f3c4acd71 | |||
| 00007dafaa | |||
| cbe2b2149d | |||
| 4cecde3fba | |||
| 8022a2a8c0 | |||
| 3328ce03d9 | |||
| 97b107f61c | |||
| 6f3be17c19 | |||
| dea7d00545 | |||
| 4512778569 | |||
| a7adb266ff | |||
| 2566f4f213 | |||
| ac0081eddd | |||
| d4056acfc3 | |||
| 1910543380 | |||
| 6332903f0d | |||
| 29d27b7c8d | |||
| 7136384384 | |||
| 2fe5c8de2e | |||
| e774e1b984 | |||
| 33b7414615 | |||
| 3c646e2485 | |||
| a5fcc91656 | |||
| d370e9241f | |||
| 3e254c06bf | |||
| 07537cd2e7 | |||
| a37f72da0e | |||
| ab11b168f0 | |||
| eac414d6d8 | |||
| bb725f5f50 | |||
| 5224551782 | |||
| 91146acfe5 | |||
| f64731ab34 | |||
| cd46cbd4b3 | |||
| 39780917af | |||
| 0f8f6d298e | |||
| f8e8b21f04 | |||
| 5c2f9b8239 | |||
| 1afc3d9b70 | |||
| 17a8f68d60 | |||
| 2b9a053504 | |||
| 6441aa1abb | |||
| 9b55b68934 | |||
| 83ef96a739 | |||
| b5337358cf | |||
| 2db3717e57 | |||
| 5395891966 | |||
| c32a5e261b | |||
| c0c80b71ca | |||
| d15a396d5a | |||
| e35ec09da1 | |||
| 5e44c9c9f9 | |||
| 0f6c36870c | |||
| 2ec2112cc5 | |||
| c86a38e18f | |||
| 6aa7b50e26 | |||
| c3c57940ba | |||
| 7aa2d64294 | |||
| 51fe77cdae | |||
| d5aafca1ae | |||
| 54b529c82d | |||
| 232bc9c44b | |||
| f34373d1c0 | |||
| 4698ae166c | |||
| db9ca358f9 | |||
| 16a6a1d08b | |||
| 2e2e973d78 | |||
| c3c0b7197f | |||
| d086100b35 | |||
| 8f74888f87 | |||
| 8e42c2a254 | |||
| caf0f5ef63 | |||
| 3d030391e8 | |||
| 0aeab6b840 | |||
| 367bbda713 | |||
| 0082447043 | |||
| 4f50c90f6e | |||
| 79950e045e | |||
| 6cf7cebb2d | |||
| c9f05f238d | |||
| f1caaa9b74 | |||
| 97cd379ee2 | |||
| 8ee1d61d0f | |||
| 04d17dd3e9 | |||
| 33eef71133 | |||
| c10b4a1c16 | |||
| 8cf70900e7 | |||
| b2618a98f5 | |||
| 01d06cb218 | |||
| c78803ac08 | |||
| 3300e65efc | |||
| d82ac31bc6 | |||
| 4946fbdd82 | |||
| 33cba1ad48 | |||
| 7c69cfaf67 | |||
| b3cbdeec84 | |||
| 1d1ec39a27 | |||
| 0a4e6b49b4 | |||
| bb7e140655 | |||
| 32b32f0c4a | |||
| bf7fd81c1b | |||
| 92d191de9e | |||
| baf68f7e71 | |||
| 26bebb2749 | |||
| 9e2196c9ce | |||
| 93581cb142 | |||
| 67f5a1d4e0 | |||
| edf7b36669 | |||
| 1a204b6674 | |||
| 305511b48f | |||
| 606db376fd | |||
| 5719b4c64a | |||
| 343121c3d0 | |||
| 86c45484e3 | |||
| 767792808a | |||
| 117f070fd6 | |||
| a27909be88 | |||
| cec6f24559 | |||
| b3b8283f87 | |||
| d62f80a7c0 | |||
| 6d584feaef | |||
| e2e015e120 | |||
| 5fb3abe87b | |||
| 37fd9e0cd2 | |||
| 7e748b4ecb | |||
| cafb46efc7 | |||
| 41ea9d16c4 | |||
| 4bbdee395e | |||
| 597baf8445 | |||
| 55faba77a5 | |||
| 6bef773741 | |||
| 7882110e9f | |||
| d1df17ffb7 | |||
| 72af689e69 | |||
| 153d0609de | |||
| 83bbb87a0f | |||
| f00d03445c | |||
| 911b07f507 | |||
| 5b26fe2956 | |||
| 1db00ebc04 | |||
| d5de68e97b | |||
| 1526237bc6 | |||
| b8d96a74ce | |||
| 3c256e1a6c | |||
| 7fc4272b89 | |||
| d052acab1d | |||
| 91ea69cf8f | |||
| 0c63a2a3cd | |||
| a8696d811d | |||
| 569dc33a9c | |||
| 4b252a990f | |||
| adb6cce3ce | |||
| ed21b6849d | |||
| 37605740a4 | |||
| e069694c12 | |||
| dca1cb2625 | |||
| caac060684 | |||
| d330721421 | |||
| 157eeca41a | |||
| 8ba725b225 | |||
| a563d780c1 | |||
| 621533bbd3 | |||
| 37ea770f8c | |||
| edd3ea0103 | |||
| 41d37bcd30 | |||
| 309145587f | |||
| bc06d68d84 | |||
| 18412616e1 | |||
| c371fbf13b | |||
| 1492f16d67 | |||
| fd38ab6fd0 | |||
| f115bae8a7 | |||
| ba80dd8650 | |||
| 06960a41d9 | |||
| 86a2aabb24 | |||
| b4101d9c36 | |||
| ec31d72483 | |||
| 83ade5cdcd | |||
| dec133c1dd | |||
| 04a528ab27 | |||
| 8e4073c2ca | |||
| ff982b8594 | |||
| 299d47abf5 | |||
| f2460695c4 | |||
| 6ce5c754f3 | |||
| e932f2e70c | |||
| bb08742467 | |||
| 3e9fdfc0f1 | |||
| 58f4593478 | |||
| de0e07f29e | |||
| a4b83dc6d4 | |||
| beac6c3e80 | |||
| 5d6715078f | |||
| 0615a0b00b | |||
| 51cd7cbb6c | |||
| 0c1cae45fe | |||
| 11ef2f8092 | |||
| 12aef0b578 | |||
| 9b3450ee7e | |||
| 0d3ef65092 | |||
| 258156b57e | |||
| 8efced7bf7 | |||
| 2dd8687974 | |||
| f0bc1a6b07 | |||
| c52370b959 | |||
| 9c78d4d249 | |||
| b6285c9aa9 | |||
| b945367c90 | |||
| 0f434288e1 | |||
| b5cd813229 | |||
| 7268942c35 | |||
| f8cad24a9a | |||
| 2b6b3f31e5 | |||
| ca19ee434a | |||
| bb2589bac4 | |||
| e1c6e17400 | |||
| 207eba93ea | |||
| 06af2d62c6 | |||
| 3e267e24cb | |||
| e58e1c6e33 | |||
| fb924ebb9d | |||
| ac61577414 | |||
| 4cee9b1a27 | |||
| b55e164669 | |||
| aa66fe2cb1 | |||
| 3b74b0a093 | |||
| 0267a0c8ea | |||
| b3b7801d51 | |||
| 10f1fe76db | |||
| 089b443aaf | |||
| e9955a4bba | |||
| dc52c8a11a | |||
| bc4629dfb0 | |||
| 99fba2df1c | |||
| 239c95449b | |||
| 9dfc139eae | |||
| bc81d362b4 | |||
| 90b6aec53d | |||
| 0887e0de6d | |||
| 2c5c47344d | |||
| b9f223ceca | |||
| 6297181dcd | |||
| 80f964e44f | |||
| cc07d6e017 | |||
| 07c517828d | |||
| 75e42badf4 | |||
| bdccbf7356 | |||
| ad3ee26d36 | |||
| 16f8ccb35b | |||
| 3fda54ece8 | |||
| 4d252c2bb2 | |||
| 0cf89c5682 | |||
| 0d902872a1 | |||
| 9b6a88eeeb | |||
| 96b4729cd5 | |||
| 3372bbfd23 | |||
| f17c30da07 | |||
| 9a0eb915fb | |||
| a5ded1fc06 | |||
| de042b2cb2 | |||
| 2cee82673b | |||
| dfb3bef96d | |||
| 2dc51530f3 | |||
| 13758417c5 | |||
| c32edff2bb | |||
| 8356ef6c96 | |||
| 63d500515a | |||
| 791e8c2114 | |||
| 0bb612caea | |||
| 5e992bc195 | |||
| 08f817a654 | |||
| b87ac8b8c0 | |||
| a48a2cd3e8 | |||
| 7c238c27c9 | |||
| de77d2b061 | |||
| 52f89cf8fa | |||
| c96dfb0c68 | |||
| 21c9e57646 | |||
| 312b6c171b | |||
| 2ce695b47b | |||
| e5c1fdf129 | |||
| 9e3dd53c58 | |||
| fe53c6e0a5 | |||
| e988bfaf50 | |||
| f6f48b1210 | |||
| 70b42fde5d | |||
| ccb36a5849 | |||
| ea7f517e3d | |||
| ac18a24a27 | |||
| 8880710fad | |||
| 03a85825ed | |||
| 940eeca6f5 | |||
| 19b02cf4ed | |||
| 76a0cc71fc | |||
| ab39798181 | |||
| 0cc3496747 | |||
| 10cca81401 | |||
| 0c79de67b4 | |||
| 3fbad79afb | |||
| 1b76aaa7e1 | |||
| aa3c5e91db | |||
| 20d5900c35 | |||
| 414cffd95b | |||
| 9ec0aeeab5 | |||
| 06e96005a6 | |||
| 4606714c07 | |||
| a5d5baf8a8 | |||
| 8074445d59 | |||
| 6a456f11aa | |||
| 81e665cb48 | |||
| e0b9c5deec | |||
| 62772c8a24 | |||
| 63d15f7dfc | |||
| fb3f1c58a8 | |||
| 69846345de | |||
| b8155cc618 | |||
| f07e20a381 | |||
| 764948b51f | |||
| 7da5fede8b | |||
| 6810506c3d | |||
| c82c2c1231 | |||
| 5bc54a3bbe | |||
| 07aa96ef95 | |||
| dac99f708c | |||
| f3c9fbf4ea | |||
| 54122360e8 | |||
| 21cf953a03 | |||
| c59d8db1b3 | |||
| abc0a7bdac | |||
| 2f456b8752 | |||
| 2a63c962fc | |||
| 4bdd9cf512 | |||
| bc2a444828 | |||
| d9b2924249 | |||
| 501531f3b3 | |||
| 366e95856c | |||
| bdf5175d4c | |||
| b174fb8099 | |||
| e828398c8b | |||
| 641d9f1b39 | |||
| c1270cdf6d | |||
| 022e04b62b | |||
| 9cfc373538 | |||
| fb24dcea2e | |||
| 207d653b41 | |||
| 0a49b6eca5 | |||
| 950ea678dd | |||
| cd42d186b7 | |||
| 66bc44f88a | |||
| 34a995d290 | |||
| d0d99adfb3 | |||
| d78883c692 | |||
| ff0395581c | |||
| f5023c9730 | |||
| fe08ac4a67 | |||
| 60dcd0e798 | |||
| 4d215de641 | |||
| 97e0219f50 | |||
| f9d8d98af1 | |||
| 3738b70ad3 | |||
| 9bf225d193 | |||
| 6fc9ec1c92 | |||
| 112ade484a | |||
| be708674d3 | |||
| 557f33a705 | |||
| 7dd0cbd9a6 | |||
| 6ed2e5ffc1 | |||
| 649059f2d2 | |||
| 312c1168f3 | |||
| d29419d336 | |||
| 9f7425c152 | |||
| 100754f556 | |||
| 6d38c34993 | |||
| 7f1c17fc4c | |||
| 25ef4e9261 | |||
| d4d78e9c61 | |||
| e52d748744 | |||
| 39b21e7ba9 | |||
| 8db2d3beec | |||
| f5320fc2b4 | |||
| 0fbdcc44b9 | |||
| 351fdead3b | |||
| 859e976348 | |||
| 49353e252b | |||
| 452d93f14d | |||
| 9e5472bb94 | |||
| 516ab87ab9 | |||
| a9884453e2 | |||
| 0f01312040 | |||
| fb9832af6d | |||
| 0e895478a1 | |||
| 19659aa908 | |||
| e5de0b81ca | |||
| f299fff266 | |||
| 206df33658 | |||
| ad8a3ae962 | |||
| 3c1fd54a92 | |||
| ca34154a43 | |||
| a10f4b861c | |||
| 36d473c5b5 | |||
| 342a2e1287 | |||
| 238f563e88 | |||
| 03cadbcba2 | |||
| 2b254f02f8 | |||
| 960a6f5f90 | |||
| 0cc3120a01 | |||
| 9f31abf402 | |||
| dfd23c3ebe | |||
| eb184419ea | |||
| 13e29c0da5 | |||
| 8aaf0f8551 | |||
| ef9fda23a9 | |||
| cd5cb469eb | |||
| 7a8f5043c5 | |||
| cf6514def9 | |||
| 96b6d773a9 | |||
| 4ba4bbd711 | |||
| 410bf13367 | |||
| 7c231928ab | |||
| 50de3820ad | |||
| c4e5bf6d6b | |||
| c319fe08a4 | |||
| 24d3da32ed | |||
| c4fbbb6027 | |||
| 0449437c15 | |||
| 639d0e496b | |||
| b6de164e9a | |||
| d1b36aab62 | |||
| 8a2f4be443 | |||
| 8a684587fa | |||
| 05c315857c | |||
| 1422838dd1 | |||
| c9fc110fc6 | |||
| ed3c138e1f | |||
| 60c97d0e60 | |||
| 95e90c727e | |||
| ec844bb6e3 | |||
| a11d4d7a9d | |||
| 0ee446923a | |||
| 01b2a1d213 | |||
| 52cd57fed2 | |||
| bbc39480d2 | |||
| 8521b98730 | |||
| da02f76a25 | |||
| dbe5e99cf9 | |||
| 6b293409e5 | |||
| b94247c478 | |||
| 556a64ac5e | |||
| 3f11c1aee5 | |||
| de70eac619 | |||
| 2ba3ec8a4c | |||
| 394a1ef3c5 | |||
| 1954aec0ea | |||
| 2b1b82b242 | |||
| 502463ed9e | |||
| 715f67f32f | |||
| 82a57d5f55 | |||
| 56abe988f3 | |||
| 68c581f721 | |||
| 6ca5aaa1fc | |||
| b2a58ce3e3 | |||
| cfcf0137eb | |||
| 00395d68d4 | |||
| dc1f96fee3 | |||
| c585a37440 | |||
| 98aa633856 | |||
| ddde519263 | |||
| f240671fc8 | |||
| cf113d392a | |||
| 9e57db5427 | |||
| 739472bd86 | |||
| 136b749349 | |||
| ae9a1f39df | |||
| 10dc87dd3f | |||
| 724e0eb7d0 | |||
| 04e0456232 | |||
| 6626204c99 | |||
| 190039f5d9 | |||
| 583cb94667 | |||
| db4d19e419 | |||
| 04c11f35e9 | |||
| e12d5ed341 | |||
| 1253f4d18c | |||
| 527adedaa3 | |||
| 525b90d028 | |||
| 38e68f358a | |||
| 7a3f4d7501 | |||
| 1a5b10277f | |||
| a59c55c188 | |||
| 1d6a078afa | |||
| fb98664f49 | |||
| 9db8d115d9 | |||
| e26e693e58 | |||
| fc2775c932 | |||
| 6581f9b4b2 | |||
| 3a90521489 | |||
| 03802052ed | |||
| c21485d427 | |||
| 18d50e48dc | |||
| a2be475ae4 | |||
| 38f683d1d0 | |||
| 59828db5c9 | |||
| 1a3c73468f | |||
| 85c936a6cb | |||
| 6f9fef2b13 | |||
| cc1d39e55d | |||
| fd8bed670e | |||
| 24a3b236a0 | |||
| 27e55b8cf1 | |||
| 70e52faf36 | |||
| 8db36c3828 | |||
| 06dd71a7e0 | |||
| 01fe83dcb3 | |||
| c86d168165 | |||
| a032dc3d1b | |||
| 201fa7fb55 | |||
| dd676f7149 | |||
| a751e96b1a | |||
| c3bfa3f6a9 | |||
| 1e1fa4f70d | |||
| 39f9d7fdff | |||
| 3e3ccf377c | |||
| 13e71ac9dc | |||
| b1681f4a3a | |||
| 1226e692d9 | |||
| 73ea4b1ce9 | |||
| 09f663c246 | |||
| 9b77630c8b | |||
| b70d470e20 | |||
| ecc850dfef | |||
| b11377f2e9 | |||
| ed1edb152b | |||
| 28c434a230 | |||
| daa234d8b9 | |||
| e803698618 | |||
| c862b29d65 | |||
| dd58d366c3 | |||
| ab284b0531 | |||
| 42b9b31606 | |||
| 29c5c68761 | |||
| 38c08a6663 | |||
| 57258e7f59 | |||
| 8c33c92720 | |||
| a269d96978 | |||
| 2910818f06 | |||
| 3df82d61ce | |||
| 159092c58e | |||
| 60011718d2 | |||
| 7e342751a2 | |||
| c23bc8d401 | |||
| 5e760db417 | |||
| e4da71010c | |||
| c25fb2aa39 | |||
| b51886421e | |||
| 22c6c5c736 | |||
| cd00fc3a78 | |||
| 00a34a8ba3 | |||
| 8900c2cec5 | |||
| fca02ee248 | |||
| 781a69617b | |||
| 04d4145b3e | |||
| 96aab7e215 | |||
| 98ee584ab6 | |||
| 6b031c5472 | |||
| e42c414454 | |||
| e613483bee | |||
| c0271f4727 | |||
| 4969762f15 | |||
| 09d3648e43 | |||
| 4e905cd412 | |||
| 8c11daf726 | |||
| 5cb8a1f10f | |||
| dbba7dea18 | |||
| b6ab5911b7 | |||
| b0d7f890d0 | |||
| b9c0cdddab | |||
| 7ee7dd5e2c | |||
| 07db4a80a7 | |||
| f04e5c24ab | |||
| b8bacdd2de | |||
| a121ffc785 | |||
| 88f9693390 | |||
| 124ec580a0 | |||
| af7f61db49 | |||
| ee299b5780 | |||
| c60a778c8d | |||
| 25a129ea6a | |||
| 8e9924c523 | |||
| c71291a429 | |||
| ba93b83d68 | |||
| c2f41ca9ad | |||
| 062d7ecae3 | |||
| 58d038fcac | |||
| 510310342c | |||
| a6198f267b | |||
| 5e78bd85ab | |||
| 85c0c8a01f | |||
| e29f579061 | |||
| 63703589e5 | |||
| 5c8c1986b5 | |||
| e4370d235c | |||
| 31ac007cb5 | |||
| 56da7e2de9 | |||
| 35300e7b4f | |||
| 439dc0928b | |||
| 4b3e58fd3d | |||
| b7cdbd6c42 | |||
| 77f5cac2bf | |||
| 9102b176c4 | |||
| bb4317beaf | |||
| d24b7585b7 | |||
| 4438f994dc | |||
| 52afbbbc98 | |||
| 460917c4a0 | |||
| 7803468afe | |||
| 8f2c7d2265 | |||
| c6c3a84a46 | |||
| 5a7ca14fcc | |||
| d7b882855a | |||
| 2337832e4c | |||
| be635ceb19 | |||
| 0b0b7b03d7 | |||
| 82914c6a2e | |||
| f127dfdf1e | |||
| 567dcd3846 | |||
| b080e0f301 | |||
| ff383d96ba | |||
| 0bcd3d5de3 | |||
| 9d6e0319f7 | |||
| 0e50eb44a9 | |||
| 2db69d0f24 | |||
| a697f5e98d | |||
| 4439b04d9f | |||
| 38c3b2358a | |||
| 221ce34da2 | |||
| 4246d41007 | |||
| 65df9c8084 | |||
| 7836aa0136 | |||
| 1cf7fad15a | |||
| 0076e1f5e0 | |||
| cae6b9f154 | |||
| 5fcbfa2eb5 | |||
| 9a20cfaefb | |||
| f57b0c5d4f | |||
| 0fdeb254b3 | |||
| 895a8c4099 | |||
| e531ee626e | |||
| 94d093f058 | |||
| 9b8424523a | |||
| ebc702624b | |||
| ea125cb58c | |||
| 689a799bb9 | |||
| 802b1ac14b | |||
| affe3e9010 | |||
| 0b2169964a | |||
| f18d1f5383 | |||
| ea35954613 | |||
| 61a942acdc | |||
| c4b4b7222e | |||
| 21af0351d1 | |||
| 1e1c91962e | |||
| b1aa94d417 | |||
| a6a9bac5b7 | |||
| 240a23a21b | |||
| d5108dba80 | |||
| 20368dd317 | |||
| b93e14f695 | |||
| 3e3f3c5590 | |||
| e8f8660b73 | |||
| 794684985f | |||
| 625607e6db | |||
| 05afa8b6dd | |||
| 6cf89076dc | |||
| 29a658716b | |||
| a7c9988aeb | |||
| d4fa953975 | |||
| 786649d2a3 | |||
| d7416c6f79 | |||
| cb1522ca92 | |||
| 14660a10c3 | |||
| 1387c9687b | |||
| ec99adde4a | |||
| c716e87c53 | |||
| d898f18293 | |||
| bc0759e2dc | |||
| 1aa429d4f5 | |||
| 1543119139 | |||
| 0a0a78ac2e | |||
| 6999d0a3f9 | |||
| f01a883971 | |||
| 3185334c1c | |||
| bc887aab44 | |||
| 6f7c428a34 | |||
| 68c702d024 | |||
| 97273adcc5 | |||
| ad2cb6375a | |||
| 6df4bd8f8c | |||
| 0994c3300e | |||
| a5c3e48843 | |||
| df2c993721 | |||
| dc8d6b740c | |||
| c2e1b8d694 | |||
| f6d8138e05 | |||
| 9d587dcbe8 | |||
| eb675818c7 | |||
| 3ce7763715 | |||
| fd429ecc5b | |||
| ed7f5abc28 | |||
| 79e5026f01 | |||
| a1b50051ed | |||
| 9a79920ef9 | |||
| 141fa5120e | |||
| 699cb4f88c | |||
| bc3e6ded65 | |||
| eae5c40f60 | |||
| 0c7384f980 | |||
| 67ebcca74d | |||
| 3d365b0d7a | |||
| 94e96927a6 | |||
| 3636c8e7e4 | |||
| b920da5103 | |||
| f1a40a409f | |||
| 4ce4c9f264 | |||
| e770a22fa5 | |||
| 9bb8076dc0 | |||
| 229b041320 | |||
| e1f204de4a | |||
| c6cc0bf07a | |||
| 04e54ead5d | |||
| 992705d465 | |||
| ae09d979b6 | |||
| 1cbe389879 | |||
| 0758f6254e | |||
| db732a245c | |||
| 08f2840f7d | |||
| 521bdc6181 | |||
| e7b6a3472b | |||
| 11756d96ef | |||
| f185be06eb | |||
| 854bc85602 | |||
| ab8fe0bbbf | |||
| b87c06cbcb | |||
| b939ca9370 | |||
| ec202a1ca9 | |||
| d4471df94e | |||
| a6ac4acf40 | |||
| 8ff754c466 | |||
| 90dba00742 | |||
| 86ae1380e4 | |||
| 9bb48186e6 | |||
| 139123dc12 | |||
| 6602cf442c | |||
| f148863586 | |||
| ec375da27a | |||
| c50e7c1029 | |||
| 5f4dbb2c71 | |||
| 328609269b | |||
| 056fdb2633 | |||
| 09d0a59e22 | |||
| 511555c8cb | |||
| 81699345cc | |||
| 130751ff66 | |||
| f3d18eb9de | |||
| 249bda4aef | |||
| aaa246f86f | |||
| c52f7a5b49 | |||
| 90a34f54c9 | |||
| bfb5080b71 | |||
| 641dfed37e | |||
| 4572e6be3f | |||
| 12e44050c9 | |||
| d5190990f5 | |||
| 82822b1f16 | |||
| 7f02889f76 | |||
| 9dc86869d8 | |||
| 02bb127007 | |||
| c26c4aba4f | |||
| e8d8ad60c2 | |||
| a7f645f7df | |||
| 73731d2a0d | |||
| 0f049c5ed7 | |||
| 8d5f95de04 | |||
| 88fca2c0df | |||
| 81d18e35dd | |||
| 309da8fc53 | |||
| 535e3f3af6 | |||
| 4c80dca479 | |||
| 7bef1f5117 | |||
| bb8c8355c2 | |||
| fab0641813 | |||
| ce3af4734a | |||
| e2dea4e9f8 | |||
| 0d9c1df75a | |||
| 6a979cf4b8 | |||
| c107d1fdf9 | |||
| bc89a51e00 | |||
| 9da9e755fa | |||
| fe42481d6f | |||
| b1ea6eb82a | |||
| 8c2e20c3aa | |||
| 65667709a8 | |||
| 51bc5fd61f | |||
| 3b277b2354 | |||
| 3e4c9bdd90 | |||
| 06b1b4f8ab | |||
| 7b4de6e6c2 | |||
| 1c266f4849 | |||
| b7a7281195 | |||
| b77732fb4f | |||
| a224bf648a | |||
| 642520f80c | |||
| 5cb75b00c7 | |||
| 7dd0d1137f | |||
| cb2fe29f06 | |||
| 3432f46d8b | |||
| afcf1a24aa | |||
| 140f813d77 | |||
| 7ad6f9595c | |||
| 1796c20b88 | |||
| 0da5b76916 | |||
| 4ac1efae6c | |||
| 523a066245 | |||
| 98df469d29 | |||
| f46287a711 | |||
| c260b5c6f3 | |||
| c9157f273f | |||
| 840acd6021 | |||
| c949a894c6 | |||
| 228f8f8533 | |||
| 8ee9eca74e | |||
| 748429fc92 | |||
| a9dfcd9a89 | |||
| 559fc9746c | |||
| 54169bc3ea | |||
| 142e923222 | |||
| 86efc86945 | |||
| ebaafa95d8 | |||
| b8ee144e67 | |||
| 722ae0e7d5 | |||
| f56e087208 | |||
| f55f01cc11 | |||
| 1fa398cfab | |||
| 8123cc413e | |||
| d4459cf9f3 | |||
| 4bb65494e9 | |||
| 2f2b3cdc6f | |||
| 1e9f9d9809 | |||
| 1b25379c02 | |||
| 38bbb4e390 | |||
| 0fa88f513f | |||
| cd54c5983a | |||
| 6084faeecd | |||
| d209c00a30 | |||
| 9a5d5feb9c | |||
| 0cda763f95 | |||
| cc7be46b7d | |||
| 589504dc33 | |||
| bf2f38051b | |||
| 2d2d0af6fb | |||
| 7f47dc78a1 | |||
| c3c9187ed5 | |||
| aebacb243e | |||
| 5a8d1f09e8 | |||
| 0e10b6d1ee | |||
| d649d6fc2d | |||
| bad487cc07 | |||
| 3b6056fb1a | |||
| 5cc738d6bd | |||
| c9fa445f54 | |||
| d273a2f58b | |||
| 4e7069d499 | |||
| 66f44e77af | |||
| 35f908b75c | |||
| 2f0089dfb9 | |||
| 2af6d5115a | |||
| ac25c5e1e7 | |||
| 90c0355d90 | |||
| 43230eb623 | |||
| f18dc8428d | |||
| ab53c8e0a4 | |||
| 6c33e236d7 | |||
| 85d36f1469 | |||
| 0ecf31d896 | |||
| 08a625cc0d | |||
| 12840601e1 | |||
| 2ae6883a8b | |||
| d5629606c5 | |||
| 285059e504 | |||
| 5b6d0a887c | |||
| 3573b8649e | |||
| d7523cdd84 | |||
| 5753db5846 | |||
| 2d7cb0af89 | |||
| 1cb9b435a9 | |||
| 43ecf06e83 | |||
| 51982de36b | |||
| 0a22320a3c | |||
| 8813e890c5 | |||
| e664ffba18 | |||
| 3bd0137c25 | |||
| 4f2b4aa402 | |||
| 682cd34b74 | |||
| 2bc4d06a48 | |||
| 4f2c1e07c1 | |||
| 77bb3038d3 | |||
| 931448a94d | |||
| c51bbbabc6 | |||
| 2ddc52e1a4 | |||
| 3c93958c48 | |||
| 9763c40f64 | |||
| 3bf77446cc | |||
| c3dfb1663d | |||
| 217dd9c1e5 | |||
| d4cd756a91 | |||
| b894619d1b | |||
| b962da700b | |||
| 196379854b | |||
| d213efac79 | |||
| 38910fe13d | |||
| 4d4279121b | |||
| 99da5b6484 | |||
| 6b60dee890 | |||
| dd08a3151e | |||
| e1442bf12b | |||
| 86f297ddc4 | |||
| 823b222af9 | |||
| 9c25eb8ef2 | |||
| 665eead78b | |||
| f8ef43c77d | |||
| 8f4afe410f | |||
| da9bb421cc | |||
| 1e89796d3e | |||
| a1a2900606 | |||
| 79b977ac06 | |||
| 37e3118df6 | |||
| be4d84c0c1 | |||
| c43c1b640a | |||
| e294db7e53 | |||
| df3f388e09 | |||
| a2fbe99b60 | |||
| 9c847c0a8f | |||
| 58c1fd4512 | |||
| dae9a5ff13 | |||
| 4d9a1628f2 | |||
| 47b4bd5aba | |||
| ea831c614e | |||
| 5b51eb80a3 | |||
| daa7526127 | |||
| a1af7edd6e | |||
| c5d71c325d | |||
| aa7cb970c4 | |||
| 5664125e57 | |||
| 203bfc2492 | |||
| 973d8ddd2c | |||
| a491e49bbc | |||
| 2f9af42b2e | |||
| b3613e2535 | |||
| 2a46fd0b2d | |||
| 230272438f | |||
| 99a45f20c2 | |||
| 43db8e2d65 | |||
| 4eed36f124 | |||
| cdfa4015b7 | |||
| a05b6e1ba8 | |||
| 325082a571 | |||
| 0278a876db | |||
| 707b245009 | |||
| 8cc264d794 | |||
| 9a550b310c | |||
| 9989f41fd3 | |||
| 704096b139 | |||
| 99ca46663b | |||
| 90fbfd6f7d | |||
| f4c32e5507 | |||
| 4b3f220659 | |||
| 82a0f155d8 | |||
| a2b8235e83 | |||
| b53fb5f5cb | |||
| 236a072311 | |||
| 74f15783d2 | |||
| 184c2d311c | |||
| 75e2bb7793 | |||
| 6d4d6440aa | |||
| 9194742de8 | |||
| 831a0637a1 | |||
| ac432504a7 | |||
| b39fec1104 | |||
| 86dedc32fa | |||
| effde241b9 | |||
| 101cab5b0a | |||
| 4cd1c120fa | |||
| bf5ac7afc8 | |||
| bc423255d9 | |||
| 6714161c25 | |||
| 992a292c08 | |||
| 64c2e437c6 | |||
| dd9675d65e | |||
| 51ed8dce06 | |||
| 01f5e46865 | |||
| 38961fca78 | |||
| 2d7890731e | |||
| 7d181fccd9 | |||
| bd75e80df2 | |||
| 035e7913d8 | |||
| 7d38c7c147 | |||
| a801bcc591 | |||
| d7b8e7f4f4 | |||
| 6afea4af48 | |||
| 6415dcfdcc | |||
| 0f58e9e77d | |||
| 72e3f5ee50 | |||
| 8d57ad9bc4 | |||
| 35b36c2d33 | |||
| 632611d78c | |||
| d48d44d365 | |||
| 4c0f401424 | |||
| 06f824c829 | |||
| 7a606baad4 | |||
| 4c6c66555e | |||
| 8426cf589a | |||
| da7421e8ee | |||
| 209748d913 | |||
| f81722c63b | |||
| 2189c55d99 | |||
| 201a7e2595 | |||
| 5cdd194856 | |||
| 0061adadfb | |||
| 67843151d3 | |||
| 083cf3fcc9 | |||
| 4236323661 | |||
| 5a9bee55c9 | |||
| 6e23b07b20 | |||
| e64bd49d9e | |||
| 72b8f99d3b | |||
| 090937a5a3 | |||
| 2082acdf0d | |||
| a8f11634e6 | |||
| 4f9865cc8f | |||
| 07efb3ab9a | |||
| 2afc9d37d1 | |||
| fa6f20a3c4 | |||
| 52bc052e1a | |||
| f84415c310 | |||
| 1a853e07d7 | |||
| 07b0954610 | |||
| 1f006b2381 | |||
| 4dfd806aa7 | |||
| c6e3185246 | |||
| d9e6ff235d | |||
| b03f69783a | |||
| ab915f3331 | |||
| 7773c4aef6 | |||
| 58e531eb58 | |||
| 9beef7d901 | |||
| 0733592eb5 | |||
| 4d0e0728f4 | |||
| 66fad4c7a4 | |||
| 5758dba7cf | |||
| 1ca16b9693 | |||
| d29922c820 | |||
| 46b48ac59b | |||
| 446ef0465b | |||
| 200fe9aec4 | |||
| fedba28a93 | |||
| b527503937 | |||
| 6bdafbd33b | |||
| 12e7ed644f | |||
| edf059888d | |||
| a66fb96cd9 | |||
| dd2ef89997 | |||
| ba7edf1981 | |||
| a669fc5125 | |||
| c0cabc2d83 | |||
| e306b1e838 | |||
| 0c3b705f98 | |||
| 9f55263528 | |||
| 74c5f61fd5 | |||
| cadb66e5c1 | |||
| 9b5ccb5a33 | |||
| c5079898c2 | |||
| 746b459e7f | |||
| 4c42086154 | |||
| 56ee0787c9 | |||
| e901d42fb6 | |||
| 29ab087fa2 | |||
| 105d373765 | |||
| 0dd2fad33b | |||
| e554f4e2f9 | |||
| a256280118 | |||
| d75be7228b | |||
| 923dc4aa11 | |||
| e3e0f6a174 | |||
| dd6f721e03 | |||
| 9c25d47d9b | |||
| 5a4148aaaf | |||
| 32c8f6192d | |||
| e2f424846c | |||
| 989af7e045 | |||
| 721cee05a2 | |||
| 86aa76e088 | |||
| ab113658f1 | |||
| 2d72042021 | |||
| 610463ff39 | |||
| dfb0a37305 | |||
| 26b9484bae | |||
| b4aecfd43c | |||
| bf036f19f7 | |||
| 182202523e | |||
| afb7cb3a1e | |||
| fdbdcbd0ee | |||
| a18fd1f45c | |||
| d8170e292c | |||
| fee5234c54 | |||
| 6309095fd2 | |||
| b005adc103 | |||
| 21373338cc | |||
| 39352cd364 | |||
| 84025cc9cb | |||
| 04cbfbb025 | |||
| ba58054c9d | |||
| 7fd55dc83f | |||
| d66af42f7b | |||
| 4b964b8e0d | |||
| 65dc3440cb | |||
| fbd9086ce5 | |||
| c2b1d8e3ef | |||
| e2d59e2cb9 | |||
| 3de0f5ea19 | |||
| 373e9ea63c | |||
| 8daffa939e | |||
| eaa4d35fab | |||
| a968c935b5 | |||
| e01f6dd6ea | |||
| a07d802cbe | |||
| 1e442cce10 | |||
| 3f870b69a6 | |||
| 0fef80cb19 | |||
| 9992fe0d72 | |||
| 2d19ed9391 | |||
| 2f2f04d5a1 | |||
| 1541b26086 | |||
| e6c4d7731d | |||
| 94b527e027 | |||
| 8c9b207557 | |||
| dacb05844b | |||
| c3ec5d20ca | |||
| 92a40f92dd | |||
| 45bddf3caa | |||
| b7671fedd3 | |||
| c38d536aaa | |||
| 4ee0c05e08 | |||
| f2ab0193e5 | |||
| ef910fdf0e | |||
| b97a8c5138 | |||
| 034d10b185 | |||
| 3fe2257929 | |||
| eca4018ecb | |||
| e936b2ebe1 | |||
| d8112f92f8 | |||
| 1076010de4 | |||
| da4a5ec44b | |||
| d35aa9b100 | |||
| ba8dbf1b19 | |||
| 6213f0e488 | |||
| 4ef82c2683 | |||
| e066a8798c | |||
| b702c9691e | |||
| addbe91e59 | |||
| b812848a0e | |||
| ad214c8206 | |||
| 1bc3218fc1 | |||
| 5cc420a6c3 | |||
| c7686fdf4e | |||
| c1dae4d8b0 | |||
| 2473025201 | |||
| fa5c1b23ca | |||
| f2f499aace | |||
| bd47b909bf | |||
| d646c2a4b9 | |||
| 865ada46bf | |||
| cdffc5e853 | |||
| 0e67e9266b | |||
| 1ff0afe6fb | |||
| d34884f9a4 | |||
| 7a0c204dc1 | |||
| 25f67c9ef8 | |||
| a776464a7e | |||
| c40e7105e6 | |||
| 5bac38ce8b | |||
| e3f0662130 | |||
| 21df56b233 | |||
| 393cec513c | |||
| 4437ecc69a | |||
| 40d75baca2 | |||
| 00f3fe0840 | |||
| 47a8b5bda5 | |||
| ec75095073 | |||
| 1794232989 | |||
| 40978d162e | |||
| 536ce9f927 | |||
| 4e5ec74ffe | |||
| a6d8125fd7 | |||
| 15d3a0361e | |||
| 6ad84a96a3 | |||
| 16e846e9b6 | |||
| 5bc7185f07 | |||
| 32462dfb2d | |||
| e3ef88c0cf | |||
| 829aae7b8d | |||
| b836b84825 | |||
| 3e1f154412 | |||
| e7af537452 | |||
| 3565959af7 | |||
| 4667136a4c | |||
| 972d14611a | |||
| e90eef8910 | |||
| f81927b85b | |||
| 701cdcdab1 | |||
| 9635a628a9 | |||
| 3e1b16f3fc | |||
| ff37ff9ccf | |||
| 5b7bcb7170 | |||
| 6a5fe90f98 | |||
| 91373337ba | |||
| 56ed726a88 | |||
| bce10e11e4 | |||
| 91cdb16158 | |||
| c58ab0f648 | |||
| f410af1cfc | |||
| aa15e5eea8 | |||
| df9f1f8f78 | |||
| 7ace35d737 | |||
| 551999ff6b | |||
| 052b3f44ca | |||
| fdcf766337 | |||
| 7d13bfb14e | |||
| 202bfd9955 | |||
| c99e36235b | |||
| 3cecafac59 | |||
| 61fc4c5e55 | |||
| fad73cacc1 | |||
| 8fced29978 | |||
| b0f4ae4890 | |||
| 7070094a31 | |||
| 011185e3f7 | |||
| 461881e46a | |||
| ddc33821cf | |||
| 0ab7d02994 | |||
| a8c4ab221b | |||
| 87d36a7752 | |||
| 998ded414c | |||
| f78d031e64 | |||
| 4ab37dd34a | |||
| 8129dec2f7 | |||
| a1035a1878 | |||
| db169c5f90 | |||
| bbb55ef261 | |||
| 1130cafe41 | |||
| a1cf27e232 | |||
| 5a1ce99d87 | |||
| c7db296e1b | |||
| f634a750c5 | |||
| d07a196c8e | |||
| 8c56c75d2c | |||
| e54895efde | |||
| 2f8cca2d6d | |||
| 64607152ee | |||
| 20383ad3d0 | |||
| 787d34f650 | |||
| ae618a0c68 | |||
| f480376153 | |||
| e4b3a88fc6 | |||
| 69a5c53074 | |||
| 259583e936 | |||
| 0f826290d0 | |||
| e46f027894 | |||
| 3e093f6a40 | |||
| 00996b551f | |||
| 24d8697cef | |||
| be4f6741f9 | |||
| 7a2f67f5f0 | |||
| bba0425267 |
.gitignore (vendored): 40 changes

@@ -1,3 +1,4 @@
*~
*.o
*.elf
*.bin
@@ -8,9 +9,36 @@
Module.symvers
*.order
.tmp_versions

elfboot/elfboot
elfboot/elfboot_test
linux/executer/mcexec
linux/mod_test*
linux/target
old_timestamp
CMakeFiles
CMakeCache.txt
Makefile
!test/*/*/Makefile
!test/signalonfork+wait/Makefile
!test/perf_overflow/Makefile
!test/*/*/*.cmd
Kbuild
cmake_install.cmake
config.h
mcstop+release.sh
mcreboot.sh
mcreboot.1
mcoverlay-destroy.sh
mcoverlay-create.sh
kernel/mckernel.img
kernel/include/swapfmt.h
executer/user/vmcore2mckdump
executer/user/ql_talker
executer/user/mcexec.1
executer/user/mcexec
executer/user/libsched_yield.so.1.0.0
executer/user/libsched_yield.so
executer/user/libmcexec.a
executer/user/libldump2mcdump.so
executer/user/eclair
tools/mcstat/mcstat
/_CPack_Packages
/CPackSourceConfig.cmake
CPackConfig.cmake
/build
mckernel-*.tar.gz
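The negated patterns above (the `!test/...` entries) un-ignore hand-written Makefiles under `test/` while generated Makefiles stay ignored. As a quick sanity check, `git check-ignore -v` reports which rule decides a given path; a minimal sketch, assuming it is run from the repository root:

```sh
# Show which .gitignore rule decides each path (run from the work tree root).
git check-ignore -v Makefile                          # expected: matched by the "Makefile" rule
git check-ignore -v test/signalonfork+wait/Makefile   # expected: matched by the negation, i.e. not ignored
```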
.gitmodules (vendored, new file): 6 additions

@@ -0,0 +1,6 @@
[submodule "ihk"]
path = ihk
url = https://github.com/RIKEN-SysSoft/ihk.git
[submodule "executer/user/lib/libdwarf/libdwarf"]
path = executer/user/lib/libdwarf/libdwarf
url = https://github.com/bgerofi/libdwarf.git
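Both submodules have to be populated before configuring the build; the CMakeLists.txt below aborts with a hint to run `git submodule update --init` when the ihk sources are missing. A minimal sketch, assuming the usual RIKEN-SysSoft/mckernel repository location:

```sh
# Clone McKernel together with the ihk and libdwarf submodules listed above.
git clone --recursive https://github.com/RIKEN-SysSoft/mckernel.git

# Or, from inside an already-cloned tree:
git submodule update --init --recursive
```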
CMakeLists.txt (new file): 262 additions

@@ -0,0 +1,262 @@
cmake_minimum_required(VERSION 2.6)

if (NOT CMAKE_BUILD_TYPE)
set (CMAKE_BUILD_TYPE "Debug" CACHE STRING "Build type: Debug Release..." FORCE)
endif (NOT CMAKE_BUILD_TYPE)

enable_language(C ASM)

project(mckernel C ASM)
set(MCKERNEL_VERSION "1.7.0")

# See "Fedora Packaging Guidlines -- Versioning"
set(MCKERNEL_RELEASE "0.91")

set(CMAKE_MODULE_PATH ${CMAKE_SOURCE_DIR}/cmake/modules)
# for rpmbuild
if(DEFINED SYSCONF_INSTALL_DIR)
set(CMAKE_INSTALL_SYSCONFDIR "${SYSCONF_INSTALL_DIR}")
endif()

if (CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64")
set(BUILD_TARGET "smp-x86" CACHE STRING "Build target: smp-x86 | smp-arm64")
elseif (CMAKE_SYSTEM_PROCESSOR STREQUAL "aarch64")
set(BUILD_TARGET "smp-arm64" CACHE STRING "Build target: smp-x86 | smp-arm64")
endif()

if (BUILD_TARGET STREQUAL "smp-x86")
set(ARCH "x86_64")
elseif (BUILD_TARGET STREQUAL "smp-arm64")
set(ARCH "arm64")
endif()

include(GNUInstallDirs)
include(CMakeParseArguments)
include(Kbuild)
include(CheckCCompilerFlag)
include(AutoconfHelper)

CHECK_C_COMPILER_FLAG(-Wno-implicit-fallthrough IMPLICIT_FALLTHROUGH)
if(IMPLICIT_FALLTHROUGH)
set(EXTRA_WARNINGS "-Wno-implicit-fallthrough")
endif(IMPLICIT_FALLTHROUGH)

# build options
set(CFLAGS_WARNING "-Wall" "-Wextra" "-Wno-unused-parameter" "-Wno-sign-compare" "-Wno-unused-function" ${EXTRA_WARNINGS} CACHE STRING "Warning flags")
add_compile_options(${CFLAGS_WARNING})

option(ENABLE_WERROR "Enable -Werror" OFF)
if (ENABLE_WERROR)
add_compile_options("-Werror")
endif(ENABLE_WERROR)

option(ENABLE_LINUX_WORK_IRQ_FOR_IKC "Use Linux work IRQ for IKC IPI" ON)
if (ENABLE_LINUX_WORK_IRQ_FOR_IKC)
set(KBUILD_C_FLAGS "${KBUILD_C_FLAGS} -DIHK_IKC_USE_LINUX_WORK_IRQ")
add_definitions(-DIHK_IKC_USE_LINUX_WORK_IRQ)
endif()

if (BUILD_TARGET STREQUAL "smp-arm64")
foreach(i RANGE 1 120)
add_definitions(-DPOSTK_DEBUG_ARCH_DEP_${i} -DPOSTK_DEBUG_TEMP_FIX_${i})
set(KBUILD_C_FLAGS "${KBUILD_C_FLAGS} -DPOSTK_DEBUG_ARCH_DEP_${i} -DPOSTK_DEBUG_TEMP_FIX_${i}")
endforeach()

execute_process(COMMAND awk -F= "$1 == \"CONFIG_ARM64_64K_PAGES\" { print $2; exit; }" "${KERNEL_DIR}/.config"
OUTPUT_VARIABLE CONFIG_ARM64_64K_PAGES OUTPUT_STRIP_TRAILING_WHITESPACE)
execute_process(COMMAND awk -F= "$1 == \"CONFIG_ARM64_VA_BITS\" { print $2; exit; }" "${KERNEL_DIR}/.config"
OUTPUT_VARIABLE CONFIG_ARM64_VA_BITS OUTPUT_STRIP_TRAILING_WHITESPACE)

message("Host kernel CONFIG_ARM64_64K_PAGES=${CONFIG_ARM64_64K_PAGES}")
message("Host kernel CONFIG_ARM64_VA_BITS=${CONFIG_ARM64_VA_BITS}")

if(CONFIG_ARM64_64K_PAGES STREQUAL "y")
if(CONFIG_ARM64_VA_BITS STREQUAL 42)
add_definitions(-DCONFIG_ARM64_PGTABLE_LEVELS=2 -DCONFIG_ARM64_VA_BITS=42 -DCONFIG_ARM64_64K_PAGES)
set(LINKER_SCRIPT "smp-arm64_type3.lds")
elseif(CONFIG_ARM64_VA_BITS STREQUAL 48)
add_definitions(-DCONFIG_ARM64_PGTABLE_LEVELS=3 -DCONFIG_ARM64_VA_BITS=48 -DCONFIG_ARM64_64K_PAGES)
set(LINKER_SCRIPT "smp-arm64_type4.lds")
endif()
else(CONFIG_ARM64_64K_PAGES STREQUAL "y")
if(CONFIG_ARM64_VA_BITS STREQUAL 39)
add_definitions(-DCONFIG_ARM64_PGTABLE_LEVELS=3 -DCONFIG_ARM64_VA_BITS=39)
set(LINKER_SCRIPT "smp-arm64_type1.lds")
elseif(CONFIG_ARM64_VA_BITS STREQUAL 48)
add_definitions(-DCONFIG_ARM64_PGTABLE_LEVELS=4 -DCONFIG_ARM64_VA_BITS=48)
set(LINKER_SCRIPT "smp-arm64_type2.lds")
endif()
endif(CONFIG_ARM64_64K_PAGES STREQUAL "y")
endif()
set_property(CACHE BUILD_TARGET PROPERTY STRINGS smp-x86 smp-arm64)

# define MAP_KERNEL_START

set(tmpdir ${CMAKE_CURRENT_BINARY_DIR}/tmp.resolve_MODULES_END)
file(REMOVE_RECURSE ${tmpdir})
file(MAKE_DIRECTORY ${tmpdir})
file(WRITE ${tmpdir}/driver.c "#include <linux/module.h>\n")
file(APPEND ${tmpdir}/driver.c "unsigned long MAP_KERNEL_START = MODULES_END - (1UL << 23);\n")
file(WRITE ${tmpdir}/Makefile "obj-m := driver.o\n")
file(APPEND ${tmpdir}/Makefile "all:\n")
file(APPEND ${tmpdir}/Makefile "\tmake ${KBUILD_MAKE_FLAGS_STR} -C ${KERNEL_DIR} M=${tmpdir} modules\n")

execute_process(COMMAND make -C ${tmpdir})
execute_process(COMMAND bash -c "offset=`readelf -S ${tmpdir}/driver.ko | grep .data | sed 's/.* //g'`; echo $((0x$offset))"
OUTPUT_VARIABLE MAP_KERNEL_START_OFFSET OUTPUT_STRIP_TRAILING_WHITESPACE)
execute_process(COMMAND bash -c "dd if=${tmpdir}/driver.ko bs=1 skip=${MAP_KERNEL_START_OFFSET} count=8 2>/dev/null | od -tx8 -Ax | head -1 | sed 's|.* |0x|g'"
OUTPUT_VARIABLE MAP_KERNEL_START OUTPUT_STRIP_TRAILING_WHITESPACE)


set(ENABLE_MEMDUMP ON)
option(ENABLE_PERF "Enable perf support" ON)
option(ENABLE_RUSAGE "Enable rusage support" ON)
option(ENABLE_QLMPI "Enable qlmpi programs" OFF)
option(ENABLE_UTI "Enable uti support" OFF)
option(ENABLE_UBSAN "Enable undefined behaviour sanitizer on mckernel size" OFF)
option(ENABLE_PER_CPU_ALLOC_CACHE "Enable per-CPU allocator cache (ThunderX2 workaround)" OFF)

find_package(PkgConfig REQUIRED)
set(PKG_CONFIG_USE_CMAKE_PREFIX_PATH ON)

find_library(LIBRT rt)
if (NOT LIBRT)
message(FATAL_ERROR "error: couldn't find librt")
endif()
find_library(LIBNUMA numa)
if (NOT LIBNUMA)
message(FATAL_ERROR "error: couldn't find libnuma")
endif()
find_library(LIBBFD bfd)
if (NOT LIBBFD)
message(FATAL_ERROR "error: couldn't find libbfd")
endif()
find_library(LIBIBERTY iberty)
if (NOT LIBIBERTY)
message(FATAL_ERROR "error: couldn't find libiberty")
endif()

find_library(LIBDWARF dwarf)

if (NOT LIBDWARF)
if (CMAKE_CROSSCOMPILING)
message(FATAL_ERROR "Could not find libdwarf.so, install libdwarf-devel to ${CMAKE_FIND_ROOT_PATH}")
endif()
message("WARNING: libdwarf will be compiled locally")
enable_language(CXX)
else()
# Note that libdwarf-devel provides /usr/include/libdwarf/dwarf.h
# but elfutils-devel provides /usr/include/dwarf.h
# while mcinspect.c performs "#include <dwarf.h>"
find_path(DWARF_H dwarf.h PATH_SUFFIXES libdwarf)
endif()

if (ENABLE_QLMPI)
find_package(MPI REQUIRED)
endif()

if (ENABLE_UTI)
pkg_check_modules(LIBSYSCALL_INTERCEPT REQUIRED libsyscall_intercept)
link_directories(${LIBSYSCALL_INTERCEPT_LIBRARY_DIRS})
endif()

string(REGEX REPLACE "^([0-9]+)\\.([0-9]+)\\.([0-9]+)(-([0-9]+)(.*))?" "\\1;\\2;\\3;\\5;\\6" LINUX_VERSION ${UNAME_R})
list(GET LINUX_VERSION 0 LINUX_VERSION_MAJOR)
list(GET LINUX_VERSION 1 LINUX_VERSION_MINOR)
list(GET LINUX_VERSION 2 LINUX_VERSION_PATCH)
list(GET LINUX_VERSION 3 LINUX_VERSION_RELEASE)
math(EXPR LINUX_VERSION_CODE "${LINUX_VERSION_MAJOR} * 65536 + ${LINUX_VERSION_MINOR} * 256 + ${LINUX_VERSION_PATCH}")

# compat with various install paths
set(BINDIR ${CMAKE_INSTALL_FULL_BINDIR})
set(SBINDIR ${CMAKE_INSTALL_FULL_SBINDIR})
set(ETCDIR ${CMAKE_INSTALL_PREFIX}/etc)
set(ROOTFSDIR "/rootfs")
if (CMAKE_INSTALL_PREFIX STREQUAL "/usr")
set(KMODDIR "/lib/modules/${UNAME_R}/extra/mckernel")
set(MCKERNELDIR "${CMAKE_INSTALL_FULL_DATADIR}/mckernel/${BUILD_TARGET}")
else()
set(KMODDIR "${CMAKE_INSTALL_PREFIX}/kmod")
set(MCKERNELDIR "${CMAKE_INSTALL_PREFIX}/${BUILD_TARGET}/kernel")
endif()
set(prefix ${CMAKE_INSTALL_PREFIX})

# set rpath for everyone
set(CMAKE_INSTALL_RPATH ${CMAKE_INSTALL_FULL_LIBDIR})

# ihk: ultimately should support extrnal build, but add as subproject for now
if (EXISTS ${PROJECT_SOURCE_DIR}/ihk/CMakeLists.txt)
set(IHK_SOURCE_DIR "ihk" CACHE STRINGS "path to ihk source directory from mckernel sources")
elseif (EXISTS ${PROJECT_SOURCE_DIR}/../ihk/CMakeLists.txt)
set(IHK_SOURCE_DIR "../ihk" CACHE STRINGS "path to ihk source directory from mckernel sources")
else()
set(IHK_SOURCE_DIR "ihk" CACHE STRINGS "path to ihk source directory from mckernel sources")
endif()
if (EXISTS ${PROJECT_SOURCE_DIR}/${IHK_SOURCE_DIR}/CMakeLists.txt)
set(IHK_FULL_SOURCE_DIR ${PROJECT_SOURCE_DIR}/${IHK_SOURCE_DIR})
elseif (EXISTS /${IHK_SOURCE_DIR}/CMakeLists.txt)
set(IHK_FULL_SOURCE_DIR /${IHK_SOURCE_DIR})
else()
message(FATAL_ERROR "Could not find ihk dir, or it does not contain CMakeLists.txt, either clone ihk or run git submodule update --init")
endif()

add_subdirectory(${IHK_SOURCE_DIR} ihk)

configure_file(config.h.in config.h)

# actual build section - just subdirs
add_subdirectory(executer/kernel/mcctrl)
add_subdirectory(executer/user)
add_subdirectory(kernel)
add_subdirectory(tools/mcstat)
add_subdirectory(tools/crash)

configure_file(scripts/mcreboot-smp.sh.in mcreboot.sh @ONLY)
configure_file(scripts/mcstop+release-smp.sh.in mcstop+release.sh @ONLY)
configure_file(scripts/mcreboot.1in mcreboot.1 @ONLY)
configure_file(scripts/eclair-dump-backtrace.exp.in eclair-dump-backtrace.exp @ONLY)
install(PROGRAMS
"${CMAKE_CURRENT_BINARY_DIR}/mcreboot.sh"
"${CMAKE_CURRENT_BINARY_DIR}/mcstop+release.sh"
DESTINATION "${CMAKE_INSTALL_SBINDIR}")
install(PROGRAMS
"${CMAKE_CURRENT_BINARY_DIR}/eclair-dump-backtrace.exp"
DESTINATION "${CMAKE_INSTALL_BINDIR}")
install(FILES "scripts/irqbalance_mck.in"
DESTINATION "${CMAKE_INSTALL_SYSCONFDIR}")
install(FILES "${CMAKE_CURRENT_BINARY_DIR}/mcreboot.1"
DESTINATION "${CMAKE_INSTALL_MANDIR}/man1")


configure_file(scripts/mckernel.spec.in scripts/mckernel.spec @ONLY)
set(CPACK_SOURCE_PACKAGE_FILE_NAME "${CMAKE_PROJECT_NAME}-${MCKERNEL_VERSION}")
set(CPACK_SOURCE_IGNORE_FILES "/.git/;/build;/CMakeCache.txt$;/CMakeFiles$;/Makefile$")
set(CPACK_SOURCE_INSTALLED_DIRECTORIES "${CMAKE_SOURCE_DIR};/;${IHK_FULL_SOURCE_DIR};/ihk;${CMAKE_BINARY_DIR}/scripts;/scripts")
set(CPACK_SOURCE_GENERATOR "TGZ")
include(CPack)
add_custom_target(dist COMMAND ${CMAKE_MAKE_PROGRAM} package_source)


# config report
message("-------------------------------")
message("Option summary")
message("-------------------------------")
message("Build type: ${CMAKE_BUILD_TYPE}")
message("Build target: ${BUILD_TARGET}")
message("IHK_SOURCE_DIR: ${IHK_SOURCE_DIR} (relative to mckernel source tree)")
message("UNAME_R: ${UNAME_R}")
message("KERNEL_DIR: ${KERNEL_DIR}")
message("SYSTEM_MAP: ${SYSTEM_MAP}")
message("VMLINUX: ${VMLINUX}")
message("KBUILD_C_FLAGS: ${KBUILD_C_FLAGS}")
message("MAP_KERNEL_START: ${MAP_KERNEL_START}")
message("ENABLE_MEMDUMP: ${ENABLE_MEMDUMP}")
message("ENABLE_PERF: ${ENABLE_PERF}")
message("ENABLE_RUSAGE: ${ENABLE_RUSAGE}")
message("ENABLE_QLMPI: ${ENABLE_QLMPI}")
message("ENABLE_UTI: ${ENABLE_UTI}")
message("ENABLE_WERROR: ${ENABLE_WERROR}")
message("ENABLE_UBSAN: ${ENABLE_UBSAN}")
message("ENABLE_LINUX_WORK_IRQ_FOR_IKC: ${ENABLE_LINUX_WORK_IRQ_FOR_IKC}")
message("ENABLE_PER_CPU_ALLOC_CACHE: ${ENABLE_PER_CPU_ALLOC_CACHE}")
message("-------------------------------")
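For reference, a hedged out-of-tree configure/build sketch that exercises the cache options declared above (BUILD_TARGET, ENABLE_*); the install prefix and source path are placeholders:

```sh
# Out-of-tree build sketch; /path/to/mckernel and the install prefix are placeholders.
mkdir build && cd build
cmake \
    -DCMAKE_BUILD_TYPE=Release \
    -DCMAKE_INSTALL_PREFIX=/opt/mckernel \
    -DBUILD_TARGET=smp-x86 \
    -DENABLE_PERF=ON \
    -DENABLE_WERROR=OFF \
    /path/to/mckernel
make -j"$(nproc)"
make install
```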
KNOWN_BUGS.md (new file): 70 additions

@@ -0,0 +1,70 @@
Linux crash when offlining CPU (el7, hardware-specific)
=========================================================

On some hardware with el7 kernel, linux can crash due to a bug in the
irq handling when offlining CPUs (reserve cpu part of mcreboot)

Example stack trace:
```
[ 4147.052753] BUG: unable to handle kernel NULL pointer dereference at 0000000000000040
[ 4147.060677] IP: [<ffffffff8102ce26>] check_irq_vectors_for_cpu_disable+0x86/0x1c0
[ 4147.068226] PGD 1057e44067 PUD 105f1e7067 PMD 0
[ 4147.072935] Oops: 0000 [#1] SMP
[ 4147.076230] Modules linked in: mcctrl(OE) ihk_smp_x86_64(OE) ihk(OE) xt_CHECKSUM ipt_MASQUERADE nf_nat_masquerade_ipv4 tun rpcsec_gss_krb5 nfsv4 dns_resolver nfs fscache ip6t_rpfilter ipt_REJECT nf_reject_ipv4 ip6t_REJECT nf_reject_ipv6 xt_conntrack ip_set nfnetlink ebtable_nat ebtable_broute bridge stp llc ip6table_nat nf_conntrack_ipv6 nf_defrag_ipv6 nf_nat_ipv6 ip6table_mangle ip6table_security ip6table_raw iptable_nat nf_conntrack_ipv4 nf_defrag_ipv4 nf_nat_ipv4 nf_nat nf_conntrack iptable_mangle iptable_security iptable_raw ebtable_filter ebtables ip6table_filter ip6_tables iptable_filter rpcrdma ib_isert iscsi_target_mod ib_iser libiscsi scsi_transport_iscsi ib_srpt target_core_mod ib_srp scsi_transport_srp scsi_tgt ib_ipoib rdma_ucm ib_ucm ib_uverbs ib_umad rdma_cm ib_cm iw_cm mlx4_ib ib_core
[ 4147.148619] dm_mirror dm_region_hash dm_log dm_mod sb_edac edac_core intel_powerclamp coretemp ext4 mbcache jbd2 intel_rapl iosf_mbi kvm_intel kvm irqbypass crc32_pclmul ghash_clmulni_intel aesni_intel lrw gf128mul ipmi_ssif glue_helper ablk_helper joydev iTCO_wdt iTCO_vendor_support cryptd ipmi_si ipmi_devintf ipmi_msghandler pcspkr wmi mei_me mei lpc_ich i2c_i801 sg ioatdma shpchp nfsd auth_rpcgss nfs_acl lockd grace sunrpc ip_tables xfs libcrc32c mlx4_en sd_mod crc_t10dif crct10dif_generic mgag200 drm_kms_helper syscopyarea sysfillrect sysimgblt fb_sys_fops ttm isci igb drm mlx4_core libsas ahci libahci scsi_transport_sas libata crct10dif_pclmul ptp crct10dif_common pps_core crc32c_intel dca i2c_algo_bit i2c_core devlink [last unloaded: ihk]
[ 4147.215370] CPU: 6 PID: 38 Comm: migration/6 Tainted: G OE ------------ T 3.10.0-693.2.2.el7.x86_64 #1
[ 4147.225672] Hardware name: SGI.COM C1104G-RP5/X9DRG-HF, BIOS 3.0 10/25/2013
[ 4147.232747] task: ffff880174689fa0 ti: ffff8801746ac000 task.ti: ffff8801746ac000
[ 4147.240278] RIP: 0010:[<ffffffff8102ce26>] [<ffffffff8102ce26>] check_irq_vectors_for_cpu_disable+0x86/0x1c0
[ 4147.250275] RSP: 0018:ffff8801746afd30 EFLAGS: 00010046
[ 4147.255608] RAX: 0000000000000000 RBX: 000000000000004e RCX: 0000000000000000
[ 4147.262770] RDX: 0000000000000020 RSI: 000000000000005f RDI: 0000000000000023
[ 4147.269936] RBP: ffff8801746afd58 R08: 0000000000000001 R09: ffff88017f800490
[ 4147.277103] R10: 0000000000000000 R11: 0000000000000000 R12: 0000000000000006
[ 4147.284269] R13: 0000000000000000 R14: ffff88085ca82500 R15: 000000000000005f
[ 4147.291429] FS: 0000000000000000(0000) GS:ffff88085fb80000(0000) knlGS:0000000000000000
[ 4147.299556] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
[ 4147.305326] CR2: 0000000000000040 CR3: 0000001059704000 CR4: 00000000001407e0
[ 4147.312490] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
[ 4147.319659] DR3: 0000000000000000 DR6: 00000000ffff0ff0 DR7: 0000000000000400
[ 4147.326827] Stack:
[ 4147.328857] ffff8808f43078c8 ffff8808f4307850 0000000000000286 ffff8808f4307701
[ 4147.336384] 0000000000000000 ffff8801746afd70 ffffffff81052a82 0000000200000000
[ 4147.343915] ffff8801746afd88 ffffffff81693ca3 0000000000000003 ffff8801746afdc0
[ 4147.351447] Call Trace:
[ 4147.353921] [<ffffffff81052a82>] native_cpu_disable+0x12/0x40
[ 4147.359795] [<ffffffff81693ca3>] take_cpu_down+0x13/0x40
[ 4147.365236] [<ffffffff81116899>] multi_cpu_stop+0xd9/0x100
[ 4147.370850] [<ffffffff811167c0>] ? cpu_stop_should_run+0x50/0x50
[ 4147.376983] [<ffffffff81116ab7>] cpu_stopper_thread+0x97/0x150
[ 4147.382942] [<ffffffff816a8fad>] ? __schedule+0x39d/0x8b0
[ 4147.388461] [<ffffffff810b909f>] smpboot_thread_fn+0x12f/0x180
[ 4147.394406] [<ffffffff810b8f70>] ? lg_double_unlock+0x40/0x40
[ 4147.400276] [<ffffffff810b098f>] kthread+0xcf/0xe0
[ 4147.405182] [<ffffffff810b08c0>] ? insert_kthread_work+0x40/0x40
[ 4147.411319] [<ffffffff816b4f58>] ret_from_fork+0x58/0x90
[ 4147.418893] [<ffffffff810b08c0>] ? insert_kthread_work+0x40/0x40
[ 4147.426524] Code: 81 fb 00 01 00 00 0f 84 8a 00 00 00 89 d8 65 44 8b 3c 85 20 c6 00 00 45 85 ff 78 e1 44 89 ff e8 91 31 10 00 48 63 15 7e 10 af 00 <48> 8b 70 40 48 c7 c7 80 71 cf 81 49 89 c6 48 83 c2 3f 48 c1 fa
[ 4147.450352] RIP [<ffffffff8102ce26>] check_irq_vectors_for_cpu_disable+0x86/0x1c0
[ 4147.460135] RSP <ffff8801746afd30>
[ 4147.465154] CR2: 0000000000000040
```

This bug has been fixed upstream, but redhat will not backport the fixes.
You can work around the problem with a kpatch by backporting the three
following commits:

x86: irq: Get correct available vectors for cpu disable
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=ac2a55395eddccd6e3e39532df9869d61e97b2ee

x86/irq: Check for valid irq descriptor in check_irq_vectors_for_cpu_disable()
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=d97eb8966c91f2c9d05f0a22eb89ed5b76d966d1

x86/irq: Use proper locking in check_irq_vectors_for_cpu_disable()
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=cbb24dc761d95fe39a7a122bb1b298e9604cae15


Alternatively, since it is related to the irq configuration, it might
be possible to mitigate the issue by setting the irq affinities early
on and making sure none of the cpus that will be offlined have any irq
configured.
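The mitigation suggested in the last paragraph can be scripted through the standard /proc/irq interface; a minimal sketch, where the CPU list that stays with Linux is a placeholder:

```sh
#!/bin/bash
# Steer all steerable IRQs onto the CPUs that remain with Linux before
# mcreboot offlines the others. KEEP_CPUS is a placeholder value.
KEEP_CPUS="0-1"

for irq in /proc/irq/[0-9]*; do
    # Some IRQs (timers, IPIs, ...) cannot be re-steered; ignore those failures.
    echo "$KEEP_CPUS" > "$irq/smp_affinity_list" 2>/dev/null || true
done
```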
LICENSE (new file): 339 additions

@@ -0,0 +1,339 @@
GNU GENERAL PUBLIC LICENSE
Version 2, June 1991

Copyright (C) 1989, 1991 Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.

Preamble

The licenses for most software are designed to take away your
freedom to share and change it. By contrast, the GNU General Public
License is intended to guarantee your freedom to share and change free
software--to make sure the software is free for all its users. This
General Public License applies to most of the Free Software
Foundation's software and to any other program whose authors commit to
using it. (Some other Free Software Foundation software is covered by
the GNU Lesser General Public License instead.) You can apply it to
your programs, too.

When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
this service if you wish), that you receive source code or can get it
if you want it, that you can change the software or use pieces of it
in new free programs; and that you know you can do these things.

To protect your rights, we need to make restrictions that forbid
anyone to deny you these rights or to ask you to surrender the rights.
These restrictions translate to certain responsibilities for you if you
distribute copies of the software, or if you modify it.

For example, if you distribute copies of such a program, whether
gratis or for a fee, you must give the recipients all the rights that
you have. You must make sure that they, too, receive or can get the
source code. And you must show them these terms so they know their
rights.

We protect your rights with two steps: (1) copyright the software, and
(2) offer you this license which gives you legal permission to copy,
distribute and/or modify the software.

Also, for each author's protection and ours, we want to make certain
that everyone understands that there is no warranty for this free
software. If the software is modified by someone else and passed on, we
want its recipients to know that what they have is not the original, so
that any problems introduced by others will not reflect on the original
authors' reputations.

Finally, any free program is threatened constantly by software
patents. We wish to avoid the danger that redistributors of a free
program will individually obtain patent licenses, in effect making the
program proprietary. To prevent this, we have made it clear that any
patent must be licensed for everyone's free use or not licensed at all.

The precise terms and conditions for copying, distribution and
modification follow.

GNU GENERAL PUBLIC LICENSE
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION

0. This License applies to any program or other work which contains
a notice placed by the copyright holder saying it may be distributed
under the terms of this General Public License. The "Program", below,
refers to any such program or work, and a "work based on the Program"
means either the Program or any derivative work under copyright law:
that is to say, a work containing the Program or a portion of it,
either verbatim or with modifications and/or translated into another
language. (Hereinafter, translation is included without limitation in
the term "modification".) Each licensee is addressed as "you".

Activities other than copying, distribution and modification are not
covered by this License; they are outside its scope. The act of
running the Program is not restricted, and the output from the Program
is covered only if its contents constitute a work based on the
Program (independent of having been made by running the Program).
Whether that is true depends on what the Program does.

1. You may copy and distribute verbatim copies of the Program's
source code as you receive it, in any medium, provided that you
conspicuously and appropriately publish on each copy an appropriate
copyright notice and disclaimer of warranty; keep intact all the
notices that refer to this License and to the absence of any warranty;
and give any other recipients of the Program a copy of this License
along with the Program.

You may charge a fee for the physical act of transferring a copy, and
you may at your option offer warranty protection in exchange for a fee.

2. You may modify your copy or copies of the Program or any portion
of it, thus forming a work based on the Program, and copy and
distribute such modifications or work under the terms of Section 1
above, provided that you also meet all of these conditions:

a) You must cause the modified files to carry prominent notices
stating that you changed the files and the date of any change.

b) You must cause any work that you distribute or publish, that in
whole or in part contains or is derived from the Program or any
part thereof, to be licensed as a whole at no charge to all third
parties under the terms of this License.

c) If the modified program normally reads commands interactively
when run, you must cause it, when started running for such
interactive use in the most ordinary way, to print or display an
announcement including an appropriate copyright notice and a
notice that there is no warranty (or else, saying that you provide
a warranty) and that users may redistribute the program under
these conditions, and telling the user how to view a copy of this
License. (Exception: if the Program itself is interactive but
does not normally print such an announcement, your work based on
the Program is not required to print an announcement.)

These requirements apply to the modified work as a whole. If
identifiable sections of that work are not derived from the Program,
and can be reasonably considered independent and separate works in
themselves, then this License, and its terms, do not apply to those
sections when you distribute them as separate works. But when you
distribute the same sections as part of a whole which is a work based
on the Program, the distribution of the whole must be on the terms of
this License, whose permissions for other licensees extend to the
entire whole, and thus to each and every part regardless of who wrote it.

Thus, it is not the intent of this section to claim rights or contest
your rights to work written entirely by you; rather, the intent is to
exercise the right to control the distribution of derivative or
collective works based on the Program.

In addition, mere aggregation of another work not based on the Program
with the Program (or with a work based on the Program) on a volume of
a storage or distribution medium does not bring the other work under
the scope of this License.

3. You may copy and distribute the Program (or a work based on it,
under Section 2) in object code or executable form under the terms of
Sections 1 and 2 above provided that you also do one of the following:

a) Accompany it with the complete corresponding machine-readable
source code, which must be distributed under the terms of Sections
1 and 2 above on a medium customarily used for software interchange; or,

b) Accompany it with a written offer, valid for at least three
years, to give any third party, for a charge no more than your
cost of physically performing source distribution, a complete
machine-readable copy of the corresponding source code, to be
distributed under the terms of Sections 1 and 2 above on a medium
customarily used for software interchange; or,

c) Accompany it with the information you received as to the offer
to distribute corresponding source code. (This alternative is
allowed only for noncommercial distribution and only if you
received the program in object code or executable form with such
an offer, in accord with Subsection b above.)

The source code for a work means the preferred form of the work for
making modifications to it. For an executable work, complete source
code means all the source code for all modules it contains, plus any
associated interface definition files, plus the scripts used to
control compilation and installation of the executable. However, as a
special exception, the source code distributed need not include
anything that is normally distributed (in either source or binary
form) with the major components (compiler, kernel, and so on) of the
operating system on which the executable runs, unless that component
itself accompanies the executable.

If distribution of executable or object code is made by offering
access to copy from a designated place, then offering equivalent
access to copy the source code from the same place counts as
distribution of the source code, even though third parties are not
compelled to copy the source along with the object code.

4. You may not copy, modify, sublicense, or distribute the Program
except as expressly provided under this License. Any attempt
otherwise to copy, modify, sublicense or distribute the Program is
void, and will automatically terminate your rights under this License.
However, parties who have received copies, or rights, from you under
this License will not have their licenses terminated so long as such
parties remain in full compliance.

5. You are not required to accept this License, since you have not
signed it. However, nothing else grants you permission to modify or
distribute the Program or its derivative works. These actions are
prohibited by law if you do not accept this License. Therefore, by
modifying or distributing the Program (or any work based on the
Program), you indicate your acceptance of this License to do so, and
all its terms and conditions for copying, distributing or modifying
the Program or works based on it.

6. Each time you redistribute the Program (or any work based on the
Program), the recipient automatically receives a license from the
original licensor to copy, distribute or modify the Program subject to
these terms and conditions. You may not impose any further
restrictions on the recipients' exercise of the rights granted herein.
You are not responsible for enforcing compliance by third parties to
this License.

7. If, as a consequence of a court judgment or allegation of patent
infringement or for any other reason (not limited to patent issues),
conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot
distribute so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you
may not distribute the Program at all. For example, if a patent
license would not permit royalty-free redistribution of the Program by
all those who receive copies directly or indirectly through you, then
the only way you could satisfy both it and this License would be to
refrain entirely from distribution of the Program.

If any portion of this section is held invalid or unenforceable under
any particular circumstance, the balance of the section is intended to
apply and the section as a whole is intended to apply in other
circumstances.

It is not the purpose of this section to induce you to infringe any
patents or other property right claims or to contest validity of any
such claims; this section has the sole purpose of protecting the
integrity of the free software distribution system, which is
implemented by public license practices. Many people have made
generous contributions to the wide range of software distributed
through that system in reliance on consistent application of that
system; it is up to the author/donor to decide if he or she is willing
to distribute software through any other system and a licensee cannot
impose that choice.

This section is intended to make thoroughly clear what is believed to
be a consequence of the rest of this License.

8. If the distribution and/or use of the Program is restricted in
certain countries either by patents or by copyrighted interfaces, the
original copyright holder who places the Program under this License
may add an explicit geographical distribution limitation excluding
those countries, so that distribution is permitted only in or among
countries not thus excluded. In such case, this License incorporates
the limitation as if written in the body of this License.

9. The Free Software Foundation may publish revised and/or new versions
of the General Public License from time to time. Such new versions will
be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.

Each version is given a distinguishing version number. If the Program
specifies a version number of this License which applies to it and "any
later version", you have the option of following the terms and conditions
either of that version or of any later version published by the Free
Software Foundation. If the Program does not specify a version number of
this License, you may choose any version ever published by the Free Software
Foundation.

10. If you wish to incorporate parts of the Program into other free
programs whose distribution conditions are different, write to the author
to ask for permission. For software which is copyrighted by the Free
Software Foundation, write to the Free Software Foundation; we sometimes
make exceptions for this. Our decision will be guided by the two goals
|
||||
of preserving the free status of all derivatives of our free software and
|
||||
of promoting the sharing and reuse of software generally.
|
||||
|
||||
NO WARRANTY
|
||||
|
||||
11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
|
||||
FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
|
||||
OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
|
||||
PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
|
||||
OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS
|
||||
TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE
|
||||
PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
|
||||
REPAIR OR CORRECTION.
|
||||
|
||||
12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
|
||||
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
|
||||
REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
|
||||
INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
|
||||
OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
|
||||
TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
|
||||
YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
|
||||
PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
|
||||
POSSIBILITY OF SUCH DAMAGES.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
How to Apply These Terms to Your New Programs
|
||||
|
||||
If you develop a new program, and you want it to be of the greatest
|
||||
possible use to the public, the best way to achieve this is to make it
|
||||
free software which everyone can redistribute and change under these terms.
|
||||
|
||||
To do so, attach the following notices to the program. It is safest
|
||||
to attach them to the start of each source file to most effectively
|
||||
convey the exclusion of warranty; and each file should have at least
|
||||
the "copyright" line and a pointer to where the full notice is found.
|
||||
|
||||
<one line to give the program's name and a brief idea of what it does.>
|
||||
Copyright (C) <year> <name of author>
|
||||
|
||||
This program is free software; you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation; either version 2 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License along
|
||||
with this program; if not, write to the Free Software Foundation, Inc.,
|
||||
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
|
||||
Also add information on how to contact you by electronic and paper mail.
|
||||
|
||||
If the program is interactive, make it output a short notice like this
|
||||
when it starts in an interactive mode:
|
||||
|
||||
Gnomovision version 69, Copyright (C) year name of author
|
||||
Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
|
||||
This is free software, and you are welcome to redistribute it
|
||||
under certain conditions; type `show c' for details.
|
||||
|
||||
The hypothetical commands `show w' and `show c' should show the appropriate
|
||||
parts of the General Public License. Of course, the commands you use may
|
||||
be called something other than `show w' and `show c'; they could even be
|
||||
mouse-clicks or menu items--whatever suits your program.
|
||||
|
||||
You should also get your employer (if you work as a programmer) or your
|
||||
school, if any, to sign a "copyright disclaimer" for the program, if
|
||||
necessary. Here is a sample; alter the names:
|
||||
|
||||
Yoyodyne, Inc., hereby disclaims all copyright interest in the program
|
||||
`Gnomovision' (which makes passes at compilers) written by James Hacker.
|
||||
|
||||
<signature of Ty Coon>, 1 April 1989
|
||||
Ty Coon, President of Vice
|
||||
|
||||
This General Public License does not permit incorporating your program into
|
||||
proprietary programs. If your program is a subroutine library, you may
|
||||
consider it more useful to permit linking proprietary applications with the
|
||||
library. If this is what you want to do, use the GNU Lesser General
|
||||
Public License instead of this License.
|
||||
76
Makefile.in
@ -1,76 +0,0 @@
|
||||
TARGET = @TARGET@
|
||||
SBINDIR = @SBINDIR@
|
||||
ETCDIR = @ETCDIR@
|
||||
MANDIR = @MANDIR@
|
||||
|
||||
all::
|
||||
@(cd executer/kernel/mcctrl; make modules)
|
||||
@(cd executer/kernel/mcoverlayfs; make modules)
|
||||
@(cd executer/user; make)
|
||||
@case "$(TARGET)" in \
|
||||
attached-mic | builtin-x86 | builtin-mic | smp-x86) \
|
||||
(cd kernel; make) \
|
||||
;; \
|
||||
*) \
|
||||
echo "unknown target $(TARGET)" >&2 \
|
||||
exit 1 \
|
||||
;; \
|
||||
esac
|
||||
|
||||
install::
|
||||
@(cd executer/kernel/mcctrl; make install)
|
||||
@(cd executer/kernel/mcoverlayfs; make install)
|
||||
@(cd executer/user; make install)
|
||||
@case "$(TARGET)" in \
|
||||
attached-mic | builtin-x86 | builtin-mic | smp-x86) \
|
||||
(cd kernel; make install) \
|
||||
;; \
|
||||
*) \
|
||||
echo "unknown target $(TARGET)" >&2 \
|
||||
exit 1 \
|
||||
;; \
|
||||
esac
|
||||
@case "$(TARGET)" in \
|
||||
attached-mic) \
|
||||
mkdir -p -m 755 $(SBINDIR); \
|
||||
install -m 755 arch/x86/tools/mcreboot-attached-mic.sh $(SBINDIR)/mcreboot; \
|
||||
install -m 755 arch/x86/tools/mcshutdown-attached-mic.sh $(SBINDIR)/mcshutdown; \
|
||||
mkdir -p -m 755 $(MANDIR)/man1; \
|
||||
install -m 644 arch/x86/tools/mcreboot.1 $(MANDIR)/man1/mcreboot.1; \
|
||||
;; \
|
||||
builtin-x86) \
|
||||
mkdir -p -m 755 $(SBINDIR); \
|
||||
install -m 755 arch/x86/tools/mcreboot-builtin-x86.sh $(SBINDIR)/mcreboot; \
|
||||
install -m 755 arch/x86/tools/mcshutdown-builtin-x86.sh $(SBINDIR)/mcshutdown; \
|
||||
mkdir -p -m 755 $(MANDIR)/man1; \
|
||||
install -m 644 arch/x86/tools/mcreboot.1 $(MANDIR)/man1/mcreboot.1; \
|
||||
;; \
|
||||
smp-x86) \
|
||||
mkdir -p -m 755 $(SBINDIR); \
|
||||
install -m 755 arch/x86/tools/mcreboot-smp-x86.sh $(SBINDIR)/mcreboot.sh; \
|
||||
install -m 755 arch/x86/tools/mcstop+release-smp-x86.sh $(SBINDIR)/mcstop+release.sh; \
|
||||
mkdir -p -m 755 $(ETCDIR); \
|
||||
install -m 644 arch/x86/tools/irqbalance_mck.service $(ETCDIR)/irqbalance_mck.service; \
|
||||
install -m 644 arch/x86/tools/irqbalance_mck.in $(ETCDIR)/irqbalance_mck.in; \
|
||||
mkdir -p -m 755 $(MANDIR)/man1; \
|
||||
install -m 644 arch/x86/tools/mcreboot.1 $(MANDIR)/man1/mcreboot.1; \
|
||||
;; \
|
||||
*) \
|
||||
echo "unknown target $(TARGET)" >&2 \
|
||||
exit 1 \
|
||||
;; \
|
||||
esac
|
||||
|
||||
clean::
|
||||
@(cd executer/kernel/mcctrl; make clean)
|
||||
@(cd executer/kernel/mcoverlayfs; make clean)
|
||||
@(cd executer/user; make clean)
|
||||
@case "$(TARGET)" in \
|
||||
attached-mic | builtin-x86 | builtin-mic | smp-x86) \
|
||||
(cd kernel; make clean) \
|
||||
;; \
|
||||
*) \
|
||||
echo "unknown target $(TARGET)" >&2 \
|
||||
exit 1 \
|
||||
;; \
|
||||
esac
|
||||
540
NEWS.md
Normal file
@ -0,0 +1,540 @@
|
||||
=============================================
|
||||
What's new in version 1.7.0rc4 (Apr 15, 2020)
|
||||
=============================================
|
||||
|
||||
----------------------
|
||||
McKernel major updates
|
||||
----------------------
|
||||
1. arm64: Contiguous PTE support
|
||||
2. arm64: Scalable Vector Extension (SVE) support
|
||||
3. arm64: PMU overflow interrupt support
|
||||
4. xpmem: Support large page attachment
|
||||
5. arm64 port: Direct access to McKernel memory from Linux
|
||||
6. arm64 port: utility thread offloading, which spawns threads onto
   Linux CPUs
|
||||
7. eclair: support for live debug
|
||||
8. Crash utility extension
|
||||
9. Replace mcoverlayfs with a soft userspace overlay
|
||||
10. Build system is switched to cmake
|
||||
11. Core dump includes thread information
|
||||
|
||||
------------------------
|
||||
McKernel major bug fixes
|
||||
------------------------
|
||||
1. shmobj: Fix rusage counting for large page
|
||||
2. mcctrl control: task start_time changed to u64 nsec
|
||||
3. mcctrl: add handling for one more level of page tables
|
||||
4. Add kernel argument to turn on/off time sharing
|
||||
5. flatten_string/process env: realign env and clear trailing bits
|
||||
6. madvise: Add MADV_HUGEPAGE support
|
||||
8. mcctrl: remove in-kernel calls to syscalls
|
||||
9. arch_cpu_read_write_register: error return fix.
|
||||
10. set_cputime(): interrupt enable/disable fix.
|
||||
11. set_mempolicy(): Add mode check.
|
||||
12. mbind(): Fix memory_range_lock deadlock.
|
||||
13. ihk_ikc_recv: Record channel to packet for release
|
||||
14. Add set_cputime() kernel to kernel case and mode enum.
|
||||
15. execve: Call preempt_enable() before error-exit
|
||||
16. memory/x86_64: fix linux safe_kernel_map
|
||||
17. do_kill(): fix pids table when nr of threads is larger than num_processors
|
||||
18. shmget: Use transparent huge pages when page size isn't specified
|
||||
19. prctl: Add support for PR_SET_THP_DISABLE and PR_GET_THP_DISABLE
|
||||
20. monitor_init: fix undetected hang on highest numbered core
|
||||
21. init_process_stack: change premapped stack size based on arch
|
||||
22. x86 syscalls: add a bunch of XXat() delegated syscalls
|
||||
23. do_pageout: fix direct kernel-user access
|
||||
24. stack: add hwcap auxval
|
||||
25. perf counters: add arch-specific perf counters
|
||||
26. Added check of nohost to terminate_host().
|
||||
27. kmalloc: Fix address order in free list
|
||||
28. sysfs: use nr_cpu_ids for cpumasks (fixes libnuma parsing error on ARM)
|
||||
29. monitor_init: Use ihk_mc_cpu_info()
|
||||
30. Fix ThunderX2 write-combined PTE flag insanity
|
||||
31. ARM: eliminate zero page mapping (i.e, init_low_area())
|
||||
32. eliminate futex_cmpxchg_enabled check (not used and dereffed a NULL pointer)
|
||||
33. page_table: Fix return value of lookup_pte when ptl4 is blank
|
||||
34. sysfs: add missing symlinks for cpu/node
|
||||
35. Make Linux handler run when mmap to procfs.
|
||||
36. Separate mmap area from program loading (relocation) area
|
||||
37. move rusage into kernel ELF image (avoid dynamic alloc before NUMA init)
|
||||
38. arm: turn off cpu on panic
|
||||
39. page fault handler: protect thread accesses
|
||||
40. Register PPD and release_handler at the same time.
|
||||
41. fix to missing exclusive processing between terminate() and
|
||||
finalize_process().
|
||||
42. perfctr_stop: add flags to no 'disable_intens'
|
||||
43. fileobj, shmobj: free pages in object destructor (as opposed to page_unmap())
|
||||
44. clear_range_l1, clear_range_middle: Fix handling contiguous PTE
|
||||
45. do_mmap: don't pre-populate the whole file when asked for smaller segment
|
||||
46. invalidate_one_page: Support shmobj and contiguous PTE
|
||||
47. ubsan: fix undefined shifts
|
||||
48. x86: disable zero mapping and add a boot pt for ap trampoline
|
||||
49. rusage: Don't count PF_PATCH change
|
||||
50. Fixed time processing.
|
||||
51. copy_user_pte: vmap area not owned by McKernel
|
||||
52. gencore: Zero-clear ELF header and memory range table
|
||||
53. rpm: ignore CMakeCache.txt in dist and relax BuildRequires on cross build
|
||||
54. gencore: Allocate ELF header to heap instead of stack
|
||||
55. nanosleep: add cpu_pause() in spinwait loop
|
||||
56. init_process: add missing initializations to proc struct
|
||||
57. rus_vm_fault: always use a packet on the stack
|
||||
58. process stack: use PAGE_SIZE in aux vector
|
||||
59. copy_user_pte: base memobj copy on range & VR_PRIVATE
|
||||
60. arm64: ptrace: Fix overwriting 1st argument with return value
|
||||
61. page fault: use cow for private device mappings
|
||||
62. reproducible builds: remove most install paths in C code
|
||||
63. page fault: clear writable bit for non-dirtying access to shared ranges
|
||||
64. mcreboot/mcstop+release: support for regular user execution
|
||||
65. irqbalance_mck: replace extra service with service drop-in
|
||||
66. do_mmap: give addr argument a chance even if not MAP_FIXED
|
||||
67. x86: fix xchg() and cmpxchg() macros
|
||||
68. IHK: support for using Linux work IRQ as IKC interrupt (optional)
|
||||
69. MCS: fix ARM64 issue by using smp_XXX() functions (i.e., barrier()s)
|
||||
70. procfs: add number of threads to stat and status
|
||||
71. memory_range_lock: Fix deadlock in procfs/sysfs handler
|
||||
72. flush instruction cache at context switch time if necessary
|
||||
73. arm64: Fix PMU related functions
|
||||
74. page_fault_process_memory_range: Disable COW for VM region with zeroobj
|
||||
75. extend_process_region: Fall back to demand paging when not contiguous
|
||||
76. munmap: fix deadlock with remote pagefault on vm range lock
|
||||
77. procfs: if memory_range_lock fails, process later
|
||||
78. migrate-cpu: Prevent migration target from calling schedule() twice
|
||||
79. sched_request_migrate(): fix race condition between migration req and IRQs
|
||||
80. get_one_cpu_topology: Renumber core_id (physical core id)
|
||||
81. bb7e140 procfs cpuinfo: use sequence number as processor
|
||||
82. set_host_vma(): do NOT read protect Linux VMA
|
||||
|
||||
===========================================
|
||||
What's new in V1.6.0 (Nov 11, 2018)
|
||||
===========================================
|
||||
|
||||
-----------------------------------------------
|
||||
McKernel new features, improvements and changes
|
||||
-----------------------------------------------
|
||||
1. McKernel and Linux share one unified kernel virtual address space.
|
||||
That is, McKernel sections resides in Linux sections spared for
|
||||
modules. In this way, Linux can access the McKernel kernel memory
|
||||
area.
|
||||
2. hugetlbfs support
|
||||
3. IHK is now included as a git submodule
|
||||
4. Debug messages are turned on/off on a per-source-file basis at run-time.
|
||||
5. McKernel is prohibited from accessing physical memory ranges that
   Linux didn't give to it.
|
||||
6. UTI (capability to spawn a thread on Linux CPU) improvement:
|
||||
* System calls issued from the thread are hooked by modifying the
|
||||
binary in memory.
|
||||
|
||||
---------------------------
|
||||
McKernel bug fixes (digest)
|
||||
---------------------------
|
||||
#<num> below corresponds to the redmine issue number
|
||||
(https://postpeta.pccluster.org/redmine/).
|
||||
|
||||
1. #926: shmget: Hide object with IPC_RMID from shmget
|
||||
2. #1028: init_process: Inherit parent cpu_set
|
||||
3. #995: Fix shebang recorded in argv[0]
|
||||
4. #1024: Fix VMAP virtual address leak
|
||||
5. #1109: init_process_stack: Support "ulimit -s unlimited"
|
||||
6. x86 mem init: do not map identity mapping
|
||||
7. mcexec_wait_syscall: requeue potential request on interrupted wait
|
||||
8. mcctrl_ikc_send_wait: fix interrupt with do_frees == NULL
|
||||
9. pager_req_read: handle short read
|
||||
10. kprintf: only call eventfd() if it is safe to interrupt
|
||||
11. process_procfs_request: Add Pid to /proc/<PID>/status
|
||||
12. terminate: fix oversubscribe hang when waiting for other threads on same CPU to die
|
||||
13. mcexec: Do not close fd returned to mckernel side
|
||||
14. #976: execve: Clear sigaltstack and fp_regs
|
||||
15. #1002: perf_event: Specify counter by bit_mask on start/stop
|
||||
16. #1027: schedule: Don't reschedule immediately when wake up on migrate
|
||||
17. mcctrl: lookup unexported symbols at runtime
|
||||
18. __sched_wakeup_thread: Notify interrupt_exit() of re-schedule
|
||||
19. futex_wait_queue_me: Spin-sleep when timeout and idle_halt is specified
|
||||
20. #1167: ihk_os_getperfevent,setperfevent: Timeout IKC sent by mcctrl
|
||||
21. devobj: fix object size (POSTK_DEBUG_TEMP_FIX_36)
|
||||
22. mcctrl: remove rus page cache
|
||||
23. #1021: procfs: Support multiple reads of e.g. /proc/*/maps
|
||||
24. #1006: wait: Delay wake-up parent within switch context
|
||||
25. #1164: mem: Check if phys-mem is within the range of McKernel memory
|
||||
26. #1039: page_fault_process_memory_range: Remove ihk_mc_map_virtual for CoW of device map
|
||||
27. partitioned execution: pass process rank to LWK
|
||||
28. process/vm: implement access_ok()
|
||||
29. spinlock: rewrite spinlock to use Linux ticket head/tail format
|
||||
30. #986: Fix deadlock involving mmap_sem and memory_range_lock
|
||||
31. Prevent one CPU from getting chosen by concurrent forks
|
||||
32. #1009: check_signal: system call restart is done only once
|
||||
33. #1176: syscall: the signal received during system call processing is not processed.
|
||||
34. #1036 syscall_time: Handle by McKernel
|
||||
35. #1165 do_syscall: Delegate system calls to the mcexec with the same pid
|
||||
36. #1194 execve: Fix calling ptrace_report_signal after preemption is disabled
|
||||
37. #1005 coredump: Exclude special areas
|
||||
38. #1018 procfs: Fix pread/pwrite to procfs fail when specified size is bigger than 4MB
|
||||
39. #1180 sched_setaffinity: Check migration after decrementing in_interrupt
|
||||
40. #771, #1179, #1143 ptrace supports threads
|
||||
41. #1189 procfs/do_fork: wait until procfs entries are registered
|
||||
42. #1114 procfs: add '/proc/pid/stat' to mckernel side and fix its comm
|
||||
43. #1116 mcctrl procfs: check entry was returned before using it
|
||||
44. #1167 ihk_os_getperfevent,setperfevent: Return -ETIME when IKC timeouts
|
||||
45. mcexec/execve: fix shebangs handling
|
||||
46. procfs: handle 'comm' on mckernel side
|
||||
47. ihk_os_setperfevent: Return number of registered events
|
||||
48. mcexec: fix terminating zero after readlink()
|
||||
|
||||
===========================================
|
||||
What's new in V1.5.1 (July 9, 2018)
|
||||
===========================================
|
||||
|
||||
-----------------------------------------------
|
||||
McKernel new features, improvements and changes
|
||||
-----------------------------------------------
|
||||
1. Watchdog timer to detect hang of McKernel
|
||||
mcexec prints out the following line to its stderr when a hang of
|
||||
McKernel is detected.
|
||||
|
||||
mcexec detected hang of McKernel
|
||||
|
||||
The watchdog timer is enabled by passing -i <timeout_in_sec> option
|
||||
to mcreboot.sh. <timeout_in_sec> specifies the interval of checking
|
||||
if McKernel is alive.
|
||||
Example: mcreboot.sh -i 600: Detect a hang with a 10-minute check interval
|
||||
|
||||
The detailed step of the hang detection is as follows.
|
||||
(1) mcexec acquires an eventfd for notification from IHK and performs
|
||||
epoll() on it.
|
||||
(2) A daemon called ihkmond monitors the state of McKernel periodically
|
||||
with the interval specified by the -i option. It judges that
|
||||
McKernel is hanging and notifies mcexec via the eventfd if its
|
||||
state hasn't changed since the last check.
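
For illustration, the mcexec side of step (1) amounts to an epoll wait on
that eventfd. The sketch below is illustrative only; the descriptor name,
includes and error handling are assumptions, not the actual mcexec code.

/* Illustrative sketch, not actual mcexec code. */
#include <stdio.h>
#include <unistd.h>
#include <sys/epoll.h>

void wait_for_hang(int ihk_eventfd)
{
    struct epoll_event ev = { .events = EPOLLIN, .data.fd = ihk_eventfd };
    struct epoll_event out;
    int epfd = epoll_create1(0);

    epoll_ctl(epfd, EPOLL_CTL_ADD, ihk_eventfd, &ev);
    if (epoll_wait(epfd, &out, 1, -1) == 1) {
        /* ihkmond judged McKernel to be hanging and signalled the eventfd */
        fprintf(stderr, "mcexec detected hang of McKernel\n");
    }
    close(epfd);
}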
|
||||
|
||||
2. Documentation
|
||||
man page: The installation directory is changed to <install_dir>/share/man
|
||||
|
||||
---------------------------
|
||||
McKernel bug fixes (digest)
|
||||
---------------------------
|
||||
1. #1146: pager_req_map(): do not take mmap_sem if not needed
|
||||
2. #1135: prepare_process_ranges_args_envs(): fix saving cmdline
|
||||
3. #1144: fileobj/devobj: record path name
|
||||
4. #1145: fileobj: use MCS locks for per-file page hash
|
||||
5. #1076: mcctrl: refactor prepare_image into new generic ikc send&wait
|
||||
6. #1072: execve: fix execve with oversubscribing
|
||||
7. #1132: execve: use thread variable instead of cpu_local_var(current)
|
||||
8. #1117: mprotect: do not set page table writable for cow pages
|
||||
9. #1143: syscall wait4: add _WALL (POSTK_DEBUG_ARCH_DEP_44)
|
||||
10. #1064: rusage: Fix initialization of rusage->num_processors
|
||||
11. #1133: pager_req_unmap: Put per-process data at exit
|
||||
12. #731: do_fork: Propagate error code returned by mcexec
|
||||
13. #1149: execve: Reinitialize vm_regions's map area on execve
|
||||
14. #1065: procfs: Show file names in /proc/<PID>/maps
|
||||
15. #1112: mremap: Fix type of size arguments (from ssize_t to size_t)
|
||||
16. #1121: sched_getaffinity: Check arguments in the same order as in Linux
|
||||
17. #1137: mmap, mremap: Check arguments in the same order as in Linux
|
||||
18. #1122: fix return value of sched_getaffinity
|
||||
19. #732: fix: /proc/<PID>/maps outputs an unnecessary NULL character
|
||||
|
||||
===================================
|
||||
What's new in V1.5.0 (Apr 5, 2018)
|
||||
===================================
|
||||
|
||||
--------------------------------------
|
||||
McKernel new features and improvements
|
||||
--------------------------------------
|
||||
1. Aid for Linux version migration: Detect /proc, /sys format change
|
||||
between two kernel versions
|
||||
2. Swap out
|
||||
* Only swap-out anonymous pages for now
|
||||
3. Improve support of /proc/maps
|
||||
4. mcstat: Linux tool to show resource usage
|
||||
|
||||
---------------------------
|
||||
McKernel bug fixes (digest)
|
||||
---------------------------
|
||||
1. #727: execve: Fix memory leak when receiving SIGKILL
|
||||
2. #829: perf_event_open: Support PERF_TYPE_HARDWARE and PERF_TYPE_HW_CACHE
|
||||
3. #906: mcexec: Check return code of fork()
|
||||
4. #1038: mcexec: Timeout when incorrect value is given to -n option
|
||||
5. #943 #945 #946 #960 #961: mcexec: Support strace
|
||||
6. #1029: struct thread is not released with stress-test involving signal
|
||||
and futex
|
||||
7. #863 #870: Respond immediately to terminating signal when
|
||||
offloading system call
|
||||
8. #1119: translate_rva_to_rpa(): use 2MB blocks in 1GB pages on x86
|
||||
11. #898: Shutdown OS only after no in-flight IKC exist
|
||||
12. #882: release_handler: Destroy objects as the process which opened it
|
||||
13. #882: mcexec: Make child process exit if the parent is killed during
|
||||
fork()
|
||||
14. #925: XPMEM: Don't destroy per-process object of the parent
|
||||
15. #885: ptrace: Support the case where a process attaches its child
|
||||
16. #1031: sigaction: Support SA_RESETHAND
|
||||
17. #923: rus_vm_fault: Return error when a thread not performing
|
||||
system call offloading causes remote page fault
|
||||
18. #1032 #1033 #1034: getrusage: Fix ru_maxrss, RUSAGE_CHILDREN,
|
||||
ru_stime related bugs
|
||||
19. #1120: getrusage: Fix deadlock on thread->times_update
|
||||
20. #1123: Fix deadlock related to wait_queue_head_list_node
|
||||
21. #1124: Fix deadlock of calling terminate() from terminate()
|
||||
22. #1125: Fix deadlock related to thread status
|
||||
* Related functions are: hold_thread(), do_kill() and terminate()
|
||||
23. #1126: uti: Fix uti thread on the McKernel side blocks others in do_syscall()
|
||||
24. #1066: procfs: Show Linux /proc/self/cgroup
|
||||
25. #1127: prepare_process_ranges_args_envs(): fix generating saved_cmdline to
|
||||
avoid PF in strlen()
|
||||
26. #1128: ihk_mc_map/unmap_virtual(): do proper TLB invalidation
|
||||
27. #1043: terminate(): fix update_lock and threads_lock order to avoid deadlock
|
||||
28. #1129: mcreboot.sh: Save /proc/irq/*/smp_affinity to /tmp/mcreboot
|
||||
29. #1130: mcexec: drop READ_IMPLIES_EXEC from personality
|
||||
|
||||
--------------------
|
||||
McKernel workarounds
|
||||
--------------------
|
||||
1. Forbid CPU oversubscription
|
||||
* It can be turned on by mcreboot.sh -O option
|
||||
|
||||
|
||||
===================================
|
||||
What's new in V1.4.0 (Oct 30, 2017)
|
||||
===================================
|
||||
|
||||
-----------------------------------------------------------
|
||||
Feature: Abstracted event type support in perf_event_open()
|
||||
-----------------------------------------------------------
|
||||
PERF_TYPE_HARDWARE and PERF_TYPE_CACHE types are supported.
|
||||
|
||||
----------------------------------
|
||||
Clean-up: Direct user-space access
|
||||
----------------------------------
|
||||
Code that accesses user space directly (e.g., by passing a user-space
pointer to memcpy()) is made more portable across processor
architectures. The modification follows these rules:
|
||||
|
||||
1. Move the code section as it is to the architecture dependent
|
||||
directory if it is a part of the critical-path.
|
||||
2. Otherwise, rewrite the code section by using the portable methods.
|
||||
The methods include copy_from_user(), copy_to_user(),
|
||||
pte_get_phys() and phys_to_virt(); see the sketch below.
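
A minimal sketch of rule 2, assuming a hypothetical helper inside the
kernel where copy_from_user() is available; this is not code taken from
McKernel:

/* Hypothetical helper, for illustration of rule 2 only. */
int read_user_value(const unsigned long *user_ptr, unsigned long *out)
{
    /* Non-portable variant: memcpy(out, user_ptr, sizeof(*out)); */
    if (copy_from_user(out, user_ptr, sizeof(*out))) {
        return -EFAULT;   /* the user pointer was not accessible */
    }
    return 0;
}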
|
||||
|
||||
--------------------------------
|
||||
Test: MPI and OpenMP micro-bench
|
||||
--------------------------------
|
||||
The performance figures of MPI and OpenMP primitives are compared with
|
||||
those of Linux by using Intel MPI Benchmarks and EPCC OpenMP Micro
|
||||
Benchmark.
|
||||
|
||||
|
||||
===================================
|
||||
What's new in V1.3.0 (Sep 30, 2017)
|
||||
===================================
|
||||
|
||||
--------------------
|
||||
Feature: Kernel dump
|
||||
--------------------
|
||||
1. A dump level of "only kernel memory" is added.
|
||||
|
||||
The following two levels are available now:
|
||||
0: Dump all
|
||||
24: Dump only kernel memory
|
||||
|
||||
The dump level can be set by the -d option of ihkosctl or by the argument
|
||||
for ihk_os_makedumpfile(), as shown in the following examples:
|
||||
|
||||
Command: ihkosctl 0 dump -d 24
|
||||
Function call: ihk_os_makedumpfile(0, NULL, 24, 0);
|
||||
|
||||
2. Dump file is created when Linux panics.
|
||||
|
||||
The dump level can be set by the dump_level kernel argument, as shown in the
|
||||
following example:
|
||||
|
||||
ihkosctl 0 kargs "hidos dump_level=24"
|
||||
|
||||
The IHK dump function is registered to panic_notifier_list when creating
|
||||
/dev/mcdX and called when Linux panics.
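
For reference, registering a callback on panic_notifier_list uses the
standard Linux notifier API. The sketch below is illustrative only: the
callback and variable names are assumptions, not the actual IHK symbols,
and the header that declares panic_notifier_list varies by kernel version.

#include <linux/kernel.h>
#include <linux/notifier.h>

static int mcdump_on_panic(struct notifier_block *nb, unsigned long ev, void *p)
{
    /* write the McKernel dump file here */
    return NOTIFY_DONE;
}

static struct notifier_block mcdump_nb = { .notifier_call = mcdump_on_panic };

/* called when /dev/mcdX is created */
static void register_mcdump_notifier(void)
{
    atomic_notifier_chain_register(&panic_notifier_list, &mcdump_nb);
}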
|
||||
|
||||
-----------------------------
|
||||
Feature: Quick Process Launch
|
||||
-----------------------------
|
||||
|
||||
MPI process launch time and some of the initialization time can be
|
||||
reduced in applications consisting of multiple MPI programs which are
|
||||
launched in turn in the job script.
|
||||
|
||||
The following two steps should be performed to use this feature:
|
||||
1. Replace mpiexec with ql_mpiexec_start and add some lines for
|
||||
ql_mpiexec_finalize in the job script
|
||||
2. Modify the app so that it can repeat calculations and wait for the
|
||||
instructions from ql_mpiexec_{start,finalize} at the end of the
|
||||
loop
|
||||
|
||||
The first step is explained using an example. Assume the original job
|
||||
script looks like this:
|
||||
|
||||
/* Execute ensemble simulation and then data assimilation, and repeat this
|
||||
ten times */
|
||||
for i in {1..10}; do
|
||||
|
||||
/* Each ensemble simulation execution uses 100 nodes, launch ten of them
|
||||
in parallel */
|
||||
for j in {1..10}; do
|
||||
mpiexec -n 100 -machinefile ./list1_$j p1.out a1 & pids[$j]=$!;
|
||||
done
|
||||
|
||||
/* Wait until the ten ensemble simulation programs finish */
|
||||
for j in {1..10}; do wait ${pids[$j]}; done
|
||||
|
||||
/* Launch one data assimilation program using 1000 nodes */
|
||||
mpiexec -n 1000 -machinefile ./list2 p2.out a2
|
||||
done
|
||||
|
||||
The job script should be modified like this:
|
||||
|
||||
for i in {1..10}; do
|
||||
for j in {1..10}; do
|
||||
/* Replace mpiexec with ql_mpiexec_start */
|
||||
ql_mpiexec_start -n 100 -machinefile ./list1_$j p1.out a1 & pids[$j]=$!;
|
||||
done
|
||||
|
||||
for j in {1..10}; do wait ${pids[$j]}; done
|
||||
|
||||
ql_mpiexec_start -n 1000 -machinefile ./list2 p2.out a2
|
||||
done
|
||||
|
||||
/* p1.out and p2.out don't exit but are waiting for the next calculation.
|
||||
So tell them to exit */
|
||||
for j in {1..10}; do
|
||||
ql_mpiexec_finalize -machinefile ./list1_$j p1.out a1;
|
||||
done
|
||||
ql_mpiexec_finalize -machinefile ./list2 p2.out a2;
|
||||
|
||||
|
||||
The second step is explained using pseudo-code.
|
||||
|
||||
MPI_Init();
|
||||
Prepare data exchange with preceding / following MPI programs
|
||||
loop:
|
||||
foreach Fortran module
|
||||
Initialize data using command-line arguments, parameter files,
|
||||
environment variables
|
||||
Input data from preceding MPI programs / Read snap-shot
|
||||
Perform main calculation
|
||||
Output data to following MPI programs / Write snap-shot
|
||||
/* ql_client() waits for command of ql_mpiexec_{start,finish} */
|
||||
if (ql_client() == QL_CONTINUE) { goto loop; }
|
||||
MPI_Finalize();
|
||||
|
||||
qlmpilib.h should be included in the code and libql{mpi,fort}.so
|
||||
should be linked to the executable file.
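
A minimal C skeleton of that loop might look as follows. The name
ql_client() and the QL_CONTINUE constant are taken from the pseudo-code
above; everything else (including the exact signature) is an assumption.

#include <mpi.h>
#include <qlmpilib.h>

int main(int argc, char **argv)
{
    MPI_Init(&argc, &argv);
    do {
        /* input data / perform main calculation / output data */
    } while (ql_client() == QL_CONTINUE);  /* wait for ql_mpiexec_{start,finalize} */
    MPI_Finalize();
    return 0;
}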
|
||||
|
||||
|
||||
========================
|
||||
Restrictions on McKernel
|
||||
========================
|
||||
|
||||
1. Pseudo devices such as /dev/mem and /dev/zero are not mmap()ed
|
||||
correctly even if mmap() returns success. An access to such a
|
||||
mapping receives the SIGSEGV signal.
|
||||
|
||||
2. clone() supports only the following flags. All the other flags
|
||||
cause clone() to return error or are simply ignored.
|
||||
|
||||
* CLONE_CHILD_CLEARTID
|
||||
* CLONE_CHILD_SETTID
|
||||
* CLONE_PARENT_SETTID
|
||||
* CLONE_SETTLS
|
||||
* CLONE_SIGHAND
|
||||
* CLONE_VM
|
||||
|
||||
3. PAPI has the following restriction.
|
||||
|
||||
* Number of counters a user can use at the same time is up to the
|
||||
number of the physical counters in the processor.
|
||||
|
||||
4. msync writes back only the modified pages mapped by the calling process.
|
||||
|
||||
5. The following syscalls always return the ENOSYS error.
|
||||
|
||||
* migrate_pages()
|
||||
* move_pages()
|
||||
* set_robust_list()
|
||||
|
||||
6. The following syscalls always return the EOPNOTSUPP error.
|
||||
|
||||
* arch_prctl(ARCH_SET_GS)
|
||||
* signalfd()
|
||||
|
||||
7. signalfd4() returns an fd, but signals are not notified through the
|
||||
fd.
|
||||
|
||||
8. setrlimit() sets the limit values, but they are not enforced.
|
||||
|
||||
9. Address randomization is not supported.
|
||||
|
||||
10. brk() extends the heap by more than requested when the -h
(--extend-heap-by=)<step> option of mcexec is used with a value
larger than 4 KiB. syscall_pwrite02 of LTP would fail for this
reason: the test expects the end of the heap to be set to the same
address as the argument of sbrk() and expects a segmentation
violation when it tries to access the memory area right next to the
boundary. However, the optimization sets the end to a value larger
than requested, so the expected segmentation violation doesn't occur.
|
||||
|
||||
11. setpriority()/getpriority() won't work. They might set/get the
|
||||
priority of a random mcexec thread. This is because there's no
|
||||
fixed correspondence between a McKernel thread which issues the
|
||||
system call and an mcexec thread which handles the offload request.
|
||||
|
||||
12. mbind() can set the policy but it is not used when allocating
|
||||
physical pages.
|
||||
|
||||
13. MPOL_F_RELATIVE_NODES and MPOL_INTERLEAVE flags for
|
||||
set_mempolicy()/mbind() are not supported.
|
||||
|
||||
14. The MPOL_BIND policy for set_mempolicy()/mbind() works the same
|
||||
as the MPOL_PREFERRED policy. That is, the physical page allocator
|
||||
doesn't give up the allocation when the specified nodes are
|
||||
running out of pages but continues to search pages in the other
|
||||
nodes.
|
||||
|
||||
15. Kernel dump on Linux panic requires Linux kernel CentOS-7.4 and
|
||||
later. In addition, crash_kexec_post_notifiers kernel argument
|
||||
must be given to Linux kernel.
|
||||
|
||||
16. setfsuid()/setfsgid() cannot change the id of the calling thread.
|
||||
Instead, it changes that of the mcexec worker thread which takes
|
||||
the system-call offload request.
|
||||
|
||||
17. mmap (hugeTLBfs): The physical pages corresponding to a map are
|
||||
released when no McKernel process exists. The next map gets fresh
|
||||
physical pages.
|
||||
|
||||
18. Sticky bit on executable file has no effect.
|
||||
|
||||
19. Linux (RHEL-7 for x86_64) could hang when offlining CPUs in the
|
||||
process of booting McKernel due to a Linux bug, found in
Linux-3.10 and fixed in a later version. One way to circumvent
|
||||
this is to always assign the same CPU set to McKernel.
|
||||
|
||||
20. madvise:
|
||||
* MADV_HWPOISON and MADV_SOFT_OFFLINE always return -EPERM.
* MADV_MERGEABLE and MADV_UNMERGEABLE always return -EINVAL.
* MADV_HUGEPAGE and MADV_NOHUGEPAGE on a file map return -EINVAL
|
||||
(It succeeds on RHEL-8 for aarch64).
|
||||
|
||||
21. brk() and mmap() don't report out-of-memory through their return
value. Instead, the error is reported at page-fault time.
|
||||
|
||||
22. Anonymous mmap pre-maps the requested number of pages when contiguous
|
||||
pages are available. Demand paging is used when not available.
|
||||
|
||||
23. Mixing page sizes in an anonymous shared mapping is not allowed.
mmap creates a vm_range with a single page size, and a munmap or
mremap that requires a smaller page size changes the size of all
pages of the vm_range.
|
||||
|
||||
24. ihk_os_getperfevent() could time-out when invoked from Fujitsu TCS
|
||||
(job-scheduler).
|
||||
|
||||
25. The behaviors of madvise and mbind are changed to do nothing and
|
||||
report success as a workaround for Fugaku.
|
||||
|
||||
26. mmap() allows unlimited overcommit. Note that it corresponds to
|
||||
setting sysctl ``vm.overcommit_memory`` to 1.
|
||||
290
README.md
Normal file
@ -0,0 +1,290 @@
|
||||

|
||||
-------------------------
|
||||
|
||||
IHK/McKernel is a light-weight multi-kernel operating system designed for high-end supercomputing. It runs Linux and McKernel, a light-weight kernel (LWK), side-by-side inside compute nodes and aims at the following:
|
||||
|
||||
- Provide scalable and consistent execution of large-scale parallel scientific applications, but at the same time maintain the ability to rapidly adapt to new hardware features and emerging programming models
|
||||
- Provide efficient memory and device management so that resource contention and data movement are minimized at the system level
|
||||
- Eliminate OS noise by isolating OS services in Linux and provide jitter free execution on the LWK
|
||||
- Support the full POSIX/Linux APIs by selectively offloading (slow-path) system calls to Linux
|
||||
|
||||
## Contents
|
||||
|
||||
- [Background](#background-and-motivation)
|
||||
- [Architectural Overview](#architectural-overview)
|
||||
- [Installation](#installation)
|
||||
- [The Team](#the-team)
|
||||
|
||||
## Background and Motivation
|
||||
|
||||
With the growing complexity of high-end supercomputers, the current system software stack faces significant challenges as we move forward to exascale and beyond. The need to deal with an extreme degree of parallelism, heterogeneous architectures, multiple levels of memory hierarchy, power constraints, etc., calls for operating systems that can rapidly adapt to new hardware requirements and that can support novel programming paradigms and runtime systems. At the same time, a new class of more dynamic and complex applications is on the horizon, with an increasing demand for application constructs such as in-situ analysis, workflows, and elaborate monitoring and performance tools. This complexity relies not only on the rich features of POSIX, but in particular also on Linux APIs such as the */proc* and */sys* filesystems.
|
||||
|
||||
|
||||
##### Two Traditional HPC OS Approaches
|
||||
|
||||
Traditionally, light-weight operating systems specialized for HPC followed two approaches to tackle scalable execution of large-scale applications. In the full weight kernel (FWK) approach, a full Linux environment is taken as the basis, and features that inhibit attaining HPC scalability are removed, i.e., making it light-weight. The pure light-weight kernel (LWK) approach, on the other hand, starts from scratch and effort is undertaken to add sufficient functionality so that it provides a familiar API, typically something close to that of a general purpose OS, while at the same time it retains the desired scalability and reliability attributes. Neither of these approaches yields a fully Linux compatible environment.
|
||||
|
||||
|
||||
##### The Multi-kernel Approach
|
||||
|
||||
A hybrid approach recognized recently by the system software community is to run Linux simultaneously with a lightweight kernel on compute nodes, and multiple research projects are now pursuing this direction. The basic idea is that simulations run on an HPC-tailored lightweight kernel, ensuring the necessary isolation for noiseless execution of parallel applications, while Linux is leveraged so that the full POSIX API is supported. Additionally, the small code base of the LWK can facilitate rapid prototyping for new, exotic hardware features. Nevertheless, the questions of how to share node resources between the two types of kernels, where device drivers execute, how exactly the two kernels interact with each other, and to what extent they are integrated remain subjects of ongoing debate.
|
||||
|
||||
|
||||
|
||||
## Architectural Overview
|
||||
|
||||
At the heart of the stack is a low-level software infrastructure called Interface for Heterogeneous Kernels (IHK). IHK is a general framework that provides capabilities for partitioning resources in a many-core environment (e.g., CPU cores and physical memory) and it enables management of lightweight kernels. IHK can allocate and release host resources dynamically, and no reboot of the host machine is required when altering the configuration. IHK also provides a low-level inter-kernel messaging infrastructure, called the Inter-Kernel Communication (IKC) layer. An architectural overview of the main system components is shown below.
|
||||
|
||||
|
||||

|
||||
|
||||
|
||||
McKernel is a lightweight kernel written from scratch. It is designed for HPC and is booted from IHK. McKernel retains a binary-compatible ABI with Linux; however, it implements only a small set of performance-sensitive system calls and the rest are offloaded to Linux. Specifically, McKernel has its own memory management, it supports processes and multi-threading with a simple round-robin cooperative (tick-less) scheduler, and it implements signaling. It also allows inter-process memory mappings and it provides interfaces to hardware performance counters.
|
||||
|
||||
### Functionality
|
||||
|
||||
An overview of some of the principal functionalities of the IHK/McKernel stack is provided below.
|
||||
|
||||
#### System Call Offloading
|
||||
|
||||
System call forwarding in McKernel is implemented as follows. When an offloaded system call occurs, McKernel marshals the system call number along with its arguments and sends a message to Linux via a dedicated IKC channel. The corresponding proxy process running on Linux is by default waiting for system call requests through an ioctl() call into IHK’s system call delegator kernel module. The delegator kernel module’s IKC interrupt handler wakes up the proxy process, which returns to userspace and simply invokes the requested system call. Once it obtains the return value, it instructs the delegator module to send the result back to McKernel, which subsequently passes the value to user-space.
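
The request that crosses the IKC channel can be pictured as a small packet carrying the system call number plus its arguments. The sketch below is purely illustrative; the structure, the channel variable, and the IKC function names are assumptions, not the actual IHK/McKernel API.

~~~~
/* Illustrative only: names are assumptions, not the real IHK/McKernel API. */
#include <string.h>

struct ikc_channel;                        /* hypothetical IKC channel handle */
extern struct ikc_channel *syscall_channel;
long ikc_send(struct ikc_channel *ch, const void *buf, unsigned long len);
long ikc_wait_reply(struct ikc_channel *ch);

struct syscall_request {
    unsigned long number;                  /* system call number */
    unsigned long args[6];                 /* marshalled arguments */
};

long offload_syscall(unsigned long number, const unsigned long args[6])
{
    struct syscall_request req = { .number = number };

    memcpy(req.args, args, sizeof(req.args));
    ikc_send(syscall_channel, &req, sizeof(req));   /* wake the Linux proxy via IKC */
    return ikc_wait_reply(syscall_channel);         /* return value produced on Linux */
}
~~~~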
|
||||
|
||||
#### Unified Address Space
|
||||
|
||||
The unified address space model in IHK/McKernel ensures that offloaded system calls can seamlessly resolve arguments even in the case of pointers. This mechanism is depicted below and is implemented as follows.
|
||||
|
||||
|
||||

|
||||
|
||||
|
||||
First, the proxy process is compiled as a position independent binary, which enables us to map the code and data segments specific to the proxy process to an address range which is explicitly excluded from McKernel’s user space. The grey box on the right side of the figure demonstrates the excluded region. Second, the entire valid virtual address range of McKernel’s application user-space is covered by a special mapping in the proxy process for which we use a pseudo file mapping in Linux. This mapping is indicated by the blue box on the left side of the figure.
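
As a conceptual sketch of the second point, the proxy covers McKernel's user address range with one fixed shared mapping of the pseudo file. The constants and the descriptor name below are assumptions for illustration, not the real implementation.

~~~~
/* Conceptual sketch only: constants and names are assumptions. */
#include <sys/mman.h>

#define LWK_USER_START 0x0000000000400000UL   /* assumed start of McKernel user space */
#define LWK_USER_SIZE  0x0000700000000000UL   /* assumed size of McKernel user space */

/* rus_fd: descriptor of the pseudo file exposed by the delegator module */
void *map_lwk_user_space(int rus_fd)
{
    return mmap((void *)LWK_USER_START, LWK_USER_SIZE,
                PROT_READ | PROT_WRITE,
                MAP_SHARED | MAP_FIXED, rus_fd, 0);
}
~~~~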
|
||||
|
||||
|
||||
## Installation
|
||||
|
||||
For a smooth experience, we recommend the following combination of OS distributions and platforms:
|
||||
|
||||
- CentOS 7.3+ running on Intel Xeon, Intel Xeon Phi, or Fujitsu A64FX
|
||||
|
||||
|
||||
##### 1. Change SELinux settings
|
||||
Log in as root and disable SELinux:
|
||||
|
||||
~~~~
|
||||
vim /etc/selinux/config
|
||||
~~~~
|
||||
|
||||
In that file, set SELINUX=disabled.
|
||||
|
||||
##### 2. Reboot the host machine
|
||||
~~~~
|
||||
sudo reboot
|
||||
~~~~
|
||||
|
||||
##### 3. Prepare packages, kernel symbol table file
|
||||
You will need the following packages installed:
|
||||
|
||||
~~~~
|
||||
sudo yum install cmake kernel-devel binutils-devel systemd-devel numactl-devel gcc make nasm git libdwarf-devel
|
||||
~~~~
|
||||
|
||||
Note that to install libdwarf-devel on RHEL-8.2, you need to enable the CodeReady Linux Builder (CLB) repository and the EPEL repository; the CLB repository can be enabled with the following command:
|
||||
~~~~
|
||||
sudo subscription-manager repos --enable codeready-builder-for-rhel-8-$(/bin/arch)-rpms
|
||||
~~~~
|
||||
|
||||
Grant read permission to the System.map file of your kernel version:
|
||||
|
||||
~~~~
|
||||
sudo chmod a+r /boot/System.map-`uname -r`
|
||||
~~~~
|
||||
|
||||
##### 4. Obtain sources and compile the kernel
|
||||
|
||||
Clone the source code:
|
||||
|
||||
~~~~
|
||||
mkdir -p ~/src/ihk+mckernel/
|
||||
cd ~/src/ihk+mckernel/
|
||||
git clone --recursive -b development https://github.com/RIKEN-SysSoft/mckernel.git
|
||||
~~~~
|
||||
|
||||
(Optional) Check out a specific branch or version:
|
||||
|
||||
~~~~
|
||||
cd mckernel
|
||||
git checkout <pathspec>
|
||||
git submodule update
|
||||
~~~~
|
||||
|
||||
For example, if you want to try the development branch, use "development" as the pathspec. If you want to try the prerelease version 1.7.0-0.2, use "1.7.0-0.2".
|
||||
|
||||
###### 4.1 Install with cmake
|
||||
|
||||
Configure and compile:
|
||||
|
||||
~~~~
|
||||
mkdir -p build && cd build
|
||||
cmake -DCMAKE_INSTALL_PREFIX=${HOME}/ihk+mckernel $HOME/src/ihk+mckernel/mckernel
|
||||
make -j install
|
||||
~~~~
|
||||
|
||||
The IHK kernel modules and McKernel kernel image should be installed under the **ihk+mckernel** folder in your home directory.
|
||||
|
||||
###### 4.2 Install with rpm
|
||||
|
||||
Build rpm:
|
||||
|
||||
~~~~
|
||||
mkdir -p build && cd build
|
||||
cmake $HOME/src/ihk+mckernel/mckernel
|
||||
make dist
|
||||
cp mckernel-<version>.tar.gz <rpmbuild>/SOURCES
|
||||
rpmbuild -ba scripts/mckernel.spec
|
||||
sudo rpm -ivh <rpmbuild>/RPMS/<arch>/mckernel-<version>-<release>_<linux_kernel_ver>_<dist>.<arch>.rpm
|
||||
~~~~
|
||||
|
||||
The IHK kernel modules and McKernel kernel image are installed under the system directory.
|
||||
|
||||
##### 5. Boot McKernel
|
||||
|
||||
A boot script called mcreboot.sh is provided under sbin in the install folder. To boot on logical CPU 1 with 512MB of memory, use the following invocation:
|
||||
|
||||
~~~~
|
||||
export TOP=${HOME}/ihk+mckernel/
|
||||
cd ${TOP}
|
||||
sudo ./sbin/mcreboot.sh -c 1 -m 512m
|
||||
~~~~
|
||||
|
||||
You should see something similar to this if you display McKernel's kernel message log:
|
||||
|
||||
|
||||
~~~~
|
||||
./sbin/ihkosctl 0 kmsg
|
||||
|
||||
IHK/McKernel started.
|
||||
[ -1]: no_execute_available: 1
|
||||
[ -1]: map_fixed: phys: 0xfee00000 => 0xffff860000009000 (1 pages)
|
||||
[ -1]: setup_x86 done.
|
||||
[ -1]: ns_per_tsc: 385
|
||||
[ -1]: KCommand Line: hidos dump_level=24
|
||||
[ -1]: Physical memory: 0x1ad3000 - 0x21000000, 525520896 bytes, 128301 pages available @ NUMA: 0
|
||||
[ -1]: NUMA: 0, Linux NUMA: 0, type: 1, available bytes: 525520896, pages: 128301
|
||||
[ -1]: NUMA 0 distances: 0 (10),
|
||||
[ -1]: map_fixed: phys: 0x28000 => 0xffff86000000a000 (2 pages)
|
||||
[ -1]: Trampoline area: 0x28000
|
||||
[ -1]: map_fixed: phys: 0x0 => 0xffff86000000c000 (1 pages)
|
||||
[ -1]: # of cpus : 1
|
||||
[ -1]: locals = ffff880001af6000
|
||||
[ 0]: BSP: 0 (HW ID: 1 @ NUMA 0)
|
||||
[ 0]: BSP: booted 0 AP CPUs
|
||||
[ 0]: Master channel init acked.
|
||||
[ 0]: vdso is enabled
|
||||
IHK/McKernel booted.
|
||||
~~~~
|
||||
|
||||
|
||||
##### 6. Run a simple program on McKernel
|
||||
|
||||
The mcexec command line tool (which is also the Linux proxy process) can be used for executing applications on McKernel:
|
||||
|
||||
~~~~
|
||||
./bin/mcexec hostname
|
||||
centos-vm
|
||||
~~~~
|
||||
|
||||
|
||||
##### 7. Shutdown McKernel
|
||||
|
||||
Finally, to shut down McKernel and release CPU/memory resources back to Linux, use the following command:
|
||||
|
||||
~~~~
|
||||
sudo ./sbin/mcstop+release.sh
|
||||
~~~~
|
||||
|
||||
##### 8. Advanced: Enable Utility Thread offloading Interface (UTI)
|
||||
|
||||
UTI enables a runtime (such as an MPI runtime) to spawn utility threads (such as MPI asynchronous progress threads) onto Linux cores.
|
||||
|
||||
###### 8.1 Install capstone
|
||||
|
||||
Install EPEL capstone-devel:
|
||||
|
||||
~~~~
|
||||
sudo yum install https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm
|
||||
sudo yum install capstone-devel
|
||||
~~~~
|
||||
|
||||
###### 8.2 Install syscall_intercept
|
||||
|
||||
~~~~
|
||||
git clone https://github.com/RIKEN-SysSoft/syscall_intercept.git
|
||||
cmake ../arch/aarch64 -DCMAKE_INSTALL_PREFIX=<syscall-intercept-install> -DCMAKE_BUILD_TYPE=Release -DCMAKE_C_COMPILER=gcc -DTREAT_WARNINGS_AS_ERRORS=OFF
|
||||
~~~~
|
||||
|
||||
###### 8.3 Install UTI for McKernel
|
||||
|
||||
Install:
|
||||
|
||||
~~~~
|
||||
git clone https://github.com/RIKEN-SysSoft/uti.git
|
||||
mkdir build && cd build
|
||||
../uti/configure --prefix=<mckernel-install> --with-rm=mckernel
|
||||
make && make install
|
||||
~~~~
|
||||
|
||||
###### 8.4 Install McKernel
|
||||
|
||||
~~~~
|
||||
CMAKE_PREFIX_PATH=<syscall-intercept-install> cmake -DCMAKE_INSTALL_PREFIX=${HOME}/ihk+mckernel -DENABLE_UTI=ON $HOME/src/ihk+mckernel/mckernel
|
||||
~~~~
|
||||
|
||||
###### 8.5 Run executable
|
||||
|
||||
~~~~
|
||||
mcexec --enable-uti <command>
|
||||
~~~~
|
||||
|
||||
###### 8.6 Install UTI for Linux for performance comparison
|
||||
|
||||
Install by make:
|
||||
|
||||
~~~~
|
||||
git clone https://github.com/RIKEN-SysSoft/uti.git
|
||||
mkdir build && cd build
|
||||
../uti/configure --prefix=<uti-install> --with-rm=linux
|
||||
make && make install
|
||||
~~~~
|
||||
|
||||
Install by rpm:
|
||||
|
||||
~~~~
|
||||
git clone https://github.com/RIKEN-SysSoft/uti.git
|
||||
mkdir build && cd build
|
||||
../uti/configure --prefix=<uti-install> --with-rm=linux
|
||||
rm -f ~/rpmbuild/SOURCES/<version>.tar.gz
|
||||
rpmbuild -ba ./scripts/uti.spec
|
||||
rpm -Uvh uti-<version>-<release>-<arch>.rpm
|
||||
~~~~
|
||||
|
||||
## The Team
|
||||
|
||||
The McKernel project was started at The University of Tokyo and currently it is mainly developed at RIKEN.
|
||||
Some of our collaborators include:
|
||||
|
||||
- Hitachi
|
||||
- Fujitsu
|
||||
- CEA (France)
|
||||
- NEC
|
||||
|
||||
|
||||
## License
|
||||
|
||||
McKernel is GPL licensed, as found in the LICENSE file.
|
||||
|
||||
## Contact
|
||||
|
||||
Please give your feedback to us via one of the following mailing lists. Subscription via [www.pccluster.org](http://www.pccluster.org/mailman/listinfo/mckernel-users) is needed.
|
||||
|
||||
* English: mckernel-users@pccluster.org
|
||||
* Japanese: mckernel-users-jp@pccluster.org
|
||||
27
arch/arm64/kernel/Makefile.arch.in
Normal file
@ -0,0 +1,27 @@
|
||||
# Makefile.arch.in COPYRIGHT FUJITSU LIMITED 2015-2018
|
||||
VDSO_SRCDIR = $(SRC)/../arch/$(IHKARCH)/kernel/vdso
|
||||
VDSO_BUILDDIR = @abs_builddir@/vdso
|
||||
VDSO_SO_O = $(O)/vdso.so.o
|
||||
|
||||
IHK_OBJS += assert.o cache.o cpu.o cputable.o context.o entry.o entry-fpsimd.o
|
||||
IHK_OBJS += fault.o head.o hyp-stub.o local.o perfctr.o perfctr_armv8pmu.o proc.o proc-macros.o
|
||||
IHK_OBJS += psci.o smp.o trampoline.o traps.o fpsimd.o
|
||||
IHK_OBJS += debug-monitors.o hw_breakpoint.o ptrace.o timer.o
|
||||
IHK_OBJS += $(notdir $(VDSO_SO_O)) memory.o syscall.o vdso.o
|
||||
|
||||
IHK_OBJS += irq-gic-v2.o irq-gic-v3.o
|
||||
IHK_OBJS += memcpy.o memset.o
|
||||
IHK_OBJS += cpufeature.o
|
||||
|
||||
IHK_OBJS += imp-sysreg.o
|
||||
IHK_OBJS += coredump.o
|
||||
|
||||
$(VDSO_SO_O): $(VDSO_BUILDDIR)/vdso.so
|
||||
|
||||
$(VDSO_BUILDDIR)/vdso.so: FORCE
|
||||
$(call echo_cmd,BUILD VDSO,$(TARGET))
|
||||
mkdir -p $(O)/vdso
|
||||
TARGETDIR="$(TARGETDIR)" $(submake) -C $(VDSO_BUILDDIR) $(SUBOPTS) prepare
|
||||
TARGETDIR="$(TARGETDIR)" $(submake) -C $(VDSO_BUILDDIR) $(SUBOPTS)
|
||||
|
||||
FORCE:
|
||||
56
arch/arm64/kernel/assert.c
Normal file
@ -0,0 +1,56 @@
|
||||
/* assert.c COPYRIGHT FUJITSU LIMITED 2015-2019 */
|
||||
|
||||
#include <process.h>
|
||||
#include <list.h>
|
||||
#include <ihk/debug.h>
|
||||
#include <ihk/context.h>
|
||||
#include <asm-offsets.h>
|
||||
#include <cputable.h>
|
||||
#include <thread_info.h>
|
||||
#include <smp.h>
|
||||
#include <ptrace.h>
|
||||
|
||||
/* assert for struct pt_regs member offset & size define */
|
||||
STATIC_ASSERT(offsetof(struct pt_regs, regs[0]) == S_X0);
|
||||
STATIC_ASSERT(offsetof(struct pt_regs, regs[1]) == S_X1);
|
||||
STATIC_ASSERT(offsetof(struct pt_regs, regs[2]) == S_X2);
|
||||
STATIC_ASSERT(offsetof(struct pt_regs, regs[3]) == S_X3);
|
||||
STATIC_ASSERT(offsetof(struct pt_regs, regs[4]) == S_X4);
|
||||
STATIC_ASSERT(offsetof(struct pt_regs, regs[5]) == S_X5);
|
||||
STATIC_ASSERT(offsetof(struct pt_regs, regs[6]) == S_X6);
|
||||
STATIC_ASSERT(offsetof(struct pt_regs, regs[7]) == S_X7);
|
||||
STATIC_ASSERT(offsetof(struct pt_regs, regs[30]) == S_LR);
|
||||
STATIC_ASSERT(offsetof(struct pt_regs, sp) == S_SP);
|
||||
STATIC_ASSERT(offsetof(struct pt_regs, pc) == S_PC);
|
||||
STATIC_ASSERT(offsetof(struct pt_regs, pstate) == S_PSTATE);
|
||||
STATIC_ASSERT(offsetof(struct pt_regs, orig_x0) == S_ORIG_X0);
|
||||
STATIC_ASSERT(offsetof(struct pt_regs, orig_pc) == S_ORIG_PC);
|
||||
STATIC_ASSERT(offsetof(struct pt_regs, syscallno) == S_SYSCALLNO);
|
||||
STATIC_ASSERT(sizeof(struct pt_regs) == S_FRAME_SIZE);
|
||||
|
||||
/* assert for struct cpu_info member offset & size define */
|
||||
STATIC_ASSERT(offsetof(struct cpu_info, cpu_setup) == CPU_INFO_SETUP);
|
||||
STATIC_ASSERT(sizeof(struct cpu_info) == CPU_INFO_SZ);
|
||||
|
||||
/* assert for struct thread_info member offset define */
|
||||
STATIC_ASSERT(offsetof(struct thread_info, flags) == TI_FLAGS);
|
||||
STATIC_ASSERT(offsetof(struct thread_info, cpu_context) == TI_CPU_CONTEXT);
|
||||
|
||||
/* assert for arch depend kernel stack size and common kernel stack pages */
|
||||
STATIC_ASSERT((KERNEL_STACK_SIZE * 2) < (KERNEL_STACK_NR_PAGES * PAGE_SIZE));
|
||||
|
||||
/* assert for struct secondary_data member offset define */
|
||||
STATIC_ASSERT(offsetof(struct secondary_data, stack) == SECONDARY_DATA_STACK);
|
||||
STATIC_ASSERT(offsetof(struct secondary_data, next_pc) == SECONDARY_DATA_NEXT_PC);
|
||||
STATIC_ASSERT(offsetof(struct secondary_data, arg) == SECONDARY_DATA_ARG);
|
||||
|
||||
/* assert for sve defines */
|
||||
/* @ref.impl arch/arm64/kernel/signal.c::BUILD_BUG_ON in the init_user_layout */
|
||||
STATIC_ASSERT(sizeof(struct sigcontext) - offsetof(struct sigcontext, __reserved) > ALIGN_UP(sizeof(struct _aarch64_ctx), 16));
|
||||
STATIC_ASSERT(sizeof(struct sigcontext) - offsetof(struct sigcontext, __reserved) -
|
||||
ALIGN_UP(sizeof(struct _aarch64_ctx), 16) > sizeof(struct extra_context));
|
||||
STATIC_ASSERT(SVE_PT_FPSIMD_OFFSET == sizeof(struct user_sve_header));
|
||||
STATIC_ASSERT(SVE_PT_SVE_OFFSET == sizeof(struct user_sve_header));
|
||||
|
||||
/* assert for struct arm64_cpu_local_thread member offset define */
|
||||
STATIC_ASSERT(offsetof(struct arm64_cpu_local_thread, panic_regs) == 168);
|
||||
39
arch/arm64/kernel/cache.S
Normal file
@ -0,0 +1,39 @@
|
||||
/* cache.S COPYRIGHT FUJITSU LIMITED 2015 */
|
||||
|
||||
#include <linkage.h>
|
||||
#include "proc-macros.S"
|
||||
|
||||
/*
|
||||
* __inval_cache_range(start, end)
|
||||
* - start - start address of region
|
||||
* - end - end address of region
|
||||
*/
|
||||
ENTRY(__inval_cache_range)
|
||||
/* FALLTHROUGH */
|
||||
|
||||
/*
|
||||
* __dma_inv_range(start, end)
|
||||
* - start - virtual start address of region
|
||||
* - end - virtual end address of region
|
||||
*/
|
||||
__dma_inv_range:
|
||||
dcache_line_size x2, x3
|
||||
sub x3, x2, #1
|
||||
tst x1, x3 // end cache line aligned?
|
||||
bic x1, x1, x3
|
||||
b.eq 1f
|
||||
dc civac, x1 // clean & invalidate D / U line
|
||||
1: tst x0, x3 // start cache line aligned?
|
||||
bic x0, x0, x3
|
||||
b.eq 2f
|
||||
dc civac, x0 // clean & invalidate D / U line
|
||||
b 3f
|
||||
2: dc ivac, x0 // invalidate D / U line
|
||||
3: add x0, x0, x2
|
||||
cmp x0, x1
|
||||
b.lo 2b
|
||||
dsb sy
|
||||
ret
|
||||
ENDPROC(__inval_cache_range)
|
||||
ENDPROC(__dma_inv_range)
|
||||
|
||||
191
arch/arm64/kernel/context.c
Normal file
191
arch/arm64/kernel/context.c
Normal file
@ -0,0 +1,191 @@
|
||||
/* context.c COPYRIGHT FUJITSU LIMITED 2015-2017 */
|
||||
#include <ihk/context.h>
|
||||
#include <ihk/debug.h>
|
||||
#include <thread_info.h>
|
||||
#include <cputype.h>
|
||||
#include <mmu_context.h>
|
||||
#include <arch-memory.h>
|
||||
#include <irqflags.h>
|
||||
#include <lwk/compiler.h>
|
||||
#include <bitops.h>
|
||||
|
||||
/* @ref.impl arch/arm64/include/asm/mmu_context.h::MAX_ASID_BITS */
|
||||
#define MAX_ASID_BITS 16
|
||||
#define ASID_FIRST_VERSION (1 << MAX_ASID_BITS)
|
||||
#define ASID_MASK ((1 << MAX_ASID_BITS) - 1)
|
||||
#define VERSION_MASK (0xFFFF << MAX_ASID_BITS)
|
||||
|
||||
/* @ref.impl arch/arm64/mm/context.c::asid_bits */
|
||||
#define asid_bits(reg) \
|
||||
(((read_cpuid(ID_AA64MMFR0_EL1) & 0xf0) >> 2) + 8)
|
||||
|
||||
#define MAX_CTX_NR (1UL << MAX_ASID_BITS)
|
||||
DECLARE_BITMAP(mmu_context_bmap, MAX_CTX_NR) = { 1 }; /* context number 0 reserved. */
|
||||
|
||||
/* cpu_asid lock */
|
||||
static ihk_spinlock_t cpu_asid_lock = SPIN_LOCK_UNLOCKED;
|
||||
|
||||
/* last allocation ASID, initialized by 0x0001_0000 */
|
||||
static unsigned int cpu_last_asid = ASID_FIRST_VERSION;
|
||||
|
||||
/* @ref.impl arch/arm64/mm/context.c::set_mm_context */
|
||||
/* set asid for kernel_context_t.context */
|
||||
static void set_mm_context(struct page_table *pgtbl, unsigned int asid)
|
||||
{
|
||||
unsigned int context = get_address_space_id(pgtbl);
|
||||
if (likely((context ^ cpu_last_asid) >> MAX_ASID_BITS)) {
|
||||
set_address_space_id(pgtbl, asid);
|
||||
}
|
||||
}
|
||||
|
||||
/* @ref.impl arch/arm64/mm/context.c::__new_context */
|
||||
/* ASID allocation for new process function */
|
||||
static inline void __new_context(struct page_table *pgtbl)
|
||||
{
|
||||
unsigned int asid;
|
||||
unsigned int bits = asid_bits();
|
||||
unsigned long flags;
|
||||
unsigned int context = get_address_space_id(pgtbl);
|
||||
unsigned long index = 0;
|
||||
|
||||
flags = ihk_mc_spinlock_lock(&cpu_asid_lock);
|
||||
|
||||
/* already assigned context number? */
|
||||
if (!unlikely((context ^ cpu_last_asid) >> MAX_ASID_BITS)) {
|
||||
/* true, unnecessary assigned context number */
|
||||
ihk_mc_spinlock_unlock(&cpu_asid_lock, flags);
|
||||
return;
|
||||
}
|
||||
|
||||
/* false, necessary assigned context number */
|
||||
/* search from the previous assigned number */
|
||||
index = (cpu_last_asid & ASID_MASK) + 1;
|
||||
asid = find_next_zero_bit(mmu_context_bmap, MAX_CTX_NR, index);
|
||||
|
||||
/* upper limit exceeded */
|
||||
if (asid >= (1 << bits)) {
|
||||
/* re assigned context number, search from 1 */
|
||||
asid = find_next_zero_bit(mmu_context_bmap, index, 1);
|
||||
|
||||
/* upper previous assigned number, goto panic */
|
||||
if (unlikely(asid >= index)) {
|
||||
ihk_mc_spinlock_unlock(&cpu_asid_lock, flags);
|
||||
panic("__new_context(): PANIC: Context Number Depletion.\n");
|
||||
}
|
||||
}
|
||||
|
||||
/* set assigned context number bitmap */
|
||||
mmu_context_bmap[asid >> 6] |= (1UL << (asid & 63));
|
||||
|
||||
/* set previous assigned context number */
|
||||
cpu_last_asid = asid | (cpu_last_asid & VERSION_MASK);
|
||||
|
||||
set_mm_context(pgtbl, cpu_last_asid);
|
||||
ihk_mc_spinlock_unlock(&cpu_asid_lock, flags);
|
||||
}
|
||||
|
||||
void free_mmu_context(struct page_table *pgtbl)
|
||||
{
|
||||
unsigned int context = get_address_space_id(pgtbl);
|
||||
unsigned int nr = context & ASID_MASK;
|
||||
unsigned long flags = ihk_mc_spinlock_lock(&cpu_asid_lock);
|
||||
|
||||
/* clear used context number bitmap */
|
||||
mmu_context_bmap[nr >> 6] &= ~(1UL << (nr & 63));
|
||||
ihk_mc_spinlock_unlock(&cpu_asid_lock, flags);
|
||||
}
|
||||
|
||||
/* set ttbr0 assembler code extern */
|
||||
/* in arch/arm64/kernel/proc.S */
|
||||
extern void *cpu_do_switch_mm(translation_table_t* tt_pa, unsigned int asid);
|
||||
|
||||
/* @ref.impl arch/arm64/include/asm/mmu_context.h::switch_new_context */
|
||||
/* ASID allocation for new process */
|
||||
static inline void switch_new_context(struct page_table *pgtbl)
|
||||
{
|
||||
unsigned long flags;
|
||||
translation_table_t* tt_pa;
|
||||
unsigned int context;
|
||||
|
||||
/* ASID allocation */
|
||||
__new_context(pgtbl);
|
||||
context = get_address_space_id(pgtbl);
|
||||
|
||||
/* disable interrupt save */
|
||||
flags = cpu_disable_interrupt_save();
|
||||
|
||||
tt_pa = get_translation_table_as_paddr(pgtbl);
|
||||
cpu_do_switch_mm(tt_pa, context & ASID_MASK);
|
||||
|
||||
/* interrupt restore */
|
||||
cpu_restore_interrupt(flags);
|
||||
}
|
||||
|
||||
/* @ref.impl arch/arm64/include/asm/mmu_context.h::check_and_switch_context */
|
||||
/* ASID allocation */
|
||||
void switch_mm(struct page_table *pgtbl)
|
||||
{
|
||||
unsigned int context = get_address_space_id(pgtbl);
|
||||
|
||||
/* During switch_mm, you want to disable the TTBR */
|
||||
cpu_set_reserved_ttbr0();
|
||||
|
||||
/* check new process or existing process */
|
||||
if (!((context ^ cpu_last_asid) >> MAX_ASID_BITS)) {
|
||||
translation_table_t* tt_pa;
|
||||
|
||||
/* for existing process */
|
||||
tt_pa = get_translation_table_as_paddr(pgtbl);
|
||||
cpu_do_switch_mm(tt_pa, context & ASID_MASK);
|
||||
|
||||
/* TODO: tif_switch_mm / after context switch */
|
||||
// } else if (irqs_disabled()) {
|
||||
// /*
|
||||
// * Defer the new ASID allocation until after the context
|
||||
// * switch critical region since __new_context() cannot be
|
||||
// * called with interrupts disabled.
|
||||
// */
|
||||
// set_ti_thread_flag(task_thread_info(tsk), TIF_SWITCH_MM);
|
||||
} else {
|
||||
/* for new process */
|
||||
/* ASID allocation & set ttbr0 */
|
||||
switch_new_context(pgtbl);
|
||||
}
|
||||
}
|
||||
|
||||
/* context switch assembler code extern */
|
||||
/* in arch/arm64/kernel/entry.S */
|
||||
extern void *cpu_switch_to(struct thread_info *prev, struct thread_info *next, void *prev_proc);
|
||||
|
||||
/* context switch C function */
|
||||
/* TODO: fpreg etc.. save & restore */
|
||||
static inline void *switch_to(struct thread_info *prev,
|
||||
struct thread_info *next,
|
||||
void *prev_proc)
|
||||
{
|
||||
void *last = NULL;
|
||||
|
||||
next->cpu = ihk_mc_get_processor_id();
|
||||
last = cpu_switch_to(prev, next, prev_proc);
|
||||
|
||||
return last;
|
||||
}
|
||||
|
||||
/* common unit I/F, for context switch */
|
||||
void *ihk_mc_switch_context(ihk_mc_kernel_context_t *old_ctx,
|
||||
ihk_mc_kernel_context_t *new_ctx,
|
||||
void *prev)
|
||||
{
|
||||
struct thread_info *prev_ti = NULL;
|
||||
struct thread_info *next_ti = NULL;
|
||||
|
||||
/* get next thread_info addr */
|
||||
next_ti = new_ctx->thread;
|
||||
if (likely(old_ctx)) {
|
||||
/* get prev thread_info addr */
|
||||
prev_ti = old_ctx->thread;
|
||||
}
|
||||
|
||||
/* switch next thread_info & process */
|
||||
return switch_to(prev_ti, next_ti, prev);
|
||||
}
|
||||
194
arch/arm64/kernel/copy_template.S
Normal file
194
arch/arm64/kernel/copy_template.S
Normal file
@ -0,0 +1,194 @@
|
||||
/* copy_template.S COPYRIGHT FUJITSU LIMITED 2017 */
|
||||
/*
|
||||
* Copyright (C) 2013 ARM Ltd.
|
||||
* Copyright (C) 2013 Linaro.
|
||||
*
|
||||
* This code is based on glibc cortex strings work originally authored by Linaro
|
||||
* and re-licensed under GPLv2 for the Linux kernel. The original code can
|
||||
* be found @
|
||||
*
|
||||
* http://bazaar.launchpad.net/~linaro-toolchain-dev/cortex-strings/trunk/
|
||||
* files/head:/src/aarch64/
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
|
||||
/*
|
||||
* Copy a buffer from src to dest (alignment handled by the hardware)
|
||||
*
|
||||
* Parameters:
|
||||
* x0 - dest
|
||||
* x1 - src
|
||||
* x2 - n
|
||||
* Returns:
|
||||
* x0 - dest
|
||||
*/
|
||||
dstin .req x0
|
||||
src .req x1
|
||||
count .req x2
|
||||
tmp1 .req x3
|
||||
tmp1w .req w3
|
||||
tmp2 .req x4
|
||||
tmp2w .req w4
|
||||
dst .req x6
|
||||
|
||||
A_l .req x7
|
||||
A_h .req x8
|
||||
B_l .req x9
|
||||
B_h .req x10
|
||||
C_l .req x11
|
||||
C_h .req x12
|
||||
D_l .req x13
|
||||
D_h .req x14
|
||||
|
||||
mov dst, dstin
|
||||
cmp count, #16
|
||||
/*When memory length is less than 16, the accessed are not aligned.*/
|
||||
b.lo .Ltiny15
|
||||
|
||||
neg tmp2, src
|
||||
ands tmp2, tmp2, #15/* Bytes to reach alignment. */
|
||||
b.eq .LSrcAligned
|
||||
sub count, count, tmp2
|
||||
/*
|
||||
* Copy the leading memory data from src to dst in an increasing
|
||||
* address order.By this way,the risk of overwritting the source
|
||||
* memory data is eliminated when the distance between src and
|
||||
* dst is less than 16. The memory accesses here are alignment.
|
||||
*/
|
||||
tbz tmp2, #0, 1f
|
||||
ldrb1 tmp1w, src, #1
|
||||
strb1 tmp1w, dst, #1
|
||||
1:
|
||||
tbz tmp2, #1, 2f
|
||||
ldrh1 tmp1w, src, #2
|
||||
strh1 tmp1w, dst, #2
|
||||
2:
|
||||
tbz tmp2, #2, 3f
|
||||
ldr1 tmp1w, src, #4
|
||||
str1 tmp1w, dst, #4
|
||||
3:
|
||||
tbz tmp2, #3, .LSrcAligned
|
||||
ldr1 tmp1, src, #8
|
||||
str1 tmp1, dst, #8
|
||||
|
||||
.LSrcAligned:
|
||||
cmp count, #64
|
||||
b.ge .Lcpy_over64
|
||||
/*
|
||||
* Deal with small copies quickly by dropping straight into the
|
||||
* exit block.
|
||||
*/
|
||||
.Ltail63:
|
||||
/*
|
||||
* Copy up to 48 bytes of data. At this point we only need the
|
||||
* bottom 6 bits of count to be accurate.
|
||||
*/
|
||||
ands tmp1, count, #0x30
|
||||
b.eq .Ltiny15
|
||||
cmp tmp1w, #0x20
|
||||
b.eq 1f
|
||||
b.lt 2f
|
||||
ldp1 A_l, A_h, src, #16
|
||||
stp1 A_l, A_h, dst, #16
|
||||
1:
|
||||
ldp1 A_l, A_h, src, #16
|
||||
stp1 A_l, A_h, dst, #16
|
||||
2:
|
||||
ldp1 A_l, A_h, src, #16
|
||||
stp1 A_l, A_h, dst, #16
|
||||
.Ltiny15:
|
||||
/*
|
||||
* Prefer to break one ldp/stp into several load/store to access
|
||||
* memory in an increasing address order,rather than to load/store 16
|
||||
* bytes from (src-16) to (dst-16) and to backward the src to aligned
|
||||
* address,which way is used in original cortex memcpy. If keeping
|
||||
* the original memcpy process here, memmove need to satisfy the
|
||||
* precondition that src address is at least 16 bytes bigger than dst
|
||||
* address,otherwise some source data will be overwritten when memove
|
||||
* call memcpy directly. To make memmove simpler and decouple the
|
||||
* memcpy's dependency on memmove, withdrew the original process.
|
||||
*/
|
||||
tbz count, #3, 1f
|
||||
ldr1 tmp1, src, #8
|
||||
str1 tmp1, dst, #8
|
||||
1:
|
||||
tbz count, #2, 2f
|
||||
ldr1 tmp1w, src, #4
|
||||
str1 tmp1w, dst, #4
|
||||
2:
|
||||
tbz count, #1, 3f
|
||||
ldrh1 tmp1w, src, #2
|
||||
strh1 tmp1w, dst, #2
|
||||
3:
|
||||
tbz count, #0, .Lexitfunc
|
||||
ldrb1 tmp1w, src, #1
|
||||
strb1 tmp1w, dst, #1
|
||||
|
||||
b .Lexitfunc
|
||||
|
||||
.Lcpy_over64:
|
||||
subs count, count, #128
|
||||
b.ge .Lcpy_body_large
|
||||
/*
|
||||
* Less than 128 bytes to copy, so handle 64 here and then jump
|
||||
* to the tail.
|
||||
*/
|
||||
ldp1 A_l, A_h, src, #16
|
||||
stp1 A_l, A_h, dst, #16
|
||||
ldp1 B_l, B_h, src, #16
|
||||
ldp1 C_l, C_h, src, #16
|
||||
stp1 B_l, B_h, dst, #16
|
||||
stp1 C_l, C_h, dst, #16
|
||||
ldp1 D_l, D_h, src, #16
|
||||
stp1 D_l, D_h, dst, #16
|
||||
|
||||
tst count, #0x3f
|
||||
b.ne .Ltail63
|
||||
b .Lexitfunc
|
||||
|
||||
/*
|
||||
* Critical loop. Start at a new cache line boundary. Assuming
|
||||
* 64 bytes per line this ensures the entire loop is in one line.
|
||||
*/
|
||||
.p2align L1_CACHE_SHIFT
|
||||
.Lcpy_body_large:
|
||||
/* pre-get 64 bytes data. */
|
||||
ldp1 A_l, A_h, src, #16
|
||||
ldp1 B_l, B_h, src, #16
|
||||
ldp1 C_l, C_h, src, #16
|
||||
ldp1 D_l, D_h, src, #16
|
||||
1:
|
||||
/*
|
||||
* interlace the load of next 64 bytes data block with store of the last
|
||||
* loaded 64 bytes data.
|
||||
*/
|
||||
stp1 A_l, A_h, dst, #16
|
||||
ldp1 A_l, A_h, src, #16
|
||||
stp1 B_l, B_h, dst, #16
|
||||
ldp1 B_l, B_h, src, #16
|
||||
stp1 C_l, C_h, dst, #16
|
||||
ldp1 C_l, C_h, src, #16
|
||||
stp1 D_l, D_h, dst, #16
|
||||
ldp1 D_l, D_h, src, #16
|
||||
subs count, count, #64
|
||||
b.ge 1b
|
||||
stp1 A_l, A_h, dst, #16
|
||||
stp1 B_l, B_h, dst, #16
|
||||
stp1 C_l, C_h, dst, #16
|
||||
stp1 D_l, D_h, dst, #16
|
||||
|
||||
tst count, #0x3f
|
||||
b.ne .Ltail63
|
||||
.Lexitfunc:
|
||||
92
arch/arm64/kernel/coredump.c
Normal file
92
arch/arm64/kernel/coredump.c
Normal file
@ -0,0 +1,92 @@
|
||||
/* coredump.c COPYRIGHT FUJITSU LIMITED 2015-2019 */
|
||||
#include <process.h>
|
||||
#include <elfcore.h>
|
||||
#include <string.h>
|
||||
#include <ptrace.h>
|
||||
#include <cls.h>
|
||||
#include <hwcap.h>
|
||||
|
||||
#define align32(x) ((((x) + 3) / 4) * 4)
|
||||
|
||||
void arch_fill_prstatus(struct elf_prstatus64 *prstatus,
|
||||
struct thread *thread, void *regs0, int sig)
|
||||
{
|
||||
struct pt_regs *regs = regs0;
|
||||
struct elf_prstatus64 tmp_prstatus;
|
||||
/*
|
||||
We ignore following entries for now.
|
||||
|
||||
struct elf_siginfo pr_info;
|
||||
short int pr_cursig;
|
||||
a8_uint64_t pr_sigpend;
|
||||
a8_uint64_t pr_sighold;
|
||||
pid_t pr_pgrp;
|
||||
pid_t pr_sid;
|
||||
struct prstatus64_timeval pr_utime;
|
||||
struct prstatus64_timeval pr_stime;
|
||||
struct prstatus64_timeval pr_cutime;
|
||||
struct prstatus64_timeval pr_cstime;
|
||||
*/
|
||||
|
||||
/* copy x0-30, sp, pc, pstate */
|
||||
memcpy(&tmp_prstatus.pr_reg, ®s->user_regs, sizeof(tmp_prstatus.pr_reg));
|
||||
tmp_prstatus.pr_fpvalid = 0; /* We assume no fp */
|
||||
|
||||
/* copy unaligned prstatus addr */
|
||||
memcpy(prstatus, &tmp_prstatus, sizeof(*prstatus));
|
||||
|
||||
prstatus->pr_pid = thread->tid;
|
||||
if (thread->proc->parent) {
|
||||
prstatus->pr_ppid = thread->proc->parent->pid;
|
||||
}
|
||||
|
||||
prstatus->pr_info.si_signo = sig;
|
||||
prstatus->pr_cursig = sig;
|
||||
}
|
||||
|
||||
int arch_get_thread_core_info_size(void)
|
||||
{
|
||||
const struct user_regset_view *view = current_user_regset_view();
|
||||
const struct user_regset *regset = find_regset(view, NT_ARM_SVE);
|
||||
|
||||
if (unlikely(!(elf_hwcap & HWCAP_SVE))) {
|
||||
return 0;
|
||||
}
|
||||
return sizeof(struct note) + align32(sizeof("LINUX"))
|
||||
+ regset_size(cpu_local_var(current), regset);
|
||||
}
|
||||
|
||||
void arch_fill_thread_core_info(struct note *head,
|
||||
struct thread *thread, void *regs)
|
||||
{
|
||||
const struct user_regset_view *view = current_user_regset_view();
|
||||
const struct user_regset *regset = find_regset(view, NT_ARM_SVE);
|
||||
|
||||
if (unlikely(!(elf_hwcap & HWCAP_SVE))) {
|
||||
return;
|
||||
}
|
||||
|
||||
/* pre saved registers */
|
||||
save_fp_regs(thread);
|
||||
|
||||
if (regset->core_note_type && regset->get &&
|
||||
(!regset->active || regset->active(thread, regset))) {
|
||||
int ret;
|
||||
size_t size = regset_size(thread, regset);
|
||||
void *namep;
|
||||
void *descp;
|
||||
|
||||
namep = (void *) (head + 1);
|
||||
descp = namep + align32(sizeof("LINUX"));
|
||||
|
||||
ret = regset->get(thread, regset, 0, size, descp, NULL);
|
||||
if (ret) {
|
||||
return;
|
||||
}
|
||||
|
||||
head->namesz = sizeof("LINUX");
|
||||
head->descsz = size;
|
||||
head->type = NT_ARM_SVE;
|
||||
memcpy(namep, "LINUX", sizeof("LINUX"));
|
||||
}
|
||||
}
|
||||
1902
arch/arm64/kernel/cpu.c
Normal file
1902
arch/arm64/kernel/cpu.c
Normal file
File diff suppressed because it is too large
Load Diff
1014
arch/arm64/kernel/cpufeature.c
Normal file
1014
arch/arm64/kernel/cpufeature.c
Normal file
File diff suppressed because it is too large
Load Diff
14
arch/arm64/kernel/cputable.c
Normal file
14
arch/arm64/kernel/cputable.c
Normal file
@ -0,0 +1,14 @@
|
||||
/* cputable.c COPYRIGHT FUJITSU LIMITED 2015 */
|
||||
|
||||
#include <cputable.h>
|
||||
|
||||
extern unsigned long __cpu_setup(void);
|
||||
struct cpu_info cpu_table[] = {
|
||||
{
|
||||
.cpu_id_val = 0x000f0000,
|
||||
.cpu_id_mask = 0x000f0000,
|
||||
.cpu_name = "AArch64 Processor",
|
||||
.cpu_setup = __cpu_setup,
|
||||
},
|
||||
{ 0 },
|
||||
};
|
||||
109
arch/arm64/kernel/debug-monitors.c
Normal file
109
arch/arm64/kernel/debug-monitors.c
Normal file
@ -0,0 +1,109 @@
|
||||
/* debug-monitors.c COPYRIGHT FUJITSU LIMITED 2016-2017 */
|
||||
#include <cputype.h>
|
||||
#include <irqflags.h>
|
||||
#include <ihk/context.h>
|
||||
#include <signal.h>
|
||||
#include <errno.h>
|
||||
#include <debug-monitors.h>
|
||||
#include <cls.h>
|
||||
#include <thread_info.h>
|
||||
|
||||
/* @ref.impl arch/arm64/kernel/debug-monitors.c::debug_monitors_arch */
|
||||
/* Determine debug architecture. */
|
||||
unsigned char debug_monitors_arch(void)
|
||||
{
|
||||
return read_cpuid(ID_AA64DFR0_EL1) & 0xf;
|
||||
}
|
||||
|
||||
/* @ref.impl arch/arm64/kernel/debug-monitors.c::mdscr_write */
|
||||
void mdscr_write(unsigned int mdscr)
|
||||
{
|
||||
unsigned long flags = local_dbg_save();
|
||||
asm volatile("msr mdscr_el1, %0" :: "r" (mdscr));
|
||||
local_dbg_restore(flags);
|
||||
}
|
||||
|
||||
/* @ref.impl arch/arm64/kernel/debug-monitors.c::mdscr_read */
|
||||
unsigned int mdscr_read(void)
|
||||
{
|
||||
unsigned int mdscr;
|
||||
asm volatile("mrs %0, mdscr_el1" : "=r" (mdscr));
|
||||
return mdscr;
|
||||
}
|
||||
|
||||
/* @ref.impl arch/arm64/kernel/debug-monitors.c::clear_os_lock */
|
||||
static void clear_os_lock(void)
|
||||
{
|
||||
asm volatile("msr oslar_el1, %0" : : "r" (0));
|
||||
}
|
||||
|
||||
/* @ref.impl arch/arm64/kernel/debug-monitors.c::debug_monitors_init */
|
||||
void debug_monitors_init(void)
|
||||
{
|
||||
clear_os_lock();
|
||||
}
|
||||
|
||||
/* @ref.impl arch/arm64/kernel/debug-monitors.c::set_regs_spsr_ss */
|
||||
void set_regs_spsr_ss(struct pt_regs *regs)
|
||||
{
|
||||
unsigned long spsr;
|
||||
|
||||
spsr = regs->pstate;
|
||||
spsr &= ~DBG_SPSR_SS;
|
||||
spsr |= DBG_SPSR_SS;
|
||||
regs->pstate = spsr;
|
||||
}
|
||||
|
||||
/* @ref.impl arch/arm64/kernel/debug-monitors.c::set_regs_spsr_ss */
|
||||
void clear_regs_spsr_ss(struct pt_regs *regs)
|
||||
{
|
||||
unsigned long spsr;
|
||||
|
||||
spsr = regs->pstate;
|
||||
spsr &= ~DBG_SPSR_SS;
|
||||
regs->pstate = spsr;
|
||||
}
|
||||
|
||||
extern int interrupt_from_user(void *);
|
||||
extern void clear_single_step(struct thread *thread);
|
||||
|
||||
/* @ref.impl arch/arm64/kernel/debug-monitors.c::single_step_handler */
|
||||
int single_step_handler(unsigned long addr, unsigned int esr, struct pt_regs *regs)
|
||||
{
|
||||
siginfo_t info;
|
||||
int ret = -EFAULT;
|
||||
|
||||
if (interrupt_from_user(regs)) {
|
||||
info.si_signo = SIGTRAP;
|
||||
info.si_errno = 0;
|
||||
info.si_code = TRAP_HWBKPT;
|
||||
info._sifields._sigfault.si_addr = (void *)regs->pc;
|
||||
set_signal(SIGTRAP, regs, &info);
|
||||
clear_single_step(cpu_local_var(current));
|
||||
|
||||
ret = 0;
|
||||
} else {
|
||||
kprintf("Unexpected kernel single-step exception at EL1\n");
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* @ref.impl arch/arm64/kernel/debug-monitors.c::brk_handler */
|
||||
int brk_handler(unsigned long addr, unsigned int esr, struct pt_regs *regs)
|
||||
{
|
||||
siginfo_t info;
|
||||
int ret = -EFAULT;
|
||||
|
||||
if (interrupt_from_user(regs)) {
|
||||
info.si_signo = SIGTRAP;
|
||||
info.si_errno = 0;
|
||||
info.si_code = TRAP_BRKPT;
|
||||
info._sifields._sigfault.si_addr = (void *)regs->pc;
|
||||
set_signal(SIGTRAP, regs, &info);
|
||||
|
||||
ret = 0;
|
||||
} else {
|
||||
kprintf("Unexpected kernel BRK exception at EL1\n");
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
126
arch/arm64/kernel/entry-fpsimd.S
Normal file
126
arch/arm64/kernel/entry-fpsimd.S
Normal file
@ -0,0 +1,126 @@
|
||||
/* entry-fpsimd.S COPYRIGHT FUJITSU LIMITED 2015-2017 */
|
||||
|
||||
#include <linkage.h>
|
||||
#include <assembler.h>
|
||||
#include <fpsimdmacros.h>
|
||||
|
||||
/*
|
||||
* @ref.impl linux-linaro/arch/arm64/include/asm/fpsimdmacros.h
|
||||
*/
|
||||
/*
|
||||
* FP/SIMD state saving and restoring macros
|
||||
*
|
||||
* Copyright (C) 2012 ARM Ltd.
|
||||
* Author: Catalin Marinas <catalin.marinas@arm.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
.macro fpsimd_save state, tmpnr
|
||||
stp q0, q1, [\state, #16 * 0]
|
||||
stp q2, q3, [\state, #16 * 2]
|
||||
stp q4, q5, [\state, #16 * 4]
|
||||
stp q6, q7, [\state, #16 * 6]
|
||||
stp q8, q9, [\state, #16 * 8]
|
||||
stp q10, q11, [\state, #16 * 10]
|
||||
stp q12, q13, [\state, #16 * 12]
|
||||
stp q14, q15, [\state, #16 * 14]
|
||||
stp q16, q17, [\state, #16 * 16]
|
||||
stp q18, q19, [\state, #16 * 18]
|
||||
stp q20, q21, [\state, #16 * 20]
|
||||
stp q22, q23, [\state, #16 * 22]
|
||||
stp q24, q25, [\state, #16 * 24]
|
||||
stp q26, q27, [\state, #16 * 26]
|
||||
stp q28, q29, [\state, #16 * 28]
|
||||
stp q30, q31, [\state, #16 * 30]!
|
||||
mrs x\tmpnr, fpsr
|
||||
str w\tmpnr, [\state, #16 * 2]
|
||||
mrs x\tmpnr, fpcr
|
||||
str w\tmpnr, [\state, #16 * 2 + 4]
|
||||
.endm
|
||||
|
||||
.macro fpsimd_restore_fpcr state, tmp
|
||||
/*
|
||||
* Writes to fpcr may be self-synchronising, so avoid restoring
|
||||
* the register if it hasn't changed.
|
||||
*/
|
||||
mrs \tmp, fpcr
|
||||
cmp \tmp, \state
|
||||
b.eq 9999f
|
||||
msr fpcr, \state
|
||||
9999:
|
||||
.endm
|
||||
|
||||
/* Clobbers \state */
|
||||
.macro fpsimd_restore state, tmpnr
|
||||
ldp q0, q1, [\state, #16 * 0]
|
||||
ldp q2, q3, [\state, #16 * 2]
|
||||
ldp q4, q5, [\state, #16 * 4]
|
||||
ldp q6, q7, [\state, #16 * 6]
|
||||
ldp q8, q9, [\state, #16 * 8]
|
||||
ldp q10, q11, [\state, #16 * 10]
|
||||
ldp q12, q13, [\state, #16 * 12]
|
||||
ldp q14, q15, [\state, #16 * 14]
|
||||
ldp q16, q17, [\state, #16 * 16]
|
||||
ldp q18, q19, [\state, #16 * 18]
|
||||
ldp q20, q21, [\state, #16 * 20]
|
||||
ldp q22, q23, [\state, #16 * 22]
|
||||
ldp q24, q25, [\state, #16 * 24]
|
||||
ldp q26, q27, [\state, #16 * 26]
|
||||
ldp q28, q29, [\state, #16 * 28]
|
||||
ldp q30, q31, [\state, #16 * 30]!
|
||||
ldr w\tmpnr, [\state, #16 * 2]
|
||||
msr fpsr, x\tmpnr
|
||||
ldr w\tmpnr, [\state, #16 * 2 + 4]
|
||||
fpsimd_restore_fpcr x\tmpnr, \state
|
||||
.endm
|
||||
|
||||
/*
|
||||
* @ref.impl linux-linaro/arch/arm64/kernel/entry-fpsimd.S
|
||||
*/
|
||||
/*
|
||||
* Save the FP registers.
|
||||
*
|
||||
* x0 - pointer to struct fpsimd_state
|
||||
*/
|
||||
ENTRY(fpsimd_save_state)
|
||||
fpsimd_save x0, 8
|
||||
ret
|
||||
ENDPROC(fpsimd_save_state)
|
||||
|
||||
/*
|
||||
* Load the FP registers.
|
||||
*
|
||||
* x0 - pointer to struct fpsimd_state
|
||||
*/
|
||||
ENTRY(fpsimd_load_state)
|
||||
fpsimd_restore x0, 8
|
||||
ret
|
||||
ENDPROC(fpsimd_load_state)
|
||||
|
||||
#ifdef CONFIG_ARM64_SVE
|
||||
ENTRY(sve_save_state)
|
||||
sve_save 0, x1, 2
|
||||
ret
|
||||
ENDPROC(sve_save_state)
|
||||
|
||||
ENTRY(sve_load_state)
|
||||
sve_load 0, x1, x2, 3
|
||||
ret
|
||||
ENDPROC(sve_load_state)
|
||||
|
||||
ENTRY(sve_get_vl)
|
||||
_zrdvl 0, 1
|
||||
ret
|
||||
ENDPROC(sve_get_vl)
|
||||
#endif /* CONFIG_ARM64_SVE */
|
||||
566
arch/arm64/kernel/entry.S
Normal file
566
arch/arm64/kernel/entry.S
Normal file
@ -0,0 +1,566 @@
|
||||
/* entry.S COPYRIGHT FUJITSU LIMITED 2015-2018 */
|
||||
|
||||
#include <linkage.h>
|
||||
#include <assembler.h>
|
||||
#include <asm-offsets.h>
|
||||
#include <esr.h>
|
||||
#include <thread_info.h>
|
||||
#include <asm-syscall.h>
|
||||
|
||||
/*
|
||||
* Bad Abort numbers
|
||||
*-----------------
|
||||
*/
|
||||
#define BAD_SYNC 0
|
||||
#define BAD_IRQ 1
|
||||
#define BAD_FIQ 2
|
||||
#define BAD_ERROR 3
|
||||
|
||||
.macro kernel_entry, el, regsize = 64
|
||||
sub sp, sp, #S_FRAME_SIZE
|
||||
.if \regsize == 32
|
||||
mov w0, w0 // zero upper 32 bits of x0
|
||||
.endif
|
||||
stp x0, x1, [sp, #16 * 0]
|
||||
stp x2, x3, [sp, #16 * 1]
|
||||
stp x4, x5, [sp, #16 * 2]
|
||||
stp x6, x7, [sp, #16 * 3]
|
||||
stp x8, x9, [sp, #16 * 4]
|
||||
stp x10, x11, [sp, #16 * 5]
|
||||
stp x12, x13, [sp, #16 * 6]
|
||||
stp x14, x15, [sp, #16 * 7]
|
||||
stp x16, x17, [sp, #16 * 8]
|
||||
stp x18, x19, [sp, #16 * 9]
|
||||
stp x20, x21, [sp, #16 * 10]
|
||||
stp x22, x23, [sp, #16 * 11]
|
||||
stp x24, x25, [sp, #16 * 12]
|
||||
stp x26, x27, [sp, #16 * 13]
|
||||
stp x28, x29, [sp, #16 * 14]
|
||||
|
||||
.if \el == 0
|
||||
mrs x21, sp_el0
|
||||
get_thread_info tsk // Ensure MDSCR_EL1.SS is clear,
|
||||
ldr x19, [tsk, #TI_FLAGS] // since we can unmask debug
|
||||
disable_step_tsk x19, x20 // exceptions when scheduling.
|
||||
.else
|
||||
add x21, sp, #S_FRAME_SIZE
|
||||
.endif
|
||||
mrs x22, elr_el1
|
||||
mrs x23, spsr_el1
|
||||
#if defined(CONFIG_HAS_NMI)
|
||||
mrs_s x20, ICC_PMR_EL1 // Get PMR
|
||||
and x20, x20, #ICC_PMR_EL1_G_BIT // Extract mask bit
|
||||
lsl x20, x20, #PSR_G_PMR_G_SHIFT // Shift to a PSTATE RES0 bit
|
||||
eor x20, x20, #PSR_G_BIT // Invert bit
|
||||
orr x23, x20, x23 // Store PMR within PSTATE
|
||||
mov x20, #ICC_PMR_EL1_MASKED
|
||||
msr_s ICC_PMR_EL1, x20 // Mask normal interrupts at PMR
|
||||
#endif /* defined(CONFIG_HAS_NMI) */
|
||||
stp lr, x21, [sp, #S_LR]
|
||||
stp x22, x23, [sp, #S_PC]
|
||||
|
||||
/*
|
||||
* Set syscallno to -1 by default (overridden later if real syscall).
|
||||
*/
|
||||
.if \el == 0
|
||||
mvn x21, xzr
|
||||
str x21, [sp, #S_SYSCALLNO]
|
||||
.endif
|
||||
|
||||
/*
|
||||
* Registers that may be useful after this macro is invoked:
|
||||
*
|
||||
* x21 - aborted SP
|
||||
* x22 - aborted PC
|
||||
* x23 - aborted PSTATE
|
||||
*/
|
||||
.endm
|
||||
|
||||
.macro kernel_exit, el, need_enable_step = 0
|
||||
.if \el == 0
|
||||
bl check_sig_pending
|
||||
bl check_need_resched // or reschedule is needed.
|
||||
mov x0, #0
|
||||
mov x1, sp
|
||||
mov x2, #0
|
||||
bl check_signal // check whether the signal is delivered
|
||||
mov x0, #0
|
||||
mov x1, sp
|
||||
mov x2, #0
|
||||
bl check_signal_irq_disabled // check whether the signal is delivered(for kernel_exit)
|
||||
.endif
|
||||
.if \el == 1
|
||||
bl check_sig_pending
|
||||
.endif
|
||||
disable_irq x1 // disable interrupts
|
||||
.if \need_enable_step == 1
|
||||
ldr x1, [tsk, #TI_FLAGS]
|
||||
enable_step_tsk x1, x2
|
||||
.endif
|
||||
disable_nmi
|
||||
ldp x21, x22, [sp, #S_PC] // load ELR, SPSR
|
||||
.if \el == 0
|
||||
// ct_user_enter // McKernel, disable (debugcode?)
|
||||
ldr x23, [sp, #S_SP] // load return stack pointer
|
||||
msr sp_el0, x23
|
||||
.endif
|
||||
#if defined(CONFIG_HAS_NMI)
|
||||
and x20, x22, #PSR_G_BIT // Get stolen PSTATE bit
|
||||
and x22, x22, #~PSR_G_BIT // Clear stolen bit
|
||||
lsr x20, x20, #PSR_G_PMR_G_SHIFT // Shift back to PMR mask
|
||||
eor x20, x20, #ICC_PMR_EL1_UNMASKED // x20 gets 0xf0 or 0xb0
|
||||
msr_s ICC_PMR_EL1, x20 // Write to PMR
|
||||
#endif /* defined(CONFIG_HAS_NMI) */
|
||||
msr elr_el1, x21 // set up the return data
|
||||
msr spsr_el1, x22
|
||||
ldp x0, x1, [sp, #16 * 0]
|
||||
ldp x2, x3, [sp, #16 * 1]
|
||||
ldp x4, x5, [sp, #16 * 2]
|
||||
ldp x6, x7, [sp, #16 * 3]
|
||||
ldp x8, x9, [sp, #16 * 4]
|
||||
ldp x10, x11, [sp, #16 * 5]
|
||||
ldp x12, x13, [sp, #16 * 6]
|
||||
ldp x14, x15, [sp, #16 * 7]
|
||||
ldp x16, x17, [sp, #16 * 8]
|
||||
ldp x18, x19, [sp, #16 * 9]
|
||||
ldp x20, x21, [sp, #16 * 10]
|
||||
ldp x22, x23, [sp, #16 * 11]
|
||||
ldp x24, x25, [sp, #16 * 12]
|
||||
ldp x26, x27, [sp, #16 * 13]
|
||||
ldp x28, x29, [sp, #16 * 14]
|
||||
ldr lr, [sp, #S_LR]
|
||||
add sp, sp, #S_FRAME_SIZE // restore sp
|
||||
eret // return to kernel
|
||||
.endm
|
||||
|
||||
.macro get_thread_info, rd
|
||||
mov \rd, sp
|
||||
and \rd, \rd, #~(KERNEL_STACK_SIZE - 1) // top of stack
|
||||
.endm
|
||||
|
||||
/*
|
||||
* These are the registers used in the syscall handler, and allow us to
|
||||
* have in theory up to 7 arguments to a function - x0 to x6.
|
||||
*
|
||||
* x7 is reserved for the system call number in 32-bit mode.
|
||||
*/
|
||||
sc_nr .req x25 // number of system calls
|
||||
scno .req x26 // syscall number
|
||||
stbl .req x27 // syscall table pointer
|
||||
tsk .req x28 // current thread_info
|
||||
|
||||
/*
|
||||
* Interrupt handling.
|
||||
*/
|
||||
.macro irq_handler
|
||||
adrp x1, handle_arch_irq
|
||||
ldr x1, [x1, #:lo12:handle_arch_irq]
|
||||
mov x0, sp
|
||||
blr x1
|
||||
.endm
|
||||
|
||||
.text
|
||||
|
||||
/*
|
||||
* Exception vectors.
|
||||
*/
|
||||
|
||||
.align 11
|
||||
ENTRY(vectors)
|
||||
ventry el1_sync_invalid // Synchronous EL1t
|
||||
ventry el1_irq_invalid // IRQ EL1t
|
||||
ventry el1_fiq_invalid // FIQ EL1t
|
||||
ventry el1_error_invalid // Error EL1t
|
||||
|
||||
ventry el1_sync // Synchronous EL1h
|
||||
ventry el1_irq // IRQ EL1h
|
||||
ventry el1_fiq_invalid // FIQ EL1h
|
||||
ventry el1_error_invalid // Error EL1h
|
||||
|
||||
ventry el0_sync // Synchronous 64-bit EL0
|
||||
ventry el0_irq // IRQ 64-bit EL0
|
||||
ventry el0_fiq_invalid // FIQ 64-bit EL0
|
||||
ventry el0_error_invalid // Error 64-bit EL0
|
||||
|
||||
ventry el0_sync_invalid // Synchronous 32-bit EL0
|
||||
ventry el0_irq_invalid // IRQ 32-bit EL0
|
||||
ventry el0_fiq_invalid // FIQ 32-bit EL0
|
||||
ventry el0_error_invalid // Error 32-bit EL0
|
||||
END(vectors)
|
||||
|
||||
/*
|
||||
* Invalid mode handlers
|
||||
*/
|
||||
.macro inv_entry, el, reason, regsize = 64
|
||||
kernel_entry el, \regsize
|
||||
mov x0, sp
|
||||
mov x1, #\reason
|
||||
mrs x2, esr_el1
|
||||
enable_nmi
|
||||
.if \el == 0
|
||||
bl bad_mode
|
||||
b ret_to_user
|
||||
.else
|
||||
b bad_mode
|
||||
.endif
|
||||
.endm
|
||||
|
||||
el0_sync_invalid:
|
||||
inv_entry 0, BAD_SYNC
|
||||
ENDPROC(el0_sync_invalid)
|
||||
|
||||
el0_irq_invalid:
|
||||
inv_entry 0, BAD_IRQ
|
||||
ENDPROC(el0_irq_invalid)
|
||||
|
||||
el0_fiq_invalid:
|
||||
inv_entry 0, BAD_FIQ
|
||||
ENDPROC(el0_fiq_invalid)
|
||||
|
||||
el0_error_invalid:
|
||||
inv_entry 0, BAD_ERROR
|
||||
ENDPROC(el0_error_invalid)
|
||||
|
||||
el1_sync_invalid:
|
||||
inv_entry 1, BAD_SYNC
|
||||
ENDPROC(el1_sync_invalid)
|
||||
|
||||
el1_irq_invalid:
|
||||
inv_entry 1, BAD_IRQ
|
||||
ENDPROC(el1_irq_invalid)
|
||||
|
||||
el1_fiq_invalid:
|
||||
inv_entry 1, BAD_FIQ
|
||||
ENDPROC(el1_fiq_invalid)
|
||||
|
||||
el1_error_invalid:
|
||||
inv_entry 1, BAD_ERROR
|
||||
ENDPROC(el1_error_invalid)
|
||||
|
||||
/*
|
||||
* EL1 mode handlers.
|
||||
*/
|
||||
.align 6
|
||||
el1_sync:
|
||||
kernel_entry 1
|
||||
mrs x1, esr_el1 // read the syndrome register
|
||||
lsr x24, x1, #ESR_ELx_EC_SHIFT // exception class
|
||||
cmp x24, #ESR_ELx_EC_DABT_CUR // data abort in EL1
|
||||
b.eq el1_da
|
||||
// cmp x24, #ESR_ELx_EC_IABT_CUR // instruction abort in EL1
|
||||
// b.eq el1_ia
|
||||
cmp x24, #ESR_ELx_EC_SYS64 // configurable trap
|
||||
b.eq el1_undef
|
||||
cmp x24, #ESR_ELx_EC_SP_ALIGN // stack alignment exception
|
||||
b.eq el1_sp_pc
|
||||
cmp x24, #ESR_ELx_EC_PC_ALIGN // pc alignment exception
|
||||
b.eq el1_sp_pc
|
||||
cmp x24, #ESR_ELx_EC_UNKNOWN // unknown exception in EL1
|
||||
b.eq el1_undef
|
||||
// cmp x24, #ESR_ELx_EC_BREAKPT_CUR // debug exception in EL1
|
||||
// b.ge el1_dbg
|
||||
b el1_inv
|
||||
el1_ia:
|
||||
/*
|
||||
* Fall through to the Data abort case
|
||||
*/
|
||||
el1_da:
|
||||
/*
|
||||
* Data abort handling
|
||||
*/
|
||||
mrs x0, far_el1
|
||||
enable_nmi
|
||||
enable_dbg
|
||||
#if defined(CONFIG_HAS_NMI)
|
||||
# define PSR_INTR_SHIFT PSR_G_SHIFT // PSR_G_BIT
|
||||
#else /* defined(CONFIG_HAS_NMI) */
|
||||
# define PSR_INTR_SHIFT 7 // PSR_I_BIT
|
||||
#endif /* defined(CONFIG_HAS_NMI) */
|
||||
// re-enable interrupts if they were enabled in the aborted context
|
||||
tbnz x23, #PSR_INTR_SHIFT, 1f
|
||||
enable_irq x2
|
||||
1:
|
||||
mov x2, sp // struct pt_regs
|
||||
bl do_mem_abort
|
||||
|
||||
// disable interrupts before pulling preserved data off the stack
|
||||
kernel_exit 1
|
||||
|
||||
el1_sp_pc:
|
||||
/*
|
||||
* Stack or PC alignment exception handling
|
||||
*/
|
||||
mrs x0, far_el1
|
||||
enable_nmi
|
||||
enable_dbg
|
||||
mov x2, sp
|
||||
b do_sp_pc_abort
|
||||
el1_undef:
|
||||
/*
|
||||
* Undefined instruction
|
||||
*/
|
||||
enable_nmi
|
||||
enable_dbg
|
||||
mov x0, sp
|
||||
b do_undefinstr
|
||||
// el1_dbg:
|
||||
// /*
|
||||
// * Debug exception handling
|
||||
// */
|
||||
// cmp x24, #ESR_ELx_EC_BRK64 // if BRK64
|
||||
// cinc x24, x24, eq // set bit '0'
|
||||
// tbz x24, #0, el1_inv // EL1 only
|
||||
// mrs x0, far_el1
|
||||
// mov x2, sp // struct pt_regs
|
||||
// bl do_debug_exception
|
||||
// kernel_exit 1
|
||||
el1_inv:
|
||||
// TODO: add support for undefined instructions in kernel mode
|
||||
mov x0, sp
|
||||
mov x1, #BAD_SYNC
|
||||
mrs x2, esr_el1
|
||||
enable_nmi
|
||||
enable_dbg
|
||||
b bad_mode
|
||||
ENDPROC(el1_sync)
|
||||
|
||||
/*
|
||||
* EL1 mode handlers.
|
||||
*/
|
||||
.align 6
|
||||
el1_irq:
|
||||
kernel_entry 1
|
||||
enable_dbg
|
||||
|
||||
irq_handler
|
||||
|
||||
kernel_exit 1
|
||||
ENDPROC(el1_irq)
|
||||
|
||||
/*
|
||||
* EL0 mode handlers.
|
||||
*/
|
||||
.align 6
|
||||
el0_sync:
|
||||
kernel_entry 0
|
||||
mrs x25, esr_el1 // read the syndrome register
|
||||
lsr x24, x25, #ESR_ELx_EC_SHIFT // exception class
|
||||
cmp x24, #ESR_ELx_EC_SVC64 // SVC in 64-bit state
|
||||
b.eq el0_svc
|
||||
cmp x24, #ESR_ELx_EC_DABT_LOW // data abort in EL0
|
||||
b.eq el0_da
|
||||
cmp x24, #ESR_ELx_EC_IABT_LOW // instruction abort in EL0
|
||||
b.eq el0_ia
|
||||
cmp x24, #ESR_ELx_EC_FP_ASIMD // FP/ASIMD access
|
||||
b.eq el0_fpsimd_acc
|
||||
#ifdef CONFIG_ARM64_SVE
|
||||
cmp x24, #ESR_ELx_EC_SVE // SVE access
|
||||
b.eq el0_sve_acc
|
||||
#endif
|
||||
cmp x24, #ESR_ELx_EC_FP_EXC64 // FP/ASIMD exception
|
||||
b.eq el0_fpsimd_exc
|
||||
cmp x24, #ESR_ELx_EC_SYS64 // configurable trap
|
||||
b.eq el0_undef
|
||||
cmp x24, #ESR_ELx_EC_SP_ALIGN // stack alignment exception
|
||||
b.eq el0_sp_pc
|
||||
cmp x24, #ESR_ELx_EC_PC_ALIGN // pc alignment exception
|
||||
b.eq el0_sp_pc
|
||||
cmp x24, #ESR_ELx_EC_UNKNOWN // unknown exception in EL0
|
||||
b.eq el0_undef
|
||||
cmp x24, #ESR_ELx_EC_BREAKPT_LOW // debug exception in EL0
|
||||
b.ge el0_dbg
|
||||
b el0_inv
|
||||
el0_svc:
|
||||
uxtw scno, w8 // syscall number in w8
|
||||
cmp scno, #__NR_rt_sigreturn
|
||||
b.eq 1f
|
||||
str x0, [sp, #S_ORIG_X0] // save the original x0
|
||||
ldr x16, [sp, #S_PC]
|
||||
str x16, [sp, #S_ORIG_PC] // save the original pc
|
||||
1: str scno, [sp, #S_SYSCALLNO] // save syscall number
|
||||
enable_nmi
|
||||
enable_dbg_and_irq x0
|
||||
adrp x16, __arm64_syscall_handler
|
||||
ldr x16, [x16, #:lo12:__arm64_syscall_handler]
|
||||
mov x0, scno
|
||||
mov x1, sp
|
||||
blr x16 // __arm64_syscall_handler(int, syscall_num, ihk_mc_user_context_t *uctx);
|
||||
/* Signal check has been completed at the stage of came back. */
|
||||
b ret_fast_syscall
|
||||
el0_da:
|
||||
/*
|
||||
* Data abort handling
|
||||
*/
|
||||
mrs x26, far_el1
|
||||
// enable interrupts before calling the main handler
|
||||
enable_nmi
|
||||
enable_dbg_and_irq x0
|
||||
// ct_user_exit
|
||||
bic x0, x26, #(0xff << 56)
|
||||
mov x1, x25
|
||||
mov x2, sp
|
||||
bl do_mem_abort
|
||||
b ret_to_user
|
||||
el0_ia:
|
||||
/*
|
||||
* Instruction abort handling
|
||||
*/
|
||||
mrs x26, far_el1
|
||||
// enable interrupts before calling the main handler
|
||||
enable_nmi
|
||||
enable_dbg_and_irq x0
|
||||
// ct_user_exit
|
||||
mov x0, x26
|
||||
mov x1, x25
|
||||
mov x2, sp
|
||||
bl do_mem_abort
|
||||
b ret_to_user
|
||||
el0_fpsimd_acc:
|
||||
/*
|
||||
* Floating Point or Advanced SIMD access
|
||||
*/
|
||||
enable_nmi
|
||||
enable_dbg
|
||||
// ct_user_exit
|
||||
mov x0, x25
|
||||
mov x1, sp
|
||||
bl do_fpsimd_acc
|
||||
b ret_to_user
|
||||
#ifdef CONFIG_ARM64_SVE
|
||||
/*
|
||||
* Scalable Vector Extension access
|
||||
*/
|
||||
el0_sve_acc:
|
||||
enable_nmi
|
||||
enable_dbg
|
||||
// ct_user_exit
|
||||
mov x0, x25
|
||||
mov x1, sp
|
||||
bl do_sve_acc
|
||||
b ret_to_user
|
||||
#endif
|
||||
el0_fpsimd_exc:
|
||||
/*
|
||||
* Floating Point, Advanced SIMD or SVE exception
|
||||
*/
|
||||
enable_nmi
|
||||
enable_dbg
|
||||
// ct_user_exit
|
||||
mov x0, x25
|
||||
mov x1, sp
|
||||
bl do_fpsimd_exc
|
||||
b ret_to_user
|
||||
el0_sp_pc:
|
||||
/*
|
||||
* Stack or PC alignment exception handling
|
||||
*/
|
||||
mrs x26, far_el1
|
||||
// enable interrupts before calling the main handler
|
||||
enable_nmi
|
||||
enable_dbg_and_irq x0
|
||||
mov x0, x26
|
||||
mov x1, x25
|
||||
mov x2, sp
|
||||
bl do_sp_pc_abort
|
||||
b ret_to_user
|
||||
el0_undef:
|
||||
/*
|
||||
* Undefined instruction
|
||||
*/
|
||||
// enable interrupts before calling the main handler
|
||||
enable_nmi
|
||||
enable_dbg_and_irq x0
|
||||
// ct_user_exit
|
||||
mov x0, sp
|
||||
bl do_undefinstr
|
||||
b ret_to_user
|
||||
el0_dbg:
|
||||
/*
|
||||
* Debug exception handling
|
||||
*/
|
||||
tbnz x24, #0, el0_inv // EL0 only
|
||||
mrs x0, far_el1
|
||||
mov x1, x25
|
||||
mov x2, sp
|
||||
enable_nmi
|
||||
bl do_debug_exception
|
||||
enable_dbg
|
||||
// ct_user_exit
|
||||
b ret_to_user
|
||||
el0_inv:
|
||||
enable_dbg
|
||||
mov x0, sp
|
||||
mov x1, #BAD_SYNC
|
||||
mrs x2, esr_el1
|
||||
enable_nmi
|
||||
bl bad_mode
|
||||
b ret_to_user
|
||||
ENDPROC(el0_sync)
|
||||
.align 6
|
||||
el0_irq:
|
||||
kernel_entry 0
|
||||
enable_dbg
|
||||
irq_handler
|
||||
b ret_to_user
|
||||
ENDPROC(el0_irq)
|
||||
|
||||
/*
|
||||
* Register switch for AArch64. The callee-saved registers need to be saved
|
||||
* and restored. On entry:
|
||||
* x0 = previous task_struct (must be preserved across the switch)
|
||||
* x1 = next task_struct
|
||||
* Previous and next are guaranteed not to be the same.
|
||||
*
|
||||
*/
|
||||
ENTRY(cpu_switch_to)
|
||||
cmp x0, xzr // for idle process branch(skip save)
|
||||
b.eq 1f
|
||||
add x8, x0, #TI_CPU_CONTEXT
|
||||
mov x9, sp
|
||||
stp x19, x20, [x8], #16 // store callee-saved registers
|
||||
stp x21, x22, [x8], #16
|
||||
stp x23, x24, [x8], #16
|
||||
stp x25, x26, [x8], #16
|
||||
stp x27, x28, [x8], #16
|
||||
stp x29, x9, [x8], #16
|
||||
str lr, [x8]
|
||||
1: add x8, x1, #TI_CPU_CONTEXT
|
||||
ldp x19, x20, [x8], #16 // restore callee-saved registers
|
||||
ldp x21, x22, [x8], #16
|
||||
ldp x23, x24, [x8], #16
|
||||
ldp x25, x26, [x8], #16
|
||||
ldp x27, x28, [x8], #16
|
||||
ldp x29, x9, [x8], #16
|
||||
ldr lr, [x8]
|
||||
mov sp, x9
|
||||
mov x0, x2 // return void *prev
|
||||
ret
|
||||
ENDPROC(cpu_switch_to)
|
||||
|
||||
|
||||
ret_fast_syscall:
|
||||
kernel_exit 0, 1
|
||||
ENDPROC(ret_fast_syscall)
|
||||
|
||||
/*
|
||||
* "slow" syscall return path.
|
||||
*/
|
||||
ret_to_user:
|
||||
no_work_pending:
|
||||
kernel_exit 0, 1
|
||||
ENDPROC(ret_to_user)
|
||||
|
||||
/*
|
||||
* This is how we return from a fork.
|
||||
*/
|
||||
ENTRY(ret_from_fork)
|
||||
// bl schedule_tail
|
||||
cbz x19, 1f // not a kernel thread
|
||||
mov x0, x20
|
||||
blr x19
|
||||
1: get_thread_info tsk
|
||||
bl release_runq_lock
|
||||
bl utilthr_migrate
|
||||
b ret_to_user
|
||||
ENDPROC(ret_from_fork)
|
||||
|
||||
295
arch/arm64/kernel/fault.c
Normal file
295
arch/arm64/kernel/fault.c
Normal file
@ -0,0 +1,295 @@
|
||||
/* fault.c COPYRIGHT FUJITSU LIMITED 2015-2018 */
|
||||
|
||||
#include <ihk/context.h>
|
||||
#include <ihk/debug.h>
|
||||
#include <ptrace.h>
|
||||
#include <esr.h>
|
||||
#include <signal.h>
|
||||
#include <arch-memory.h>
|
||||
#include <thread_info.h>
|
||||
#include <syscall.h>
|
||||
#include <debug-monitors.h>
|
||||
|
||||
unsigned long __page_fault_handler_address;
|
||||
extern int interrupt_from_user(void *);
|
||||
|
||||
static void do_bad_area(unsigned long addr, unsigned int esr, struct pt_regs *regs);
|
||||
static int do_page_fault(unsigned long addr, unsigned int esr, struct pt_regs *regs);
|
||||
static int do_translation_fault(unsigned long addr, unsigned int esr, struct pt_regs *regs);
|
||||
static int do_bad(unsigned long addr, unsigned int esr, struct pt_regs *regs);
|
||||
static int do_alignment_fault(unsigned long addr, unsigned int esr, struct pt_regs *regs);
|
||||
|
||||
static struct fault_info {
|
||||
int (*fn)(unsigned long addr, unsigned int esr, struct pt_regs *regs);
|
||||
int sig;
|
||||
int code;
|
||||
const char *name;
|
||||
} fault_info[] = {
|
||||
{ do_bad, SIGBUS, 0, "ttbr address size fault" },
|
||||
{ do_bad, SIGBUS, 0, "level 1 address size fault" },
|
||||
{ do_bad, SIGBUS, 0, "level 2 address size fault" },
|
||||
{ do_bad, SIGBUS, 0, "level 3 address size fault" },
|
||||
{ do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 0 translation fault" },
|
||||
{ do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 1 translation fault" },
|
||||
{ do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 2 translation fault" },
|
||||
{ do_page_fault, SIGSEGV, SEGV_MAPERR, "level 3 translation fault" },
|
||||
{ do_bad, SIGBUS, 0, "unknown 8" },
|
||||
{ do_page_fault, SIGSEGV, SEGV_ACCERR, "level 1 access flag fault" },
|
||||
{ do_page_fault, SIGSEGV, SEGV_ACCERR, "level 2 access flag fault" },
|
||||
{ do_page_fault, SIGSEGV, SEGV_ACCERR, "level 3 access flag fault" },
|
||||
{ do_bad, SIGBUS, 0, "unknown 12" },
|
||||
{ do_page_fault, SIGSEGV, SEGV_ACCERR, "level 1 permission fault" },
|
||||
{ do_page_fault, SIGSEGV, SEGV_ACCERR, "level 2 permission fault" },
|
||||
{ do_page_fault, SIGSEGV, SEGV_ACCERR, "level 3 permission fault" },
|
||||
{ do_bad, SIGBUS, 0, "synchronous external abort" },
|
||||
{ do_bad, SIGBUS, 0, "unknown 17" },
|
||||
{ do_bad, SIGBUS, 0, "unknown 18" },
|
||||
{ do_bad, SIGBUS, 0, "unknown 19" },
|
||||
{ do_bad, SIGBUS, 0, "synchronous external abort (translation table walk)" },
|
||||
{ do_bad, SIGBUS, 0, "synchronous external abort (translation table walk)" },
|
||||
{ do_bad, SIGBUS, 0, "synchronous external abort (translation table walk)" },
|
||||
{ do_bad, SIGBUS, 0, "synchronous external abort (translation table walk)" },
|
||||
{ do_bad, SIGBUS, 0, "synchronous parity error" },
|
||||
{ do_bad, SIGBUS, 0, "unknown 25" },
|
||||
{ do_bad, SIGBUS, 0, "unknown 26" },
|
||||
{ do_bad, SIGBUS, 0, "unknown 27" },
|
||||
{ do_bad, SIGBUS, 0, "synchronous parity error (translation table walk)" },
|
||||
{ do_bad, SIGBUS, 0, "synchronous parity error (translation table walk)" },
|
||||
{ do_bad, SIGBUS, 0, "synchronous parity error (translation table walk)" },
|
||||
{ do_bad, SIGBUS, 0, "synchronous parity error (translation table walk)" },
|
||||
{ do_bad, SIGBUS, 0, "unknown 32" },
|
||||
{ do_alignment_fault, SIGBUS, BUS_ADRALN, "alignment fault" },
|
||||
{ do_bad, SIGBUS, 0, "unknown 34" },
|
||||
{ do_bad, SIGBUS, 0, "unknown 35" },
|
||||
{ do_bad, SIGBUS, 0, "unknown 36" },
|
||||
{ do_bad, SIGBUS, 0, "unknown 37" },
|
||||
{ do_bad, SIGBUS, 0, "unknown 38" },
|
||||
{ do_bad, SIGBUS, 0, "unknown 39" },
|
||||
{ do_bad, SIGBUS, 0, "unknown 40" },
|
||||
{ do_bad, SIGBUS, 0, "unknown 41" },
|
||||
{ do_bad, SIGBUS, 0, "unknown 42" },
|
||||
{ do_bad, SIGBUS, 0, "unknown 43" },
|
||||
{ do_bad, SIGBUS, 0, "unknown 44" },
|
||||
{ do_bad, SIGBUS, 0, "unknown 45" },
|
||||
{ do_bad, SIGBUS, 0, "unknown 46" },
|
||||
{ do_bad, SIGBUS, 0, "unknown 47" },
|
||||
{ do_bad, SIGBUS, 0, "TLB conflict abort" },
|
||||
{ do_bad, SIGBUS, 0, "unknown 49" },
|
||||
{ do_bad, SIGBUS, 0, "unknown 50" },
|
||||
{ do_bad, SIGBUS, 0, "unknown 51" },
|
||||
{ do_bad, SIGBUS, 0, "implementation fault (lockdown abort)" },
|
||||
{ do_bad, SIGBUS, 0, "implementation fault (unsupported exclusive)" },
|
||||
{ do_bad, SIGBUS, 0, "unknown 54" },
|
||||
{ do_bad, SIGBUS, 0, "unknown 55" },
|
||||
{ do_bad, SIGBUS, 0, "unknown 56" },
|
||||
{ do_bad, SIGBUS, 0, "unknown 57" },
|
||||
{ do_bad, SIGBUS, 0, "unknown 58" },
|
||||
{ do_bad, SIGBUS, 0, "unknown 59" },
|
||||
{ do_bad, SIGBUS, 0, "unknown 60" },
|
||||
{ do_bad, SIGBUS, 0, "section domain fault" },
|
||||
{ do_bad, SIGBUS, 0, "page domain fault" },
|
||||
{ do_bad, SIGBUS, 0, "unknown 63" },
|
||||
};
|
||||
|
||||
static const char *fault_name(unsigned int esr)
|
||||
{
|
||||
const struct fault_info *inf = fault_info + (esr & 63);
|
||||
return inf->name;
|
||||
}
|
||||
|
||||
/*
|
||||
* Dispatch a data abort to the relevant handler.
|
||||
*/
|
||||
void do_mem_abort(unsigned long addr, unsigned int esr, struct pt_regs *regs)
|
||||
{
|
||||
const struct fault_info *inf = fault_info + (esr & 63);
|
||||
struct siginfo info;
|
||||
const int from_user = interrupt_from_user(regs);
|
||||
|
||||
/* set_cputime called in inf->fn() */
|
||||
if (!inf->fn(addr, esr, regs))
|
||||
return;
|
||||
|
||||
set_cputime(from_user ? CPUTIME_MODE_U2K : CPUTIME_MODE_K2K_IN);
|
||||
kprintf("Unhandled fault: %s (0x%08x) at 0x%016lx\n", inf->name, esr, addr);
|
||||
info.si_signo = inf->sig;
|
||||
info.si_errno = 0;
|
||||
info.si_code = inf->code;
|
||||
info._sifields._sigfault.si_addr = (void*)addr;
|
||||
|
||||
arm64_notify_die("", regs, &info, esr);
|
||||
set_cputime(from_user ? CPUTIME_MODE_K2U : CPUTIME_MODE_K2K_OUT);
|
||||
}
|
||||
|
||||
/*
|
||||
* Handle stack alignment exceptions.
|
||||
*/
|
||||
void do_sp_pc_abort(unsigned long addr, unsigned int esr, struct pt_regs *regs)
|
||||
{
|
||||
struct siginfo info;
|
||||
const int from_user = interrupt_from_user(regs);
|
||||
|
||||
set_cputime(from_user ? CPUTIME_MODE_U2K : CPUTIME_MODE_K2K_IN);
|
||||
|
||||
info.si_signo = SIGBUS;
|
||||
info.si_errno = 0;
|
||||
info.si_code = BUS_ADRALN;
|
||||
info._sifields._sigfault.si_addr = (void*)addr;
|
||||
arm64_notify_die("", regs, &info, esr);
|
||||
set_cputime(from_user ? CPUTIME_MODE_K2U : CPUTIME_MODE_K2K_OUT);
|
||||
}
|
||||
|
||||
static void do_bad_area(unsigned long addr, unsigned int esr, struct pt_regs *regs)
|
||||
{
|
||||
struct siginfo info;
|
||||
const int from_user = interrupt_from_user(regs);
|
||||
|
||||
set_cputime(from_user ? CPUTIME_MODE_U2K : CPUTIME_MODE_K2K_IN);
|
||||
/*
|
||||
* If we are in kernel mode at this point, we have no context to
|
||||
* handle this fault with.
|
||||
*/
|
||||
if (interrupt_from_user(regs)) {
|
||||
kprintf("unhandled %s (%d) at 0x%08lx, esr 0x%03x\n",
|
||||
fault_name(esr), SIGSEGV, addr, esr);
|
||||
|
||||
current_thread_info()->fault_address = addr;
|
||||
current_thread_info()->fault_code = esr;
|
||||
info.si_signo = SIGSEGV;
|
||||
info.si_errno = 0;
|
||||
info.si_code = SEGV_MAPERR;
|
||||
info._sifields._sigfault.si_addr = (void *)addr;
|
||||
set_signal(SIGSEGV, regs, &info);
|
||||
|
||||
} else {
|
||||
kprintf("Unable to handle kernel %s at virtual address %08lx\n",
|
||||
(addr < PAGE_SIZE) ? "NULL pointer dereference" : "paging request", addr);
|
||||
panic("OOps.");
|
||||
}
|
||||
set_cputime(from_user ? CPUTIME_MODE_K2U : CPUTIME_MODE_K2K_OUT);
|
||||
}
|
||||
|
||||
static int is_el0_instruction_abort(unsigned int esr)
|
||||
{
|
||||
return ESR_ELx_EC(esr) == ESR_ELx_EC_IABT_LOW;
|
||||
}
|
||||
|
||||
static int do_page_fault(unsigned long addr, unsigned int esr,
|
||||
struct pt_regs *regs)
|
||||
{
|
||||
void (*page_fault_handler)(void *, uint64_t, void *);
|
||||
uint64_t reason = 0;
|
||||
int esr_ec_dfsc = (esr & 63);
|
||||
|
||||
if (interrupt_from_user(regs)) {
|
||||
reason |= PF_USER;
|
||||
}
|
||||
|
||||
if (is_el0_instruction_abort(esr)) {
|
||||
reason |= PF_INSTR;
|
||||
} else if ((esr & ESR_ELx_WNR) && !(esr & ESR_ELx_CM)) {
|
||||
reason |= PF_WRITE;
|
||||
if (13 <= esr_ec_dfsc && esr_ec_dfsc <= 15 ) {
|
||||
/* level [1-3] permission fault */
|
||||
reason |= PF_PROT;
|
||||
}
|
||||
}
|
||||
|
||||
/* set_cputime() call in page_fault_handler() */
|
||||
page_fault_handler = (void *)__page_fault_handler_address;
|
||||
(*page_fault_handler)((void *)addr, reason, regs);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* First Level Translation Fault Handler
|
||||
*
|
||||
* We enter here because the first level page table doesn't contain a valid
|
||||
* entry for the address.
|
||||
*
|
||||
* If the address is in kernel space (>= TASK_SIZE), then we are probably
|
||||
* faulting in the vmalloc() area.
|
||||
*
|
||||
* If the init_task's first level page tables contains the relevant entry, we
|
||||
* copy the it to this task. If not, we send the process a signal, fixup the
|
||||
* exception, or oops the kernel.
|
||||
*
|
||||
* NOTE! We MUST NOT take any locks for this case. We may be in an interrupt
|
||||
* or a critical region, and should only copy the information from the master
|
||||
* page table, nothing more.
|
||||
*/
|
||||
static int do_translation_fault(unsigned long addr,
|
||||
unsigned int esr,
|
||||
struct pt_regs *regs)
|
||||
{
|
||||
if (addr < USER_END)
|
||||
return do_page_fault(addr, esr, regs);
|
||||
|
||||
do_bad_area(addr, esr, regs);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int do_alignment_fault(unsigned long addr, unsigned int esr,
|
||||
struct pt_regs *regs)
|
||||
{
|
||||
do_bad_area(addr, esr, regs);
|
||||
return 0;
|
||||
}
|
||||
|
||||
extern int breakpoint_handler(unsigned long unused, unsigned int esr, struct pt_regs *regs);
|
||||
extern int single_step_handler(unsigned long addr, unsigned int esr, struct pt_regs *regs);
|
||||
extern int watchpoint_handler(unsigned long addr, unsigned int esr, struct pt_regs *regs);
|
||||
extern int brk_handler(unsigned long addr, unsigned int esr, struct pt_regs *regs);
|
||||
static struct fault_info debug_fault_info[] = {
|
||||
{ breakpoint_handler, SIGTRAP, TRAP_HWBKPT, "hw-breakpoint handler" },
|
||||
{ single_step_handler, SIGTRAP, TRAP_HWBKPT, "single-step handler" },
|
||||
{ watchpoint_handler, SIGTRAP, TRAP_HWBKPT, "hw-watchpoint handler" },
|
||||
{ do_bad, SIGBUS, 0, "unknown 3" },
|
||||
{ do_bad, SIGTRAP, TRAP_BRKPT, "aarch32 BKPT" },
|
||||
{ do_bad, SIGTRAP, 0, "aarch32 vector catch" },
|
||||
{ brk_handler, SIGTRAP, TRAP_BRKPT, "ptrace BRK handler" },
|
||||
{ do_bad, SIGBUS, 0, "unknown 7" },
|
||||
};
|
||||
|
||||
int do_debug_exception(unsigned long addr, unsigned int esr, struct pt_regs *regs)
|
||||
{
|
||||
const struct fault_info *inf = debug_fault_info + DBG_ESR_EVT(esr);
|
||||
struct siginfo info;
|
||||
const int from_user = interrupt_from_user(regs);
|
||||
int ret = -1;
|
||||
|
||||
set_cputime(from_user ? CPUTIME_MODE_U2K : CPUTIME_MODE_K2K_IN);
|
||||
|
||||
if (!inf->fn(addr, esr, regs)) {
|
||||
ret = 1;
|
||||
goto out;
|
||||
}
|
||||
|
||||
kprintf("Unhandled debug exception: %s (0x%08x) at 0x%016lx\n",
|
||||
inf->name, esr, addr);
|
||||
|
||||
info.si_signo = inf->sig;
|
||||
info.si_errno = 0;
|
||||
info.si_code = inf->code;
|
||||
info._sifields._sigfault.si_addr = (void *)addr;
|
||||
|
||||
arm64_notify_die("", regs, &info, 0);
|
||||
|
||||
ret = 0;
|
||||
out:
|
||||
set_cputime(from_user ? CPUTIME_MODE_K2U : CPUTIME_MODE_K2K_OUT);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* This abort handler always returns "fault".
|
||||
*/
|
||||
static int do_bad(unsigned long addr, unsigned int esr, struct pt_regs *regs)
|
||||
{
|
||||
const int from_user = interrupt_from_user(regs);
|
||||
|
||||
set_cputime(from_user ? CPUTIME_MODE_U2K : CPUTIME_MODE_K2K_IN);
|
||||
set_cputime(from_user ? CPUTIME_MODE_K2U : CPUTIME_MODE_K2K_OUT);
|
||||
return 1;
|
||||
}
|
||||
426
arch/arm64/kernel/fpsimd.c
Normal file
426
arch/arm64/kernel/fpsimd.c
Normal file
@ -0,0 +1,426 @@
|
||||
/* fpsimd.c COPYRIGHT FUJITSU LIMITED 2016-2019 */
|
||||
#include <thread_info.h>
|
||||
#include <fpsimd.h>
|
||||
#include <cpuinfo.h>
|
||||
#include <lwk/compiler.h>
|
||||
#include <ikc/ihk.h>
|
||||
#include <hwcap.h>
|
||||
#include <cls.h>
|
||||
#include <prctl.h>
|
||||
#include <cpufeature.h>
|
||||
#include <kmalloc.h>
|
||||
#include <ihk/debug.h>
|
||||
#include <process.h>
|
||||
#include <bitmap.h>
|
||||
|
||||
//#define DEBUG_PRINT_FPSIMD
|
||||
|
||||
#ifdef DEBUG_PRINT_FPSIMD
|
||||
#undef DDEBUG_DEFAULT
|
||||
#define DDEBUG_DEFAULT DDEBUG_PRINT
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_ARM64_SVE
|
||||
|
||||
/* Set of available vector lengths, as vq_to_bit(vq): */
|
||||
static DECLARE_BITMAP(sve_vq_map, SVE_VQ_MAX);
|
||||
|
||||
/* Maximum supported vector length across all CPUs (initially poisoned) */
|
||||
int sve_max_vl = -1;
|
||||
|
||||
/* Default VL for tasks that don't set it explicitly: */
|
||||
int sve_default_vl = -1;
|
||||
|
||||
/*
|
||||
* Helpers to translate bit indices in sve_vq_map to VQ values (and
|
||||
* vice versa). This allows find_next_bit() to be used to find the
|
||||
* _maximum_ VQ not exceeding a certain value.
|
||||
*/
|
||||
|
||||
static unsigned int vq_to_bit(unsigned int vq)
|
||||
{
|
||||
return SVE_VQ_MAX - vq;
|
||||
}
|
||||
|
||||
static unsigned int bit_to_vq(unsigned int bit)
|
||||
{
|
||||
if (bit >= SVE_VQ_MAX) {
|
||||
bit = SVE_VQ_MAX - 1;
|
||||
}
|
||||
return SVE_VQ_MAX - bit;
|
||||
}
|
||||
|
||||
/*
|
||||
* All vector length selection from userspace comes through here.
|
||||
* We're on a slow path, so some sanity-checks are included.
|
||||
* If things go wrong there's a bug somewhere, but try to fall back to a
|
||||
* safe choice.
|
||||
*/
|
||||
static unsigned int find_supported_vector_length(unsigned int vl)
|
||||
{
|
||||
int bit;
|
||||
int max_vl = sve_max_vl;
|
||||
|
||||
if (!sve_vl_valid(vl)) {
|
||||
vl = SVE_VL_MIN;
|
||||
}
|
||||
|
||||
if (!sve_vl_valid(max_vl)) {
|
||||
max_vl = SVE_VL_MIN;
|
||||
}
|
||||
|
||||
if (vl > max_vl) {
|
||||
vl = max_vl;
|
||||
}
|
||||
|
||||
bit = find_next_bit(sve_vq_map, SVE_VQ_MAX,
|
||||
vq_to_bit(sve_vq_from_vl(vl)));
|
||||
return sve_vl_from_vq(bit_to_vq(bit));
|
||||
}
|
||||
|
||||
static void sve_probe_vqs(DECLARE_BITMAP(map, SVE_VQ_MAX))
|
||||
{
|
||||
unsigned int vq, vl;
|
||||
unsigned long zcr;
|
||||
|
||||
bitmap_zero(map, SVE_VQ_MAX);
|
||||
|
||||
zcr = ZCR_EL1_LEN_MASK;
|
||||
zcr = read_sysreg_s(SYS_ZCR_EL1) & ~zcr;
|
||||
|
||||
for (vq = SVE_VQ_MAX; vq >= SVE_VQ_MIN; --vq) {
|
||||
/* self-syncing */
|
||||
write_sysreg_s(zcr | (vq - 1), SYS_ZCR_EL1);
|
||||
vl = sve_get_vl();
|
||||
/* skip intervening lengths */
|
||||
vq = sve_vq_from_vl(vl);
|
||||
set_bit(vq_to_bit(vq), map);
|
||||
}
|
||||
}
|
||||
|
||||
void sve_init_vq_map(void)
|
||||
{
|
||||
sve_probe_vqs(sve_vq_map);
|
||||
}
|
||||
|
||||
size_t sve_state_size(struct thread const *thread)
|
||||
{
|
||||
unsigned int vl = thread->ctx.thread->sve_vl;
|
||||
|
||||
BUG_ON(!sve_vl_valid(vl));
|
||||
return SVE_SIG_REGS_SIZE(sve_vq_from_vl(vl));
|
||||
}
|
||||
|
||||
void sve_free(struct thread *thread)
|
||||
{
|
||||
if (thread->ctx.thread->sve_state) {
|
||||
kfree(thread->ctx.thread->sve_state);
|
||||
thread->ctx.thread->sve_state = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
int sve_alloc(struct thread *thread)
|
||||
{
|
||||
if (thread->ctx.thread->sve_state) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
thread->ctx.thread->sve_state =
|
||||
kmalloc(sve_state_size(thread), IHK_MC_AP_NOWAIT);
|
||||
if (thread->ctx.thread->sve_state == NULL) {
|
||||
return -ENOMEM;
|
||||
}
|
||||
memset(thread->ctx.thread->sve_state, 0, sve_state_size(thread));
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int get_nr_threads(struct process *proc)
|
||||
{
|
||||
struct thread *child;
|
||||
struct mcs_rwlock_node_irqsave lock;
|
||||
int nr_threads = 0;
|
||||
|
||||
mcs_rwlock_reader_lock(&proc->threads_lock, &lock);
|
||||
list_for_each_entry(child, &proc->threads_list, siblings_list){
|
||||
nr_threads++;
|
||||
}
|
||||
mcs_rwlock_reader_unlock(&proc->threads_lock, &lock);
|
||||
return nr_threads;
|
||||
}
|
||||
|
||||
/* @ref.impl arch/arm64/kernel/fpsimd.c::sve_set_vector_length */
|
||||
int sve_set_vector_length(struct thread *thread,
|
||||
unsigned long vl, unsigned long flags)
|
||||
{
|
||||
struct thread_info *ti = thread->ctx.thread;
|
||||
|
||||
if (flags & ~(unsigned long)(PR_SVE_VL_INHERIT |
|
||||
PR_SVE_SET_VL_ONEXEC)) {
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (!sve_vl_valid(vl)) {
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/*
|
||||
* Clamp to the maximum vector length that VL-agnostic SVE code can
|
||||
* work with. A flag may be assigned in the future to allow setting
|
||||
* of larger vector lengths without confusing older software.
|
||||
*/
|
||||
if (vl > SVE_VL_ARCH_MAX) {
|
||||
vl = SVE_VL_ARCH_MAX;
|
||||
}
|
||||
|
||||
vl = find_supported_vector_length(vl);
|
||||
|
||||
if (flags & (PR_SVE_VL_INHERIT |
|
||||
PR_SVE_SET_VL_ONEXEC)) {
|
||||
ti->sve_vl_onexec = vl;
|
||||
} else {
|
||||
/* Reset VL to system default on next exec: */
|
||||
ti->sve_vl_onexec = 0;
|
||||
}
|
||||
|
||||
/* Only actually set the VL if not deferred: */
|
||||
if (flags & PR_SVE_SET_VL_ONEXEC) {
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (vl == ti->sve_vl) {
|
||||
goto out;
|
||||
}
|
||||
|
||||
if ((elf_hwcap & HWCAP_SVE)) {
|
||||
fp_regs_struct fp_regs;
|
||||
|
||||
memset(&fp_regs, 0, sizeof(fp_regs));
|
||||
|
||||
/* for self at prctl syscall */
|
||||
if (thread == cpu_local_var(current)) {
|
||||
save_fp_regs(thread);
|
||||
clear_fp_regs();
|
||||
thread_sve_to_fpsimd(thread, &fp_regs);
|
||||
sve_free(thread);
|
||||
|
||||
ti->sve_vl = vl;
|
||||
|
||||
sve_alloc(thread);
|
||||
thread_fpsimd_to_sve(thread, &fp_regs);
|
||||
restore_fp_regs(thread);
|
||||
/* for target thread at ptrace */
|
||||
} else {
|
||||
thread_sve_to_fpsimd(thread, &fp_regs);
|
||||
sve_free(thread);
|
||||
|
||||
ti->sve_vl = vl;
|
||||
|
||||
sve_alloc(thread);
|
||||
thread_fpsimd_to_sve(thread, &fp_regs);
|
||||
}
|
||||
}
|
||||
ti->sve_vl = vl;
|
||||
|
||||
out:
|
||||
ti->sve_flags = flags & PR_SVE_VL_INHERIT;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* @ref.impl arch/arm64/kernel/fpsimd.c::sve_prctl_status */
|
||||
/*
|
||||
* Encode the current vector length and flags for return.
|
||||
* This is only required for prctl(): ptrace has separate fields
|
||||
*/
|
||||
static int sve_prctl_status(unsigned long flags)
|
||||
{
|
||||
int ret;
|
||||
struct thread_info *ti = cpu_local_var(current)->ctx.thread;
|
||||
|
||||
if (flags & PR_SVE_SET_VL_ONEXEC) {
|
||||
ret = ti->sve_vl_onexec;
|
||||
}
|
||||
else {
|
||||
ret = ti->sve_vl;
|
||||
}
|
||||
|
||||
if (ti->sve_flags & PR_SVE_VL_INHERIT) {
|
||||
ret |= PR_SVE_VL_INHERIT;
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* @ref.impl arch/arm64/kernel/fpsimd.c::sve_set_task_vl */
|
||||
int sve_set_thread_vl(unsigned long arg)
|
||||
{
|
||||
unsigned long vl, flags;
|
||||
int ret;
|
||||
|
||||
vl = arg & PR_SVE_VL_LEN_MASK;
|
||||
flags = arg & ~vl;
|
||||
|
||||
/* Instead of system_supports_sve() */
|
||||
if (unlikely(!(elf_hwcap & HWCAP_SVE))) {
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
ret = sve_set_vector_length(cpu_local_var(current), vl, flags);
|
||||
if (ret) {
|
||||
return ret;
|
||||
}
|
||||
return sve_prctl_status(flags);
|
||||
}
|
||||
|
||||
/* @ref.impl arch/arm64/kernel/fpsimd.c::sve_get_ti_vl */
|
||||
int sve_get_thread_vl(void)
|
||||
{
|
||||
/* Instead of system_supports_sve() */
|
||||
if (unlikely(!(elf_hwcap & HWCAP_SVE))) {
|
||||
return -EINVAL;
|
||||
}
|
||||
return sve_prctl_status(0);
|
||||
}
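/*
 * Illustrative userspace sketch, not part of this patch: how the two prctl
 * entry points above are typically reached.  It assumes the prctl plumbing
 * mirrors mainline Linux; the PR_SVE_* fallback values below are the Linux
 * UAPI ones and may differ from this tree's <prctl.h>.
 */
#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_SVE_SET_VL
#define PR_SVE_SET_VL		50
#define PR_SVE_SET_VL_ONEXEC	(1 << 18)
#define PR_SVE_GET_VL		51
#define PR_SVE_VL_LEN_MASK	0xffff
#define PR_SVE_VL_INHERIT	(1 << 17)
#endif

int main(void)
{
	/* Ask for a 32-byte VL, effective immediately, reset on exec. */
	int ret = prctl(PR_SVE_SET_VL, 32UL);

	if (ret < 0)
		return 1;
	/* The return value encodes the granted VL plus the inherit flag. */
	printf("granted VL: %d bytes, inherit across exec: %s\n",
	       ret & PR_SVE_VL_LEN_MASK,
	       (ret & PR_SVE_VL_INHERIT) ? "yes" : "no");
	return 0;
}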
|
||||
|
||||
void do_sve_acc(unsigned int esr, struct pt_regs *regs)
|
||||
{
|
||||
kprintf("PANIC: CPU: %d PID: %d ESR: %x Trapped SVE access.\n",
|
||||
ihk_mc_get_processor_id(), cpu_local_var(current)->proc->pid, esr);
|
||||
panic("");
|
||||
}
|
||||
|
||||
void sve_setup(void)
|
||||
{
|
||||
extern unsigned long ihk_param_default_vl;
|
||||
uint64_t zcr;
|
||||
|
||||
/* Instead of system_supports_sve() */
|
||||
if (unlikely(!(elf_hwcap & HWCAP_SVE))) {
|
||||
return;
|
||||
}
|
||||
|
||||
/* init sve_vq_map bitmap */
|
||||
sve_init_vq_map();
|
||||
|
||||
/*
|
||||
* The SVE architecture mandates support for 128-bit vectors,
|
||||
* so sve_vq_map must have at least SVE_VQ_MIN set.
|
||||
* If something went wrong, at least try to patch it up:
|
||||
*/
|
||||
if (!test_bit(vq_to_bit(SVE_VQ_MIN), sve_vq_map)) {
|
||||
set_bit(vq_to_bit(SVE_VQ_MIN), sve_vq_map);
|
||||
}
|
||||
|
||||
zcr = read_system_reg(SYS_ZCR_EL1);
|
||||
sve_max_vl = sve_vl_from_vq((zcr & ZCR_EL1_LEN_MASK) + 1);
|
||||
|
||||
/*
|
||||
* Sanity-check that the max VL we determined through CPU features
|
||||
* corresponds properly to sve_vq_map. If not, do our best:
|
||||
*/
|
||||
if (sve_max_vl != find_supported_vector_length(sve_max_vl)) {
|
||||
sve_max_vl = find_supported_vector_length(sve_max_vl);
|
||||
}
|
||||
|
||||
sve_default_vl = ihk_param_default_vl;
|
||||
|
||||
if (ihk_param_default_vl !=
|
||||
find_supported_vector_length(ihk_param_default_vl)) {
|
||||
kprintf("SVE: Getting unsupported default VL = %d "
|
||||
"from HOST-Linux.\n", sve_default_vl);
|
||||
sve_default_vl = find_supported_vector_length(64);
|
||||
kprintf("SVE: Using default vl(%d byte).\n",
|
||||
sve_default_vl);
|
||||
}
|
||||
|
||||
kprintf("SVE: maximum available vector length %u bytes per vector\n",
|
||||
sve_max_vl);
|
||||
kprintf("SVE: default vector length %u bytes per vector\n",
|
||||
sve_default_vl);
|
||||
}
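/*
 * Illustrative arithmetic, not part of this patch: how the ZCR_EL1 read
 * above turns into sve_max_vl.  ZCR_EL1.LEN holds VQ - 1 and one VQ is
 * 16 bytes, which is what sve_vl_from_vq() expands to.
 */
#include <stdio.h>

int main(void)
{
	unsigned long zcr_len = 3;             /* sample LEN field value */
	unsigned int vq = zcr_len + 1;         /* vector quanta */
	unsigned int vl = vq * 16;             /* bytes per vector */

	printf("LEN=%lu -> VQ=%u -> VL=%u bytes (%u bits)\n",
	       zcr_len, vq, vl, vl * 8);
	return 0;
}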
|
||||
|
||||
#else /* CONFIG_ARM64_SVE */
|
||||
|
||||
void sve_setup(void)
|
||||
{
|
||||
/* nothing to do. */
|
||||
}
|
||||
|
||||
#endif /* CONFIG_ARM64_SVE */
|
||||
|
||||
/* @ref.impl arch/arm64/kernel/fpsimd.c::__task_pffr */
|
||||
static void *__thread_pffr(struct thread *thread)
|
||||
{
|
||||
unsigned int vl = thread->ctx.thread->sve_vl;
|
||||
|
||||
BUG_ON(!sve_vl_valid(vl));
|
||||
return (char *)thread->ctx.thread->sve_state + 34 * vl;
|
||||
}
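/*
 * Illustrative arithmetic, not part of this patch: where the 34 * vl
 * offset above comes from, assuming the usual SVE register layout that
 * SVE_SIG_REGS_SIZE() describes (32 Z registers of vl bytes, then 16
 * predicate registers of vl / 8 bytes, then FFR).
 */
#include <stdio.h>

int main(void)
{
	unsigned int vl = 64;                  /* e.g. 512-bit vectors */
	unsigned int zregs = 32 * vl;          /* Z0..Z31, vl bytes each */
	unsigned int pregs = 16 * (vl / 8);    /* P0..P15, vl/8 bytes each */

	printf("FFR offset = %u bytes (= 34 * vl = %u)\n",
	       zregs + pregs, 34 * vl);
	return 0;
}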
|
||||
|
||||
/* Callers must check the HWCAP_FP and HWCAP_ASIMD state before calling this. */
|
||||
void thread_fpsimd_load(struct thread *thread)
|
||||
{
|
||||
if (likely(elf_hwcap & HWCAP_SVE)) {
|
||||
unsigned int vl = thread->ctx.thread->sve_vl;
|
||||
|
||||
BUG_ON(!sve_vl_valid(vl));
|
||||
sve_load_state(__thread_pffr(thread), &thread->fp_regs->fpsr, sve_vq_from_vl(vl) - 1);
|
||||
dkprintf("sve for TID %d restored\n", thread->tid);
|
||||
} else {
|
||||
// Load the current FPSIMD state to memory.
|
||||
fpsimd_load_state(thread->fp_regs);
|
||||
dkprintf("fp_regs for TID %d restored\n", thread->tid);
|
||||
}
|
||||
}
|
||||
|
||||
/* Callers must check the HWCAP_FP and HWCAP_ASIMD state before calling this. */
|
||||
void thread_fpsimd_save(struct thread *thread)
|
||||
{
|
||||
if (likely(elf_hwcap & HWCAP_SVE)) {
|
||||
sve_save_state(__thread_pffr(thread), &thread->fp_regs->fpsr);
|
||||
dkprintf("sve for TID %d saved\n", thread->tid);
|
||||
} else {
|
||||
// Save the current FPSIMD state to memory.
|
||||
fpsimd_save_state(thread->fp_regs);
|
||||
dkprintf("fp_regs for TID %d saved\n", thread->tid);
|
||||
}
|
||||
}
|
||||
|
||||
/* @ref.impl arch/arm64/kernel/fpsimd.c::__task_fpsimd_to_sve */
|
||||
static void __thread_fpsimd_to_sve(struct thread *thread, fp_regs_struct *fp_regs, unsigned int vq)
|
||||
{
|
||||
struct fpsimd_sve_state(vq) *sst = thread->ctx.thread->sve_state;
|
||||
unsigned int i;
|
||||
|
||||
for (i = 0; i < 32; i++) {
|
||||
sst->zregs[i][0] = fp_regs->vregs[i];
|
||||
}
|
||||
}
|
||||
|
||||
/* @ref.impl arch/arm64/kernel/fpsimd.c::task_fpsimd_to_sve */
|
||||
void thread_fpsimd_to_sve(struct thread *thread, fp_regs_struct *fp_regs)
|
||||
{
|
||||
unsigned int vl = thread->ctx.thread->sve_vl;
|
||||
|
||||
BUG_ON(!sve_vl_valid(vl));
|
||||
__thread_fpsimd_to_sve(thread, fp_regs, sve_vq_from_vl(vl));
|
||||
}
|
||||
|
||||
/* @ref.impl arch/arm64/kernel/fpsimd.c::__task_sve_to_fpsimd */
|
||||
static void __thread_sve_to_fpsimd(struct thread *thread, fp_regs_struct *fp_regs, unsigned int vq)
|
||||
{
|
||||
struct fpsimd_sve_state(vq) *sst = thread->ctx.thread->sve_state;
|
||||
unsigned int i;
|
||||
|
||||
for (i = 0; i < 32; i++) {
|
||||
fp_regs->vregs[i] = sst->zregs[i][0];
|
||||
}
|
||||
}
|
||||
|
||||
/* @ref.impl arch/arm64/kernel/fpsimd.c::task_sve_to_fpsimd */
|
||||
void thread_sve_to_fpsimd(struct thread *thread, fp_regs_struct *fp_regs)
|
||||
{
|
||||
unsigned int vl = thread->ctx.thread->sve_vl;
|
||||
|
||||
BUG_ON(!sve_vl_valid(vl));
|
||||
__thread_sve_to_fpsimd(thread, fp_regs, sve_vq_from_vl(vl));
|
||||
}
|
||||
arch/arm64/kernel/head.S (805 lines, new file)
@@ -0,0 +1,805 @@
|
||||
/* head.S COPYRIGHT FUJITSU LIMITED 2015-2018 */
|
||||
|
||||
#include <linkage.h>
|
||||
#include <ptrace.h>
|
||||
#include <assembler.h>
|
||||
#include <asm-offsets.h>
|
||||
#include <virt.h>
|
||||
#include <cache.h>
|
||||
#include <arch-memory.h>
|
||||
#include <smp.h>
|
||||
#include <arm-gic-v3.h>
|
||||
|
||||
/* KERNEL_RAM_VADDR is defined by cmake */
|
||||
|
||||
//#ifndef CONFIG_SMP
|
||||
//# define PTE_FLAGS PTE_TYPE_PAGE | PTE_AF
|
||||
//# define PMD_FLAGS PMD_TYPE_SECT | PMD_SECT_AF
|
||||
//#else
|
||||
# define PTE_FLAGS PTE_TYPE_PAGE | PTE_AF | PTE_SHARED
|
||||
# define PMD_FLAGS PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S
|
||||
//#endif /*CONFIG_SMP*/
|
||||
|
||||
#ifdef CONFIG_ARM64_64K_PAGES
|
||||
# define MM_MMUFLAGS PTE_ATTRINDX(MT_NORMAL) | PTE_FLAGS
|
||||
#else
|
||||
# define MM_MMUFLAGS PMD_ATTRINDX(MT_NORMAL) | PMD_FLAGS
|
||||
#endif
|
||||
|
||||
.macro pgtbl_init_core, name, dir, tbl, ents, virt_to_phys
|
||||
ldr \tbl, =\name
|
||||
ldr \ents, =\dir
|
||||
add \tbl, \tbl, \virt_to_phys
|
||||
str \ents, [\tbl]
|
||||
add \tbl, \tbl, #8
|
||||
add \ents, \ents, \virt_to_phys
|
||||
str \ents, [\tbl]
|
||||
.endm
|
||||
|
||||
.macro pgtbl_init, tbl, ents, virt_to_phys
|
||||
pgtbl_init_core swapper_page_table, swapper_pg_dir, \tbl, \ents, \virt_to_phys
|
||||
pgtbl_init_core idmap_page_table, idmap_pg_dir, \tbl, \ents, \virt_to_phys
|
||||
.endm
|
||||
|
||||
.macro pgtbl, ttb0, ttb1, virt_to_phys
|
||||
ldr \ttb1, =swapper_pg_dir
|
||||
ldr \ttb0, =idmap_pg_dir
|
||||
add \ttb1, \ttb1, \virt_to_phys
|
||||
add \ttb0, \ttb0, \virt_to_phys
|
||||
.endm
|
||||
|
||||
#define KERNEL_START KERNEL_RAM_VADDR
|
||||
#define KERNEL_END _end
|
||||
|
||||
/* ihk param offset */
|
||||
#define TRAMPOLINE_DATA_RESERVED_SIZE 0x08
|
||||
#define TRAMPOLINE_DATA_PGTBL_SIZE 0x08
|
||||
#define TRAMPOLINE_DATA_LOAD_SIZE 0x08
|
||||
#define TRAMPOLINE_DATA_STACK_SIZE 0x08
|
||||
#define TRAMPOLINE_DATA_BOOT_PARAM_SIZE 0x08
|
||||
#define TRAMPOLINE_DATA_STARTUP_DATA_SIZE 0x08
|
||||
#define TRAMPOLINE_DATA_ST_PHYS_BASE_SIZE 0x08
|
||||
#define TRAMPOLINE_DATA_ST_PHYS_SIZE_SIZE 0x08
|
||||
#define TRAMPOLINE_DATA_GIC_DIST_PA_SIZE 0x08
|
||||
#define TRAMPOLINE_DATA_GIC_DIST_MAP_SIZE_SIZE 0x08
|
||||
#define TRAMPOLINE_DATA_GIC_CPU_PA_SIZE 0x08
|
||||
#define TRAMPOLINE_DATA_GIC_CPU_MAP_SIZE_SIZE 0x08
|
||||
#define TRAMPOLINE_DATA_GIC_PERCPU_OFF_SIZE 0x04
|
||||
#define TRAMPOLINE_DATA_GIC_VERSION_SIZE 0x04
|
||||
#define TRAMPOLINE_DATA_LPJ_SIZE 0x08
|
||||
#define TRAMPOLINE_DATA_HZ_SIZE 0x08
|
||||
#define TRAMPOLINE_DATA_PSCI_METHOD_SIZE 0x08
|
||||
#define TRAMPOLINE_DATA_USE_VIRT_TIMER_SIZE 0x08
|
||||
#define TRAMPOLINE_DATA_EVTSTRM_TIMER_RATE_SIZE 0x08
|
||||
#define TRAMPOLINE_DATA_DEFAULT_VL_SIZE 0x08
|
||||
#define TRAMPOLINE_DATA_CPU_MAP_SIZE_SIZE 0x08
|
||||
#define TRAMPOLINE_DATA_CPU_MAP_SIZE (NR_CPUS * 8)
|
||||
#define TRAMPOLINE_DATA_DATA_RDISTS_PA_SIZE (NR_CPUS * 8)
|
||||
#define TRAMPOLINE_DATA_RETENTION_STATE_FLAG_PA_SIZE 0x08
|
||||
#define TRAMPOLINE_DATA_NR_PMU_AFFI_SIZE 0x04
|
||||
#define TRAMPOLINE_DATA_PMU_AFF_SIZE (CONFIG_SMP_MAX_CORES * 4)
|
||||
|
||||
#define STARTUP_DATA_RESERVED 0x00
|
||||
#define STARTUP_DATA_BASE 0x08
|
||||
#define STARTUP_DATA_PGTBL 0x10
|
||||
#define STARTUP_DATA_STACK 0x18
|
||||
#define STARTUP_DATA_ARG2 0x20
|
||||
#define STARTUP_DATA_TRAMPILINE 0x28
|
||||
#define STARTUP_DATA_NEXT_PC 0x30
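/*
 * Illustrative only, not part of this patch: a C-side view of the startup
 * data block whose byte offsets the STARTUP_DATA_* constants above encode.
 * Field names are informal; arch_start below reads STARTUP_DATA_ARG2 and
 * stores it into ihk_param_phys_addr.
 */
#include <stdint.h>

struct startup_data_view {
	uint64_t reserved;	/* STARTUP_DATA_RESERVED   0x00 */
	uint64_t base;		/* STARTUP_DATA_BASE       0x08 */
	uint64_t pgtbl;		/* STARTUP_DATA_PGTBL      0x10 */
	uint64_t stack;		/* STARTUP_DATA_STACK      0x18 */
	uint64_t arg2;		/* STARTUP_DATA_ARG2       0x20 */
	uint64_t trampoline;	/* STARTUP_DATA_TRAMPILINE 0x28 */
	uint64_t next_pc;	/* STARTUP_DATA_NEXT_PC    0x30 */
};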
|
||||
|
||||
/* ihk param save area */
|
||||
.globl ihk_param_head
|
||||
.globl ihk_param_gic_dist_base_pa, ihk_param_gic_cpu_base_pa
|
||||
.globl ihk_param_gic_dist_map_size, ihk_param_gic_cpu_map_size
|
||||
.globl ihk_param_gic_percpu_offset, ihk_param_gic_version
|
||||
.globl ihk_param_lpj, ihk_param_hz, ihk_param_psci_method
|
||||
.globl ihk_param_cpu_logical_map, ihk_param_gic_rdist_base_pa
|
||||
.globl ihk_param_pmu_irq_affi, ihk_param_nr_pmu_irq_affi
|
||||
.globl ihk_param_use_virt_timer, ihk_param_evtstrm_timer_rate
|
||||
.globl ihk_param_retention_state_flag_pa, ihk_param_default_vl
|
||||
ihk_param_head:
|
||||
ihk_param_param_addr:
|
||||
.quad 0
|
||||
ihk_param_phys_addr:
|
||||
.quad 0
|
||||
ihk_param_st_phys_base:
|
||||
.quad 0
|
||||
ihk_param_st_phys_size:
|
||||
.quad 0
|
||||
ihk_param_gic_dist_base_pa:
|
||||
.quad 0
|
||||
ihk_param_gic_dist_map_size:
|
||||
.quad 0
|
||||
ihk_param_gic_cpu_base_pa:
|
||||
.quad 0
|
||||
ihk_param_gic_cpu_map_size:
|
||||
.quad 0
|
||||
ihk_param_gic_percpu_offset:
|
||||
.word 0
|
||||
ihk_param_gic_version:
|
||||
.word 0
|
||||
ihk_param_lpj:
|
||||
.quad 0 /* udelay loops value */
|
||||
ihk_param_hz:
|
||||
.quad 0 /* host HZ value */
|
||||
ihk_param_psci_method:
|
||||
.quad 0 /* hvc or smc ? */
|
||||
ihk_param_use_virt_timer:
|
||||
.quad 0 /* virt timer or phys timer ? */
|
||||
ihk_param_evtstrm_timer_rate:
|
||||
.quad 0 /* event stream timer rate */
|
||||
ihk_param_default_vl:
|
||||
.quad 0 /* SVE default VL */
|
||||
ihk_param_cpu_logical_map:
|
||||
.skip NR_CPUS * 8 /* array of the MPIDR and the core number */
|
||||
ihk_param_gic_rdist_base_pa:
|
||||
	.skip	NR_CPUS * 8	/* per-cpu redistributor PA */
|
||||
ihk_param_retention_state_flag_pa:
|
||||
.quad 0
|
||||
ihk_param_pmu_irq_affi:
|
||||
.skip CONFIG_SMP_MAX_CORES * 4 /* array of the pmu affinity list */
|
||||
ihk_param_nr_pmu_irq_affi:
|
||||
.word 0 /* number of pmu affinity list elements. */
|
||||
|
||||
/* @ref.impl arch/arm64/include/asm/kvm_arm.h */
|
||||
#define HCR_E2H (UL(1) << 34)
|
||||
#define HCR_RW_SHIFT 31
|
||||
#define HCR_RW (UL(1) << HCR_RW_SHIFT)
|
||||
#define HCR_TGE (UL(1) << 27)
|
||||
|
||||
/*
|
||||
* end early head section, begin head code that is also used for
|
||||
* hotplug and needs to have the same protections as the text region
|
||||
*/
|
||||
.section ".text","ax"
|
||||
|
||||
ENTRY(arch_start)
|
||||
/* store ihk param */
|
||||
/* x4 = ihk_smp_trampoline_data PA */
|
||||
add x0, x4, #TRAMPOLINE_DATA_RESERVED_SIZE
|
||||
/* header_pgtbl */
|
||||
add x0, x0, #TRAMPOLINE_DATA_PGTBL_SIZE
|
||||
/* header_load */
|
||||
add x0, x0, #TRAMPOLINE_DATA_LOAD_SIZE
|
||||
/* stack_ptr */
|
||||
add x0, x0, #TRAMPOLINE_DATA_STACK_SIZE
|
||||
/* notify_address */
|
||||
ldr x16, [x0], #TRAMPOLINE_DATA_BOOT_PARAM_SIZE
|
||||
adr x15, ihk_param_param_addr
|
||||
str x16, [x15]
|
||||
/* startup_data */
|
||||
ldr x16, [x0], #TRAMPOLINE_DATA_STARTUP_DATA_SIZE
|
||||
ldr x15, [x16, #STARTUP_DATA_ARG2]
|
||||
adr x17, ihk_param_phys_addr
|
||||
str x15, [x17]
|
||||
/* st_phys_base */
|
||||
ldr x16, [x0], #TRAMPOLINE_DATA_ST_PHYS_BASE_SIZE
|
||||
adr x15, ihk_param_st_phys_base
|
||||
str x16, [x15]
|
||||
/* st_phys_size */
|
||||
ldr x16, [x0], #TRAMPOLINE_DATA_ST_PHYS_SIZE_SIZE
|
||||
adr x15, ihk_param_st_phys_size
|
||||
str x16, [x15]
|
||||
/* dist_base_pa */
|
||||
ldr x16, [x0], #TRAMPOLINE_DATA_GIC_DIST_PA_SIZE
|
||||
adr x15, ihk_param_gic_dist_base_pa
|
||||
str x16, [x15]
|
||||
/* dist_map_size */
|
||||
ldr x16, [x0], #TRAMPOLINE_DATA_GIC_DIST_MAP_SIZE_SIZE
|
||||
adr x15, ihk_param_gic_dist_map_size
|
||||
str x16, [x15]
|
||||
/* cpu_base_pa */
|
||||
ldr x16, [x0], #TRAMPOLINE_DATA_GIC_CPU_PA_SIZE
|
||||
adr x15, ihk_param_gic_cpu_base_pa
|
||||
str x16, [x15]
|
||||
/* cpu_map_size */
|
||||
ldr x16, [x0], #TRAMPOLINE_DATA_GIC_CPU_MAP_SIZE_SIZE
|
||||
adr x15, ihk_param_gic_cpu_map_size
|
||||
str x16, [x15]
|
||||
/* percpu_offset */
|
||||
ldr w16, [x0], #TRAMPOLINE_DATA_GIC_PERCPU_OFF_SIZE
|
||||
adr x15, ihk_param_gic_percpu_offset
|
||||
str w16, [x15]
|
||||
/* gic_version */
|
||||
ldr w16, [x0], #TRAMPOLINE_DATA_GIC_VERSION_SIZE
|
||||
adr x15, ihk_param_gic_version
|
||||
str w16, [x15]
|
||||
/* loops_per_jiffy */
|
||||
ldr x16, [x0], #TRAMPOLINE_DATA_LPJ_SIZE
|
||||
adr x15, ihk_param_lpj
|
||||
str x16, [x15]
|
||||
/* hz */
|
||||
ldr x16, [x0], #TRAMPOLINE_DATA_HZ_SIZE
|
||||
adr x15, ihk_param_hz
|
||||
str x16, [x15]
|
||||
/* psci_method */
|
||||
ldr x16, [x0], #TRAMPOLINE_DATA_PSCI_METHOD_SIZE
|
||||
adr x15, ihk_param_psci_method
|
||||
str x16, [x15]
|
||||
/* use_virt_timer */
|
||||
ldr x16, [x0], #TRAMPOLINE_DATA_USE_VIRT_TIMER_SIZE
|
||||
adr x15, ihk_param_use_virt_timer
|
||||
str x16, [x15]
|
||||
/* evtstrm_timer_rate */
|
||||
ldr x16, [x0], #TRAMPOLINE_DATA_EVTSTRM_TIMER_RATE_SIZE
|
||||
adr x15, ihk_param_evtstrm_timer_rate
|
||||
str x16, [x15]
|
||||
/* SVE default VL */
|
||||
ldr x16, [x0], #TRAMPOLINE_DATA_DEFAULT_VL_SIZE
|
||||
adr x15, ihk_param_default_vl
|
||||
str x16, [x15]
|
||||
/* cpu_logical_map_size */
|
||||
ldr x16, [x0], #TRAMPOLINE_DATA_CPU_MAP_SIZE_SIZE
|
||||
mov x1, x16
|
||||
/* cpu_logical_map */
|
||||
adr x15, ihk_param_cpu_logical_map
|
||||
mov x18, x0
|
||||
1: ldr x17, [x18], #8
|
||||
str x17, [x15], #8
|
||||
sub x16, x16, #1
|
||||
cmp x16, #0
|
||||
b.ne 1b
|
||||
mov x16, #NR_CPUS /* calc next data */
|
||||
lsl x16, x16, 3
|
||||
add x0, x0, x16
|
||||
|
||||
/* reset cpu_logical_map_size */
|
||||
mov x16, x1
|
||||
/* gic_rdist_base_pa */
|
||||
adr x15, ihk_param_gic_rdist_base_pa
|
||||
mov x18, x0
|
||||
1: ldr x17, [x18], #8
|
||||
str x17, [x15], #8
|
||||
sub x16, x16, #1
|
||||
cmp x16, #0
|
||||
b.ne 1b
|
||||
mov x16, #NR_CPUS /* calc next data */
|
||||
lsl x16, x16, 3
|
||||
add x0, x0, x16
|
||||
/* retention_state_flag_pa */
|
||||
ldr x16, [x0], #TRAMPOLINE_DATA_RETENTION_STATE_FLAG_PA_SIZE
|
||||
adr x15, ihk_param_retention_state_flag_pa
|
||||
str x16, [x15]
|
||||
/* nr_pmu_irq_affi */
|
||||
ldr w16, [x0], #TRAMPOLINE_DATA_NR_PMU_AFFI_SIZE
|
||||
adr x15, ihk_param_nr_pmu_irq_affi
|
||||
str w16, [x15]
|
||||
/* pmu_irq_affi */
|
||||
mov x18, x0
|
||||
adr x15, ihk_param_pmu_irq_affi
|
||||
b 2f
|
||||
1: ldr w17, [x18], #4
|
||||
str w17, [x15], #4
|
||||
sub w16, w16, #1
|
||||
2: cmp w16, #0
|
||||
b.ne 1b
|
||||
|
||||
mov x16, #CONFIG_SMP_MAX_CORES /* calc next data */
|
||||
lsl x16, x16, 2
|
||||
add x0, x0, x16
|
||||
/* */
|
||||
bl __calc_phys_offset // x24=PHYS_OFFSET, x28=PHYS_OFFSET-KERNEL_START
|
||||
bl __create_page_tables // x25=TTBR0, x26=TTBR1
|
||||
b secondary_entry_common
|
||||
ENDPROC(arch_start)
|
||||
|
||||
ENTRY(arch_ap_start)
|
||||
bl __calc_phys_offset // x24=PHYS_OFFSET, x28=PHYS_OFFSET-KERNEL_START
|
||||
b secondary_entry_common
|
||||
ENDPROC(arch_ap_start)
|
||||
|
||||
/*
|
||||
* Macro to create a table entry to the next page.
|
||||
*
|
||||
* tbl: page table address
|
||||
* virt: virtual address
|
||||
* shift: #imm page table shift
|
||||
* ptrs: #imm pointers per table page
|
||||
*
|
||||
* Preserves: virt
|
||||
* Corrupts: tmp1, tmp2
|
||||
* Returns: tbl -> next level table page address
|
||||
*/
|
||||
.macro create_table_entry, tbl, virt, shift, ptrs, tmp1, tmp2
|
||||
lsr \tmp1, \virt, #\shift
|
||||
and \tmp1, \tmp1, #\ptrs - 1 // table index
|
||||
add \tmp2, \tbl, #PAGE_SIZE
|
||||
orr \tmp2, \tmp2, #PMD_TYPE_TABLE // address of next table and entry type
|
||||
str \tmp2, [\tbl, \tmp1, lsl #3]
|
||||
add \tbl, \tbl, #PAGE_SIZE // next level table page
|
||||
.endm
|
||||
|
||||
/*
|
||||
 * Macro to populate the PGD (and possibly PUD) for the corresponding
|
||||
* block entry in the next level (tbl) for the given virtual address.
|
||||
*
|
||||
* Preserves: tbl, next, virt
|
||||
* Corrupts: tmp1, tmp2
|
||||
*/
|
||||
.macro create_pgd_entry, tbl, virt, tmp1, tmp2
|
||||
create_table_entry \tbl, \virt, PGDIR_SHIFT, PTRS_PER_PGD, \tmp1, \tmp2
|
||||
#if SWAPPER_PGTABLE_LEVELS == 3
|
||||
create_table_entry \tbl, \virt, TABLE_SHIFT, PTRS_PER_PTE, \tmp1, \tmp2
|
||||
#endif
|
||||
.endm
|
||||
|
||||
/*
|
||||
* Macro to populate block entries in the page table for the start..end
|
||||
* virtual range (inclusive).
|
||||
*
|
||||
* Preserves: tbl, flags
|
||||
* Corrupts: phys, start, end, pstate
|
||||
*/
|
||||
.macro create_block_map, tbl, flags, phys, start, end
|
||||
lsr \phys, \phys, #BLOCK_SHIFT
|
||||
lsr \start, \start, #BLOCK_SHIFT
|
||||
and \start, \start, #PTRS_PER_PTE - 1 // table index
|
||||
orr \phys, \flags, \phys, lsl #BLOCK_SHIFT // table entry
|
||||
lsr \end, \end, #BLOCK_SHIFT
|
||||
and \end, \end, #PTRS_PER_PTE - 1 // table end index
|
||||
9999: str \phys, [\tbl, \start, lsl #3] // store the entry
|
||||
add \start, \start, #1 // next entry
|
||||
add \phys, \phys, #BLOCK_SIZE // next block
|
||||
cmp \start, \end
|
||||
b.ls 9999b
|
||||
.endm
|
||||
|
||||
/*
|
||||
* Setup the initial page tables. We only setup the barest amount which is
|
||||
* required to get the kernel running. The following sections are required:
|
||||
* - identity mapping to enable the MMU (low address, TTBR0)
|
||||
* - first few MB of the kernel linear mapping to jump to once the MMU has
|
||||
* been enabled, including the FDT blob (TTBR1)
|
||||
* - pgd entry for fixed mappings (TTBR1)
|
||||
*/
|
||||
__create_page_tables:
|
||||
pgtbl_init x25, x26, x28
|
||||
pgtbl x25, x26, x28 // idmap_pg_dir and swapper_pg_dir addresses
|
||||
mov x27, lr
|
||||
|
||||
/*
|
||||
* Invalidate the idmap and swapper page tables to avoid potential
|
||||
* dirty cache lines being evicted.
|
||||
*/
|
||||
mov x0, x25
|
||||
add x1, x26, #SWAPPER_DIR_SIZE
|
||||
bl __inval_cache_range
|
||||
|
||||
/*
|
||||
* Clear the idmap and swapper page tables.
|
||||
*/
|
||||
mov x0, x25
|
||||
add x6, x26, #SWAPPER_DIR_SIZE
|
||||
1: stp xzr, xzr, [x0], #16
|
||||
stp xzr, xzr, [x0], #16
|
||||
stp xzr, xzr, [x0], #16
|
||||
stp xzr, xzr, [x0], #16
|
||||
cmp x0, x6
|
||||
b.lo 1b
|
||||
|
||||
ldr x7, =MM_MMUFLAGS
|
||||
|
||||
/*
|
||||
* Create the identity mapping.
|
||||
*/
|
||||
mov x0, x25 // idmap_pg_dir
|
||||
ldr x3, =KERNEL_START
|
||||
add x3, x3, x28 // __pa(KERNEL_START)
|
||||
create_pgd_entry x0, x3, x5, x6
|
||||
ldr x6, =KERNEL_END
|
||||
mov x5, x3 // __pa(KERNEL_START)
|
||||
add x6, x6, x28 // __pa(KERNEL_END)
|
||||
create_block_map x0, x7, x3, x5, x6
|
||||
|
||||
/*
|
||||
* Map the kernel image (starting with PHYS_OFFSET).
|
||||
*/
|
||||
mov x0, x26 // swapper_pg_dir
|
||||
ldr x5, =KERNEL_START
|
||||
create_pgd_entry x0, x5, x3, x6
|
||||
ldr x6, =KERNEL_END
|
||||
mov x3, x24 // phys offset
|
||||
create_block_map x0, x7, x3, x5, x6
|
||||
|
||||
/*
|
||||
* Map the early_alloc_pages area, kernel_img next block
|
||||
*/
|
||||
ldr x3, =KERNEL_END
|
||||
add x3, x3, x28 // __pa(KERNEL_END)
|
||||
add x3, x3, #BLOCK_SIZE
|
||||
sub x3, x3, #1
|
||||
bic x3, x3, #(BLOCK_SIZE - 1) // start PA calc.
|
||||
ldr x5, =KERNEL_END // get start VA
|
||||
add x5, x5, #BLOCK_SIZE
|
||||
sub x5, x5, #1
|
||||
bic x5, x5, #(BLOCK_SIZE - 1) // start VA calc.
|
||||
mov x6, #MAP_EARLY_ALLOC_SIZE
|
||||
add x6, x5, x6 // end VA calc
|
||||
mov x23, x6 // save end VA
|
||||
sub x6, x6, #1 // inclusive range
|
||||
create_block_map x0, x7, x3, x5, x6
|
||||
|
||||
/*
|
||||
* Map the boot_param area
|
||||
*/
|
||||
adr x3, ihk_param_param_addr
|
||||
ldr x3, [x3] // get boot_param PA
|
||||
mov x5, x23 // get start VA
|
||||
add x5, x5, #BLOCK_SIZE
|
||||
sub x5, x5, #1
|
||||
bic x5, x5, #(BLOCK_SIZE - 1) // start VA calc
|
||||
mov x6, #MAP_BOOT_PARAM_SIZE
|
||||
add x6, x5, x6 // end VA calc.
|
||||
sub x6, x6, #1 // inclusive range
|
||||
create_block_map x0, x7, x3, x5, x6
|
||||
|
||||
/*
|
||||
* Map the FDT blob (maximum 2MB; must be within 512MB of
|
||||
* PHYS_OFFSET).
|
||||
*/
|
||||
/* FDT disable for McKernel */
|
||||
// mov x3, x21 // FDT phys address
|
||||
// and x3, x3, #~((1 << 21) - 1) // 2MB aligned
|
||||
// mov x6, #PAGE_OFFSET
|
||||
// sub x5, x3, x24 // subtract PHYS_OFFSET
|
||||
// tst x5, #~((1 << 29) - 1) // within 512MB?
|
||||
// csel x21, xzr, x21, ne // zero the FDT pointer
|
||||
// b.ne 1f
|
||||
// add x5, x5, x6 // __va(FDT blob)
|
||||
// add x6, x5, #1 << 21 // 2MB for the FDT blob
|
||||
// sub x6, x6, #1 // inclusive range
|
||||
// create_block_map x0, x7, x3, x5, x6
|
||||
1:
|
||||
/*
|
||||
* Since the page tables have been populated with non-cacheable
|
||||
* accesses (MMU disabled), invalidate the idmap and swapper page
|
||||
* tables again to remove any speculatively loaded cache lines.
|
||||
*/
|
||||
mov x0, x25
|
||||
add x1, x26, #SWAPPER_DIR_SIZE
|
||||
bl __inval_cache_range
|
||||
|
||||
mov lr, x27
|
||||
ret
|
||||
ENDPROC(__create_page_tables)
|
||||
.ltorg
|
||||
|
||||
/*
|
||||
* If we're fortunate enough to boot at EL2, ensure that the world is
|
||||
* sane before dropping to EL1.
|
||||
*
|
||||
* Returns either BOOT_CPU_MODE_EL1 or BOOT_CPU_MODE_EL2 in x20 if
|
||||
* booted in EL1 or EL2 respectively.
|
||||
*/
|
||||
ENTRY(el2_setup)
|
||||
mrs x0, CurrentEL
|
||||
cmp x0, #CurrentEL_EL2
|
||||
b.ne 1f
|
||||
mrs x0, sctlr_el2
|
||||
CPU_BE( orr x0, x0, #(1 << 25) ) // Set the EE bit for EL2
|
||||
CPU_LE( bic x0, x0, #(1 << 25) ) // Clear the EE bit for EL2
|
||||
msr sctlr_el2, x0
|
||||
b 2f
|
||||
1: mrs x0, sctlr_el1
|
||||
CPU_BE( orr x0, x0, #(3 << 24) ) // Set the EE and E0E bits for EL1
|
||||
CPU_LE( bic x0, x0, #(3 << 24) ) // Clear the EE and E0E bits for EL1
|
||||
msr sctlr_el1, x0
|
||||
mov w20, #BOOT_CPU_MODE_EL1 // This cpu booted in EL1
|
||||
isb
|
||||
ret
|
||||
|
||||
2:
|
||||
#ifdef CONFIG_ARM64_VHE
|
||||
/*
|
||||
* Check for VHE being present. For the rest of the EL2 setup,
|
||||
* x2 being non-zero indicates that we do have VHE, and that the
|
||||
* kernel is intended to run at EL2.
|
||||
*/
|
||||
mrs x2, id_aa64mmfr1_el1
|
||||
ubfx x2, x2, #8, #4
|
||||
#else /* CONFIG_ARM64_VHE */
|
||||
mov x2, xzr
|
||||
#endif /* CONFIG_ARM64_VHE */
|
||||
|
||||
/* Hyp configuration. */
|
||||
mov x0, #HCR_RW // 64-bit EL1
|
||||
cbz x2, set_hcr
|
||||
orr x0, x0, #HCR_TGE // Enable Host Extensions
|
||||
orr x0, x0, #HCR_E2H
|
||||
set_hcr:
|
||||
msr hcr_el2, x0
|
||||
isb
|
||||
|
||||
/* Generic timers. */
|
||||
mrs x0, cnthctl_el2
|
||||
orr x0, x0, #3 // Enable EL1 physical timers
|
||||
msr cnthctl_el2, x0
|
||||
msr cntvoff_el2, xzr // Clear virtual offset
|
||||
|
||||
#ifdef CONFIG_ARM_GIC_V3
|
||||
/* GICv3 system register access */
|
||||
mrs x0, id_aa64pfr0_el1
|
||||
ubfx x0, x0, #24, #4
|
||||
cmp x0, #1
|
||||
b.ne 3f
|
||||
|
||||
mrs_s x0, ICC_SRE_EL2
|
||||
orr x0, x0, #ICC_SRE_EL2_SRE // Set ICC_SRE_EL2.SRE==1
|
||||
orr x0, x0, #ICC_SRE_EL2_ENABLE // Set ICC_SRE_EL2.Enable==1
|
||||
msr_s ICC_SRE_EL2, x0
|
||||
isb // Make sure SRE is now set
|
||||
msr_s ICH_HCR_EL2, xzr // Reset ICC_HCR_EL2 to defaults
|
||||
|
||||
3:
|
||||
#endif
|
||||
|
||||
/* Populate ID registers. */
|
||||
mrs x0, midr_el1
|
||||
mrs x1, mpidr_el1
|
||||
msr vpidr_el2, x0
|
||||
msr vmpidr_el2, x1
|
||||
|
||||
/*
|
||||
* When VHE is not in use, early init of EL2 and EL1 needs to be
|
||||
* done here.
|
||||
* When VHE _is_ in use, EL1 will not be used in the host and
|
||||
* requires no configuration, and all non-hyp-specific EL2 setup
|
||||
* will be done via the _EL1 system register aliases in __cpu_setup.
|
||||
*/
|
||||
cbnz x2, 1f
|
||||
|
||||
/* sctlr_el1 */
|
||||
mov x0, #0x0800 // Set/clear RES{1,0} bits
|
||||
CPU_BE( movk x0, #0x33d0, lsl #16 ) // Set EE and E0E on BE systems
|
||||
CPU_LE( movk x0, #0x30d0, lsl #16 ) // Clear EE and E0E on LE systems
|
||||
msr sctlr_el1, x0
|
||||
|
||||
/* Coprocessor traps. */
|
||||
mov x0, #0x33ff
|
||||
|
||||
/* SVE register access */
|
||||
mrs x1, id_aa64pfr0_el1
|
||||
ubfx x1, x1, #ID_AA64PFR0_SVE_SHIFT, #4
|
||||
cbz x1, 4f
|
||||
|
||||
bic x0, x0, #CPTR_EL2_TZ // Disable SVE traps to EL2
|
||||
msr cptr_el2, x0 // Disable copro. traps to EL2
|
||||
isb
|
||||
|
||||
mov x1, #ZCR_EL1_LEN_MASK // SVE: Enable full vector
|
||||
msr_s SYS_ZCR_EL1, x1 // length for EL1.
|
||||
b 1f
|
||||
|
||||
4: msr cptr_el2, x0 // Disable copro. traps to EL2
|
||||
1:
|
||||
#ifdef CONFIG_COMPAT
|
||||
msr hstr_el2, xzr // Disable CP15 traps to EL2
|
||||
#endif
|
||||
|
||||
/* Stage-2 translation */
|
||||
msr vttbr_el2, xzr
|
||||
|
||||
cbz x2, install_el2_stub
|
||||
|
||||
mov w20, #BOOT_CPU_MODE_EL2 // This CPU booted in EL2
|
||||
isb
|
||||
ret
|
||||
|
||||
install_el2_stub:
|
||||
/* Hypervisor stub */
|
||||
adrp x0, __hyp_stub_vectors
|
||||
add x0, x0, #:lo12:__hyp_stub_vectors
|
||||
msr vbar_el2, x0
|
||||
|
||||
/* spsr */
|
||||
mov x0, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
|
||||
PSR_MODE_EL1h)
|
||||
msr spsr_el2, x0
|
||||
msr elr_el2, lr
|
||||
mov w20, #BOOT_CPU_MODE_EL2 // This CPU booted in EL2
|
||||
eret
|
||||
ENDPROC(el2_setup)
|
||||
|
||||
/*
|
||||
* Sets the __boot_cpu_mode flag depending on the CPU boot mode passed
|
||||
* in x20. See arch/arm64/include/asm/virt.h for more info.
|
||||
*/
|
||||
ENTRY(set_cpu_boot_mode_flag)
|
||||
ldr x1, =__boot_cpu_mode // Compute __boot_cpu_mode
|
||||
add x1, x1, x28
|
||||
cmp w20, #BOOT_CPU_MODE_EL2
|
||||
b.ne 1f
|
||||
add x1, x1, #4
|
||||
1: str w20, [x1] // This CPU has booted in EL1
|
||||
dmb sy
|
||||
dc ivac, x1 // Invalidate potentially stale cache line
|
||||
ret
|
||||
ENDPROC(set_cpu_boot_mode_flag)
|
||||
|
||||
#if defined(CONFIG_HAS_NMI)
|
||||
/*
|
||||
* void maybe_switch_to_sysreg_gic_cpuif(void)
|
||||
*
|
||||
* Enable interrupt controller system register access if this feature
|
||||
* has been detected by the alternatives system.
|
||||
*
|
||||
* Before we jump into generic code we must enable interrupt controller system
|
||||
* register access because this is required by the irqflags macros. We must
|
||||
* also mask interrupts at the PMR and unmask them within the PSR. That leaves
|
||||
* us set up and ready for the kernel to make its first call to
|
||||
* arch_local_irq_enable().
|
||||
*
|
||||
*/
|
||||
ENTRY(maybe_switch_to_sysreg_gic_cpuif)
|
||||
mrs_s x0, ICC_SRE_EL1
|
||||
orr x0, x0, #1
|
||||
msr_s ICC_SRE_EL1, x0 // Set ICC_SRE_EL1.SRE==1
|
||||
isb // Make sure SRE is now set
|
||||
mov x0, ICC_PMR_EL1_MASKED
|
||||
msr_s ICC_PMR_EL1, x0 // Prepare for unmask of I bit
|
||||
msr daifclr, #2 // Clear the I bit
|
||||
ret
|
||||
ENDPROC(maybe_switch_to_sysreg_gic_cpuif)
|
||||
#else
|
||||
ENTRY(maybe_switch_to_sysreg_gic_cpuif)
|
||||
ret
|
||||
ENDPROC(maybe_switch_to_sysreg_gic_cpuif)
|
||||
#endif /* defined(CONFIG_HAS_NMI) */
|
||||
|
||||
/*
|
||||
* We need to find out the CPU boot mode long after boot, so we need to
|
||||
* store it in a writable variable.
|
||||
*
|
||||
* This is not in .bss, because we set it sufficiently early that the boot-time
|
||||
* zeroing of .bss would clobber it.
|
||||
*/
|
||||
.pushsection .data..cacheline_aligned
|
||||
ENTRY(__boot_cpu_mode)
|
||||
.align L1_CACHE_SHIFT
|
||||
.long BOOT_CPU_MODE_EL2
|
||||
.long 0
|
||||
.popsection
|
||||
|
||||
ENTRY(secondary_entry_common)
|
||||
bl el2_setup // Drop to EL1
|
||||
bl set_cpu_boot_mode_flag
|
||||
b secondary_startup
|
||||
ENDPROC(secondary_entry_common)
|
||||
|
||||
ENTRY(secondary_startup)
|
||||
/*
|
||||
* Common entry point for secondary CPUs.
|
||||
*/
|
||||
mrs x22, midr_el1 // x22=cpuid
|
||||
mov x0, x22
|
||||
bl lookup_processor_type
|
||||
mov x23, x0 // x23=current cpu_table
|
||||
cbz x23, __error_p // invalid processor (x23=0)?
|
||||
|
||||
pgtbl x25, x26, x28 // x25=TTBR0, x26=TTBR1
|
||||
ldr x12, [x23, #CPU_INFO_SETUP]
|
||||
add x12, x12, x28 // __virt_to_phys
|
||||
blr x12 // initialise processor
|
||||
|
||||
ldr x21, =secondary_data
|
||||
ldr x27, =__secondary_switched // address to jump to after enabling the MMU
|
||||
b __enable_mmu
|
||||
ENDPROC(secondary_startup)
|
||||
|
||||
ENTRY(__secondary_switched)
|
||||
ldr x0, [x21, #SECONDARY_DATA_STACK] // get secondary_data.stack
|
||||
mov sp, x0
|
||||
|
||||
/*
|
||||
* Conditionally switch to GIC PMR for interrupt masking (this
|
||||
* will be a nop if we are using normal interrupt masking)
|
||||
*/
|
||||
bl maybe_switch_to_sysreg_gic_cpuif
|
||||
mov x29, #0
|
||||
|
||||
adr x1, secondary_data
|
||||
ldr x0, [x1, #SECONDARY_DATA_ARG] // get secondary_data.arg
|
||||
ldr x27, [x1, #SECONDARY_DATA_NEXT_PC] // get secondary_data.next_pc
|
||||
br x27 // secondary_data.next_pc(secondary_data.arg);
|
||||
ENDPROC(__secondary_switched)
|
||||
|
||||
/*
|
||||
* Setup common bits before finally enabling the MMU. Essentially this is just
|
||||
* loading the page table pointer and vector base registers.
|
||||
*
|
||||
* On entry to this code, x0 must contain the SCTLR_EL1 value for turning on
|
||||
* the MMU.
|
||||
*/
|
||||
__enable_mmu:
|
||||
ldr x5, =vectors
|
||||
msr vbar_el1, x5
|
||||
msr ttbr0_el1, x25 // load TTBR0
|
||||
msr ttbr1_el1, x26 // load TTBR1
|
||||
isb
|
||||
b __turn_mmu_on
|
||||
ENDPROC(__enable_mmu)
|
||||
|
||||
/*
|
||||
* Enable the MMU. This completely changes the structure of the visible memory
|
||||
* space. You will not be able to trace execution through this.
|
||||
*
|
||||
* x0 = system control register
|
||||
* x27 = *virtual* address to jump to upon completion
|
||||
*
|
||||
* other registers depend on the function called upon completion
|
||||
*
|
||||
* We align the entire function to the smallest power of two larger than it to
|
||||
 * ensure it fits within a single block map entry. Otherwise, were PHYS_OFFSET
 * close to the end of a 512MB or 1GB block, we might require an additional
 * table to map the entire function.
|
||||
*/
|
||||
.align 4
|
||||
__turn_mmu_on:
|
||||
msr sctlr_el1, x0
|
||||
isb
|
||||
br x27
|
||||
ENDPROC(__turn_mmu_on)
|
||||
|
||||
/*
|
||||
* Calculate the start of physical memory.
|
||||
*/
|
||||
__calc_phys_offset:
|
||||
adr x0, 1f
|
||||
ldp x1, x2, [x0]
|
||||
sub x28, x0, x1 // x28 = PHYS_OFFSET - KERNEL_START
|
||||
add x24, x2, x28 // x24 = PHYS_OFFSET
|
||||
ret
|
||||
ENDPROC(__calc_phys_offset)
|
||||
|
||||
.align 3
|
||||
1: .quad .
|
||||
.quad KERNEL_START
|
||||
|
||||
/*
|
||||
* Exception handling. Something went wrong and we can't proceed. We ought to
|
||||
* tell the user, but since we don't have any guarantee that we're even
|
||||
* running on the right architecture, we do virtually nothing.
|
||||
*/
|
||||
__error_p:
|
||||
ENDPROC(__error_p)
|
||||
|
||||
__error:
|
||||
1: nop
|
||||
b 1b
|
||||
ENDPROC(__error)
|
||||
|
||||
/*
|
||||
* This function gets the processor ID in w0 and searches the cpu_table[] for
|
||||
* a match. It returns a pointer to the struct cpu_info it found. The
|
||||
* cpu_table[] must end with an empty (all zeros) structure.
|
||||
*
|
||||
* This routine can be called via C code and it needs to work with the MMU
|
||||
* both disabled and enabled (the offset is calculated automatically).
|
||||
*/
|
||||
ENTRY(lookup_processor_type)
|
||||
adr x1, __lookup_processor_type_data
|
||||
ldp x2, x3, [x1]
|
||||
sub x1, x1, x2 // get offset between VA and PA
|
||||
add x3, x3, x1 // convert VA to PA
|
||||
1:
|
||||
ldp w5, w6, [x3] // load cpu_id_val and cpu_id_mask
|
||||
cbz w5, 2f // end of list?
|
||||
and w6, w6, w0
|
||||
cmp w5, w6
|
||||
b.eq 3f
|
||||
add x3, x3, #CPU_INFO_SZ
|
||||
b 1b
|
||||
2:
|
||||
mov x3, #0 // unknown processor
|
||||
3:
|
||||
mov x0, x3
|
||||
ret
|
||||
ENDPROC(lookup_processor_type)
|
||||
|
||||
.align 3
|
||||
.type __lookup_processor_type_data, %object
|
||||
__lookup_processor_type_data:
|
||||
.quad .
|
||||
.quad cpu_table
|
||||
.size __lookup_processor_type_data, . - __lookup_processor_type_data
|
||||
|
||||
arch/arm64/kernel/hw_breakpoint.c (410 lines, new file)
@@ -0,0 +1,410 @@
|
||||
/* hw_breakpoint.c COPYRIGHT FUJITSU LIMITED 2016 */
|
||||
#include <ihk/debug.h>
|
||||
#include <cputype.h>
|
||||
#include <errno.h>
|
||||
#include <elfcore.h>
|
||||
#include <ptrace.h>
|
||||
#include <hw_breakpoint.h>
|
||||
#include <arch-memory.h>
|
||||
#include <signal.h>
|
||||
#include <process.h>
|
||||
|
||||
/* @ref.impl arch/arm64/kernel/hw_breakpoint.c::core_num_[brps|wrps] */
|
||||
/* Number of BRP/WRP registers on this CPU. */
|
||||
int core_num_brps;
|
||||
int core_num_wrps;
|
||||
|
||||
/* @ref.impl arch/arm64/kernel/hw_breakpoint.c::get_num_brps */
|
||||
/* Determine number of BRP registers available. */
|
||||
int get_num_brps(void)
|
||||
{
|
||||
return ((read_cpuid(ID_AA64DFR0_EL1) >> 12) & 0xf) + 1;
|
||||
}
|
||||
|
||||
/* @ref.impl arch/arm64/kernel/hw_breakpoint.c::get_num_wrps */
|
||||
/* Determine number of WRP registers available. */
|
||||
int get_num_wrps(void)
|
||||
{
|
||||
return ((read_cpuid(ID_AA64DFR0_EL1) >> 20) & 0xf) + 1;
|
||||
}
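/*
 * Illustrative sketch, not part of this patch: both counts come from
 * ID_AA64DFR0_EL1, BRPs in bits [15:12] and WRPs in bits [23:20], each
 * field encoding "number of registers minus one".  The register value
 * below is a made-up sample.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t id_aa64dfr0 = 0x0000000010305408ULL;   /* sample value */
	unsigned int brps = ((id_aa64dfr0 >> 12) & 0xf) + 1;
	unsigned int wrps = ((id_aa64dfr0 >> 20) & 0xf) + 1;

	printf("BRPs: %u, WRPs: %u\n", brps, wrps);
	return 0;
}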
|
||||
|
||||
/* @ref.impl arch/arm64/kernel/hw_breakpoint.c::hw_breakpoint_slots */
|
||||
int hw_breakpoint_slots(int type)
|
||||
{
|
||||
/*
|
||||
* We can be called early, so don't rely on
|
||||
* our static variables being initialised.
|
||||
*/
|
||||
switch (type) {
|
||||
case TYPE_INST:
|
||||
return get_num_brps();
|
||||
case TYPE_DATA:
|
||||
return get_num_wrps();
|
||||
default:
|
||||
kprintf("unknown slot type: %d\n", type);
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
/* @ref.impl arch/arm64/kernel/hw_breakpoint.c::READ_WB_REG_CASE */
|
||||
#define READ_WB_REG_CASE(OFF, N, REG, VAL) \
|
||||
case (OFF + N): \
|
||||
AARCH64_DBG_READ(N, REG, VAL); \
|
||||
break
|
||||
|
||||
/* @ref.impl arch/arm64/kernel/hw_breakpoint.c::READ_WB_REG_CASE */
|
||||
#define WRITE_WB_REG_CASE(OFF, N, REG, VAL) \
|
||||
case (OFF + N): \
|
||||
AARCH64_DBG_WRITE(N, REG, VAL); \
|
||||
break
|
||||
|
||||
/* @ref.impl arch/arm64/kernel/hw_breakpoint.c::GEN_READ_WB_REG_CASES */
|
||||
#define GEN_READ_WB_REG_CASES(OFF, REG, VAL) \
|
||||
READ_WB_REG_CASE(OFF, 0, REG, VAL); \
|
||||
READ_WB_REG_CASE(OFF, 1, REG, VAL); \
|
||||
READ_WB_REG_CASE(OFF, 2, REG, VAL); \
|
||||
READ_WB_REG_CASE(OFF, 3, REG, VAL); \
|
||||
READ_WB_REG_CASE(OFF, 4, REG, VAL); \
|
||||
READ_WB_REG_CASE(OFF, 5, REG, VAL); \
|
||||
READ_WB_REG_CASE(OFF, 6, REG, VAL); \
|
||||
READ_WB_REG_CASE(OFF, 7, REG, VAL); \
|
||||
READ_WB_REG_CASE(OFF, 8, REG, VAL); \
|
||||
READ_WB_REG_CASE(OFF, 9, REG, VAL); \
|
||||
READ_WB_REG_CASE(OFF, 10, REG, VAL); \
|
||||
READ_WB_REG_CASE(OFF, 11, REG, VAL); \
|
||||
READ_WB_REG_CASE(OFF, 12, REG, VAL); \
|
||||
READ_WB_REG_CASE(OFF, 13, REG, VAL); \
|
||||
READ_WB_REG_CASE(OFF, 14, REG, VAL); \
|
||||
READ_WB_REG_CASE(OFF, 15, REG, VAL)
|
||||
|
||||
/* @ref.impl arch/arm64/kernel/hw_breakpoint.c::GEN_WRITE_WB_REG_CASES */
|
||||
#define GEN_WRITE_WB_REG_CASES(OFF, REG, VAL) \
|
||||
WRITE_WB_REG_CASE(OFF, 0, REG, VAL); \
|
||||
WRITE_WB_REG_CASE(OFF, 1, REG, VAL); \
|
||||
WRITE_WB_REG_CASE(OFF, 2, REG, VAL); \
|
||||
WRITE_WB_REG_CASE(OFF, 3, REG, VAL); \
|
||||
WRITE_WB_REG_CASE(OFF, 4, REG, VAL); \
|
||||
WRITE_WB_REG_CASE(OFF, 5, REG, VAL); \
|
||||
WRITE_WB_REG_CASE(OFF, 6, REG, VAL); \
|
||||
WRITE_WB_REG_CASE(OFF, 7, REG, VAL); \
|
||||
WRITE_WB_REG_CASE(OFF, 8, REG, VAL); \
|
||||
WRITE_WB_REG_CASE(OFF, 9, REG, VAL); \
|
||||
WRITE_WB_REG_CASE(OFF, 10, REG, VAL); \
|
||||
WRITE_WB_REG_CASE(OFF, 11, REG, VAL); \
|
||||
WRITE_WB_REG_CASE(OFF, 12, REG, VAL); \
|
||||
WRITE_WB_REG_CASE(OFF, 13, REG, VAL); \
|
||||
WRITE_WB_REG_CASE(OFF, 14, REG, VAL); \
|
||||
WRITE_WB_REG_CASE(OFF, 15, REG, VAL)
|
||||
|
||||
/* @ref.impl arch/arm64/kernel/hw_breakpoint.c::read_wb_reg */
|
||||
unsigned long read_wb_reg(int reg, int n)
|
||||
{
|
||||
unsigned long val = 0;
|
||||
|
||||
switch (reg + n) {
|
||||
GEN_READ_WB_REG_CASES(AARCH64_DBG_REG_BVR, AARCH64_DBG_REG_NAME_BVR, val);
|
||||
GEN_READ_WB_REG_CASES(AARCH64_DBG_REG_BCR, AARCH64_DBG_REG_NAME_BCR, val);
|
||||
GEN_READ_WB_REG_CASES(AARCH64_DBG_REG_WVR, AARCH64_DBG_REG_NAME_WVR, val);
|
||||
GEN_READ_WB_REG_CASES(AARCH64_DBG_REG_WCR, AARCH64_DBG_REG_NAME_WCR, val);
|
||||
default:
|
||||
kprintf("attempt to read from unknown breakpoint register %d\n", n);
|
||||
}
|
||||
|
||||
return val;
|
||||
}
|
||||
|
||||
/* @ref.impl arch/arm64/kernel/hw_breakpoint.c::write_wb_reg */
|
||||
void write_wb_reg(int reg, int n, unsigned long val)
|
||||
{
|
||||
switch (reg + n) {
|
||||
GEN_WRITE_WB_REG_CASES(AARCH64_DBG_REG_BVR, AARCH64_DBG_REG_NAME_BVR, val);
|
||||
GEN_WRITE_WB_REG_CASES(AARCH64_DBG_REG_BCR, AARCH64_DBG_REG_NAME_BCR, val);
|
||||
GEN_WRITE_WB_REG_CASES(AARCH64_DBG_REG_WVR, AARCH64_DBG_REG_NAME_WVR, val);
|
||||
GEN_WRITE_WB_REG_CASES(AARCH64_DBG_REG_WCR, AARCH64_DBG_REG_NAME_WCR, val);
|
||||
default:
|
||||
kprintf("attempt to write to unknown breakpoint register %d\n", n);
|
||||
}
|
||||
isb();
|
||||
}
|
||||
|
||||
/* @ref.impl arch/arm64/kernel/hw_breakpoint.c::hw_breakpoint_reset */
|
||||
void hw_breakpoint_reset(void)
|
||||
{
|
||||
int i = 0;
|
||||
|
||||
/* clear DBGBVR<n>_EL1 and DBGBCR<n>_EL1 (n=0-(core_num_brps-1)) */
|
||||
for (i = 0; i < core_num_brps; i++) {
|
||||
write_wb_reg(AARCH64_DBG_REG_BVR, i, 0UL);
|
||||
write_wb_reg(AARCH64_DBG_REG_BCR, i, 0UL);
|
||||
}
|
||||
|
||||
/* clear DBGWVR<n>_EL1 and DBGWCR<n>_EL1 (n=0-(core_num_wrps-1)) */
|
||||
for (i = 0; i < core_num_wrps; i++) {
|
||||
write_wb_reg(AARCH64_DBG_REG_WVR, i, 0UL);
|
||||
write_wb_reg(AARCH64_DBG_REG_WCR, i, 0UL);
|
||||
}
|
||||
}
|
||||
|
||||
/* @ref.impl arch/arm64/kernel/hw_breakpoint.c::arch_hw_breakpoint_init */
|
||||
void arch_hw_breakpoint_init(void)
|
||||
{
|
||||
struct user_hwdebug_state hws;
|
||||
int max_hws_dbg_regs = sizeof(hws.dbg_regs) / sizeof(hws.dbg_regs[0]);
|
||||
|
||||
core_num_brps = get_num_brps();
|
||||
core_num_wrps = get_num_wrps();
|
||||
|
||||
if (max_hws_dbg_regs < core_num_brps) {
|
||||
kprintf("debugreg struct size is less than Determine number of BRP registers available.\n");
|
||||
core_num_brps = max_hws_dbg_regs;
|
||||
}
|
||||
|
||||
if (max_hws_dbg_regs < core_num_wrps) {
|
||||
kprintf("debugreg struct size is less than Determine number of WRP registers available.\n");
|
||||
core_num_wrps = max_hws_dbg_regs;
|
||||
}
|
||||
hw_breakpoint_reset();
|
||||
}
|
||||
|
||||
struct arch_hw_breakpoint_ctrl {
|
||||
unsigned int __reserved : 19,
|
||||
len : 8,
|
||||
type : 2,
|
||||
privilege : 2,
|
||||
enabled : 1;
|
||||
};
|
||||
|
||||
static inline unsigned int encode_ctrl_reg(struct arch_hw_breakpoint_ctrl ctrl)
|
||||
{
|
||||
return (ctrl.len << 5) | (ctrl.type << 3) | (ctrl.privilege << 1) |
|
||||
ctrl.enabled;
|
||||
}
|
||||
|
||||
static inline void decode_ctrl_reg(unsigned int reg, struct arch_hw_breakpoint_ctrl *ctrl)
|
||||
{
|
||||
ctrl->enabled = reg & 0x1;
|
||||
reg >>= 1;
|
||||
ctrl->privilege = reg & 0x3;
|
||||
reg >>= 2;
|
||||
ctrl->type = reg & 0x3;
|
||||
reg >>= 2;
|
||||
ctrl->len = reg & 0xff;
|
||||
}
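/*
 * Illustrative only, not part of this patch: the control word packs
 * enable at bit 0, privilege at bits [2:1], type at bits [4:3] and the
 * byte-address-select length at bits [12:5], so encode and decode are
 * exact inverses.  The field values below are arbitrary samples; this
 * helper assumes it is compiled next to the definitions above.
 */
static inline int demo_ctrl_roundtrip(void)
{
	struct arch_hw_breakpoint_ctrl in = {
		.len = 0xff,		/* 8-byte byte-address select */
		.type = 2,
		.privilege = 2,
		.enabled = 1,
	}, out;

	decode_ctrl_reg(encode_ctrl_reg(in), &out);
	return out.len == in.len && out.type == in.type &&
	       out.privilege == in.privilege && out.enabled == in.enabled;
}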
|
||||
|
||||
/* @ref.impl arch/arm64/kernel/hw_breakpoint.c::arch_bp_generic_fields */
|
||||
/*
|
||||
* Extract generic type and length encodings from an arch_hw_breakpoint_ctrl.
|
||||
* Hopefully this will disappear when ptrace can bypass the conversion
|
||||
* to generic breakpoint descriptions.
|
||||
*/
|
||||
int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl,
|
||||
int *gen_len, int *gen_type)
|
||||
{
|
||||
/* Type */
|
||||
switch (ctrl.type) {
|
||||
case ARM_BREAKPOINT_EXECUTE:
|
||||
*gen_type = HW_BREAKPOINT_X;
|
||||
break;
|
||||
case ARM_BREAKPOINT_LOAD:
|
||||
*gen_type = HW_BREAKPOINT_R;
|
||||
break;
|
||||
case ARM_BREAKPOINT_STORE:
|
||||
*gen_type = HW_BREAKPOINT_W;
|
||||
break;
|
||||
case ARM_BREAKPOINT_LOAD | ARM_BREAKPOINT_STORE:
|
||||
*gen_type = HW_BREAKPOINT_RW;
|
||||
break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* Len */
|
||||
switch (ctrl.len) {
|
||||
case ARM_BREAKPOINT_LEN_1:
|
||||
*gen_len = HW_BREAKPOINT_LEN_1;
|
||||
break;
|
||||
case ARM_BREAKPOINT_LEN_2:
|
||||
*gen_len = HW_BREAKPOINT_LEN_2;
|
||||
break;
|
||||
case ARM_BREAKPOINT_LEN_4:
|
||||
*gen_len = HW_BREAKPOINT_LEN_4;
|
||||
break;
|
||||
case ARM_BREAKPOINT_LEN_8:
|
||||
*gen_len = HW_BREAKPOINT_LEN_8;
|
||||
break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* @ref.impl arch/arm64/kernel/hw_breakpoint.c::arch_check_bp_in_kernelspace */
|
||||
/*
|
||||
* Check whether bp virtual address is in kernel space.
|
||||
*/
|
||||
int arch_check_bp_in_kernelspace(unsigned long addr, unsigned int len)
|
||||
{
|
||||
return (addr >= USER_END) && ((addr + len - 1) >= USER_END);
|
||||
}
|
||||
|
||||
/* @ref.impl arch/arm64/kernel/hw_breakpoint.c::arch_validate_hwbkpt_settings */
|
||||
int arch_validate_hwbkpt_settings(long note_type, struct user_hwdebug_state *hws, size_t len)
|
||||
{
|
||||
int i;
|
||||
unsigned long alignment_mask;
|
||||
size_t cpysize, cpynum;
|
||||
|
||||
switch(note_type) {
|
||||
case NT_ARM_HW_BREAK: /* breakpoint */
|
||||
alignment_mask = 0x3;
|
||||
break;
|
||||
case NT_ARM_HW_WATCH: /* watchpoint */
|
||||
alignment_mask = 0x7;
|
||||
break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
cpysize = len - offsetof(struct user_hwdebug_state, dbg_regs[0]);
|
||||
cpynum = cpysize / sizeof(hws->dbg_regs[0]);
|
||||
|
||||
for (i = 0; i < cpynum; i++) {
|
||||
unsigned long addr = hws->dbg_regs[i].addr;
|
||||
unsigned int uctrl = hws->dbg_regs[i].ctrl;
|
||||
struct arch_hw_breakpoint_ctrl ctrl;
|
||||
int err, len, type;
|
||||
|
||||
/* empty dbg_regs check skip */
|
||||
if (addr == 0 && uctrl == 0) {
|
||||
continue;
|
||||
}
|
||||
|
||||
/* check address alignment */
|
||||
if (addr & alignment_mask) {
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* decode control bit */
|
||||
decode_ctrl_reg(uctrl, &ctrl);
|
||||
|
||||
/* disabled, continue */
|
||||
if (!ctrl.enabled) {
|
||||
continue;
|
||||
}
|
||||
|
||||
err = arch_bp_generic_fields(ctrl, &len, &type);
|
||||
if (err) {
|
||||
return err;
|
||||
}
|
||||
|
||||
/* type check */
|
||||
switch (note_type) {
|
||||
case NT_ARM_HW_BREAK: /* breakpoint */
|
||||
if ((type & HW_BREAKPOINT_X) != type) {
|
||||
return -EINVAL;
|
||||
}
|
||||
break;
|
||||
case NT_ARM_HW_WATCH: /* watchpoint */
|
||||
if ((type & HW_BREAKPOINT_RW) != type) {
|
||||
return -EINVAL;
|
||||
}
|
||||
break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* privilege generate */
|
||||
if (arch_check_bp_in_kernelspace(addr, len)) {
|
||||
/* kernel space breakpoint unsupported. */
|
||||
return -EINVAL;
|
||||
} else {
|
||||
ctrl.privilege = AARCH64_BREAKPOINT_EL0;
|
||||
}
|
||||
|
||||
/* ctrl check OK. */
|
||||
hws->dbg_regs[i].ctrl = encode_ctrl_reg(ctrl);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* @ref.impl arch/arm64/kernel/hw_breakpoint.c::breakpoint_handler */
|
||||
/*
|
||||
* Debug exception handlers.
|
||||
*/
|
||||
int breakpoint_handler(unsigned long unused, unsigned int esr, struct pt_regs *regs)
|
||||
{
|
||||
int i = 0;
|
||||
unsigned long val;
|
||||
unsigned int ctrl_reg;
|
||||
struct arch_hw_breakpoint_ctrl ctrl;
|
||||
siginfo_t info;
|
||||
|
||||
for (i = 0; i < core_num_brps; i++) {
|
||||
|
||||
/* Check if the breakpoint value matches. */
|
||||
val = read_wb_reg(AARCH64_DBG_REG_BVR, i);
|
||||
if (val != (regs->pc & ~0x3)) {
|
||||
continue;
|
||||
}
|
||||
|
||||
/* Possible match, check the byte address select to confirm. */
|
||||
ctrl_reg = read_wb_reg(AARCH64_DBG_REG_BCR, i);
|
||||
decode_ctrl_reg(ctrl_reg, &ctrl);
|
||||
if (!((1 << (regs->pc & 0x3)) & ctrl.len)) {
|
||||
continue;
|
||||
}
|
||||
|
||||
/* send SIGTRAP */
|
||||
info.si_signo = SIGTRAP;
|
||||
info.si_errno = 0;
|
||||
info.si_code = TRAP_HWBKPT;
|
||||
info._sifields._sigfault.si_addr = (void *)regs->pc;
|
||||
set_signal(SIGTRAP, regs, &info);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* @ref.impl arch/arm64/kernel/hw_breakpoint.c::watchpoint_handler */
|
||||
int watchpoint_handler(unsigned long addr, unsigned int esr, struct pt_regs *regs)
|
||||
{
|
||||
int i = 0;
|
||||
int access;
|
||||
unsigned long val;
|
||||
unsigned int ctrl_reg;
|
||||
struct arch_hw_breakpoint_ctrl ctrl;
|
||||
siginfo_t info;
|
||||
|
||||
for (i = 0; i < core_num_wrps; i++) {
|
||||
/* Check if the watchpoint value matches. */
|
||||
val = read_wb_reg(AARCH64_DBG_REG_WVR, i);
|
||||
if (val != (addr & ~0x7)) {
|
||||
continue;
|
||||
}
|
||||
|
||||
/* Possible match, check the byte address select to confirm. */
|
||||
ctrl_reg = read_wb_reg(AARCH64_DBG_REG_WCR, i);
|
||||
decode_ctrl_reg(ctrl_reg, &ctrl);
|
||||
if (!((1 << (addr & 0x7)) & ctrl.len)) {
|
||||
continue;
|
||||
}
|
||||
|
||||
/*
|
||||
* Check that the access type matches.
|
||||
* 0 => load, otherwise => store
|
||||
*/
|
||||
access = (esr & AARCH64_ESR_ACCESS_MASK) ? ARM_BREAKPOINT_STORE :
|
||||
ARM_BREAKPOINT_LOAD;
|
||||
if (!(access & ctrl.type)) {
|
||||
continue;
|
||||
}
|
||||
|
||||
/* send SIGTRAP */
|
||||
info.si_signo = SIGTRAP;
|
||||
info.si_errno = 0;
|
||||
info.si_code = TRAP_HWBKPT;
|
||||
info._sifields._sigfault.si_addr = (void *)addr;
|
||||
set_signal(SIGTRAP, regs, &info);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
arch/arm64/kernel/hyp-stub.S (58 lines, new file)
@@ -0,0 +1,58 @@
|
||||
/* hyp-stub.S COPYRIGHT FUJITSU LIMITED 2015 */
|
||||
#include <linkage.h>
|
||||
#include <assembler.h>
|
||||
|
||||
.text
|
||||
.align 11
|
||||
|
||||
ENTRY(__hyp_stub_vectors)
|
||||
ventry el2_sync_invalid // Synchronous EL2t
|
||||
ventry el2_irq_invalid // IRQ EL2t
|
||||
ventry el2_fiq_invalid // FIQ EL2t
|
||||
ventry el2_error_invalid // Error EL2t
|
||||
|
||||
ventry el2_sync_invalid // Synchronous EL2h
|
||||
ventry el2_irq_invalid // IRQ EL2h
|
||||
ventry el2_fiq_invalid // FIQ EL2h
|
||||
ventry el2_error_invalid // Error EL2h
|
||||
|
||||
ventry el1_sync // Synchronous 64-bit EL1
|
||||
ventry el1_irq_invalid // IRQ 64-bit EL1
|
||||
ventry el1_fiq_invalid // FIQ 64-bit EL1
|
||||
ventry el1_error_invalid // Error 64-bit EL1
|
||||
|
||||
ventry el1_sync_invalid // Synchronous 32-bit EL1
|
||||
ventry el1_irq_invalid // IRQ 32-bit EL1
|
||||
ventry el1_fiq_invalid // FIQ 32-bit EL1
|
||||
ventry el1_error_invalid // Error 32-bit EL1
|
||||
ENDPROC(__hyp_stub_vectors)
|
||||
|
||||
.align 11
|
||||
|
||||
el1_sync:
|
||||
mrs x1, esr_el2
|
||||
lsr x1, x1, #26
|
||||
cmp x1, #0x16
|
||||
b.ne 2f // Not an HVC trap
|
||||
cbz x0, 1f
|
||||
msr vbar_el2, x0 // Set vbar_el2
|
||||
b 2f
|
||||
1: mrs x0, vbar_el2 // Return vbar_el2
|
||||
2: eret
|
||||
ENDPROC(el1_sync)
|
||||
|
||||
.macro invalid_vector label
|
||||
\label:
|
||||
b \label
|
||||
ENDPROC(\label)
|
||||
.endm
|
||||
|
||||
invalid_vector el2_sync_invalid
|
||||
invalid_vector el2_irq_invalid
|
||||
invalid_vector el2_fiq_invalid
|
||||
invalid_vector el2_error_invalid
|
||||
invalid_vector el1_sync_invalid
|
||||
invalid_vector el1_irq_invalid
|
||||
invalid_vector el1_fiq_invalid
|
||||
invalid_vector el1_error_invalid
|
||||
|
||||
arch/arm64/kernel/imp-sysreg.c (131 lines, new file)
@@ -0,0 +1,131 @@
|
||||
/* imp-sysreg.c COPYRIGHT FUJITSU LIMITED 2018 */
|
||||
#include <sysreg.h>
|
||||
|
||||
/* hpc */
|
||||
ACCESS_REG_FUNC(fj_tag_address_ctrl_el1, IMP_FJ_TAG_ADDRESS_CTRL_EL1);
|
||||
ACCESS_REG_FUNC(pf_ctrl_el1, IMP_PF_CTRL_EL1);
|
||||
ACCESS_REG_FUNC(pf_stream_detect_ctrl_el0, IMP_PF_STREAM_DETECT_CTRL_EL0);
|
||||
ACCESS_REG_FUNC(pf_injection_ctrl0_el0, IMP_PF_INJECTION_CTRL0_EL0);
|
||||
ACCESS_REG_FUNC(pf_injection_ctrl1_el0, IMP_PF_INJECTION_CTRL1_EL0);
|
||||
ACCESS_REG_FUNC(pf_injection_ctrl2_el0, IMP_PF_INJECTION_CTRL2_EL0);
|
||||
ACCESS_REG_FUNC(pf_injection_ctrl3_el0, IMP_PF_INJECTION_CTRL3_EL0);
|
||||
ACCESS_REG_FUNC(pf_injection_ctrl4_el0, IMP_PF_INJECTION_CTRL4_EL0);
|
||||
ACCESS_REG_FUNC(pf_injection_ctrl5_el0, IMP_PF_INJECTION_CTRL5_EL0);
|
||||
ACCESS_REG_FUNC(pf_injection_ctrl6_el0, IMP_PF_INJECTION_CTRL6_EL0);
|
||||
ACCESS_REG_FUNC(pf_injection_ctrl7_el0, IMP_PF_INJECTION_CTRL7_EL0);
|
||||
ACCESS_REG_FUNC(pf_injection_distance0_el0, IMP_PF_INJECTION_DISTANCE0_EL0);
|
||||
ACCESS_REG_FUNC(pf_injection_distance1_el0, IMP_PF_INJECTION_DISTANCE1_EL0);
ACCESS_REG_FUNC(pf_injection_distance2_el0, IMP_PF_INJECTION_DISTANCE2_EL0);
ACCESS_REG_FUNC(pf_injection_distance3_el0, IMP_PF_INJECTION_DISTANCE3_EL0);
ACCESS_REG_FUNC(pf_injection_distance4_el0, IMP_PF_INJECTION_DISTANCE4_EL0);
ACCESS_REG_FUNC(pf_injection_distance5_el0, IMP_PF_INJECTION_DISTANCE5_EL0);
ACCESS_REG_FUNC(pf_injection_distance6_el0, IMP_PF_INJECTION_DISTANCE6_EL0);
ACCESS_REG_FUNC(pf_injection_distance7_el0, IMP_PF_INJECTION_DISTANCE7_EL0);

static void hpc_prefetch_regs_init(void)
{
	uint64_t reg = 0;

	/* PF_CTRL_EL1 */
	reg = IMP_PF_CTRL_EL1_EL1AE_ENABLE | IMP_PF_CTRL_EL1_EL0AE_ENABLE;
	xos_access_pf_ctrl_el1(WRITE_ACCESS, &reg);

	/* PF_STREAM_DETECT_CTRL */
	reg = 0;
	xos_access_pf_stream_detect_ctrl_el0(WRITE_ACCESS, &reg);

	/* PF_INJECTION_CTRL */
	reg = 0;
	xos_access_pf_injection_ctrl0_el0(WRITE_ACCESS, &reg);
	xos_access_pf_injection_ctrl1_el0(WRITE_ACCESS, &reg);
	xos_access_pf_injection_ctrl2_el0(WRITE_ACCESS, &reg);
	xos_access_pf_injection_ctrl3_el0(WRITE_ACCESS, &reg);
	xos_access_pf_injection_ctrl4_el0(WRITE_ACCESS, &reg);
	xos_access_pf_injection_ctrl5_el0(WRITE_ACCESS, &reg);
	xos_access_pf_injection_ctrl6_el0(WRITE_ACCESS, &reg);
	xos_access_pf_injection_ctrl7_el0(WRITE_ACCESS, &reg);

	/* PF_INJECTION_DISTANCE */
	reg = 0;
	xos_access_pf_injection_distance0_el0(WRITE_ACCESS, &reg);
	xos_access_pf_injection_distance1_el0(WRITE_ACCESS, &reg);
	xos_access_pf_injection_distance2_el0(WRITE_ACCESS, &reg);
	xos_access_pf_injection_distance3_el0(WRITE_ACCESS, &reg);
	xos_access_pf_injection_distance4_el0(WRITE_ACCESS, &reg);
	xos_access_pf_injection_distance5_el0(WRITE_ACCESS, &reg);
	xos_access_pf_injection_distance6_el0(WRITE_ACCESS, &reg);
	xos_access_pf_injection_distance7_el0(WRITE_ACCESS, &reg);
}

static void hpc_tag_address_regs_init(void)
{
	uint64_t reg = IMP_FJ_TAG_ADDRESS_CTRL_EL1_TBO0_MASK |
		IMP_FJ_TAG_ADDRESS_CTRL_EL1_SEC0_MASK |
		IMP_FJ_TAG_ADDRESS_CTRL_EL1_PFE0_MASK;

	/* FJ_TAG_ADDRESS_CTRL */
	xos_access_fj_tag_address_ctrl_el1(WRITE_ACCESS, &reg);
}

void hpc_registers_init(void)
{
	hpc_prefetch_regs_init();
	hpc_tag_address_regs_init();
}

/* vhbm */
ACCESS_REG_FUNC(barrier_ctrl_el1, IMP_BARRIER_CTRL_EL1);
ACCESS_REG_FUNC(barrier_bst_bit_el1, IMP_BARRIER_BST_BIT_EL1);
ACCESS_REG_FUNC(barrier_init_sync_bb0_el1, IMP_BARRIER_INIT_SYNC_BB0_EL1);
ACCESS_REG_FUNC(barrier_init_sync_bb1_el1, IMP_BARRIER_INIT_SYNC_BB1_EL1);
ACCESS_REG_FUNC(barrier_init_sync_bb2_el1, IMP_BARRIER_INIT_SYNC_BB2_EL1);
ACCESS_REG_FUNC(barrier_init_sync_bb3_el1, IMP_BARRIER_INIT_SYNC_BB3_EL1);
ACCESS_REG_FUNC(barrier_init_sync_bb4_el1, IMP_BARRIER_INIT_SYNC_BB4_EL1);
ACCESS_REG_FUNC(barrier_init_sync_bb5_el1, IMP_BARRIER_INIT_SYNC_BB5_EL1);
ACCESS_REG_FUNC(barrier_assign_sync_w0_el1, IMP_BARRIER_ASSIGN_SYNC_W0_EL1);
ACCESS_REG_FUNC(barrier_assign_sync_w1_el1, IMP_BARRIER_ASSIGN_SYNC_W1_EL1);
ACCESS_REG_FUNC(barrier_assign_sync_w2_el1, IMP_BARRIER_ASSIGN_SYNC_W2_EL1);
ACCESS_REG_FUNC(barrier_assign_sync_w3_el1, IMP_BARRIER_ASSIGN_SYNC_W3_EL1);

void vhbm_barrier_registers_init(void)
{
	uint64_t reg = 0;

	reg = IMP_BARRIER_CTRL_EL1_EL1AE_ENABLE |
		IMP_BARRIER_CTRL_EL1_EL0AE_ENABLE;
	xos_access_barrier_ctrl_el1(WRITE_ACCESS, &reg);

	reg = 0;

	xos_access_barrier_init_sync_bb0_el1(WRITE_ACCESS, &reg);
	xos_access_barrier_init_sync_bb1_el1(WRITE_ACCESS, &reg);
	xos_access_barrier_init_sync_bb2_el1(WRITE_ACCESS, &reg);
	xos_access_barrier_init_sync_bb3_el1(WRITE_ACCESS, &reg);
	xos_access_barrier_init_sync_bb4_el1(WRITE_ACCESS, &reg);
	xos_access_barrier_init_sync_bb5_el1(WRITE_ACCESS, &reg);
	xos_access_barrier_assign_sync_w0_el1(WRITE_ACCESS, &reg);
	xos_access_barrier_assign_sync_w1_el1(WRITE_ACCESS, &reg);
	xos_access_barrier_assign_sync_w2_el1(WRITE_ACCESS, &reg);
	xos_access_barrier_assign_sync_w3_el1(WRITE_ACCESS, &reg);
}

/* sccr */
ACCESS_REG_FUNC(sccr_ctrl_el1, IMP_SCCR_CTRL_EL1);
ACCESS_REG_FUNC(sccr_assign_el1, IMP_SCCR_ASSIGN_EL1);
ACCESS_REG_FUNC(sccr_set0_l2_el1, IMP_SCCR_SET0_L2_EL1);
ACCESS_REG_FUNC(sccr_l1_el0, IMP_SCCR_L1_EL0);

void scdrv_registers_init(void)
{
	uint64_t reg = 0;

	reg = IMP_SCCR_CTRL_EL1_EL1AE_MASK;
	xos_access_sccr_ctrl_el1(WRITE_ACCESS, &reg);

	reg = 0;
	xos_access_sccr_assign_el1(WRITE_ACCESS, &reg);
	xos_access_sccr_l1_el0(WRITE_ACCESS, &reg);

	reg = (14UL << IMP_SCCR_SET0_L2_EL1_L2_SEC0_SHIFT);
	xos_access_sccr_set0_l2_el1(WRITE_ACCESS, &reg);
}
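
/*
 * Illustrative sketch (not part of the patch): the generated xos_access_*
 * accessors can be reused by other callers to program these
 * implementation-defined registers.  The helper below mirrors the hard-coded
 * sector-cache setting in scdrv_registers_init(); the helper name and its
 * "nways" parameter are hypothetical.
 */
static inline void scdrv_set_l2_sec0_ways(uint64_t nways)
{
	uint64_t reg = (nways << IMP_SCCR_SET0_L2_EL1_L2_SEC0_SHIFT);

	xos_access_sccr_set0_l2_el1(WRITE_ACCESS, &reg);
}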
19  arch/arm64/kernel/include/arch-bitops.h  Normal file
@@ -0,0 +1,19 @@
/* arch-bitops.h COPYRIGHT FUJITSU LIMITED 2015-2016 */
#ifndef __HEADER_ARM64_COMMON_BITOPS_H
#define __HEADER_ARM64_COMMON_BITOPS_H

#ifndef INCLUDE_BITOPS_H
# error only <bitops.h> can be included directly
#endif

#ifndef __ASSEMBLY__

#include "bitops-fls.h"
#include "bitops-__ffs.h"
#include "bitops-ffz.h"
#include "bitops-set_bit.h"
#include "bitops-clear_bit.h"

#endif /*__ASSEMBLY__*/
#endif /* !__HEADER_ARM64_COMMON_BITOPS_H */
146  arch/arm64/kernel/include/arch-futex.h  Normal file
@@ -0,0 +1,146 @@
/* arch-futex.h COPYRIGHT FUJITSU LIMITED 2015-2018 */
#ifndef __HEADER_ARM64_COMMON_ARCH_FUTEX_H
#define __HEADER_ARM64_COMMON_ARCH_FUTEX_H

/*
 * @ref.impl
 * linux-linaro/arch/arm64/include/asm/futex.h:__futex_atomic_op
 */
#define __futex_atomic_op(insn, ret, oldval, uaddr, tmp, oparg)	\
	asm volatile(							\
	"1: ldxr %w1, %2\n"						\
	insn "\n"							\
	"2: stlxr %w3, %w0, %2\n"					\
	" cbnz %w3, 1b\n"						\
	" dmb ish\n"							\
	"3:\n"								\
	" .pushsection .fixup,\"ax\"\n"					\
	" .align 2\n"							\
	"4: mov %w0, %w5\n"						\
	" b 3b\n"							\
	" .popsection\n"						\
	" .pushsection __ex_table,\"a\"\n"				\
	" .align 3\n"							\
	" .quad 1b, 4b, 2b, 4b\n"					\
	" .popsection\n"						\
	: "=&r" (ret), "=&r" (oldval), "+Q" (*uaddr), "=&r" (tmp)	\
	: "r" (oparg), "Ir" (-EFAULT)					\
	: "memory")

/*
 * @ref.impl
 * linux-linaro/arch/arm64/include/asm/futex.h:futex_atomic_op_inuser
 */
static inline int futex_atomic_op_inuser(int encoded_op,
					 int __user *uaddr)
{
	int op = (encoded_op >> 28) & 7;
	int cmp = (encoded_op >> 24) & 15;
	int oparg = (encoded_op & 0x00fff000) >> 12;
	int cmparg = encoded_op & 0xfff;
	int oldval = 0, ret, tmp;

	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
		oparg = 1 << oparg;

#ifdef __UACCESS__
	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
		return -EFAULT;
#endif

	// pagefault_disable(); /* implies preempt_disable() */

	switch (op) {
	case FUTEX_OP_SET:
		__futex_atomic_op("mov %w0, %w4",
				  ret, oldval, uaddr, tmp, oparg);
		break;
	case FUTEX_OP_ADD:
		__futex_atomic_op("add %w0, %w1, %w4",
				  ret, oldval, uaddr, tmp, oparg);
		break;
	case FUTEX_OP_OR:
		__futex_atomic_op("orr %w0, %w1, %w4",
				  ret, oldval, uaddr, tmp, oparg);
		break;
	case FUTEX_OP_ANDN:
		__futex_atomic_op("and %w0, %w1, %w4",
				  ret, oldval, uaddr, tmp, ~oparg);
		break;
	case FUTEX_OP_XOR:
		__futex_atomic_op("eor %w0, %w1, %w4",
				  ret, oldval, uaddr, tmp, oparg);
		break;
	default:
		ret = -ENOSYS;
	}

	// pagefault_enable(); /* subsumes preempt_enable() */

	if (!ret) {
		switch (cmp) {
		case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
		case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
		case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
		case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
		case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
		case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
		default: ret = -ENOSYS;
		}
	}
	return ret;
}
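
/*
 * Illustrative sketch (not part of the patch): the encoded_op layout that
 * futex_atomic_op_inuser() decodes above -- op in bits 30:28 (bit 31 is the
 * FUTEX_OP_OPARG_SHIFT flag), cmp in bits 27:24, oparg in bits 23:12 and
 * cmparg in bits 11:0.  The packing helper is a hypothetical local mirror of
 * the standard futex encoding, written only to make the shifts explicit.
 */
static inline int example_futex_encode_op(int op, int cmp, int oparg, int cmparg)
{
	return ((op & 0xf) << 28) | ((cmp & 0xf) << 24) |
	       ((oparg & 0xfff) << 12) | (cmparg & 0xfff);
}
/*
 * For example, encoding FUTEX_OP_ADD with oparg = 1 and FUTEX_OP_CMP_GT with
 * cmparg = 0 asks futex_atomic_op_inuser() to add 1 to *uaddr atomically and
 * report whether the old value was greater than 0.
 */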
/*
 * @ref.impl
 * linux-linaro/arch/arm64/include/asm/futex.h:futex_atomic_cmpxchg_inatomic
 * mckernel/kernel/include/futex.h:futex_atomic_cmpxchg_inatomic (x86 depend)
 */
static inline int
futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
{
	int ret = 0;
	int val, tmp;

	if (uaddr == NULL) {
		return -EFAULT;
	}
#ifdef __UACCESS__
	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) {
		return -EFAULT;
	}
#endif

	asm volatile("// futex_atomic_cmpxchg_inatomic\n"
	"1: ldxr %w1, %2\n"
	" sub %w3, %w1, %w4\n"
	" cbnz %w3, 3f\n"
	"2: stlxr %w3, %w5, %2\n"
	" cbnz %w3, 1b\n"
	" dmb ish\n"
	"3:\n"
	" .pushsection .fixup,\"ax\"\n"
	"4: mov %w0, %w6\n"
	" b 3b\n"
	" .popsection\n"
	" .pushsection __ex_table,\"a\"\n"
	" .align 3\n"
	" .quad 1b, 4b, 2b, 4b\n"
	" .popsection\n"
	: "+r" (ret), "=&r" (val), "+Q" (*uaddr), "=&r" (tmp)
	: "r" (oldval), "r" (newval), "Ir" (-EFAULT)
	: "memory");

	return ret;
}

static inline int get_futex_value_locked(uint32_t *dest, uint32_t *from)
{

	*dest = *(volatile uint32_t *)from;

	return 0;
}

#endif /* !__HEADER_ARM64_COMMON_ARCH_FUTEX_H */
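
/*
 * Illustrative usage sketch (not part of the patch): read a futex word with
 * get_futex_value_locked() and publish a new value only while it still holds
 * the value just read.  "example_futex_publish" is a hypothetical name; note
 * that futex_atomic_cmpxchg_inatomic() above reports only -EFAULT, not
 * whether the store actually took place.
 */
static inline int example_futex_publish(uint32_t *uaddr, uint32_t newval)
{
	uint32_t cur;
	int ret;

	ret = get_futex_value_locked(&cur, uaddr);
	if (ret) {
		return ret;
	}
	return futex_atomic_cmpxchg_inatomic((int __user *)uaddr,
					     (int)cur, (int)newval);
}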
760  arch/arm64/kernel/include/arch-lock.h  Normal file
@@ -0,0 +1,760 @@
|
||||
/* arch-lock.h COPYRIGHT FUJITSU LIMITED 2015-2018 */
|
||||
#ifndef __HEADER_ARM64_COMMON_ARCH_LOCK_H
|
||||
#define __HEADER_ARM64_COMMON_ARCH_LOCK_H
|
||||
|
||||
#define IHK_STATIC_SPINLOCK_FUNCS
|
||||
|
||||
#include <ihk/cpu.h>
|
||||
#include <ihk/atomic.h>
|
||||
#include "affinity.h"
|
||||
#include <lwk/compiler.h>
|
||||
#include "config.h"
|
||||
|
||||
//#define DEBUG_SPINLOCK
|
||||
//#define DEBUG_MCS_RWLOCK
|
||||
|
||||
#if defined(DEBUG_SPINLOCK) || defined(DEBUG_MCS_RWLOCK)
|
||||
int __kprintf(const char *format, ...);
|
||||
#endif
|
||||
|
||||
/* @ref.impl arch/arm64/include/asm/spinlock_types.h::TICKET_SHIFT */
|
||||
#define TICKET_SHIFT 16
|
||||
|
||||
/* @ref.impl arch/arm64/include/asm/spinlock_types.h::arch_spinlock_t */
|
||||
typedef struct {
|
||||
#ifdef __AARCH64EB__
|
||||
uint16_t next;
|
||||
uint16_t owner;
|
||||
#else /* __AARCH64EB__ */
|
||||
uint16_t owner;
|
||||
uint16_t next;
|
||||
#endif /* __AARCH64EB__ */
|
||||
} __attribute__((aligned(4))) ihk_spinlock_t;
|
||||
|
||||
extern void preempt_enable(void);
|
||||
extern void preempt_disable(void);
|
||||
|
||||
/* @ref.impl arch/arm64/include/asm/spinlock_types.h::__ARCH_SPIN_LOCK_UNLOCKED */
|
||||
#define SPIN_LOCK_UNLOCKED { 0, 0 }
|
||||
|
||||
/* @ref.impl arch/arm64/include/asm/barrier.h::__nops */
|
||||
#define __nops(n) ".rept " #n "\nnop\n.endr\n"
|
||||
|
||||
/* @ref.impl ./arch/arm64/include/asm/lse.h::ARM64_LSE_ATOMIC_INSN */
|
||||
/* else defined(CONFIG_AS_LSE) && defined(CONFIG_ARM64_LSE_ATOMICS) */
|
||||
#define ARM64_LSE_ATOMIC_INSN(llsc, lse) llsc
|
||||
|
||||
/* initialized spinlock struct */
|
||||
static void ihk_mc_spinlock_init(ihk_spinlock_t *lock)
|
||||
{
|
||||
*lock = (ihk_spinlock_t)SPIN_LOCK_UNLOCKED;
|
||||
}
|
||||
|
||||
#ifdef DEBUG_SPINLOCK
|
||||
#define ihk_mc_spinlock_trylock_noirq(l) ({ \
	int rc; \
	__kprintf("[%d] call ihk_mc_spinlock_trylock_noirq %p %s:%d\n", \
		  ihk_mc_get_processor_id(), (l), __FILE__, __LINE__); \
	rc = __ihk_mc_spinlock_trylock_noirq(l); \
	__kprintf("[%d] ret ihk_mc_spinlock_trylock_noirq\n", \
		  ihk_mc_get_processor_id()); \
	rc; \
})
|
||||
#else
|
||||
#define ihk_mc_spinlock_trylock_noirq __ihk_mc_spinlock_trylock_noirq
|
||||
#endif
|
||||
|
||||
/* @ref.impl arch/arm64/include/asm/spinlock.h::arch_spin_trylock */
|
||||
/* spinlock trylock */
|
||||
static int __ihk_mc_spinlock_trylock_noirq(ihk_spinlock_t *lock)
|
||||
{
|
||||
unsigned int tmp;
|
||||
ihk_spinlock_t lockval;
|
||||
int success;
|
||||
|
||||
preempt_disable();
|
||||
|
||||
asm volatile(ARM64_LSE_ATOMIC_INSN(
|
||||
/* LL/SC */
|
||||
" prfm pstl1strm, %2\n"
|
||||
"1: ldaxr %w0, %2\n"
|
||||
" eor %w1, %w0, %w0, ror #16\n"
|
||||
" cbnz %w1, 2f\n"
|
||||
" add %w0, %w0, %3\n"
|
||||
" stxr %w1, %w0, %2\n"
|
||||
" cbnz %w1, 1b\n"
|
||||
"2:",
|
||||
/* LSE atomics */
|
||||
" ldr %w0, %2\n"
|
||||
" eor %w1, %w0, %w0, ror #16\n"
|
||||
" cbnz %w1, 1f\n"
|
||||
" add %w1, %w0, %3\n"
|
||||
" casa %w0, %w1, %2\n"
|
||||
" sub %w1, %w1, %3\n"
|
||||
" eor %w1, %w1, %w0\n"
|
||||
"1:")
|
||||
: "=&r" (lockval), "=&r" (tmp), "+Q" (*lock)
|
||||
: "I" (1 << TICKET_SHIFT)
|
||||
: "memory");
|
||||
|
||||
success = !tmp;
|
||||
if (!success) {
|
||||
preempt_enable();
|
||||
}
|
||||
return success;
|
||||
}
|
||||
|
||||
#ifdef DEBUG_SPINLOCK
|
||||
#define ihk_mc_spinlock_trylock(l, result) ({ \
|
||||
unsigned long rc; \
|
||||
__kprintf("[%d] call ihk_mc_spinlock_trylock %p %s:%d\n", \
|
||||
ihk_mc_get_processor_id(), (l), __FILE__, __LINE__); \
|
||||
rc = __ihk_mc_spinlock_trylock(l, result); \
|
||||
__kprintf("[%d] ret ihk_mc_spinlock_trylock\n", \
|
||||
ihk_mc_get_processor_id()); \
|
||||
rc; \
|
||||
})
|
||||
#else
|
||||
#define ihk_mc_spinlock_trylock __ihk_mc_spinlock_trylock
|
||||
#endif
|
||||
|
||||
/* spinlock trylock & interrupt disable & PSTATE.DAIF save */
|
||||
static unsigned long __ihk_mc_spinlock_trylock(ihk_spinlock_t *lock,
|
||||
int *result)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
flags = cpu_disable_interrupt_save();
|
||||
|
||||
*result = __ihk_mc_spinlock_trylock_noirq(lock);
|
||||
|
||||
return flags;
|
||||
}
|
||||
|
||||
#ifdef DEBUG_SPINLOCK
|
||||
#define ihk_mc_spinlock_lock_noirq(l) { \
|
||||
__kprintf("[%d] call ihk_mc_spinlock_lock_noirq %p %s:%d\n", ihk_mc_get_processor_id(), (l), __FILE__, __LINE__); \
|
||||
__ihk_mc_spinlock_lock_noirq(l); \
|
||||
__kprintf("[%d] ret ihk_mc_spinlock_lock_noirq\n", ihk_mc_get_processor_id()); \
|
||||
}
|
||||
#else
|
||||
#define ihk_mc_spinlock_lock_noirq __ihk_mc_spinlock_lock_noirq
|
||||
#endif
|
||||
|
||||
/* @ref.impl arch/arm64/include/asm/spinlock.h::arch_spin_lock */
|
||||
/* spinlock lock */
|
||||
static void __ihk_mc_spinlock_lock_noirq(ihk_spinlock_t *lock)
|
||||
{
|
||||
unsigned int tmp;
|
||||
ihk_spinlock_t lockval, newval;
|
||||
|
||||
preempt_disable();
|
||||
|
||||
asm volatile(
|
||||
/* Atomically increment the next ticket. */
|
||||
ARM64_LSE_ATOMIC_INSN(
|
||||
/* LL/SC */
|
||||
" prfm pstl1strm, %3\n"
|
||||
"1: ldaxr %w0, %3\n"
|
||||
" add %w1, %w0, %w5\n"
|
||||
" stxr %w2, %w1, %3\n"
|
||||
" cbnz %w2, 1b\n",
|
||||
/* LSE atomics */
|
||||
" mov %w2, %w5\n"
|
||||
" ldadda %w2, %w0, %3\n"
|
||||
__nops(3)
|
||||
)
|
||||
|
||||
/* Did we get the lock? */
|
||||
" eor %w1, %w0, %w0, ror #16\n"
|
||||
" cbz %w1, 3f\n"
|
||||
/*
|
||||
* No: spin on the owner. Send a local event to avoid missing an
|
||||
* unlock before the exclusive load.
|
||||
*/
|
||||
" sevl\n"
|
||||
"2: wfe\n"
|
||||
" ldaxrh %w2, %4\n"
|
||||
" eor %w1, %w2, %w0, lsr #16\n"
|
||||
" cbnz %w1, 2b\n"
|
||||
/* We got the lock. Critical section starts here. */
|
||||
"3:"
|
||||
: "=&r" (lockval), "=&r" (newval), "=&r" (tmp), "+Q" (*lock)
|
||||
: "Q" (lock->owner), "I" (1 << TICKET_SHIFT)
|
||||
: "memory");
|
||||
}
|
||||
|
||||
#ifdef DEBUG_SPINLOCK
|
||||
#define ihk_mc_spinlock_lock(l) ({ unsigned long rc;\
|
||||
__kprintf("[%d] call ihk_mc_spinlock_lock %p %s:%d\n", ihk_mc_get_processor_id(), (l), __FILE__, __LINE__); \
|
||||
rc = __ihk_mc_spinlock_lock(l);\
|
||||
__kprintf("[%d] ret ihk_mc_spinlock_lock\n", ihk_mc_get_processor_id()); rc;\
|
||||
})
|
||||
#else
|
||||
#define ihk_mc_spinlock_lock __ihk_mc_spinlock_lock
|
||||
#endif
|
||||
|
||||
/* spinlock lock & interrupt disable & PSTATE.DAIF save */
|
||||
static unsigned long __ihk_mc_spinlock_lock(ihk_spinlock_t *lock)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
flags = cpu_disable_interrupt_save();
|
||||
|
||||
__ihk_mc_spinlock_lock_noirq(lock);
|
||||
|
||||
return flags;
|
||||
}
|
||||
|
||||
#ifdef DEBUG_SPINLOCK
|
||||
#define ihk_mc_spinlock_unlock_noirq(l) { \
|
||||
__kprintf("[%d] call ihk_mc_spinlock_unlock_noirq %p %s:%d\n", ihk_mc_get_processor_id(), (l), __FILE__, __LINE__); \
|
||||
__ihk_mc_spinlock_unlock_noirq(l); \
|
||||
__kprintf("[%d] ret ihk_mc_spinlock_unlock_noirq\n", ihk_mc_get_processor_id()); \
|
||||
}
|
||||
#else
|
||||
#define ihk_mc_spinlock_unlock_noirq __ihk_mc_spinlock_unlock_noirq
|
||||
#endif
|
||||
|
||||
/* @ref.impl arch/arm64/include/asm/spinlock.h::arch_spin_unlock */
|
||||
/* spinlock unlock */
|
||||
static void __ihk_mc_spinlock_unlock_noirq(ihk_spinlock_t *lock)
|
||||
{
|
||||
unsigned long tmp;
|
||||
|
||||
asm volatile(ARM64_LSE_ATOMIC_INSN(
|
||||
/* LL/SC */
|
||||
" ldrh %w1, %0\n"
|
||||
" add %w1, %w1, #1\n"
|
||||
" stlrh %w1, %0",
|
||||
/* LSE atomics */
|
||||
" mov %w1, #1\n"
|
||||
" staddlh %w1, %0\n"
|
||||
__nops(1))
|
||||
: "=Q" (lock->owner), "=&r" (tmp)
|
||||
:
|
||||
: "memory");
|
||||
|
||||
preempt_enable();
|
||||
}
|
||||
|
||||
/* spinlock unlock & restore PSTATE.DAIF */
|
||||
#ifdef DEBUG_SPINLOCK
|
||||
#define ihk_mc_spinlock_unlock(l, f) { \
|
||||
__kprintf("[%d] call ihk_mc_spinlock_unlock %p %s:%d\n", ihk_mc_get_processor_id(), (l), __FILE__, __LINE__); \
|
||||
__ihk_mc_spinlock_unlock((l), (f)); \
|
||||
__kprintf("[%d] ret ihk_mc_spinlock_unlock\n", ihk_mc_get_processor_id()); \
|
||||
}
|
||||
#else
|
||||
#define ihk_mc_spinlock_unlock __ihk_mc_spinlock_unlock
|
||||
#endif
|
||||
static void __ihk_mc_spinlock_unlock(ihk_spinlock_t *lock, unsigned long flags)
|
||||
{
|
||||
__ihk_mc_spinlock_unlock_noirq(lock);
|
||||
|
||||
cpu_restore_interrupt(flags);
|
||||
}
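
/*
 * Illustrative model (not part of the patch) of the ticket discipline that
 * the LL/SC and LSE sequences above implement: acquire atomically takes a
 * ticket from "next" (the add of 1 << TICKET_SHIFT) and spins until "owner"
 * catches up; release increments "owner".  This plain-C version is not
 * atomic and exists only to document the roles of the two halves.
 */
static inline void example_ticket_model(ihk_spinlock_t *lock)
{
	uint16_t my_ticket = lock->next;	/* really an atomic fetch-and-add */

	lock->next = (uint16_t)(my_ticket + 1);
	while (lock->owner != my_ticket) {
		/* the real code waits here with wfe/ldaxrh */
	}
	/* critical section */
	lock->owner = (uint16_t)(lock->owner + 1);	/* unlock */
}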
|
||||
|
||||
#define SPINLOCK_IN_MCS_RWLOCK
|
||||
|
||||
// reader/writer lock
|
||||
typedef struct mcs_rwlock_node {
|
||||
ihk_atomic_t count; // num of readers (use only common reader)
|
||||
char type; // lock type
|
||||
#define MCS_RWLOCK_TYPE_COMMON_READER 0
|
||||
#define MCS_RWLOCK_TYPE_READER 1
|
||||
#define MCS_RWLOCK_TYPE_WRITER 2
|
||||
char locked; // lock
|
||||
#define MCS_RWLOCK_LOCKED 1
|
||||
#define MCS_RWLOCK_UNLOCKED 0
|
||||
char dmy1; // unused
|
||||
char dmy2; // unused
|
||||
struct mcs_rwlock_node *next;
|
||||
#ifndef ENABLE_UBSAN
|
||||
} __aligned(64) mcs_rwlock_node_t;
|
||||
#else
|
||||
} mcs_rwlock_node_t;
|
||||
#endif
|
||||
|
||||
typedef struct mcs_rwlock_node_irqsave {
|
||||
#ifndef SPINLOCK_IN_MCS_RWLOCK
|
||||
struct mcs_rwlock_node node;
|
||||
#endif
|
||||
unsigned long irqsave;
|
||||
#ifndef ENABLE_UBSAN
|
||||
} __aligned(64) mcs_rwlock_node_irqsave_t;
|
||||
#else
|
||||
} mcs_rwlock_node_irqsave_t;
|
||||
#endif
|
||||
|
||||
typedef struct mcs_rwlock_lock {
|
||||
#ifdef SPINLOCK_IN_MCS_RWLOCK
|
||||
ihk_spinlock_t slock;
|
||||
#else
|
||||
struct mcs_rwlock_node reader; /* common reader lock */
|
||||
struct mcs_rwlock_node *node; /* base */
|
||||
#endif
|
||||
#ifndef ENABLE_UBSAN
|
||||
} __aligned(64) mcs_rwlock_lock_t;
|
||||
#else
|
||||
} mcs_rwlock_lock_t;
|
||||
#endif
|
||||
|
||||
static void
|
||||
mcs_rwlock_init(struct mcs_rwlock_lock *lock)
|
||||
{
|
||||
#ifdef SPINLOCK_IN_MCS_RWLOCK
|
||||
ihk_mc_spinlock_init(&lock->slock);
|
||||
#else
|
||||
ihk_atomic_set(&lock->reader.count, 0);
|
||||
lock->reader.type = MCS_RWLOCK_TYPE_COMMON_READER;
|
||||
lock->node = NULL;
|
||||
#endif
|
||||
}
|
||||
|
||||
#ifdef DEBUG_MCS_RWLOCK
|
||||
#define mcs_rwlock_writer_lock_noirq(l, n) { \
|
||||
__kprintf("[%d] call mcs_rwlock_writer_lock_noirq %p %s:%d\n", ihk_mc_get_processor_id(), (l), __FILE__, __LINE__); \
|
||||
__mcs_rwlock_writer_lock_noirq((l), (n)); \
|
||||
__kprintf("[%d] ret mcs_rwlock_writer_lock_noirq\n", ihk_mc_get_processor_id()); \
|
||||
}
|
||||
#else
|
||||
#define mcs_rwlock_writer_lock_noirq __mcs_rwlock_writer_lock_noirq
|
||||
#endif
|
||||
static void
|
||||
__mcs_rwlock_writer_lock_noirq(struct mcs_rwlock_lock *lock, struct mcs_rwlock_node *node)
|
||||
{
|
||||
#ifdef SPINLOCK_IN_MCS_RWLOCK
|
||||
ihk_mc_spinlock_lock_noirq(&lock->slock);
|
||||
#else
|
||||
struct mcs_rwlock_node *pred;
|
||||
|
||||
preempt_disable();
|
||||
|
||||
node->type = MCS_RWLOCK_TYPE_WRITER;
|
||||
node->next = NULL;
|
||||
|
||||
pred = xchg8(&(lock->node), node);
|
||||
|
||||
if (pred) {
|
||||
node->locked = MCS_RWLOCK_LOCKED;
|
||||
pred->next = node;
|
||||
while (node->locked != MCS_RWLOCK_UNLOCKED) {
|
||||
cpu_pause();
|
||||
}
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
#ifndef SPINLOCK_IN_MCS_RWLOCK
|
||||
static void
|
||||
mcs_rwlock_unlock_readers(struct mcs_rwlock_lock *lock)
|
||||
{
|
||||
struct mcs_rwlock_node *p;
|
||||
struct mcs_rwlock_node *f = NULL;
|
||||
struct mcs_rwlock_node *n;
|
||||
int breakf = 0;
|
||||
|
||||
ihk_atomic_inc(&lock->reader.count); // protect to unlock reader
|
||||
for(p = &lock->reader; p->next; p = n){
|
||||
n = p->next;
|
||||
if(p->next->type == MCS_RWLOCK_TYPE_READER){
|
||||
p->next = n->next;
|
||||
if(lock->node == n){
|
||||
struct mcs_rwlock_node *old;
|
||||
|
||||
old = atomic_cmpxchg8(&(lock->node), n, p);
|
||||
|
||||
if(old != n){ // couldn't change
|
||||
while (n->next == NULL) {
|
||||
cpu_pause();
|
||||
}
|
||||
p->next = n->next;
|
||||
}
|
||||
else{
|
||||
breakf = 1;
|
||||
}
|
||||
}
|
||||
else if(p->next == NULL){
|
||||
while (n->next == NULL) {
|
||||
cpu_pause();
|
||||
}
|
||||
p->next = n->next;
|
||||
}
|
||||
if(f){
|
||||
ihk_atomic_inc(&lock->reader.count);
|
||||
n->locked = MCS_RWLOCK_UNLOCKED;
|
||||
}
|
||||
else
|
||||
f = n;
|
||||
n = p;
|
||||
if(breakf)
|
||||
break;
|
||||
}
|
||||
if(n->next == NULL && lock->node != n){
|
||||
while (n->next == NULL && lock->node != n) {
|
||||
cpu_pause();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
f->locked = MCS_RWLOCK_UNLOCKED;
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifdef DEBUG_MCS_RWLOCK
|
||||
#define mcs_rwlock_writer_unlock_noirq(l, n) { \
|
||||
__kprintf("[%d] call mcs_rwlock_writer_unlock_noirq %p %s:%d\n", ihk_mc_get_processor_id(), (l), __FILE__, __LINE__); \
|
||||
__mcs_rwlock_writer_unlock_noirq((l), (n)); \
|
||||
__kprintf("[%d] ret mcs_rwlock_writer_unlock_noirq\n", ihk_mc_get_processor_id()); \
|
||||
}
|
||||
#else
|
||||
#define mcs_rwlock_writer_unlock_noirq __mcs_rwlock_writer_unlock_noirq
|
||||
#endif
|
||||
static void
|
||||
__mcs_rwlock_writer_unlock_noirq(struct mcs_rwlock_lock *lock, struct mcs_rwlock_node *node)
|
||||
{
|
||||
#ifdef SPINLOCK_IN_MCS_RWLOCK
|
||||
ihk_mc_spinlock_unlock_noirq(&lock->slock);
|
||||
#else
|
||||
if (node->next == NULL) {
|
||||
struct mcs_rwlock_node *old = atomic_cmpxchg8(&(lock->node), node, 0);
|
||||
|
||||
if (old == node) {
|
||||
goto out;
|
||||
}
|
||||
|
||||
while (node->next == NULL) {
|
||||
cpu_pause();
|
||||
}
|
||||
}
|
||||
|
||||
if(node->next->type == MCS_RWLOCK_TYPE_READER){
|
||||
lock->reader.next = node->next;
|
||||
mcs_rwlock_unlock_readers(lock);
|
||||
}
|
||||
else{
|
||||
node->next->locked = MCS_RWLOCK_UNLOCKED;
|
||||
}
|
||||
|
||||
out:
|
||||
preempt_enable();
|
||||
#endif
|
||||
}
|
||||
|
||||
#ifdef DEBUG_MCS_RWLOCK
|
||||
#define mcs_rwlock_reader_lock_noirq(l, n) { \
|
||||
__kprintf("[%d] call mcs_rwlock_reader_lock_noirq %p %s:%d\n", ihk_mc_get_processor_id(), (l), __FILE__, __LINE__); \
|
||||
__mcs_rwlock_reader_lock_noirq((l), (n)); \
|
||||
__kprintf("[%d] ret mcs_rwlock_reader_lock_noirq\n", ihk_mc_get_processor_id()); \
|
||||
}
|
||||
#else
|
||||
#define mcs_rwlock_reader_lock_noirq __mcs_rwlock_reader_lock_noirq
|
||||
#endif
|
||||
|
||||
static inline unsigned int
|
||||
atomic_inc_ifnot0(ihk_atomic_t *v)
|
||||
{
|
||||
unsigned int *p = (unsigned int *)(&(v)->counter);
|
||||
unsigned int old;
|
||||
unsigned int new;
|
||||
unsigned int val;
|
||||
|
||||
do{
|
||||
if(!(old = *p))
|
||||
break;
|
||||
new = old + 1;
|
||||
val = atomic_cmpxchg4(p, old, new);
|
||||
}while(val != old);
|
||||
return old;
|
||||
}
|
||||
|
||||
static void
|
||||
__mcs_rwlock_reader_lock_noirq(struct mcs_rwlock_lock *lock, struct mcs_rwlock_node *node)
|
||||
{
|
||||
#ifdef SPINLOCK_IN_MCS_RWLOCK
|
||||
ihk_mc_spinlock_lock_noirq(&lock->slock);
|
||||
#else
|
||||
struct mcs_rwlock_node *pred;
|
||||
|
||||
preempt_disable();
|
||||
|
||||
node->type = MCS_RWLOCK_TYPE_READER;
|
||||
node->next = NULL;
|
||||
node->dmy1 = ihk_mc_get_processor_id();
|
||||
|
||||
pred = xchg8(&(lock->node), node);
|
||||
|
||||
if (pred) {
|
||||
if(pred == &lock->reader){
|
||||
if(atomic_inc_ifnot0(&pred->count)){
|
||||
struct mcs_rwlock_node *old;
|
||||
|
||||
old = atomic_cmpxchg8(&(lock->node), node, pred);
|
||||
|
||||
if (old == node) {
|
||||
goto out;
|
||||
}
|
||||
|
||||
while (node->next == NULL) {
|
||||
cpu_pause();
|
||||
}
|
||||
|
||||
node->locked = MCS_RWLOCK_LOCKED;
|
||||
lock->reader.next = node;
|
||||
mcs_rwlock_unlock_readers(lock);
|
||||
ihk_atomic_dec(&pred->count);
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
node->locked = MCS_RWLOCK_LOCKED;
|
||||
pred->next = node;
|
||||
while (node->locked != MCS_RWLOCK_UNLOCKED) {
|
||||
cpu_pause();
|
||||
}
|
||||
}
|
||||
else {
|
||||
lock->reader.next = node;
|
||||
mcs_rwlock_unlock_readers(lock);
|
||||
}
|
||||
out:
|
||||
return;
|
||||
#endif
|
||||
}
|
||||
|
||||
#ifdef DEBUG_MCS_RWLOCK
|
||||
#define mcs_rwlock_reader_unlock_noirq(l, n) { \
|
||||
__kprintf("[%d] call mcs_rwlock_reader_unlock_noirq %p %s:%d\n", ihk_mc_get_processor_id(), (l), __FILE__, __LINE__); \
|
||||
__mcs_rwlock_reader_unlock_noirq((l), (n)); \
|
||||
__kprintf("[%d] ret mcs_rwlock_reader_unlock_noirq\n", ihk_mc_get_processor_id()); \
|
||||
}
|
||||
#else
|
||||
#define mcs_rwlock_reader_unlock_noirq __mcs_rwlock_reader_unlock_noirq
|
||||
#endif
|
||||
static void
|
||||
__mcs_rwlock_reader_unlock_noirq(struct mcs_rwlock_lock *lock, struct mcs_rwlock_node *node)
|
||||
{
|
||||
#ifdef SPINLOCK_IN_MCS_RWLOCK
|
||||
ihk_mc_spinlock_unlock_noirq(&lock->slock);
|
||||
#else
|
||||
if(ihk_atomic_dec_return(&lock->reader.count))
|
||||
goto out;
|
||||
|
||||
if (lock->reader.next == NULL) {
|
||||
struct mcs_rwlock_node *old;
|
||||
|
||||
old = atomic_cmpxchg8(&(lock->node), &(lock->reader), 0);
|
||||
|
||||
if (old == &lock->reader) {
|
||||
goto out;
|
||||
}
|
||||
|
||||
while (lock->reader.next == NULL) {
|
||||
cpu_pause();
|
||||
}
|
||||
}
|
||||
|
||||
if(lock->reader.next->type == MCS_RWLOCK_TYPE_READER){
|
||||
mcs_rwlock_unlock_readers(lock);
|
||||
}
|
||||
else{
|
||||
lock->reader.next->locked = MCS_RWLOCK_UNLOCKED;
|
||||
}
|
||||
|
||||
out:
|
||||
preempt_enable();
|
||||
#endif
|
||||
}
|
||||
|
||||
#ifdef DEBUG_MCS_RWLOCK
|
||||
#define mcs_rwlock_writer_lock(l, n) { \
|
||||
__kprintf("[%d] call mcs_rwlock_writer_lock %p %s:%d\n", ihk_mc_get_processor_id(), (l), __FILE__, __LINE__); \
|
||||
__mcs_rwlock_writer_lock((l), (n)); \
|
||||
__kprintf("[%d] ret mcs_rwlock_writer_lock\n", ihk_mc_get_processor_id()); \
|
||||
}
|
||||
#else
|
||||
#define mcs_rwlock_writer_lock __mcs_rwlock_writer_lock
|
||||
#endif
|
||||
static void
|
||||
__mcs_rwlock_writer_lock(struct mcs_rwlock_lock *lock, struct mcs_rwlock_node_irqsave *node)
|
||||
{
|
||||
#ifdef SPINLOCK_IN_MCS_RWLOCK
|
||||
node->irqsave = ihk_mc_spinlock_lock(&lock->slock);
|
||||
#else
|
||||
node->irqsave = cpu_disable_interrupt_save();
|
||||
__mcs_rwlock_writer_lock_noirq(lock, &node->node);
|
||||
#endif
|
||||
}
|
||||
|
||||
#ifdef DEBUG_MCS_RWLOCK
|
||||
#define mcs_rwlock_writer_unlock(l, n) { \
|
||||
__kprintf("[%d] call mcs_rwlock_writer_unlock %p %s:%d\n", ihk_mc_get_processor_id(), (l), __FILE__, __LINE__); \
|
||||
__mcs_rwlock_writer_unlock((l), (n)); \
|
||||
__kprintf("[%d] ret mcs_rwlock_writer_unlock\n", ihk_mc_get_processor_id()); \
|
||||
}
|
||||
#else
|
||||
#define mcs_rwlock_writer_unlock __mcs_rwlock_writer_unlock
|
||||
#endif
|
||||
static void
|
||||
__mcs_rwlock_writer_unlock(struct mcs_rwlock_lock *lock, struct mcs_rwlock_node_irqsave *node)
|
||||
{
|
||||
#ifdef SPINLOCK_IN_MCS_RWLOCK
|
||||
ihk_mc_spinlock_unlock(&lock->slock, node->irqsave);
|
||||
#else
|
||||
__mcs_rwlock_writer_unlock_noirq(lock, &node->node);
|
||||
cpu_restore_interrupt(node->irqsave);
|
||||
#endif
|
||||
}
|
||||
|
||||
#ifdef DEBUG_MCS_RWLOCK
|
||||
#define mcs_rwlock_reader_lock(l, n) { \
|
||||
__kprintf("[%d] call mcs_rwlock_reader_lock %p %s:%d\n", ihk_mc_get_processor_id(), (l), __FILE__, __LINE__); \
|
||||
__mcs_rwlock_reader_lock((l), (n)); \
|
||||
__kprintf("[%d] ret mcs_rwlock_reader_lock\n", ihk_mc_get_processor_id()); \
|
||||
}
|
||||
#else
|
||||
#define mcs_rwlock_reader_lock __mcs_rwlock_reader_lock
|
||||
#endif
|
||||
static void
|
||||
__mcs_rwlock_reader_lock(struct mcs_rwlock_lock *lock, struct mcs_rwlock_node_irqsave *node)
|
||||
{
|
||||
#ifdef SPINLOCK_IN_MCS_RWLOCK
|
||||
node->irqsave = ihk_mc_spinlock_lock(&lock->slock);
|
||||
#else
|
||||
node->irqsave = cpu_disable_interrupt_save();
|
||||
__mcs_rwlock_reader_lock_noirq(lock, &node->node);
|
||||
#endif
|
||||
}
|
||||
|
||||
#ifdef DEBUG_MCS_RWLOCK
|
||||
#define mcs_rwlock_reader_unlock(l, n) { \
|
||||
__kprintf("[%d] call mcs_rwlock_reader_unlock %p %s:%d\n", ihk_mc_get_processor_id(), (l), __FILE__, __LINE__); \
|
||||
__mcs_rwlock_reader_unlock((l), (n)); \
|
||||
__kprintf("[%d] ret mcs_rwlock_reader_unlock\n", ihk_mc_get_processor_id()); \
|
||||
}
|
||||
#else
|
||||
#define mcs_rwlock_reader_unlock __mcs_rwlock_reader_unlock
|
||||
#endif
|
||||
static void
|
||||
__mcs_rwlock_reader_unlock(struct mcs_rwlock_lock *lock, struct mcs_rwlock_node_irqsave *node)
|
||||
{
|
||||
#ifdef SPINLOCK_IN_MCS_RWLOCK
|
||||
ihk_mc_spinlock_unlock(&lock->slock, node->irqsave);
|
||||
#else
|
||||
__mcs_rwlock_reader_unlock_noirq(lock, &node->node);
|
||||
cpu_restore_interrupt(node->irqsave);
|
||||
#endif
|
||||
}
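
/*
 * Illustrative usage sketch (not part of the patch): each acquisition of the
 * MCS read/write lock passes its own stack-resident node, and the irqsave
 * variants above keep the saved interrupt state in that node.  "example_lock"
 * and "example_resource" are hypothetical names; the lock is assumed to have
 * been initialized once with mcs_rwlock_init().
 */
static mcs_rwlock_lock_t example_lock;
static int example_resource;

static inline void example_reader_path(void)
{
	struct mcs_rwlock_node_irqsave node;

	mcs_rwlock_reader_lock(&example_lock, &node);
	(void)example_resource;				/* read-side access */
	mcs_rwlock_reader_unlock(&example_lock, &node);
}

static inline void example_writer_path(int v)
{
	struct mcs_rwlock_node_irqsave node;

	mcs_rwlock_writer_lock(&example_lock, &node);
	example_resource = v;				/* exclusive access */
	mcs_rwlock_writer_unlock(&example_lock, &node);
}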
|
||||
|
||||
#if defined(CONFIG_HAS_NMI)
|
||||
#include <arm-gic-v3.h>
|
||||
static inline int irqflags_can_interrupt(unsigned long flags)
|
||||
{
|
||||
return (flags == ICC_PMR_EL1_UNMASKED);
|
||||
}
|
||||
#else /* CONFIG_HAS_NMI */
|
||||
static inline int irqflags_can_interrupt(unsigned long flags)
|
||||
{
|
||||
return !(flags & 0x2);
|
||||
}
|
||||
#endif /* CONFIG_HAS_NMI */
|
||||
|
||||
struct ihk_rwlock {
|
||||
unsigned int lock;
|
||||
};
|
||||
|
||||
static inline void ihk_mc_rwlock_init(struct ihk_rwlock *rw)
|
||||
{
|
||||
rw->lock = 0;
|
||||
}
|
||||
|
||||
static inline void ihk_mc_read_lock(struct ihk_rwlock *rw)
|
||||
{
|
||||
unsigned int tmp, tmp2;
|
||||
|
||||
asm volatile(
|
||||
" sevl\n"
|
||||
"1: wfe\n"
|
||||
"2: ldaxr %w0, %2\n"
|
||||
" add %w0, %w0, #1\n"
|
||||
" tbnz %w0, #31, 1b\n"
|
||||
" stxr %w1, %w0, %2\n"
|
||||
" cbnz %w1, 2b\n"
|
||||
: "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
|
||||
:
|
||||
: "cc", "memory");
|
||||
}
|
||||
|
||||
static inline int ihk_mc_read_trylock(struct ihk_rwlock *rw)
|
||||
{
|
||||
unsigned int tmp, tmp2 = 1;
|
||||
|
||||
asm volatile(
|
||||
" ldaxr %w0, %2\n"
|
||||
" add %w0, %w0, #1\n"
|
||||
" tbnz %w0, #31, 1f\n"
|
||||
" stxr %w1, %w0, %2\n"
|
||||
"1:\n"
|
||||
: "=&r" (tmp), "+r" (tmp2), "+Q" (rw->lock)
|
||||
:
|
||||
: "cc", "memory");
|
||||
|
||||
return !tmp2;
|
||||
}
|
||||
|
||||
static inline void ihk_mc_read_unlock(struct ihk_rwlock *rw)
|
||||
{
|
||||
unsigned int tmp, tmp2;
|
||||
|
||||
asm volatile(
|
||||
"1: ldxr %w0, %2\n"
|
||||
" sub %w0, %w0, #1\n"
|
||||
" stlxr %w1, %w0, %2\n"
|
||||
" cbnz %w1, 1b\n"
|
||||
: "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
|
||||
:
|
||||
: "cc", "memory");
|
||||
}
|
||||
|
||||
static inline void ihk_mc_write_lock(struct ihk_rwlock *rw)
|
||||
{
|
||||
unsigned int tmp;
|
||||
|
||||
asm volatile(
|
||||
" sevl\n"
|
||||
"1: wfe\n"
|
||||
"2: ldaxr %w0, %1\n"
|
||||
" cbnz %w0, 1b\n"
|
||||
" stxr %w0, %w2, %1\n"
|
||||
" cbnz %w0, 2b\n"
|
||||
: "=&r" (tmp), "+Q" (rw->lock)
|
||||
: "r" (0x80000000)
|
||||
: "cc", "memory");
|
||||
}
|
||||
|
||||
static inline int ihk_mc_write_trylock(struct ihk_rwlock *rw)
|
||||
{
|
||||
unsigned int tmp;
|
||||
|
||||
asm volatile(
|
||||
" ldaxr %w0, %1\n"
|
||||
" cbnz %w0, 1f\n"
|
||||
" stxr %w0, %w2, %1\n"
|
||||
"1:\n"
|
||||
: "=&r" (tmp), "+Q" (rw->lock)
|
||||
: "r" (0x80000000)
|
||||
: "cc", "memory");
|
||||
|
||||
return !tmp;
|
||||
}
|
||||
|
||||
static inline void ihk_mc_write_unlock(struct ihk_rwlock *rw)
|
||||
{
|
||||
asm volatile(
|
||||
" stlr %w1, %0\n"
|
||||
: "=Q" (rw->lock) : "r" (0) : "memory");
|
||||
}
|
||||
|
||||
#define ihk_mc_read_can_lock(rw) ((rw)->lock < 0x80000000)
|
||||
#define ihk_mc_write_can_lock(rw) ((rw)->lock == 0)
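
/*
 * Illustrative usage sketch (not part of the patch) pairing the plain
 * reader/writer primitives above; "example_rw" and "example_count" are
 * hypothetical names.  ihk_mc_write_trylock() returns non-zero on success.
 */
static struct ihk_rwlock example_rw = { 0 };
static unsigned long example_count;

static inline void example_rw_usage(void)
{
	ihk_mc_read_lock(&example_rw);
	(void)example_count;			/* shared read */
	ihk_mc_read_unlock(&example_rw);

	if (ihk_mc_write_trylock(&example_rw)) {
		example_count++;		/* exclusive update */
		ihk_mc_write_unlock(&example_rw);
	}
}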
|
||||
#endif /* !__HEADER_ARM64_COMMON_ARCH_LOCK_H */
|
||||
866  arch/arm64/kernel/include/arch-memory.h  Normal file
@@ -0,0 +1,866 @@
|
||||
/* arch-memory.h COPYRIGHT FUJITSU LIMITED 2015-2018 */
|
||||
#ifndef __HEADER_ARM64_COMMON_ARCH_MEMORY_H
|
||||
#define __HEADER_ARM64_COMMON_ARCH_MEMORY_H
|
||||
|
||||
#include <const.h>
|
||||
#include <errno.h>
|
||||
|
||||
#ifndef __ASSEMBLY__
|
||||
#include <list.h>
|
||||
#include <page.h>
|
||||
void panic(const char *);
|
||||
#endif /*__ASSEMBLY__*/
|
||||
|
||||
#define _SZ4KB (1UL<<12)
|
||||
#define _SZ16KB (1UL<<14)
|
||||
#define _SZ64KB (1UL<<16)
|
||||
|
||||
#ifdef CONFIG_ARM64_64K_PAGES
|
||||
# define GRANULE_SIZE _SZ64KB
|
||||
# define BLOCK_SHIFT PAGE_SHIFT
|
||||
# define BLOCK_SIZE PAGE_SIZE
|
||||
# define TABLE_SHIFT PMD_SHIFT
|
||||
#else
|
||||
# define GRANULE_SIZE _SZ4KB
|
||||
# define BLOCK_SHIFT SECTION_SHIFT
|
||||
# define BLOCK_SIZE SECTION_SIZE
|
||||
# define TABLE_SHIFT PUD_SHIFT
|
||||
#endif
|
||||
|
||||
#define VA_BITS CONFIG_ARM64_VA_BITS
|
||||
|
||||
/*
|
||||
* Address define
|
||||
*/
|
||||
/* early alloc area address */
|
||||
/* START:_end, SIZE:512 pages */
|
||||
#define MAP_EARLY_ALLOC_SHIFT 5
|
||||
#define MAP_EARLY_ALLOC_SIZE (UL(1) << (PAGE_SHIFT + MAP_EARLY_ALLOC_SHIFT))
|
||||
|
||||
#ifndef __ASSEMBLY__
|
||||
# define ALIGN_UP(x, align) ALIGN_DOWN((x) + (align) - 1, align)
|
||||
# define ALIGN_DOWN(x, align) ((x) & ~((align) - 1))
|
||||
extern char _end[];
|
||||
# define MAP_EARLY_ALLOC (ALIGN_UP((unsigned long)_end, BLOCK_SIZE))
|
||||
# define MAP_EARLY_ALLOC_END (MAP_EARLY_ALLOC + MAP_EARLY_ALLOC_SIZE)
|
||||
#endif /* !__ASSEMBLY__ */
|
||||
|
||||
/* bootparam area address */
|
||||
/* START:early alloc area end, SIZE:2MiB */
|
||||
#define MAP_BOOT_PARAM_SHIFT 21
|
||||
#define MAP_BOOT_PARAM_SIZE (UL(1) << MAP_BOOT_PARAM_SHIFT)
|
||||
|
||||
#ifndef __ASSEMBLY__
|
||||
# define MAP_BOOT_PARAM (ALIGN_UP(MAP_EARLY_ALLOC_END, BLOCK_SIZE))
|
||||
# define MAP_BOOT_PARAM_END (MAP_BOOT_PARAM + MAP_BOOT_PARAM_SIZE)
|
||||
#endif /* !__ASSEMBLY__ */
|
||||
|
||||
/*
|
||||
* MAP_KERNEL_START is HOST MODULES_END - 8MiB.
|
||||
* It's defined by cmake.
|
||||
*/
|
||||
#if (VA_BITS == 39 && GRANULE_SIZE == _SZ4KB) /* ARM64_MEMORY_LAYOUT=1 */
|
||||
#
|
||||
# define LD_TASK_UNMAPPED_BASE UL(0x0000000400000000)
|
||||
# define TASK_UNMAPPED_BASE UL(0x0000000800000000)
|
||||
# define USER_END UL(0x0000002000000000)
|
||||
# define MAP_VMAP_START UL(0xffffffbdc0000000)
|
||||
# define MAP_VMAP_SIZE UL(0x0000000100000000)
|
||||
# define MAP_FIXED_START UL(0xffffffbffbdfd000)
|
||||
# define MAP_ST_START UL(0xffffffc000000000)
|
||||
#
|
||||
#elif (VA_BITS == 42 && GRANULE_SIZE == _SZ64KB) /* ARM64_MEMORY_LAYOUT=3 */
|
||||
#
|
||||
# define LD_TASK_UNMAPPED_BASE UL(0x0000002000000000)
|
||||
# define TASK_UNMAPPED_BASE UL(0x0000004000000000)
|
||||
# define USER_END UL(0x0000010000000000)
|
||||
# define MAP_VMAP_START UL(0xfffffdfee0000000)
|
||||
# define MAP_VMAP_SIZE UL(0x0000000100000000)
|
||||
# define MAP_FIXED_START UL(0xfffffdfffbdd0000)
|
||||
# define MAP_ST_START UL(0xfffffe0000000000)
|
||||
#
|
||||
#elif (VA_BITS == 48 && GRANULE_SIZE == _SZ4KB) /* ARM64_MEMORY_LAYOUT=2 */
|
||||
#
|
||||
# define LD_TASK_UNMAPPED_BASE UL(0x0000080000000000)
|
||||
# define TASK_UNMAPPED_BASE UL(0x0000100000000000)
|
||||
# define USER_END UL(0x0000400000000000)
|
||||
# define MAP_VMAP_START UL(0xffff7bffc0000000)
|
||||
# define MAP_VMAP_SIZE UL(0x0000000100000000)
|
||||
# define MAP_FIXED_START UL(0xffff7ffffbdfd000)
|
||||
# define MAP_ST_START UL(0xffff800000000000)
|
||||
#
|
||||
#elif (VA_BITS == 48 && GRANULE_SIZE == _SZ64KB) /* ARM64_MEMORY_LAYOUT=4 */
|
||||
#
|
||||
# define LD_TASK_UNMAPPED_BASE UL(0x0000080000000000)
|
||||
# define TASK_UNMAPPED_BASE UL(0x0000100000000000)
|
||||
# define USER_END UL(0x0000400000000000)
|
||||
# define MAP_VMAP_START UL(0xffff780000000000)
|
||||
# define MAP_VMAP_SIZE UL(0x0000000100000000)
|
||||
# define MAP_FIXED_START UL(0xffff7ffffbdd0000)
|
||||
# define MAP_ST_START UL(0xffff800000000000)
|
||||
#
|
||||
#else
|
||||
# error address space is not defined.
|
||||
#endif
|
||||
|
||||
#define MAP_ST_SIZE (MAP_KERNEL_START - MAP_ST_START)
|
||||
#define STACK_TOP(region) ((region)->user_end)
|
||||
|
||||
/*
|
||||
* pagetable define
|
||||
*/
|
||||
#if GRANULE_SIZE == _SZ4KB
|
||||
# define __PTL4_SHIFT 39
|
||||
# define __PTL3_SHIFT 30
|
||||
# define __PTL2_SHIFT 21
|
||||
# define __PTL1_SHIFT 12
|
||||
# define PTL4_INDEX_MASK ((UL(1) << 9) - 1)
|
||||
# define PTL3_INDEX_MASK PTL4_INDEX_MASK
|
||||
# define PTL2_INDEX_MASK PTL3_INDEX_MASK
|
||||
# define PTL1_INDEX_MASK PTL2_INDEX_MASK
|
||||
# define __PTL4_CONT_SHIFT (__PTL4_SHIFT + 0)
|
||||
# define __PTL3_CONT_SHIFT (__PTL3_SHIFT + 4)
|
||||
# define __PTL2_CONT_SHIFT (__PTL2_SHIFT + 4)
|
||||
# define __PTL1_CONT_SHIFT (__PTL1_SHIFT + 4)
|
||||
#elif GRANULE_SIZE == _SZ16KB
|
||||
# define __PTL4_SHIFT 47
|
||||
# define __PTL3_SHIFT 36
|
||||
# define __PTL2_SHIFT 25
|
||||
# define __PTL1_SHIFT 14
|
||||
# define PTL4_INDEX_MASK ((UL(1) << 1) - 1)
|
||||
# define PTL3_INDEX_MASK ((UL(1) << 11) - 1)
|
||||
# define PTL2_INDEX_MASK PTL3_INDEX_MASK
|
||||
# define PTL1_INDEX_MASK PTL2_INDEX_MASK
|
||||
# define __PTL4_CONT_SHIFT (__PTL4_SHIFT + 0)
|
||||
# define __PTL3_CONT_SHIFT (__PTL3_SHIFT + 0)
|
||||
# define __PTL2_CONT_SHIFT (__PTL2_SHIFT + 5)
|
||||
# define __PTL1_CONT_SHIFT (__PTL1_SHIFT + 7)
|
||||
#elif GRANULE_SIZE == _SZ64KB
|
||||
# define __PTL4_SHIFT 55
|
||||
# define __PTL3_SHIFT 42
|
||||
# define __PTL2_SHIFT 29
|
||||
# define __PTL1_SHIFT 16
|
||||
# define PTL4_INDEX_MASK 0
|
||||
# define PTL3_INDEX_MASK ((UL(1) << 6) - 1)
|
||||
# define PTL2_INDEX_MASK ((UL(1) << 13) - 1)
|
||||
# define PTL1_INDEX_MASK PTL2_INDEX_MASK
|
||||
# define __PTL4_CONT_SHIFT (__PTL4_SHIFT + 0)
|
||||
# define __PTL3_CONT_SHIFT (__PTL3_SHIFT + 0)
|
||||
# define __PTL2_CONT_SHIFT (__PTL2_SHIFT + 5)
|
||||
# define __PTL1_CONT_SHIFT (__PTL1_SHIFT + 5)
|
||||
#else
|
||||
# error granule size error.
|
||||
#endif
|
||||
|
||||
#ifndef __ASSEMBLY__
|
||||
extern int first_level_block_support;
|
||||
#endif /* __ASSEMBLY__ */
|
||||
|
||||
# define __PTL4_SIZE (UL(1) << __PTL4_SHIFT)
|
||||
# define __PTL3_SIZE (UL(1) << __PTL3_SHIFT)
|
||||
# define __PTL2_SIZE (UL(1) << __PTL2_SHIFT)
|
||||
# define __PTL1_SIZE (UL(1) << __PTL1_SHIFT)
|
||||
# define __PTL4_MASK (~(__PTL4_SIZE - 1))
|
||||
# define __PTL3_MASK (~(__PTL3_SIZE - 1))
|
||||
# define __PTL2_MASK (~(__PTL2_SIZE - 1))
|
||||
# define __PTL1_MASK (~(__PTL1_SIZE - 1))
|
||||
|
||||
# define __PTL4_CONT_SIZE (UL(1) << __PTL4_CONT_SHIFT)
|
||||
# define __PTL3_CONT_SIZE (UL(1) << __PTL3_CONT_SHIFT)
|
||||
# define __PTL2_CONT_SIZE (UL(1) << __PTL2_CONT_SHIFT)
|
||||
# define __PTL1_CONT_SIZE (UL(1) << __PTL1_CONT_SHIFT)
|
||||
# define __PTL4_CONT_MASK (~(__PTL4_CONT_SIZE - 1))
|
||||
# define __PTL3_CONT_MASK (~(__PTL3_CONT_SIZE - 1))
|
||||
# define __PTL2_CONT_MASK (~(__PTL2_CONT_SIZE - 1))
|
||||
# define __PTL1_CONT_MASK (~(__PTL1_CONT_SIZE - 1))
|
||||
# define __PTL4_CONT_COUNT (UL(1) << (__PTL4_CONT_SHIFT - __PTL4_SHIFT))
|
||||
# define __PTL3_CONT_COUNT (UL(1) << (__PTL3_CONT_SHIFT - __PTL3_SHIFT))
|
||||
# define __PTL2_CONT_COUNT (UL(1) << (__PTL2_CONT_SHIFT - __PTL2_SHIFT))
|
||||
# define __PTL1_CONT_COUNT (UL(1) << (__PTL1_CONT_SHIFT - __PTL1_SHIFT))
|
||||
|
||||
/* calculate entries */
|
||||
#if (CONFIG_ARM64_PGTABLE_LEVELS > 3) && (VA_BITS > __PTL4_SHIFT)
|
||||
# define __PTL4_ENTRIES (UL(1) << (VA_BITS - __PTL4_SHIFT))
|
||||
# define __PTL3_ENTRIES (UL(1) << (__PTL1_SHIFT - 3))
|
||||
# define __PTL2_ENTRIES (UL(1) << (__PTL1_SHIFT - 3))
|
||||
# define __PTL1_ENTRIES (UL(1) << (__PTL1_SHIFT - 3))
|
||||
#elif (CONFIG_ARM64_PGTABLE_LEVELS > 2) && (VA_BITS > __PTL3_SHIFT)
|
||||
# define __PTL4_ENTRIES 1
|
||||
# define __PTL3_ENTRIES (UL(1) << (VA_BITS - __PTL3_SHIFT))
|
||||
# define __PTL2_ENTRIES (UL(1) << (__PTL1_SHIFT - 3))
|
||||
# define __PTL1_ENTRIES (UL(1) << (__PTL1_SHIFT - 3))
|
||||
#elif (CONFIG_ARM64_PGTABLE_LEVELS > 1) && (VA_BITS > __PTL2_SHIFT)
|
||||
# define __PTL4_ENTRIES 1
|
||||
# define __PTL3_ENTRIES 1
|
||||
# define __PTL2_ENTRIES (UL(1) << (VA_BITS - __PTL2_SHIFT))
|
||||
# define __PTL1_ENTRIES (UL(1) << (__PTL1_SHIFT - 3))
|
||||
#elif VA_BITS > __PTL1_SHIFT
|
||||
# define __PTL4_ENTRIES 1
|
||||
# define __PTL3_ENTRIES 1
|
||||
# define __PTL2_ENTRIES 1
|
||||
# define __PTL1_ENTRIES (UL(1) << (VA_BITS - __PTL1_SHIFT))
|
||||
#else
|
||||
# define __PTL4_ENTRIES 1
|
||||
# define __PTL3_ENTRIES 1
|
||||
# define __PTL2_ENTRIES 1
|
||||
# define __PTL1_ENTRIES 1
|
||||
#endif
|
||||
|
||||
#ifndef __ASSEMBLY__
|
||||
static const unsigned int PTL4_SHIFT = __PTL4_SHIFT;
|
||||
static const unsigned int PTL3_SHIFT = __PTL3_SHIFT;
|
||||
static const unsigned int PTL2_SHIFT = __PTL2_SHIFT;
|
||||
static const unsigned int PTL1_SHIFT = __PTL1_SHIFT;
|
||||
static const unsigned long PTL4_SIZE = __PTL4_SIZE;
|
||||
static const unsigned long PTL3_SIZE = __PTL3_SIZE;
|
||||
static const unsigned long PTL2_SIZE = __PTL2_SIZE;
|
||||
static const unsigned long PTL1_SIZE = __PTL1_SIZE;
|
||||
static const unsigned long PTL4_MASK = __PTL4_MASK;
|
||||
static const unsigned long PTL3_MASK = __PTL3_MASK;
|
||||
static const unsigned long PTL2_MASK = __PTL2_MASK;
|
||||
static const unsigned long PTL1_MASK = __PTL1_MASK;
|
||||
static const unsigned int PTL4_ENTRIES = __PTL4_ENTRIES;
|
||||
static const unsigned int PTL3_ENTRIES = __PTL3_ENTRIES;
|
||||
static const unsigned int PTL2_ENTRIES = __PTL2_ENTRIES;
|
||||
static const unsigned int PTL1_ENTRIES = __PTL1_ENTRIES;
|
||||
static const unsigned int PTL4_CONT_SHIFT = __PTL4_CONT_SHIFT;
|
||||
static const unsigned int PTL3_CONT_SHIFT = __PTL3_CONT_SHIFT;
|
||||
static const unsigned int PTL2_CONT_SHIFT = __PTL2_CONT_SHIFT;
|
||||
static const unsigned int PTL1_CONT_SHIFT = __PTL1_CONT_SHIFT;
|
||||
static const unsigned long PTL4_CONT_SIZE = __PTL4_CONT_SIZE;
|
||||
static const unsigned long PTL3_CONT_SIZE = __PTL3_CONT_SIZE;
|
||||
static const unsigned long PTL2_CONT_SIZE = __PTL2_CONT_SIZE;
|
||||
static const unsigned long PTL1_CONT_SIZE = __PTL1_CONT_SIZE;
|
||||
static const unsigned long PTL4_CONT_MASK = __PTL4_CONT_MASK;
|
||||
static const unsigned long PTL3_CONT_MASK = __PTL3_CONT_MASK;
|
||||
static const unsigned long PTL2_CONT_MASK = __PTL2_CONT_MASK;
|
||||
static const unsigned long PTL1_CONT_MASK = __PTL1_CONT_MASK;
|
||||
static const unsigned int PTL4_CONT_COUNT = __PTL4_CONT_COUNT;
|
||||
static const unsigned int PTL3_CONT_COUNT = __PTL3_CONT_COUNT;
|
||||
static const unsigned int PTL2_CONT_COUNT = __PTL2_CONT_COUNT;
|
||||
static const unsigned int PTL1_CONT_COUNT = __PTL1_CONT_COUNT;
|
||||
#else
|
||||
# define PTL4_SHIFT __PTL4_SHIFT
|
||||
# define PTL3_SHIFT __PTL3_SHIFT
|
||||
# define PTL2_SHIFT __PTL2_SHIFT
|
||||
# define PTL1_SHIFT __PTL1_SHIFT
|
||||
# define PTL4_SIZE __PTL4_SIZE
|
||||
# define PTL3_SIZE __PTL3_SIZE
|
||||
# define PTL2_SIZE __PTL2_SIZE
|
||||
# define PTL1_SIZE __PTL1_SIZE
|
||||
# define PTL4_MASK __PTL4_MASK
|
||||
# define PTL3_MASK __PTL3_MASK
|
||||
# define PTL2_MASK __PTL2_MASK
|
||||
# define PTL1_MASK __PTL1_MASK
|
||||
# define PTL4_ENTRIES __PTL4_ENTRIES
|
||||
# define PTL3_ENTRIES __PTL3_ENTRIES
|
||||
# define PTL2_ENTRIES __PTL2_ENTRIES
|
||||
# define PTL1_ENTRIES __PTL1_ENTRIES
|
||||
# define PTL4_CONT_SHIFT __PTL4_CONT_SHIFT
|
||||
# define PTL3_CONT_SHIFT __PTL3_CONT_SHIFT
|
||||
# define PTL2_CONT_SHIFT __PTL2_CONT_SHIFT
|
||||
# define PTL1_CONT_SHIFT __PTL1_CONT_SHIFT
|
||||
# define PTL4_CONT_SIZE __PTL4_CONT_SIZE
|
||||
# define PTL3_CONT_SIZE __PTL3_CONT_SIZE
|
||||
# define PTL2_CONT_SIZE __PTL2_CONT_SIZE
|
||||
# define PTL1_CONT_SIZE __PTL1_CONT_SIZE
|
||||
# define PTL4_CONT_MASK __PTL4_CONT_MASK
|
||||
# define PTL3_CONT_MASK __PTL3_CONT_MASK
|
||||
# define PTL2_CONT_MASK __PTL2_CONT_MASK
|
||||
# define PTL1_CONT_MASK __PTL1_CONT_MASK
|
||||
# define PTL4_CONT_COUNT __PTL4_CONT_COUNT
|
||||
# define PTL3_CONT_COUNT __PTL3_CONT_COUNT
|
||||
# define PTL2_CONT_COUNT __PTL2_CONT_COUNT
|
||||
# define PTL1_CONT_COUNT __PTL1_CONT_COUNT
|
||||
#endif/*__ASSEMBLY__*/
|
||||
|
||||
#define __page_size(pgshift) (UL(1) << (pgshift))
|
||||
#define __page_mask(pgsize) (~((pgsize) - 1))
|
||||
#define __page_offset(addr, size) ((unsigned long)(addr) & ((size) - 1))
|
||||
#define __page_align(addr, size) ((unsigned long)(addr) & ~((size) - 1))
|
||||
#define __page_align_up(addr, size) __page_align((unsigned long)(addr) + (size) - 1, size)
|
||||
|
||||
/*
 * normal page
 */
|
||||
#define PAGE_SHIFT __PTL1_SHIFT
|
||||
#define PAGE_SIZE __page_size(PAGE_SHIFT)
|
||||
#define PAGE_MASK __page_mask(PAGE_SIZE)
|
||||
#define PAGE_P2ALIGN 0
|
||||
#define page_offset(addr) __page_offset(addr, PAGE_SIZE)
|
||||
#define page_align(addr) __page_align(addr, PAGE_SIZE)
|
||||
#define page_align_up(addr) __page_align_up(addr, PAGE_SIZE)
|
||||
|
||||
/*
|
||||
* large page
|
||||
*/
|
||||
#define LARGE_PAGE_SHIFT __PTL2_SHIFT
|
||||
#define LARGE_PAGE_SIZE __page_size(LARGE_PAGE_SHIFT)
|
||||
#define LARGE_PAGE_MASK __page_mask(LARGE_PAGE_SIZE)
|
||||
#define LARGE_PAGE_P2ALIGN (LARGE_PAGE_SHIFT - PAGE_SHIFT)
|
||||
#define large_page_offset(addr) __page_offset(addr, LARGE_PAGE_SIZE)
|
||||
#define large_page_align(addr) __page_align(addr, LARGE_PAGE_SIZE)
|
||||
#define large_page_align_up(addr) __page_align_up(addr, LARGE_PAGE_SIZE)
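
/*
 * Worked example (not part of the patch) of the helpers above with the 4KB
 * granule layout (PAGE_SIZE = 0x1000, LARGE_PAGE_SIZE = 0x200000):
 *
 *   page_offset(0x40001234)         == 0x234
 *   page_align(0x40001234)          == 0x40001000
 *   page_align_up(0x40001234)       == 0x40002000
 *   large_page_align(0x40001234)    == 0x40000000
 *   large_page_align_up(0x40001234) == 0x40200000
 */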
|
||||
|
||||
/*
|
||||
*
|
||||
*/
|
||||
#define TTBR_ASID_SHIFT 48
|
||||
#define TTBR_ASID_MASK (0xFFFFUL << TTBR_ASID_SHIFT)
|
||||
#define TTBR_BADDR_MASK (~TTBR_ASID_MASK)
|
||||
|
||||
#include "pgtable-hwdef.h"
|
||||
|
||||
#define KERNEL_PHYS_OFFSET
|
||||
|
||||
#define PT_PHYSMASK PHYS_MASK
|
||||
/* We allow user programs to access all the memory (D_Block, D_Page) */
|
||||
#define PFL_KERN_BLK_ATTR PROT_SECT_NORMAL_EXEC
|
||||
#define PFL_KERN_PAGE_ATTR PAGE_KERNEL_EXEC
|
||||
/* for the page table entry that points another page table (D_Table) */
|
||||
#define PFL_PDIR_TBL_ATTR PMD_TYPE_TABLE
|
||||
|
||||
#ifdef CONFIG_ARM64_64K_PAGES
|
||||
# define SWAPPER_PGTABLE_LEVELS (CONFIG_ARM64_PGTABLE_LEVELS)
|
||||
#else
|
||||
# define SWAPPER_PGTABLE_LEVELS (CONFIG_ARM64_PGTABLE_LEVELS - 1)
|
||||
#endif
|
||||
#define SWAPPER_DIR_SIZE (SWAPPER_PGTABLE_LEVELS * PAGE_SIZE)
|
||||
#define IDMAP_DIR_SIZE (3 * PAGE_SIZE)
|
||||
|
||||
/* [Page level Write Through] page cache policy: 0 = write-back, 1 = write-through */
#define PFL1_PWT 0 //< DEBUG_ARCH_DEP, wrap the direct reference in devobj.c in a function (is_pte_pwd)
/* [Page level Cache Disable] page caching: 0 = enabled, 1 = disabled */
#define PFL1_PCD 0 //< DEBUG_ARCH_DEP, wrap the direct reference in devobj.c in a function (is_pte_pcd)
|
||||
|
||||
#define PTE_NULL (0)
|
||||
|
||||
#define PTE_FILEOFF PTE_SPECIAL
|
||||
|
||||
#ifdef CONFIG_ARM64_64K_PAGES
|
||||
# define USER_STACK_PREPAGE_SIZE PAGE_SIZE
|
||||
# define USER_STACK_PAGE_MASK PAGE_MASK
|
||||
# define USER_STACK_PAGE_P2ALIGN PAGE_P2ALIGN
|
||||
# define USER_STACK_PAGE_SHIFT PAGE_SHIFT
|
||||
#else
|
||||
# define USER_STACK_PREPAGE_SIZE LARGE_PAGE_SIZE
|
||||
# define USER_STACK_PAGE_MASK LARGE_PAGE_MASK
|
||||
# define USER_STACK_PAGE_P2ALIGN LARGE_PAGE_P2ALIGN
|
||||
# define USER_STACK_PAGE_SHIFT LARGE_PAGE_SHIFT
|
||||
#endif
|
||||
|
||||
#define PT_ENTRIES (PAGE_SIZE >> 3)
|
||||
|
||||
#ifndef __ASSEMBLY__
|
||||
|
||||
#include <ihk/types.h>
|
||||
|
||||
typedef unsigned long pte_t;
|
||||
|
||||
/*
|
||||
* pagemap kernel ABI bits
|
||||
*/
|
||||
#define PM_ENTRY_BYTES sizeof(uint64_t)
|
||||
#define PM_STATUS_BITS 3
|
||||
#define PM_STATUS_OFFSET (64 - PM_STATUS_BITS)
|
||||
#define PM_STATUS_MASK (((1LL << PM_STATUS_BITS) - 1) << PM_STATUS_OFFSET)
|
||||
#define PM_STATUS(nr) (((nr) << PM_STATUS_OFFSET) & PM_STATUS_MASK)
|
||||
#define PM_PSHIFT_BITS 6
|
||||
#define PM_PSHIFT_OFFSET (PM_STATUS_OFFSET - PM_PSHIFT_BITS)
|
||||
#define PM_PSHIFT_MASK (((1LL << PM_PSHIFT_BITS) - 1) << PM_PSHIFT_OFFSET)
|
||||
#define PM_PSHIFT(x) (((uint64_t) (x) << PM_PSHIFT_OFFSET) & PM_PSHIFT_MASK)
|
||||
#define PM_PFRAME_MASK ((1LL << PM_PSHIFT_OFFSET) - 1)
|
||||
#define PM_PFRAME(x) ((x) & PM_PFRAME_MASK)
|
||||
|
||||
#define PM_PRESENT PM_STATUS(4LL)
|
||||
#define PM_SWAP PM_STATUS(2LL)
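
/*
 * Illustrative sketch (not part of the patch): how a pagemap entry for a
 * present page is assembled from the bit-field macros above.  The helper
 * name is hypothetical.
 */
static inline uint64_t example_pagemap_entry(uint64_t pfn, unsigned int pgshift)
{
	return PM_PFRAME(pfn) | PM_PSHIFT(pgshift) | PM_PRESENT;
}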
|
||||
|
||||
|
||||
/* For easy conversion, keep these the same as the architecture's definitions */
enum ihk_mc_pt_attribute {
	/* whether the page is loaded into physical memory */
	PTATTR_ACTIVE = PTE_VALID,
	/* read/write flag */
	PTATTR_WRITABLE = PTE_RDONLY, /* note: meaning is inverted relative to the common definition */
	/* user/privileged flag */
	PTATTR_USER = PTE_USER | PTE_NG,
	/* indicates the page has been modified */
	PTATTR_DIRTY = PTE_DIRTY,
	/* indicates a large page */
	PTATTR_LARGEPAGE = PMD_TABLE_BIT, /* note: meaning is inverted relative to the common definition */
	/* remap_file_page flag */
	PTATTR_FILEOFF = PTE_FILEOFF,
	/* no-execute flag */
	PTATTR_NO_EXECUTE = PTE_UXN,
	/* uncached */
	PTATTR_UNCACHABLE = PTE_ATTRINDX(1),
	/* indicates a user-space mapping */
	PTATTR_FOR_USER = UL(1) << (PHYS_MASK_SHIFT - 1),
	/* WriteCombine */
	PTATTR_WRITE_COMBINED = PTE_ATTRINDX(2),
	/* converted flag */
	ARCH_PTATTR_FLIPPED = PTE_PROT_NONE,
};
|
||||
extern enum ihk_mc_pt_attribute attr_mask;
|
||||
|
||||
static inline int pfn_is_write_combined(uintptr_t pfn)
|
||||
{
|
||||
return ((pfn & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL_NC));
|
||||
}
|
||||
|
||||
// bits whose meaning is inverted relative to the common definitions
|
||||
#define attr_flip_bits (PTATTR_WRITABLE | PTATTR_LARGEPAGE)
|
||||
|
||||
static inline int pgsize_to_tbllv(size_t pgsize);
|
||||
static inline int pte_is_type_page(const pte_t *ptep, size_t pgsize)
|
||||
{
|
||||
int ret = 0; //default D_TABLE
|
||||
int level = pgsize_to_tbllv(pgsize);
|
||||
|
||||
switch (level) {
|
||||
case 4:
|
||||
case 3:
|
||||
case 2:
|
||||
// check D_BLOCK
|
||||
ret = ((*ptep & PMD_TYPE_MASK) == PMD_TYPE_SECT);
|
||||
break;
|
||||
case 1:
|
||||
// check D_PAGE
|
||||
ret = ((*ptep & PTE_TYPE_MASK) == PTE_TYPE_PAGE);
|
||||
break;
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
static inline int pte_is_null(pte_t *ptep)
|
||||
{
|
||||
return (*ptep == PTE_NULL);
|
||||
}
|
||||
|
||||
static inline int pte_is_present(pte_t *ptep)
|
||||
{
|
||||
return !!(*ptep & PMD_SECT_VALID);
|
||||
}
|
||||
|
||||
static inline int pte_is_writable(pte_t *ptep)
|
||||
{
|
||||
extern int kprintf(const char *format, ...);
|
||||
kprintf("ERROR: %s is not implemented. \n", __func__);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline int pte_is_dirty(pte_t *ptep, size_t pgsize)
|
||||
{
|
||||
int ret = 0;
|
||||
int do_check = pte_is_type_page(ptep, pgsize);
|
||||
if (do_check) {
|
||||
ret = !!(*ptep & PTE_DIRTY);
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
static inline int pte_is_fileoff(pte_t *ptep, size_t pgsize)
|
||||
{
|
||||
int ret = 0;
|
||||
int do_check = pte_is_type_page(ptep, pgsize);
|
||||
if (do_check) {
|
||||
ret = !!(*ptep & PTE_FILEOFF);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static inline void pte_update_phys(pte_t *ptep, unsigned long phys)
|
||||
{
|
||||
*ptep = (*ptep & ~PT_PHYSMASK) | (phys & PT_PHYSMASK);
|
||||
}
|
||||
|
||||
static inline uintptr_t pte_get_phys(pte_t *ptep)
|
||||
{
|
||||
return (uintptr_t)(*ptep & PT_PHYSMASK);
|
||||
}
|
||||
|
||||
static inline off_t pte_get_off(pte_t *ptep, size_t pgsize)
|
||||
{
|
||||
return (off_t)(*ptep & PHYS_MASK);
|
||||
}
|
||||
|
||||
static inline enum ihk_mc_pt_attribute pte_get_attr(pte_t *ptep, size_t pgsize)
|
||||
{
|
||||
enum ihk_mc_pt_attribute attr;
|
||||
|
||||
attr = *ptep & attr_mask;
|
||||
attr ^= attr_flip_bits;
|
||||
if ((*ptep & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_DEVICE_nGnRE)) {
|
||||
attr |= PTATTR_UNCACHABLE;
|
||||
} else if ((*ptep & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL_NC)) {
|
||||
attr |= PTATTR_WRITE_COMBINED;
|
||||
}
|
||||
if (((pgsize == PTL2_SIZE) || (pgsize == PTL3_SIZE))
|
||||
&& ((*ptep & PMD_TYPE_MASK) == PMD_TYPE_SECT)) {
|
||||
attr |= PTATTR_LARGEPAGE;
|
||||
}
|
||||
|
||||
return attr;
|
||||
}
|
||||
|
||||
static inline void pte_make_null(pte_t *ptep, size_t pgsize)
|
||||
{
|
||||
*ptep = PTE_NULL;
|
||||
}
|
||||
|
||||
static inline void pte_make_fileoff(off_t off,
|
||||
enum ihk_mc_pt_attribute ptattr, size_t pgsize, pte_t *ptep)
|
||||
{
|
||||
if (((PTL4_SIZE == pgsize || PTL4_CONT_SIZE == pgsize)
|
||||
&& CONFIG_ARM64_PGTABLE_LEVELS > 3) ||
|
||||
((PTL3_SIZE == pgsize || PTL3_CONT_SIZE == pgsize)
|
||||
&& CONFIG_ARM64_PGTABLE_LEVELS > 2) ||
|
||||
(PTL2_SIZE == pgsize || PTL2_CONT_SIZE == pgsize) ||
|
||||
(PTL1_SIZE == pgsize || PTL1_CONT_SIZE == pgsize)) {
|
||||
*ptep = PTE_FILEOFF | off | PTE_TYPE_PAGE;
|
||||
}
|
||||
}
|
||||
|
||||
#if 0 /* XXX: workaround. cannot use panic() here */
|
||||
static inline void pte_xchg(pte_t *ptep, pte_t *valp)
|
||||
{
|
||||
*valp = xchg(ptep, *valp);
|
||||
}
|
||||
#else
|
||||
#define pte_xchg(p,vp) do { *(vp) = xchg((p), *(vp)); } while (0)
|
||||
#endif
|
||||
|
||||
static inline void pte_clear_dirty(pte_t *ptep, size_t pgsize)
|
||||
{
|
||||
int do_clear = pte_is_type_page(ptep, pgsize);
|
||||
if (do_clear) {
|
||||
*ptep = *ptep & ~PTE_DIRTY;
|
||||
}
|
||||
}
|
||||
|
||||
static inline void pte_set_dirty(pte_t *ptep, size_t pgsize)
|
||||
{
|
||||
int do_set = pte_is_type_page(ptep, pgsize);
|
||||
if (do_set) {
|
||||
*ptep |= PTE_DIRTY;
|
||||
}
|
||||
}
|
||||
|
||||
static inline int pte_is_contiguous(const pte_t *ptep)
|
||||
{
|
||||
return !!(*ptep & PTE_CONT);
|
||||
}
|
||||
|
||||
static inline int pgsize_is_contiguous(size_t pgsize)
|
||||
{
|
||||
int ret = 0;
|
||||
|
||||
if ((pgsize == PTL4_CONT_SIZE && CONFIG_ARM64_PGTABLE_LEVELS > 3) ||
|
||||
(pgsize == PTL3_CONT_SIZE && CONFIG_ARM64_PGTABLE_LEVELS > 2) ||
|
||||
(pgsize == PTL2_CONT_SIZE) ||
|
||||
(pgsize == PTL1_CONT_SIZE)) {
|
||||
ret = 1;
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
static inline int pgsize_to_tbllv(size_t pgsize)
|
||||
{
|
||||
int level = -EINVAL;
|
||||
|
||||
if ((pgsize == PTL4_CONT_SIZE || pgsize == PTL4_SIZE)
|
||||
&& (CONFIG_ARM64_PGTABLE_LEVELS > 3)) {
|
||||
level = 4;
|
||||
} else if ((pgsize == PTL3_CONT_SIZE || pgsize == PTL3_SIZE)
|
||||
&& (CONFIG_ARM64_PGTABLE_LEVELS > 2)) {
|
||||
level = 3;
|
||||
} else if (pgsize == PTL2_CONT_SIZE || pgsize == PTL2_SIZE) {
|
||||
level = 2;
|
||||
} else if (pgsize == PTL1_CONT_SIZE || pgsize == PTL1_SIZE) {
|
||||
level = 1;
|
||||
}
|
||||
return level;
|
||||
}
|
||||
|
||||
static inline int pgsize_to_pgshift(size_t pgsize)
|
||||
{
|
||||
/* We need to use if instead of switch because
|
||||
* sometimes PTLX_CONT_SIZE == PTLX_SIZE
|
||||
*/
|
||||
if (pgsize == PTL4_CONT_SIZE) {
|
||||
if (CONFIG_ARM64_PGTABLE_LEVELS > 3) {
|
||||
return PTL4_CONT_SHIFT;
|
||||
}
|
||||
} else if (pgsize == PTL4_SIZE) {
|
||||
if (CONFIG_ARM64_PGTABLE_LEVELS > 3) {
|
||||
return PTL4_SHIFT;
|
||||
}
|
||||
} else if (pgsize == PTL3_CONT_SIZE) {
|
||||
if (CONFIG_ARM64_PGTABLE_LEVELS > 2) {
|
||||
return PTL3_CONT_SHIFT;
|
||||
}
|
||||
} else if (pgsize == PTL3_SIZE) {
|
||||
if (CONFIG_ARM64_PGTABLE_LEVELS > 2) {
|
||||
return PTL3_SHIFT;
|
||||
}
|
||||
} else if (pgsize == PTL2_CONT_SIZE) {
|
||||
return PTL2_CONT_SHIFT;
|
||||
} else if (pgsize == PTL2_SIZE) {
|
||||
return PTL2_SHIFT;
|
||||
} else if (pgsize == PTL1_CONT_SIZE) {
|
||||
return PTL1_CONT_SHIFT;
|
||||
} else if (pgsize == PTL1_SIZE) {
|
||||
return PTL1_SHIFT;
|
||||
}
|
||||
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
static inline size_t tbllv_to_pgsize(int level)
|
||||
{
|
||||
size_t pgsize = 0;
|
||||
|
||||
switch (level) {
|
||||
case 4:
|
||||
if (CONFIG_ARM64_PGTABLE_LEVELS > 3) {
|
||||
pgsize = PTL4_SIZE;
|
||||
} else {
|
||||
panic("page table level 4 is invalid.");
|
||||
}
|
||||
break;
|
||||
case 3:
|
||||
if (CONFIG_ARM64_PGTABLE_LEVELS > 2) {
|
||||
pgsize = PTL3_SIZE;
|
||||
} else {
|
||||
panic("page table level 3 is invalid.");
|
||||
}
|
||||
break;
|
||||
case 2:
|
||||
pgsize = PTL2_SIZE;
|
||||
break;
|
||||
case 1:
|
||||
pgsize = PTL1_SIZE;
|
||||
break;
|
||||
default:
|
||||
panic("page table level is invalid.");
|
||||
}
|
||||
return pgsize;
|
||||
}
|
||||
|
||||
static inline size_t tbllv_to_contpgsize(int level)
|
||||
{
|
||||
size_t pgsize = 0;
|
||||
|
||||
switch (level) {
|
||||
case 4:
|
||||
if (CONFIG_ARM64_PGTABLE_LEVELS > 3) {
|
||||
pgsize = PTL4_CONT_SIZE;
|
||||
} else {
|
||||
panic("page table level 4 is invalid.");
|
||||
}
|
||||
break;
|
||||
case 3:
|
||||
if (CONFIG_ARM64_PGTABLE_LEVELS > 2) {
|
||||
pgsize = PTL3_CONT_SIZE;
|
||||
} else {
|
||||
panic("page table level 3 is invalid.");
|
||||
}
|
||||
break;
|
||||
case 2:
|
||||
pgsize = PTL2_CONT_SIZE;
|
||||
break;
|
||||
case 1:
|
||||
pgsize = PTL1_CONT_SIZE;
|
||||
break;
|
||||
default:
|
||||
panic("page table level is invalid.");
|
||||
}
|
||||
return pgsize;
|
||||
}
|
||||
|
||||
static inline int tbllv_to_contpgshift(int level)
|
||||
{
|
||||
int ret = 0;
|
||||
|
||||
switch (level) {
|
||||
case 4:
|
||||
if (CONFIG_ARM64_PGTABLE_LEVELS > 3) {
|
||||
ret = PTL4_CONT_SHIFT;
|
||||
} else {
|
||||
panic("page table level 4 is invalid.");
|
||||
}
|
||||
|
||||
break;
|
||||
case 3:
|
||||
if (CONFIG_ARM64_PGTABLE_LEVELS > 2) {
|
||||
ret = PTL3_CONT_SHIFT;
|
||||
} else {
|
||||
panic("page table level 3 is invalid.");
|
||||
}
|
||||
|
||||
break;
|
||||
case 2:
|
||||
ret = PTL2_CONT_SHIFT;
|
||||
break;
|
||||
case 1:
|
||||
ret = PTL1_CONT_SHIFT;
|
||||
break;
|
||||
default:
|
||||
panic("page table level is invalid.");
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
static inline pte_t *get_contiguous_head(pte_t *__ptep, size_t __pgsize)
{
	unsigned long align;
	int shift = 0;

	switch (pgsize_to_tbllv(__pgsize)) {
	case 4:
		if (CONFIG_ARM64_PGTABLE_LEVELS > 3) {
			shift = PTL4_CONT_SHIFT - PTL4_SHIFT;
		} else {
			panic("page table level 4 is invalid.");
		}
		break;
	case 3:
		if (CONFIG_ARM64_PGTABLE_LEVELS > 2) {
			shift = PTL3_CONT_SHIFT - PTL3_SHIFT;
		} else {
			panic("page table level 3 is invalid.");
		}
		break;
	case 2:
		shift = PTL2_CONT_SHIFT - PTL2_SHIFT;
		break;
	case 1:
		shift = PTL1_CONT_SHIFT - PTL1_SHIFT;
		break;
	default:
		panic("page table level is invalid.");
	}
	align = sizeof(*__ptep) << shift;
	return (pte_t *)__page_align(__ptep, align);
}

static inline pte_t *get_contiguous_tail(pte_t *__ptep, size_t __pgsize)
{
	unsigned long align;
	int shift = 0;

	switch (pgsize_to_tbllv(__pgsize)) {
	case 4:
		if (CONFIG_ARM64_PGTABLE_LEVELS > 3) {
			shift = PTL4_CONT_SHIFT - PTL4_SHIFT;
		} else {
			panic("page table level 4 is invalid.");
		}
		break;
	case 3:
		if (CONFIG_ARM64_PGTABLE_LEVELS > 2) {
			shift = PTL3_CONT_SHIFT - PTL3_SHIFT;
		} else {
			panic("page table level 3 is invalid.");
		}
		break;
	case 2:
		shift = PTL2_CONT_SHIFT - PTL2_SHIFT;
		break;
	case 1:
		shift = PTL1_CONT_SHIFT - PTL1_SHIFT;
		break;
	default:
		panic("page table level is invalid.");
	}
	align = sizeof(*__ptep) << shift;
	return (pte_t *)__page_align_up(__ptep + 1, align) - 1;
}

static inline int split_contiguous_pages(pte_t *ptep, size_t pgsize)
{
	int ret;
	pte_t *head = get_contiguous_head(ptep, pgsize);
	pte_t *tail = get_contiguous_tail(ptep, pgsize);
	pte_t *ptr;

	uintptr_t phys;
	struct page *page;

	phys = pte_get_phys(head);
	page = phys_to_page(phys);
	if (page && (page_is_in_memobj(page)
			|| page_is_multi_mapped(page))) {
		ret = -EINVAL;
		goto out;
	}

	for (ptr = head; ptr <= tail; ptr++) {
		*ptr &= ~PTE_CONT;
	}

	ret = 0;
out:
	return ret;
}

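get_contiguous_head()/get_contiguous_tail() round a PTE pointer down/up to the boundary of its contiguous group, where the alignment is sizeof(pte_t) << (CONT_SHIFT - SHIFT). A standalone sketch of that arithmetic, assuming 8-byte PTEs and a 16-entry group (shift of 4), and modelling __page_align()/__page_align_up() as plain round-down/round-up (an assumption about their behaviour):

#include <stdint.h>
#include <stdio.h>

typedef uint64_t pte_t;

static pte_t *cont_head(pte_t *ptep, unsigned int shift)
{
	uintptr_t align = sizeof(*ptep) << shift;	/* 8 << 4 = 128 bytes */

	return (pte_t *)((uintptr_t)ptep & ~(align - 1));	/* round down */
}

static pte_t *cont_tail(pte_t *ptep, unsigned int shift)
{
	uintptr_t align = sizeof(*ptep) << shift;
	uintptr_t up = ((uintptr_t)(ptep + 1) + align - 1) & ~(align - 1);

	return (pte_t *)up - 1;		/* last entry of the group */
}

int main(void)
{
	static pte_t table[32] __attribute__((aligned(128)));
	pte_t *p = &table[19];		/* arbitrary entry in the second group */

	printf("head index %td, tail index %td\n",
	       cont_head(p, 4) - table, cont_tail(p, 4) - table);
	/* prints "head index 16, tail index 31" */
	return 0;
}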
static inline int page_is_contiguous_head(pte_t *ptep, size_t pgsize)
{
	pte_t *ptr = get_contiguous_head(ptep, pgsize);

	return (ptr == ptep);
}

static inline int page_is_contiguous_tail(pte_t *ptep, size_t pgsize)
{
	pte_t *ptr = get_contiguous_tail(ptep, pgsize);

	return (ptr == ptep);
}

/* Return true if PTE doesn't belong to a contiguous PTE group or PTE
 * is the head of a contiguous PTE group
 */
static inline int pte_is_head(pte_t *ptep, pte_t *old, size_t cont_size)
{
	if (!pte_is_contiguous(old))
		return 1;
	return page_is_contiguous_head(ptep, cont_size);
}

struct page_table;
void arch_adjust_allocate_page_size(struct page_table *pt,
				    uintptr_t fault_addr,
				    pte_t *ptep,
				    void **pgaddrp,
				    size_t *pgsizep);
void set_pte(pte_t *ppte, unsigned long phys, enum ihk_mc_pt_attribute attr);
pte_t *get_pte(struct page_table *pt, void *virt, enum ihk_mc_pt_attribute attr);

struct page_table *get_init_page_table(void);
void *early_alloc_pages(int nr_pages);
void *get_last_early_heap(void);
void flush_tlb(void);
void flush_tlb_single(unsigned long addr);

void *map_fixed_area(unsigned long phys, unsigned long size, int uncachable);

void set_address_space_id(struct page_table *pt, int asid);
int get_address_space_id(const struct page_table *pt);

typedef pte_t translation_table_t;
void set_translation_table(struct page_table *pt, translation_table_t* tt);
translation_table_t* get_translation_table(const struct page_table *pt);
translation_table_t* get_translation_table_as_paddr(const struct page_table *pt);

extern unsigned long ap_trampoline;
//#define AP_TRAMPOLINE 0x10000
#define AP_TRAMPOLINE_SIZE 0x2000

/* Local is cachable */
#define IHK_IKC_QUEUE_PT_ATTR (PTATTR_NO_EXECUTE | PTATTR_WRITABLE)

#endif /* !__ASSEMBLY__ */

#endif /* !__HEADER_ARM64_COMMON_ARCH_MEMORY_H */
arch/arm64/kernel/include/arch-perfctr.h (new file, 63 lines)
@@ -0,0 +1,63 @@
|
||||
/* arch-perfctr.h COPYRIGHT FUJITSU LIMITED 2016-2018 */
|
||||
#ifndef __ARCH_PERFCTR_H__
|
||||
#define __ARCH_PERFCTR_H__
|
||||
|
||||
#include <ihk/types.h>
|
||||
#include <ihk/cpu.h>
|
||||
#include <bitops.h>
|
||||
|
||||
struct per_cpu_arm_pmu {
|
||||
int num_events;
|
||||
#define ARMV8_PMUV3_MAX_COMMON_EVENTS 0x40
|
||||
DECLARE_BITMAP(pmceid_bitmap, ARMV8_PMUV3_MAX_COMMON_EVENTS);
|
||||
};
|
||||
|
||||
/* @ref.impl arch/arm64/include/asm/pmu.h */
|
||||
struct arm_pmu {
|
||||
struct ihk_mc_interrupt_handler* handler;
|
||||
uint32_t (*read_counter)(int);
|
||||
void (*write_counter)(int, uint32_t);
|
||||
void (*reset)(void*);
|
||||
int (*enable_pmu)(void);
|
||||
void (*disable_pmu)(void);
|
||||
int (*enable_counter)(unsigned long counter_mask);
|
||||
int (*disable_counter)(unsigned long counter_mask);
|
||||
int (*enable_intens)(unsigned long counter_mask);
|
||||
int (*disable_intens)(unsigned long counter_mask);
|
||||
int (*set_event_filter)(unsigned long*, int);
|
||||
void (*write_evtype)(int, uint32_t);
|
||||
int (*get_event_idx)(int num_events, unsigned long used_mask,
|
||||
unsigned long config);
|
||||
int (*map_event)(uint32_t, uint64_t);
|
||||
int (*map_hw_event)(uint64_t config);
|
||||
int (*map_cache_event)(uint64_t config);
|
||||
int (*map_raw_event)(uint64_t config);
|
||||
void (*enable_user_access_pmu_regs)(void);
|
||||
void (*disable_user_access_pmu_regs)(void);
|
||||
int (*counter_mask_valid)(unsigned long counter_mask);
|
||||
struct per_cpu_arm_pmu *per_cpu;
|
||||
};
|
||||
|
||||
static inline const struct arm_pmu* get_cpu_pmu(void)
|
||||
{
|
||||
extern struct arm_pmu cpu_pmu;
|
||||
return &cpu_pmu;
|
||||
}
|
||||
|
||||
static inline const struct per_cpu_arm_pmu *get_per_cpu_pmu(void)
|
||||
{
|
||||
const struct arm_pmu *cpu_pmu = get_cpu_pmu();
|
||||
|
||||
return &cpu_pmu->per_cpu[ihk_mc_get_processor_id()];
|
||||
}
|
||||
|
||||
int arm64_init_perfctr(void);
|
||||
void arm64_init_per_cpu_perfctr(void);
|
||||
int arm64_enable_pmu(void);
|
||||
void arm64_disable_pmu(void);
|
||||
int armv8pmu_init(struct arm_pmu* cpu_pmu);
|
||||
void armv8pmu_per_cpu_init(struct per_cpu_arm_pmu *per_cpu);
|
||||
void arm64_enable_user_access_pmu_regs(void);
|
||||
void arm64_disable_user_access_pmu_regs(void);
|
||||
|
||||
#endif
|
||||
arch/arm64/kernel/include/arch-string.h (new file, 13 lines)
@@ -0,0 +1,13 @@
/* arch-string.h COPYRIGHT FUJITSU LIMITED 2016-2017 */
#ifndef __HEADER_ARM64_COMMON_ARCH_STRING_H
#define __HEADER_ARM64_COMMON_ARCH_STRING_H

#define ARCH_FAST_MEMCPY

extern void *__inline_memcpy(void *to, const void *from, size_t t);

#define ARCH_FAST_MEMSET

extern void *__inline_memset(void *s, unsigned long c, size_t count);

#endif /* __HEADER_ARM64_COMMON_ARCH_STRING_H */
arch/arm64/kernel/include/arch-timer.h (new file, 31 lines)
@@ -0,0 +1,31 @@
|
||||
/* arch-timer.h COPYRIGHT FUJITSU LIMITED 2016-2018 */
|
||||
#ifndef __HEADER_ARM64_COMMON_ARCH_TIMER_H
|
||||
#define __HEADER_ARM64_COMMON_ARCH_TIMER_H
|
||||
|
||||
#include <ihk/cpu.h>
|
||||
|
||||
/* @ref.impl include/clocksource/arm_arch_timer.h */
|
||||
#define ARCH_TIMER_USR_PCT_ACCESS_EN (1 << 0) /* physical counter */
|
||||
#define ARCH_TIMER_USR_VCT_ACCESS_EN (1 << 1) /* virtual counter */
|
||||
#define ARCH_TIMER_VIRT_EVT_EN (1 << 2)
|
||||
#define ARCH_TIMER_EVT_TRIGGER_SHIFT (4)
|
||||
#define ARCH_TIMER_EVT_TRIGGER_MASK (0xF << ARCH_TIMER_EVT_TRIGGER_SHIFT)
|
||||
#define ARCH_TIMER_USR_VT_ACCESS_EN (1 << 8) /* virtual timer registers */
|
||||
#define ARCH_TIMER_USR_PT_ACCESS_EN (1 << 9) /* physical timer registers */
|
||||
|
||||
/* @ref.impl linux4.10.16 */
|
||||
/* include/clocksource/arm_arch_timer.h */
|
||||
#define ARCH_TIMER_CTRL_ENABLE (1 << 0)
|
||||
#define ARCH_TIMER_CTRL_IT_MASK (1 << 1)
|
||||
#define ARCH_TIMER_CTRL_IT_STAT (1 << 2)
|
||||
|
||||
enum arch_timer_reg {
|
||||
ARCH_TIMER_REG_CTRL,
|
||||
ARCH_TIMER_REG_TVAL,
|
||||
};
|
||||
|
||||
extern int get_timer_intrid(void);
|
||||
extern void arch_timer_init(void);
|
||||
extern struct ihk_mc_interrupt_handler *get_timer_handler(void);
|
||||
|
||||
#endif /* __HEADER_ARM64_COMMON_ARCH_TIMER_H */
|
||||
arch/arm64/kernel/include/arch/auxvec.h (new file, 7 lines)
@@ -0,0 +1,7 @@
/* auxvec.h COPYRIGHT FUJITSU LIMITED 2016 */
#ifndef __HEADER_ARM64_ARCH_AUXVEC_H
#define __HEADER_ARM64_ARCH_AUXVEC_H

#define AT_SYSINFO_EHDR	33

#endif /* __HEADER_ARM64_ARCH_AUXVEC_H */
arch/arm64/kernel/include/arch/cpu.h (new file, 107 lines)
@@ -0,0 +1,107 @@
|
||||
/* cpu.h COPYRIGHT FUJITSU LIMITED 2016-2018 */
|
||||
#ifndef __HEADER_ARM64_ARCH_CPU_H
|
||||
#define __HEADER_ARM64_ARCH_CPU_H
|
||||
|
||||
#ifndef __ASSEMBLY__
|
||||
|
||||
#define sev() asm volatile("sev" : : : "memory")
|
||||
#define wfe() asm volatile("wfe" : : : "memory")
|
||||
#define wfi() asm volatile("wfi" : : : "memory")
|
||||
|
||||
#define isb() asm volatile("isb" : : : "memory")
|
||||
#define dmb(opt) asm volatile("dmb " #opt : : : "memory")
|
||||
#define dsb(opt) asm volatile("dsb " #opt : : : "memory")
|
||||
|
||||
#include <registers.h>
|
||||
|
||||
#define mb() dsb(sy)
|
||||
#define rmb() dsb(ld)
|
||||
#define wmb() dsb(st)
|
||||
|
||||
#define dma_rmb() dmb(oshld)
|
||||
#define dma_wmb() dmb(oshst)
|
||||
|
||||
//#ifndef CONFIG_SMP
|
||||
//#else
|
||||
#define smp_mb() dmb(ish)
|
||||
#define smp_rmb() dmb(ishld)
|
||||
#define smp_wmb() dmb(ishst)
|
||||
|
||||
#define arch_barrier() smp_mb()
|
||||
|
||||
#define smp_store_release(p, v) \
|
||||
do { \
|
||||
compiletime_assert_atomic_type(*p); \
|
||||
switch (sizeof(*p)) { \
|
||||
case 4: \
|
||||
asm volatile ("stlr %w1, %0" \
|
||||
: "=Q" (*p) : "r" (v) : "memory"); \
|
||||
break; \
|
||||
case 8: \
|
||||
asm volatile ("stlr %1, %0" \
|
||||
: "=Q" (*p) : "r" (v) : "memory"); \
|
||||
break; \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
#define smp_load_acquire(p) \
|
||||
({ \
|
||||
typeof(*p) ___p1; \
|
||||
compiletime_assert_atomic_type(*p); \
|
||||
switch (sizeof(*p)) { \
|
||||
case 4: \
|
||||
asm volatile ("ldar %w0, %1" \
|
||||
: "=r" (___p1) : "Q" (*p) : "memory"); \
|
||||
break; \
|
||||
case 8: \
|
||||
asm volatile ("ldar %0, %1" \
|
||||
: "=r" (___p1) : "Q" (*p) : "memory"); \
|
||||
break; \
|
||||
} \
|
||||
___p1; \
|
||||
})
|
||||
//#endif /*CONFIG_SMP*/
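A hedged usage sketch of the acquire/release pair above (the variables are illustrative, not part of this header): a consumer that observes ready != 0 through smp_load_acquire() is guaranteed to also observe the producer's earlier store to payload.

static unsigned long payload;
static int ready;

static inline void example_publish(unsigned long value)
{
	payload = value;			/* plain store */
	smp_store_release(&ready, 1);		/* orders the payload store before the flag */
}

static inline unsigned long example_consume(void)
{
	while (!smp_load_acquire(&ready))	/* orders the flag load before the payload read */
		;
	return payload;
}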
|
||||
|
||||
#define read_barrier_depends() do { } while(0)
|
||||
#define smp_read_barrier_depends() do { } while(0)
|
||||
|
||||
#define set_mb(var, value) do { var = value; smp_mb(); } while (0)
|
||||
#define nop() asm volatile("nop");
|
||||
|
||||
#define smp_mb__before_atomic() smp_mb()
|
||||
#define smp_mb__after_atomic() smp_mb()
|
||||
|
||||
#define read_tsc() \
|
||||
({ \
|
||||
unsigned long cval; \
|
||||
cval = rdtsc(); \
|
||||
cval; \
|
||||
})
|
||||
|
||||
void init_tod_data(void);
|
||||
|
||||
#if defined(CONFIG_HAS_NMI)
|
||||
static inline void cpu_enable_nmi(void)
|
||||
{
|
||||
asm volatile("msr daifclr, #2": : : "memory");
|
||||
}
|
||||
|
||||
static inline void cpu_disable_nmi(void)
|
||||
{
|
||||
asm volatile("msr daifset, #2": : : "memory");
|
||||
}
|
||||
#else/*defined(CONFIG_HAS_NMI)*/
|
||||
static inline void cpu_enable_nmi(void)
|
||||
{
|
||||
}
|
||||
|
||||
static inline void cpu_disable_nmi(void)
|
||||
{
|
||||
}
|
||||
#endif/*defined(CONFIG_HAS_NMI)*/
|
||||
|
||||
#endif /* __ASSEMBLY__ */
|
||||
|
||||
void arch_flush_icache_all(void);
|
||||
|
||||
#endif /* !__HEADER_ARM64_ARCH_CPU_H */
|
||||
arch/arm64/kernel/include/arch/mm.h (new file, 17 lines)
@@ -0,0 +1,17 @@
/* mm.h COPYRIGHT FUJITSU LIMITED 2016 */
#ifndef __HEADER_ARM64_ARCH_MM_H
#define __HEADER_ARM64_ARCH_MM_H

struct process_vm;

static inline void
flush_nfo_tlb()
{
}

static inline void
flush_nfo_tlb_mm(struct process_vm *vm)
{
}

#endif /* __HEADER_ARM64_ARCH_MM_H */
arch/arm64/kernel/include/arch/mman.h (new file, 36 lines)
@@ -0,0 +1,36 @@
/* mman.h COPYRIGHT FUJITSU LIMITED 2015-2016 */
/* @ref.impl linux-linaro/include/uapi/asm-generic/mman.h */

#ifndef __HEADER_ARM64_ARCH_MMAN_H
#define __HEADER_ARM64_ARCH_MMAN_H

#include <arch-memory.h>

/*
 * mapping flags
 */
#define MAP_GROWSDOWN	0x0100		/* stack-like segment */
#define MAP_DENYWRITE	0x0800		/* ETXTBSY */
#define MAP_EXECUTABLE	0x1000		/* mark it as an executable */
#define MAP_LOCKED	0x2000		/* pages are locked */
#define MAP_NORESERVE	0x4000		/* don't check for reservations */
#define MAP_POPULATE	0x8000		/* populate (prefault) pagetables */
#define MAP_NONBLOCK	0x10000		/* do not block on IO */
#define MAP_STACK	0x20000		/* give out an address that is best suited for process/thread stacks */
#define MAP_HUGETLB	0x40000		/* create a huge page mapping */

/* Bits [26:31] are reserved, see mman-common.h for MAP_HUGETLB usage */
#define MAP_HUGE_SHIFT			26
#define MAP_HUGE_FIRST_BLOCK		(__PTL3_SHIFT << MAP_HUGE_SHIFT)
#define MAP_HUGE_FIRST_CONT_BLOCK	(__PTL3_CONT_SHIFT << MAP_HUGE_SHIFT)
#define MAP_HUGE_SECOND_BLOCK		(__PTL2_SHIFT << MAP_HUGE_SHIFT)
#define MAP_HUGE_SECOND_CONT_BLOCK	(__PTL2_CONT_SHIFT << MAP_HUGE_SHIFT)
#define MAP_HUGE_THIRD_CONT_BLOCK	(__PTL1_CONT_SHIFT << MAP_HUGE_SHIFT)

/*
 * for mlockall()
 */
#define MCL_CURRENT	1		/* lock all current mappings */
#define MCL_FUTURE	2		/* lock all future mappings */

#endif /* __HEADER_ARM64_ARCH_MMAN_H */
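The MAP_HUGE_* constants above place the desired page shift into bits [31:26] of the mmap() flags. A hedged usage sketch follows; it assumes a 4KB granule where __PTL2_SHIFT is 21 (2MB blocks), and uses EX_-prefixed copies of the constants purely for illustration:

#define _GNU_SOURCE
#include <sys/mman.h>
#include <stddef.h>

#define EX_MAP_HUGE_SHIFT	26
#define EX_MAP_HUGETLB		0x40000
#define EX_MAP_HUGE_2MB		(21 << EX_MAP_HUGE_SHIFT)	/* __PTL2_SHIFT << MAP_HUGE_SHIFT */

static void *map_2mb_region(size_t len)
{
	/* bits [31:26] of the flags carry the page shift of the requested block size */
	return mmap(NULL, len, PROT_READ | PROT_WRITE,
		    MAP_PRIVATE | MAP_ANONYMOUS | EX_MAP_HUGETLB | EX_MAP_HUGE_2MB,
		    -1, 0);
}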
arch/arm64/kernel/include/arch/shm.h (new file, 40 lines)
@@ -0,0 +1,40 @@
|
||||
/* shm.h COPYRIGHT FUJITSU LIMITED 2015-2016 */
|
||||
#ifndef __HEADER_ARM64_ARCH_SHM_H
|
||||
#define __HEADER_ARM64_ARCH_SHM_H
|
||||
|
||||
#include <arch-memory.h>
|
||||
|
||||
/* shmflg */
|
||||
#define SHM_HUGE_SHIFT 26
|
||||
#define SHM_HUGE_FIRST_BLOCK (__PTL3_SHIFT << SHM_HUGE_SHIFT)
|
||||
#define SHM_HUGE_FIRST_CONT_BLOCK (__PTL3_CONT_SHIFT << SHM_HUGE_SHIFT)
|
||||
#define SHM_HUGE_SECOND_BLOCK (__PTL2_SHIFT << SHM_HUGE_SHIFT)
|
||||
#define SHM_HUGE_SECOND_CONT_BLOCK (__PTL2_CONT_SHIFT << SHM_HUGE_SHIFT)
|
||||
#define SHM_HUGE_THIRD_CONT_BLOCK (__PTL1_CONT_SHIFT << SHM_HUGE_SHIFT)
|
||||
|
||||
struct ipc_perm {
|
||||
key_t key;
|
||||
uid_t uid;
|
||||
gid_t gid;
|
||||
uid_t cuid;
|
||||
gid_t cgid;
|
||||
uint16_t mode;
|
||||
uint8_t padding[2];
|
||||
uint16_t seq;
|
||||
uint8_t padding2[22];
|
||||
};
|
||||
|
||||
struct shmid_ds {
|
||||
struct ipc_perm shm_perm;
|
||||
size_t shm_segsz;
|
||||
time_t shm_atime;
|
||||
time_t shm_dtime;
|
||||
time_t shm_ctime;
|
||||
pid_t shm_cpid;
|
||||
pid_t shm_lpid;
|
||||
uint64_t shm_nattch;
|
||||
uint8_t padding[12];
|
||||
int init_pgshift;
|
||||
};
|
||||
|
||||
#endif /* __HEADER_ARM64_ARCH_SHM_H */
|
||||
arch/arm64/kernel/include/arm-gic-v2.h (new file, 106 lines)
@@ -0,0 +1,106 @@
|
||||
/* arm-gic-v2.h COPYRIGHT FUJITSU LIMITED 2015-2017 */
|
||||
/*
|
||||
* include/linux/irqchip/arm-gic.h
|
||||
*
|
||||
* Copyright (C) 2002 ARM Limited, All Rights Reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*/
|
||||
#ifndef __LINUX_IRQCHIP_ARM_GIC_H
|
||||
#define __LINUX_IRQCHIP_ARM_GIC_H
|
||||
|
||||
/* check config */
#if defined(CONFIG_HAS_NMI) && !defined(CONFIG_ARM_GIC_V3)
# error GICv2 does not support NMI
#endif
|
||||
|
||||
/* @ref.impl include/linux/irqchip/arm-gic.h */
|
||||
|
||||
#define GIC_CPU_CTRL 0x00
|
||||
#define GIC_CPU_PRIMASK 0x04
|
||||
#define GIC_CPU_BINPOINT 0x08
|
||||
#define GIC_CPU_INTACK 0x0c
|
||||
#define GIC_CPU_EOI 0x10
|
||||
#define GIC_CPU_RUNNINGPRI 0x14
|
||||
#define GIC_CPU_HIGHPRI 0x18
|
||||
#define GIC_CPU_ALIAS_BINPOINT 0x1c
|
||||
#define GIC_CPU_ACTIVEPRIO 0xd0
|
||||
#define GIC_CPU_IDENT 0xfc
|
||||
|
||||
#define GICC_ENABLE 0x1
|
||||
#define GICC_INT_PRI_THRESHOLD 0xf0
|
||||
#define GICC_IAR_INT_ID_MASK 0x3ff
|
||||
#define GICC_INT_SPURIOUS 1023
|
||||
#define GICC_DIS_BYPASS_MASK 0x1e0
|
||||
|
||||
#define GIC_DIST_CTRL 0x000
|
||||
#define GIC_DIST_CTR 0x004
|
||||
#define GIC_DIST_IGROUP 0x080
|
||||
#define GIC_DIST_ENABLE_SET 0x100
|
||||
#define GIC_DIST_ENABLE_CLEAR 0x180
|
||||
#define GIC_DIST_PENDING_SET 0x200
|
||||
#define GIC_DIST_PENDING_CLEAR 0x280
|
||||
#define GIC_DIST_ACTIVE_SET 0x300
|
||||
#define GIC_DIST_ACTIVE_CLEAR 0x380
|
||||
#define GIC_DIST_PRI 0x400
|
||||
#define GIC_DIST_TARGET 0x800
|
||||
#define GIC_DIST_CONFIG 0xc00
|
||||
#define GIC_DIST_SOFTINT 0xf00
|
||||
#define GIC_DIST_SGI_PENDING_CLEAR 0xf10
|
||||
#define GIC_DIST_SGI_PENDING_SET 0xf20
|
||||
|
||||
#define GICD_ENABLE 0x1
|
||||
#define GICD_DISABLE 0x0
|
||||
#define GICD_INT_ACTLOW_LVLTRIG 0x0
|
||||
#define GICD_INT_EN_CLR_X32 0xffffffff
|
||||
#define GICD_INT_EN_SET_SGI 0x0000ffff
|
||||
#define GICD_INT_EN_CLR_PPI 0xffff0000
|
||||
|
||||
#ifdef CONFIG_HAS_NMI
|
||||
#define GICD_INT_NMI_PRI 0x40
|
||||
#define GICD_INT_DEF_PRI 0xc0U
|
||||
#else
|
||||
#define GICD_INT_DEF_PRI 0xa0U
|
||||
#endif
|
||||
#define GICD_INT_DEF_PRI_X4 ((GICD_INT_DEF_PRI << 24) |\
|
||||
(GICD_INT_DEF_PRI << 16) |\
|
||||
(GICD_INT_DEF_PRI << 8) |\
|
||||
GICD_INT_DEF_PRI)
|
||||
|
||||
#define GICH_HCR 0x0
|
||||
#define GICH_VTR 0x4
|
||||
#define GICH_VMCR 0x8
|
||||
#define GICH_MISR 0x10
|
||||
#define GICH_EISR0 0x20
|
||||
#define GICH_EISR1 0x24
|
||||
#define GICH_ELRSR0 0x30
|
||||
#define GICH_ELRSR1 0x34
|
||||
#define GICH_APR 0xf0
|
||||
#define GICH_LR0 0x100
|
||||
|
||||
#define GICH_HCR_EN (1 << 0)
|
||||
#define GICH_HCR_UIE (1 << 1)
|
||||
|
||||
#define GICH_LR_VIRTUALID (0x3ff << 0)
|
||||
#define GICH_LR_PHYSID_CPUID_SHIFT (10)
|
||||
#define GICH_LR_PHYSID_CPUID (7 << GICH_LR_PHYSID_CPUID_SHIFT)
|
||||
#define GICH_LR_STATE (3 << 28)
|
||||
#define GICH_LR_PENDING_BIT (1 << 28)
|
||||
#define GICH_LR_ACTIVE_BIT (1 << 29)
|
||||
#define GICH_LR_EOI (1 << 19)
|
||||
|
||||
#define GICH_VMCR_CTRL_SHIFT 0
|
||||
#define GICH_VMCR_CTRL_MASK (0x21f << GICH_VMCR_CTRL_SHIFT)
|
||||
#define GICH_VMCR_PRIMASK_SHIFT 27
|
||||
#define GICH_VMCR_PRIMASK_MASK (0x1f << GICH_VMCR_PRIMASK_SHIFT)
|
||||
#define GICH_VMCR_BINPOINT_SHIFT 21
|
||||
#define GICH_VMCR_BINPOINT_MASK (0x7 << GICH_VMCR_BINPOINT_SHIFT)
|
||||
#define GICH_VMCR_ALIAS_BINPOINT_SHIFT 18
|
||||
#define GICH_VMCR_ALIAS_BINPOINT_MASK (0x7 << GICH_VMCR_ALIAS_BINPOINT_SHIFT)
|
||||
|
||||
#define GICH_MISR_EOI (1 << 0)
|
||||
#define GICH_MISR_U (1 << 1)
|
||||
|
||||
#endif /* __LINUX_IRQCHIP_ARM_GIC_H */
|
||||
arch/arm64/kernel/include/arm-gic-v3.h (new file, 385 lines)
@@ -0,0 +1,385 @@
|
||||
/* arm-gic-v3.h COPYRIGHT FUJITSU LIMITED 2015-2017 */
|
||||
/*
|
||||
* Copyright (C) 2013, 2014 ARM Limited, All Rights Reserved.
|
||||
* Author: Marc Zyngier <marc.zyngier@arm.com>
|
||||
*
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
#ifndef __LINUX_IRQCHIP_ARM_GIC_V3_H
|
||||
#define __LINUX_IRQCHIP_ARM_GIC_V3_H
|
||||
|
||||
#include <stringify.h>
|
||||
/* @ref.impl include/linux/irqchip/arm-gic-v3.h */
|
||||
#include <sysreg.h>
|
||||
|
||||
/*
|
||||
* Distributor registers. We assume we're running non-secure, with ARE
|
||||
* being set. Secure-only and non-ARE registers are not described.
|
||||
*/
|
||||
#define GICD_CTLR 0x0000
|
||||
#define GICD_TYPER 0x0004
|
||||
#define GICD_IIDR 0x0008
|
||||
#define GICD_STATUSR 0x0010
|
||||
#define GICD_SETSPI_NSR 0x0040
|
||||
#define GICD_CLRSPI_NSR 0x0048
|
||||
#define GICD_SETSPI_SR 0x0050
|
||||
#define GICD_CLRSPI_SR 0x0058
|
||||
#define GICD_SEIR 0x0068
|
||||
#define GICD_IGROUPR 0x0080
|
||||
#define GICD_ISENABLER 0x0100
|
||||
#define GICD_ICENABLER 0x0180
|
||||
#define GICD_ISPENDR 0x0200
|
||||
#define GICD_ICPENDR 0x0280
|
||||
#define GICD_ISACTIVER 0x0300
|
||||
#define GICD_ICACTIVER 0x0380
|
||||
#define GICD_IPRIORITYR 0x0400
|
||||
#define GICD_ICFGR 0x0C00
|
||||
#define GICD_IGRPMODR 0x0D00
|
||||
#define GICD_NSACR 0x0E00
|
||||
#define GICD_IROUTER 0x6000
|
||||
#define GICD_IDREGS 0xFFD0
|
||||
#define GICD_PIDR2 0xFFE8
|
||||
|
||||
/*
|
||||
* Those registers are actually from GICv2, but the spec demands that they
|
||||
* are implemented as RES0 if ARE is 1 (which we do in KVM's emulated GICv3).
|
||||
*/
|
||||
#define GICD_ITARGETSR 0x0800
|
||||
#define GICD_SGIR 0x0F00
|
||||
#define GICD_CPENDSGIR 0x0F10
|
||||
#define GICD_SPENDSGIR 0x0F20
|
||||
|
||||
#define GICD_CTLR_RWP (1U << 31)
|
||||
#define GICD_CTLR_DS (1U << 6)
|
||||
#define GICD_CTLR_ARE_NS (1U << 4)
|
||||
#define GICD_CTLR_ENABLE_G1A (1U << 1)
|
||||
#define GICD_CTLR_ENABLE_G1 (1U << 0)
|
||||
|
||||
/*
|
||||
* In systems with a single security state (what we emulate in KVM)
|
||||
* the meaning of the interrupt group enable bits is slightly different
|
||||
*/
|
||||
#define GICD_CTLR_ENABLE_SS_G1 (1U << 1)
|
||||
#define GICD_CTLR_ENABLE_SS_G0 (1U << 0)
|
||||
|
||||
#define GICD_TYPER_LPIS (1U << 17)
|
||||
#define GICD_TYPER_MBIS (1U << 16)
|
||||
|
||||
#define GICD_TYPER_ID_BITS(typer) ((((typer) >> 19) & 0x1f) + 1)
|
||||
#define GICD_TYPER_IRQS(typer) ((((typer) & 0x1f) + 1) * 32)
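A worked example of the two TYPER helpers above, using an illustrative register value rather than one read from hardware:

/* illustrative value: ITLinesNumber (bits 4:0) = 5, IDbits (bits 23:19) = 0xf */
#define EXAMPLE_GICD_TYPER	0x00780005U

_Static_assert(GICD_TYPER_IRQS(EXAMPLE_GICD_TYPER) == 192,
	       "(5 + 1) * 32 = 192 interrupt IDs");
_Static_assert(GICD_TYPER_ID_BITS(EXAMPLE_GICD_TYPER) == 16,
	       "0xf + 1 = 16 interrupt ID bits");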
|
||||
#define GICD_TYPER_LPIS (1U << 17)
|
||||
|
||||
#define GICD_IROUTER_SPI_MODE_ONE (0U << 31)
|
||||
#define GICD_IROUTER_SPI_MODE_ANY (1U << 31)
|
||||
|
||||
#define GIC_PIDR2_ARCH_MASK 0xf0
|
||||
#define GIC_PIDR2_ARCH_GICv3 0x30
|
||||
#define GIC_PIDR2_ARCH_GICv4 0x40
|
||||
|
||||
#define GIC_V3_DIST_SIZE 0x10000
|
||||
|
||||
/*
|
||||
* Re-Distributor registers, offsets from RD_base
|
||||
*/
|
||||
#define GICR_CTLR GICD_CTLR
|
||||
#define GICR_IIDR 0x0004
|
||||
#define GICR_TYPER 0x0008
|
||||
#define GICR_STATUSR GICD_STATUSR
|
||||
#define GICR_WAKER 0x0014
|
||||
#define GICR_SETLPIR 0x0040
|
||||
#define GICR_CLRLPIR 0x0048
|
||||
#define GICR_SEIR GICD_SEIR
|
||||
#define GICR_PROPBASER 0x0070
|
||||
#define GICR_PENDBASER 0x0078
|
||||
#define GICR_INVLPIR 0x00A0
|
||||
#define GICR_INVALLR 0x00B0
|
||||
#define GICR_SYNCR 0x00C0
|
||||
#define GICR_MOVLPIR 0x0100
|
||||
#define GICR_MOVALLR 0x0110
|
||||
#define GICR_IDREGS GICD_IDREGS
|
||||
#define GICR_PIDR2 GICD_PIDR2
|
||||
|
||||
#define GICR_CTLR_ENABLE_LPIS (1UL << 0)
|
||||
|
||||
#define GICR_TYPER_CPU_NUMBER(r) (((r) >> 8) & 0xffff)
|
||||
|
||||
#define GICR_WAKER_ProcessorSleep (1U << 1)
|
||||
#define GICR_WAKER_ChildrenAsleep (1U << 2)
|
||||
|
||||
#define GICR_PROPBASER_NonShareable (0U << 10)
|
||||
#define GICR_PROPBASER_InnerShareable (1U << 10)
|
||||
#define GICR_PROPBASER_OuterShareable (2U << 10)
|
||||
#define GICR_PROPBASER_SHAREABILITY_MASK (3UL << 10)
|
||||
#define GICR_PROPBASER_nCnB (0U << 7)
|
||||
#define GICR_PROPBASER_nC (1U << 7)
|
||||
#define GICR_PROPBASER_RaWt (2U << 7)
|
||||
#define GICR_PROPBASER_RaWb (3U << 7)
|
||||
#define GICR_PROPBASER_WaWt (4U << 7)
|
||||
#define GICR_PROPBASER_WaWb (5U << 7)
|
||||
#define GICR_PROPBASER_RaWaWt (6U << 7)
|
||||
#define GICR_PROPBASER_RaWaWb (7U << 7)
|
||||
#define GICR_PROPBASER_CACHEABILITY_MASK (7U << 7)
|
||||
#define GICR_PROPBASER_IDBITS_MASK (0x1f)
|
||||
|
||||
#define GICR_PENDBASER_NonShareable (0U << 10)
|
||||
#define GICR_PENDBASER_InnerShareable (1U << 10)
|
||||
#define GICR_PENDBASER_OuterShareable (2U << 10)
|
||||
#define GICR_PENDBASER_SHAREABILITY_MASK (3UL << 10)
|
||||
#define GICR_PENDBASER_nCnB (0U << 7)
|
||||
#define GICR_PENDBASER_nC (1U << 7)
|
||||
#define GICR_PENDBASER_RaWt (2U << 7)
|
||||
#define GICR_PENDBASER_RaWb (3U << 7)
|
||||
#define GICR_PENDBASER_WaWt (4U << 7)
|
||||
#define GICR_PENDBASER_WaWb (5U << 7)
|
||||
#define GICR_PENDBASER_RaWaWt (6U << 7)
|
||||
#define GICR_PENDBASER_RaWaWb (7U << 7)
|
||||
#define GICR_PENDBASER_CACHEABILITY_MASK (7U << 7)
|
||||
|
||||
/*
|
||||
* Re-Distributor registers, offsets from SGI_base
|
||||
*/
|
||||
#define GICR_IGROUPR0 GICD_IGROUPR
|
||||
#define GICR_ISENABLER0 GICD_ISENABLER
|
||||
#define GICR_ICENABLER0 GICD_ICENABLER
|
||||
#define GICR_ISPENDR0 GICD_ISPENDR
|
||||
#define GICR_ICPENDR0 GICD_ICPENDR
|
||||
#define GICR_ISACTIVER0 GICD_ISACTIVER
|
||||
#define GICR_ICACTIVER0 GICD_ICACTIVER
|
||||
#define GICR_IPRIORITYR0 GICD_IPRIORITYR
|
||||
#define GICR_ICFGR0 GICD_ICFGR
|
||||
#define GICR_IGRPMODR0 GICD_IGRPMODR
|
||||
#define GICR_NSACR GICD_NSACR
|
||||
|
||||
#define GICR_TYPER_PLPIS (1U << 0)
|
||||
#define GICR_TYPER_VLPIS (1U << 1)
|
||||
#define GICR_TYPER_LAST (1U << 4)
|
||||
|
||||
#define GIC_V3_REDIST_SIZE 0x20000
|
||||
|
||||
#define LPI_PROP_GROUP1 (1 << 1)
|
||||
#define LPI_PROP_ENABLED (1 << 0)
|
||||
|
||||
/*
|
||||
* ITS registers, offsets from ITS_base
|
||||
*/
|
||||
#define GITS_CTLR 0x0000
|
||||
#define GITS_IIDR 0x0004
|
||||
#define GITS_TYPER 0x0008
|
||||
#define GITS_CBASER 0x0080
|
||||
#define GITS_CWRITER 0x0088
|
||||
#define GITS_CREADR 0x0090
|
||||
#define GITS_BASER 0x0100
|
||||
#define GITS_PIDR2 GICR_PIDR2
|
||||
|
||||
#define GITS_TRANSLATER 0x10040
|
||||
|
||||
#define GITS_CTLR_ENABLE (1U << 0)
|
||||
#define GITS_CTLR_QUIESCENT (1U << 31)
|
||||
|
||||
#define GITS_TYPER_DEVBITS_SHIFT 13
|
||||
#define GITS_TYPER_DEVBITS(r) ((((r) >> GITS_TYPER_DEVBITS_SHIFT) & 0x1f) + 1)
|
||||
#define GITS_TYPER_PTA (1UL << 19)
|
||||
|
||||
#define GITS_CBASER_VALID (1UL << 63)
|
||||
#define GITS_CBASER_nCnB (0UL << 59)
|
||||
#define GITS_CBASER_nC (1UL << 59)
|
||||
#define GITS_CBASER_RaWt (2UL << 59)
|
||||
#define GITS_CBASER_RaWb (3UL << 59)
|
||||
#define GITS_CBASER_WaWt (4UL << 59)
|
||||
#define GITS_CBASER_WaWb (5UL << 59)
|
||||
#define GITS_CBASER_RaWaWt (6UL << 59)
|
||||
#define GITS_CBASER_RaWaWb (7UL << 59)
|
||||
#define GITS_CBASER_CACHEABILITY_MASK (7UL << 59)
|
||||
#define GITS_CBASER_NonShareable (0UL << 10)
|
||||
#define GITS_CBASER_InnerShareable (1UL << 10)
|
||||
#define GITS_CBASER_OuterShareable (2UL << 10)
|
||||
#define GITS_CBASER_SHAREABILITY_MASK (3UL << 10)
|
||||
|
||||
#define GITS_BASER_NR_REGS 8
|
||||
|
||||
#define GITS_BASER_VALID (1UL << 63)
|
||||
#define GITS_BASER_nCnB (0UL << 59)
|
||||
#define GITS_BASER_nC (1UL << 59)
|
||||
#define GITS_BASER_RaWt (2UL << 59)
|
||||
#define GITS_BASER_RaWb (3UL << 59)
|
||||
#define GITS_BASER_WaWt (4UL << 59)
|
||||
#define GITS_BASER_WaWb (5UL << 59)
|
||||
#define GITS_BASER_RaWaWt (6UL << 59)
|
||||
#define GITS_BASER_RaWaWb (7UL << 59)
|
||||
#define GITS_BASER_CACHEABILITY_MASK (7UL << 59)
|
||||
#define GITS_BASER_TYPE_SHIFT (56)
|
||||
#define GITS_BASER_TYPE(r) (((r) >> GITS_BASER_TYPE_SHIFT) & 7)
|
||||
#define GITS_BASER_ENTRY_SIZE_SHIFT (48)
|
||||
#define GITS_BASER_ENTRY_SIZE(r) ((((r) >> GITS_BASER_ENTRY_SIZE_SHIFT) & 0xff) + 1)
|
||||
#define GITS_BASER_NonShareable (0UL << 10)
|
||||
#define GITS_BASER_InnerShareable (1UL << 10)
|
||||
#define GITS_BASER_OuterShareable (2UL << 10)
|
||||
#define GITS_BASER_SHAREABILITY_SHIFT (10)
|
||||
#define GITS_BASER_SHAREABILITY_MASK (3UL << GITS_BASER_SHAREABILITY_SHIFT)
|
||||
#define GITS_BASER_PAGE_SIZE_SHIFT (8)
|
||||
#define GITS_BASER_PAGE_SIZE_4K (0UL << GITS_BASER_PAGE_SIZE_SHIFT)
|
||||
#define GITS_BASER_PAGE_SIZE_16K (1UL << GITS_BASER_PAGE_SIZE_SHIFT)
|
||||
#define GITS_BASER_PAGE_SIZE_64K (2UL << GITS_BASER_PAGE_SIZE_SHIFT)
|
||||
#define GITS_BASER_PAGE_SIZE_MASK (3UL << GITS_BASER_PAGE_SIZE_SHIFT)
|
||||
#define GITS_BASER_PAGES_MAX 256
|
||||
|
||||
#define GITS_BASER_TYPE_NONE 0
|
||||
#define GITS_BASER_TYPE_DEVICE 1
|
||||
#define GITS_BASER_TYPE_VCPU 2
|
||||
#define GITS_BASER_TYPE_CPU 3
|
||||
#define GITS_BASER_TYPE_COLLECTION 4
|
||||
#define GITS_BASER_TYPE_RESERVED5 5
|
||||
#define GITS_BASER_TYPE_RESERVED6 6
|
||||
#define GITS_BASER_TYPE_RESERVED7 7
|
||||
|
||||
/*
|
||||
* ITS commands
|
||||
*/
|
||||
#define GITS_CMD_MAPD 0x08
|
||||
#define GITS_CMD_MAPC 0x09
|
||||
#define GITS_CMD_MAPVI 0x0a
|
||||
#define GITS_CMD_MOVI 0x01
|
||||
#define GITS_CMD_DISCARD 0x0f
|
||||
#define GITS_CMD_INV 0x0c
|
||||
#define GITS_CMD_MOVALL 0x0e
|
||||
#define GITS_CMD_INVALL 0x0d
|
||||
#define GITS_CMD_INT 0x03
|
||||
#define GITS_CMD_CLEAR 0x04
|
||||
#define GITS_CMD_SYNC 0x05
|
||||
|
||||
/*
|
||||
* CPU interface registers
|
||||
*/
|
||||
#define ICC_CTLR_EL1_EOImode_drop_dir (0U << 1)
|
||||
#define ICC_CTLR_EL1_EOImode_drop (1U << 1)
|
||||
#define ICC_SRE_EL1_SRE (1U << 0)
|
||||
|
||||
/*
|
||||
* Hypervisor interface registers (SRE only)
|
||||
*/
|
||||
#define ICH_LR_VIRTUAL_ID_MASK ((1UL << 32) - 1)
|
||||
|
||||
#define ICH_LR_EOI (1UL << 41)
|
||||
#define ICH_LR_GROUP (1UL << 60)
|
||||
#define ICH_LR_STATE (3UL << 62)
|
||||
#define ICH_LR_PENDING_BIT (1UL << 62)
|
||||
#define ICH_LR_ACTIVE_BIT (1UL << 63)
|
||||
|
||||
#define ICH_MISR_EOI (1 << 0)
|
||||
#define ICH_MISR_U (1 << 1)
|
||||
|
||||
#define ICH_HCR_EN (1 << 0)
|
||||
#define ICH_HCR_UIE (1 << 1)
|
||||
|
||||
#define ICH_VMCR_CTLR_SHIFT 0
|
||||
#define ICH_VMCR_CTLR_MASK (0x21f << ICH_VMCR_CTLR_SHIFT)
|
||||
#define ICH_VMCR_BPR1_SHIFT 18
|
||||
#define ICH_VMCR_BPR1_MASK (7 << ICH_VMCR_BPR1_SHIFT)
|
||||
#define ICH_VMCR_BPR0_SHIFT 21
|
||||
#define ICH_VMCR_BPR0_MASK (7 << ICH_VMCR_BPR0_SHIFT)
|
||||
#define ICH_VMCR_PMR_SHIFT 24
|
||||
#define ICH_VMCR_PMR_MASK (0xffUL << ICH_VMCR_PMR_SHIFT)
|
||||
|
||||
#define ICC_EOIR1_EL1 sys_reg(3, 0, 12, 12, 1)
|
||||
#define ICC_IAR1_EL1 sys_reg(3, 0, 12, 12, 0)
|
||||
#define ICC_SGI1R_EL1 sys_reg(3, 0, 12, 11, 5)
|
||||
#define ICC_PMR_EL1 sys_reg(3, 0, 4, 6, 0)
|
||||
#define ICC_CTLR_EL1 sys_reg(3, 0, 12, 12, 4)
|
||||
#define ICC_SRE_EL1 sys_reg(3, 0, 12, 12, 5)
|
||||
#define ICC_GRPEN1_EL1 sys_reg(3, 0, 12, 12, 7)
|
||||
#define ICC_BPR1_EL1 sys_reg(3, 0, 12, 12, 3)
|
||||
|
||||
#define ICC_IAR1_EL1_SPURIOUS 0x3ff
|
||||
|
||||
#define ICC_SRE_EL2 sys_reg(3, 4, 12, 9, 5)
|
||||
|
||||
#define ICC_SRE_EL2_SRE (1 << 0)
|
||||
#define ICC_SRE_EL2_ENABLE (1 << 3)
|
||||
|
||||
#define ICC_SGI1R_TARGET_LIST_SHIFT 0
|
||||
#define ICC_SGI1R_TARGET_LIST_MASK (0xffff << ICC_SGI1R_TARGET_LIST_SHIFT)
|
||||
#define ICC_SGI1R_AFFINITY_1_SHIFT 16
|
||||
#define ICC_SGI1R_AFFINITY_1_MASK (0xff << ICC_SGI1R_AFFINITY_1_SHIFT)
|
||||
#define ICC_SGI1R_SGI_ID_SHIFT 24
|
||||
#define ICC_SGI1R_SGI_ID_MASK (0xff << ICC_SGI1R_SGI_ID_SHIFT)
|
||||
#define ICC_SGI1R_AFFINITY_2_SHIFT 32
|
||||
#define ICC_SGI1R_AFFINITY_2_MASK (0xffULL << ICC_SGI1R_AFFINITY_1_SHIFT)
|
||||
#define ICC_SGI1R_IRQ_ROUTING_MODE_BIT 40
|
||||
#define ICC_SGI1R_AFFINITY_3_SHIFT 48
|
||||
#define ICC_SGI1R_AFFINITY_3_MASK (0xffULL << ICC_SGI1R_AFFINITY_1_SHIFT)
|
||||
|
||||
#ifdef CONFIG_HAS_NMI
|
||||
/* PMR values used to mask/unmask interrupts */
|
||||
#define ICC_PMR_EL1_G_SHIFT 6
|
||||
#define ICC_PMR_EL1_G_BIT (1 << ICC_PMR_EL1_G_SHIFT)
|
||||
#define ICC_PMR_EL1_UNMASKED 0xf0
|
||||
#define ICC_PMR_EL1_MASKED (ICC_PMR_EL1_UNMASKED ^ ICC_PMR_EL1_G_BIT)
|
||||
|
||||
/*
|
||||
* This is the GIC interrupt mask bit. It is not actually part of the
|
||||
* PSR and so does not appear in the user API, we are simply using some
|
||||
* reserved bits in the PSR to store some state from the interrupt
|
||||
* controller. The context save/restore functions will extract the
|
||||
* ICC_PMR_EL1_G_BIT and save it as the PSR_G_BIT.
|
||||
*/
|
||||
#define PSR_G_BIT 0x00400000
|
||||
#define PSR_G_SHIFT 22
|
||||
#define PSR_G_PMR_G_SHIFT (PSR_G_SHIFT - ICC_PMR_EL1_G_SHIFT)
|
||||
#define PSR_I_PMR_G_SHIFT (7 - ICC_PMR_EL1_G_SHIFT)
|
||||
#endif /* CONFIG_HAS_NMI */
|
||||
|
||||
/*
|
||||
* System register definitions
|
||||
*/
|
||||
#define ICH_VSEIR_EL2 sys_reg(3, 4, 12, 9, 4)
|
||||
#define ICH_HCR_EL2 sys_reg(3, 4, 12, 11, 0)
|
||||
#define ICH_VTR_EL2 sys_reg(3, 4, 12, 11, 1)
|
||||
#define ICH_MISR_EL2 sys_reg(3, 4, 12, 11, 2)
|
||||
#define ICH_EISR_EL2 sys_reg(3, 4, 12, 11, 3)
|
||||
#define ICH_ELSR_EL2 sys_reg(3, 4, 12, 11, 5)
|
||||
#define ICH_VMCR_EL2 sys_reg(3, 4, 12, 11, 7)
|
||||
|
||||
#define __LR0_EL2(x) sys_reg(3, 4, 12, 12, x)
|
||||
#define __LR8_EL2(x) sys_reg(3, 4, 12, 13, x)
|
||||
|
||||
#define ICH_LR0_EL2 __LR0_EL2(0)
|
||||
#define ICH_LR1_EL2 __LR0_EL2(1)
|
||||
#define ICH_LR2_EL2 __LR0_EL2(2)
|
||||
#define ICH_LR3_EL2 __LR0_EL2(3)
|
||||
#define ICH_LR4_EL2 __LR0_EL2(4)
|
||||
#define ICH_LR5_EL2 __LR0_EL2(5)
|
||||
#define ICH_LR6_EL2 __LR0_EL2(6)
|
||||
#define ICH_LR7_EL2 __LR0_EL2(7)
|
||||
#define ICH_LR8_EL2 __LR8_EL2(0)
|
||||
#define ICH_LR9_EL2 __LR8_EL2(1)
|
||||
#define ICH_LR10_EL2 __LR8_EL2(2)
|
||||
#define ICH_LR11_EL2 __LR8_EL2(3)
|
||||
#define ICH_LR12_EL2 __LR8_EL2(4)
|
||||
#define ICH_LR13_EL2 __LR8_EL2(5)
|
||||
#define ICH_LR14_EL2 __LR8_EL2(6)
|
||||
#define ICH_LR15_EL2 __LR8_EL2(7)
|
||||
|
||||
#define __AP0Rx_EL2(x) sys_reg(3, 4, 12, 8, x)
|
||||
#define ICH_AP0R0_EL2 __AP0Rx_EL2(0)
|
||||
#define ICH_AP0R1_EL2 __AP0Rx_EL2(1)
|
||||
#define ICH_AP0R2_EL2 __AP0Rx_EL2(2)
|
||||
#define ICH_AP0R3_EL2 __AP0Rx_EL2(3)
|
||||
|
||||
#define __AP1Rx_EL2(x) sys_reg(3, 4, 12, 9, x)
|
||||
#define ICH_AP1R0_EL2 __AP1Rx_EL2(0)
|
||||
#define ICH_AP1R1_EL2 __AP1Rx_EL2(1)
|
||||
#define ICH_AP1R2_EL2 __AP1Rx_EL2(2)
|
||||
#define ICH_AP1R3_EL2 __AP1Rx_EL2(3)
|
||||
|
||||
#endif /* __LINUX_IRQCHIP_ARM_GIC_V3_H */
|
||||
arch/arm64/kernel/include/asm-offsets.h (new file, 28 lines)
@@ -0,0 +1,28 @@
|
||||
/* asm-offsets.h COPYRIGHT FUJITSU LIMITED 2015-2016 */
|
||||
#ifndef __HEADER_ARM64_COMMON_ASM_OFFSETS_H
|
||||
#define __HEADER_ARM64_COMMON_ASM_OFFSETS_H
|
||||
|
||||
#define S_X0 0x00 /* offsetof(struct pt_regs, regs[0]) */
|
||||
#define S_X1 0x08 /* offsetof(struct pt_regs, regs[1]) */
|
||||
#define S_X2 0x10 /* offsetof(struct pt_regs, regs[2]) */
|
||||
#define S_X3 0x18 /* offsetof(struct pt_regs, regs[3]) */
|
||||
#define S_X4 0x20 /* offsetof(struct pt_regs, regs[4]) */
|
||||
#define S_X5 0x28 /* offsetof(struct pt_regs, regs[5]) */
|
||||
#define S_X6 0x30 /* offsetof(struct pt_regs, regs[6]) */
|
||||
#define S_X7 0x38 /* offsetof(struct pt_regs, regs[7]) */
|
||||
#define S_LR 0xf0 /* offsetof(struct pt_regs, regs[30]) */
|
||||
#define S_SP 0xf8 /* offsetof(struct pt_regs, sp) */
|
||||
#define S_PC 0x100 /* offsetof(struct pt_regs, pc) */
|
||||
#define S_PSTATE 0x108 /* offsetof(struct pt_regs, pstate) */
|
||||
#define S_ORIG_X0 0x110 /* offsetof(struct pt_regs, orig_x0) */
|
||||
#define S_ORIG_PC 0x118 /* offsetof(struct pt_regs, orig_pc) */
|
||||
#define S_SYSCALLNO 0x120 /* offsetof(struct pt_regs, syscallno) */
|
||||
#define S_FRAME_SIZE 0x130 /* sizeof(struct pt_regs) must be 16 byte align */
|
||||
|
||||
#define CPU_INFO_SETUP 0x10 /* offsetof(struct cpu_info, cpu_setup) */
|
||||
#define CPU_INFO_SZ 0x18 /* sizeof(struct cpu_info) */
|
||||
|
||||
#define TI_FLAGS 0x00 /* offsetof(struct thread_info, flags) */
|
||||
#define TI_CPU_CONTEXT 0x10 /* offsetof(struct thread_info, cpu_context) */
|
||||
|
||||
#endif /* !__HEADER_ARM64_COMMON_ASM_OFFSETS_H */
|
||||
arch/arm64/kernel/include/asm-syscall.h (new file, 20 lines)
@@ -0,0 +1,20 @@
/* asm-syscall.h COPYRIGHT FUJITSU LIMITED 2018 */
#ifndef __HEADER_ARM64_ASM_SYSCALL_H
#define __HEADER_ARM64_ASM_SYSCALL_H

#ifdef __ASSEMBLY__

#define DECLARATOR(number, name)	.equ __NR_##name, number
#define SYSCALL_HANDLED(number, name)	DECLARATOR(number, name)
#define SYSCALL_DELEGATED(number, name)	DECLARATOR(number, name)

#include <config.h>
#include <syscall_list.h>

#undef DECLARATOR
#undef SYSCALL_HANDLED
#undef SYSCALL_DELEGATED

#endif /* __ASSEMBLY__ */

#endif /* !__HEADER_ARM64_ASM_SYSCALL_H */
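For reference, each entry of syscall_list.h expands through the macros above into an assembler symbol definition. A hypothetical expansion is shown below; the specific name and number are illustrative, not taken from syscall_list.h:

/* hypothetical entry in syscall_list.h */
SYSCALL_HANDLED(56, openat)

/* which, in an __ASSEMBLY__ build, preprocesses to: */
.equ __NR_openat, 56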
arch/arm64/kernel/include/assembler.h (new file, 149 lines)
@@ -0,0 +1,149 @@
|
||||
/* assembler.h COPYRIGHT FUJITSU LIMITED 2015-2017 */
|
||||
#ifndef __HEADER_ARM64_COMMON_ASSEMBLER_H
|
||||
#define __HEADER_ARM64_COMMON_ASSEMBLER_H
|
||||
|
||||
#include <thread_info.h>
|
||||
|
||||
#if defined(CONFIG_HAS_NMI)
|
||||
#include <arm-gic-v3.h>
|
||||
#else /* defined(CONFIG_HAS_NMI) */
|
||||
#include <sysreg.h>
|
||||
#endif /* defined(CONFIG_HAS_NMI) */
|
||||
|
||||
#if defined(CONFIG_HAS_NMI)
|
||||
/*
|
||||
* Enable and disable pseudo NMI.
|
||||
*/
|
||||
.macro disable_nmi
|
||||
msr daifset, #2
|
||||
.endm
|
||||
|
||||
.macro enable_nmi
|
||||
msr daifclr, #2
|
||||
.endm
|
||||
|
||||
/*
|
||||
* Enable and disable interrupts.
|
||||
*/
|
||||
.macro disable_irq, tmp
|
||||
mov \tmp, #ICC_PMR_EL1_MASKED
|
||||
msr_s ICC_PMR_EL1, \tmp
|
||||
.endm
|
||||
|
||||
.macro enable_irq, tmp
|
||||
mov \tmp, #ICC_PMR_EL1_UNMASKED
|
||||
msr_s ICC_PMR_EL1, \tmp
|
||||
.endm
|
||||
|
||||
#else /* defined(CONFIG_HAS_NMI) */
|
||||
/*
|
||||
* Enable and disable pseudo NMI.
|
||||
*/
|
||||
.macro disable_nmi
|
||||
.endm
|
||||
|
||||
.macro enable_nmi
|
||||
.endm
|
||||
|
||||
/*
|
||||
* Enable and disable interrupts.
|
||||
*/
|
||||
.macro disable_irq, tmp
|
||||
msr daifset, #2
|
||||
.endm
|
||||
|
||||
.macro enable_irq, tmp
|
||||
msr daifclr, #2
|
||||
.endm
|
||||
#endif /* defined(CONFIG_HAS_NMI) */
|
||||
|
||||
/*
|
||||
* Enable and disable debug exceptions.
|
||||
*/
|
||||
.macro disable_dbg
|
||||
msr daifset, #8
|
||||
.endm
|
||||
|
||||
.macro enable_dbg
|
||||
msr daifclr, #8
|
||||
.endm
|
||||
|
||||
.macro disable_step_tsk, flgs, tmp
|
||||
tbz \flgs, #TIF_SINGLESTEP, 9990f
|
||||
mrs \tmp, mdscr_el1
|
||||
bic \tmp, \tmp, #1
|
||||
msr mdscr_el1, \tmp
|
||||
isb // Synchronise with enable_dbg
|
||||
9990:
|
||||
.endm
|
||||
|
||||
.macro enable_step_tsk, flgs, tmp
|
||||
tbz \flgs, #TIF_SINGLESTEP, 9990f
|
||||
disable_dbg
|
||||
mrs \tmp, mdscr_el1
|
||||
orr \tmp, \tmp, #1
|
||||
msr mdscr_el1, \tmp
|
||||
b 9991f
|
||||
9990:
|
||||
mrs \tmp, mdscr_el1
|
||||
bic \tmp, \tmp, #1
|
||||
msr mdscr_el1, \tmp
|
||||
isb // Synchronise with enable_dbg
|
||||
9991:
|
||||
.endm
|
||||
|
||||
/*
|
||||
* Enable both debug exceptions and interrupts. This is likely to be
|
||||
* faster than two daifclr operations, since writes to this register
|
||||
* are self-synchronising.
|
||||
*/
|
||||
#if defined(CONFIG_HAS_NMI)
|
||||
.macro enable_dbg_and_irq, tmp
|
||||
enable_dbg
|
||||
enable_irq \tmp
|
||||
.endm
|
||||
#else /* defined(CONFIG_HAS_NMI) */
|
||||
.macro enable_dbg_and_irq, tmp
|
||||
msr daifclr, #(8 | 2)
|
||||
.endm
|
||||
#endif /* defined(CONFIG_HAS_NMI) */
|
||||
|
||||
/*
|
||||
* Register aliases.
|
||||
*/
|
||||
lr .req x30 // link register
|
||||
|
||||
/*
|
||||
* Vector entry
|
||||
*/
|
||||
.macro ventry label
|
||||
.align 7
|
||||
b \label
|
||||
.endm
|
||||
|
||||
/*
|
||||
* Select code when configured for BE.
|
||||
*/
|
||||
//#ifdef CONFIG_CPU_BIG_ENDIAN
|
||||
//#define CPU_BE(code...) code
|
||||
//#else
|
||||
#define CPU_BE(code...)
|
||||
//#endif
|
||||
|
||||
/*
|
||||
* Select code when configured for LE.
|
||||
*/
|
||||
//#ifdef CONFIG_CPU_BIG_ENDIAN
|
||||
//#define CPU_LE(code...)
|
||||
//#else
|
||||
#define CPU_LE(code...) code
|
||||
//#endif
|
||||
|
||||
#define ENDPIPROC(x) \
|
||||
.globl __pi_##x; \
|
||||
.type __pi_##x, %function; \
|
||||
.set __pi_##x, x; \
|
||||
.size __pi_##x, . - x; \
|
||||
ENDPROC(x)
|
||||
|
||||
#endif /* !__HEADER_ARM64_COMMON_ASSEMBLER_H */
|
||||
arch/arm64/kernel/include/cache.h (new file, 7 lines)
@@ -0,0 +1,7 @@
/* cache.h COPYRIGHT FUJITSU LIMITED 2015 */
#ifndef __HEADER_ARM64_COMMON_CACHE_H
#define __HEADER_ARM64_COMMON_CACHE_H

#define L1_CACHE_SHIFT	6

#endif /* !__HEADER_ARM64_COMMON_CACHE_H */
arch/arm64/kernel/include/cas.h (new file, 32 lines)
@@ -0,0 +1,32 @@
/* cas.h COPYRIGHT FUJITSU LIMITED 2015-2016 */
#ifndef __HEADER_ARM64_COMMON_CAS_H
#define __HEADER_ARM64_COMMON_CAS_H

#include <arch/cpu.h>

/* @ref.impl arch/arm64/include/asm/cmpxchg.h::__cmpxchg (size == 8 case) */
/* 8 byte compare and swap, return 0:fail, 1:success */
static inline int
compare_and_swap(void *addr, unsigned long olddata, unsigned long newdata)
{
	unsigned long oldval = 0, res = 0;

	smp_mb();
	do {
		asm volatile("// __cmpxchg8\n"
		"	ldxr	%1, %2\n"
		"	mov	%w0, #0\n"
		"	cmp	%1, %3\n"
		"	b.ne	1f\n"
		"	stxr	%w0, %4, %2\n"
		"1:\n"
			: "=&r" (res), "=&r" (oldval), "+Q" (*(unsigned long *)addr)
			: "Ir" (olddata), "r" (newdata)
			: "cc");
	} while (res);
	smp_mb();

	return (oldval == olddata);
}
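A hedged usage sketch (not part of this header): a lock-free add built on compare_and_swap(); example_counter is an illustrative variable.

static unsigned long example_counter;

static inline void example_counter_add(unsigned long delta)
{
	unsigned long old;

	do {
		old = example_counter;	/* snapshot the current value */
	} while (!compare_and_swap(&example_counter, old, old + delta));
}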

#endif /* !__HEADER_ARM64_COMMON_CAS_H */
arch/arm64/kernel/include/compiler.h (new file, 32 lines)
@@ -0,0 +1,32 @@
|
||||
/* compiler.h COPYRIGHT FUJITSU LIMITED 2015-2016 */
|
||||
#ifndef __ASM_COMPILER_H
|
||||
#define __ASM_COMPILER_H
|
||||
|
||||
/* @ref.impl arch/arm64/include/asm/compiler.h::__asmeq(x,y) */
|
||||
/*
|
||||
* This is used to ensure the compiler did actually allocate the register we
|
||||
* asked it for some inline assembly sequences. Apparently we can't trust the
|
||||
* compiler from one version to another so a bit of paranoia won't hurt. This
|
||||
* string is meant to be concatenated with the inline asm string and will
|
||||
* cause compilation to stop on mismatch. (for details, see gcc PR 15089)
|
||||
*/
|
||||
#define __asmeq(x, y) ".ifnc " x "," y " ; .err ; .endif\n\t"
|
||||
|
||||
/* @ref.impl include/linux/compiler.h::__section(S) */
|
||||
/* Simple shorthand for a section definition */
|
||||
# define __section(S) __attribute__ ((__section__(#S)))
|
||||
|
||||
/* @ref.impl include/linux/compiler.h::__aligned(x) */
|
||||
/*
|
||||
* From the GCC manual:
|
||||
*
|
||||
* Many functions have no effects except the return value and their
|
||||
* return value depends only on the parameters and/or global
|
||||
* variables. Such a function can be subject to common subexpression
|
||||
* elimination and loop optimization just as an arithmetic operator
|
||||
* would be.
|
||||
* [...]
|
||||
*/
|
||||
#define __aligned(x) __attribute__((aligned(x)))
|
||||
|
||||
#endif /* __ASM_COMPILER_H */
|
||||
arch/arm64/kernel/include/const.h (new file, 23 lines)
@@ -0,0 +1,23 @@
|
||||
/* const.h COPYRIGHT FUJITSU LIMITED 2015 */
|
||||
#ifndef __HEADER_ARM64_COMMON_CONST_H
|
||||
#define __HEADER_ARM64_COMMON_CONST_H
|
||||
|
||||
#ifndef __ASSEMBLY__
|
||||
#define __AC(X,Y) (X##Y)
|
||||
#define _AC(X,Y) __AC(X,Y)
|
||||
#define _AT(T,X) ((T)(X))
|
||||
#else /* !__ASSEMBLY__ */
|
||||
#define _AC(X,Y) X
|
||||
#define _AT(T,X) X
|
||||
#endif /* !__ASSEMBLY__ */
|
||||
|
||||
#define _BITUL(x) (_AC(1,UL) << (x))
|
||||
#define _BITULL(x) (_AC(1,ULL) << (x))
|
||||
|
||||
/*
|
||||
* Allow for constants defined here to be used from assembly code
|
||||
* by prepending the UL suffix only with actual C code compilation.
|
||||
*/
|
||||
#define UL(x) _AC(x, UL)
|
||||
|
||||
#endif /* !__HEADER_ARM64_COMMON_CONST_H */
|
||||
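A short illustration of the _AC()/UL() pattern defined in const.h above (EXAMPLE_VA_SIZE is a hypothetical constant): the same definition yields a suffixed literal when compiled as C and a bare literal when assembled.

#define EXAMPLE_VA_SIZE	(UL(1) << 39)
/* C build:      expands to ((1UL) << 39) */
/* __ASSEMBLY__: expands to (1 << 39)     */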
arch/arm64/kernel/include/context.h (new file, 8 lines)
@@ -0,0 +1,8 @@
/* context.h COPYRIGHT FUJITSU LIMITED 2015 */
#ifndef __HEADER_ARM64_COMMON_CONTEXT_H
#define __HEADER_ARM64_COMMON_CONTEXT_H

void switch_mm(struct page_table *pgtbl);
void free_mmu_context(struct page_table *pgtbl);

#endif /*__HEADER_ARM64_COMMON_CONTEXT_H*/
arch/arm64/kernel/include/cpufeature.h (new file, 182 lines)
@@ -0,0 +1,182 @@
|
||||
/* cpufeature.h COPYRIGHT FUJITSU LIMITED 2017 */
|
||||
|
||||
#ifndef __ASM_CPUFEATURE_H
|
||||
#define __ASM_CPUFEATURE_H
|
||||
|
||||
#include <types.h>
|
||||
#include <cpuinfo.h>
|
||||
#include <sysreg.h>
|
||||
|
||||
/* @ref.impl arch/arm64/include/asm/cpufeature.h */
|
||||
/* CPU feature register tracking */
|
||||
enum ftr_type {
|
||||
FTR_EXACT, /* Use a predefined safe value */
|
||||
FTR_LOWER_SAFE, /* Smaller value is safe */
|
||||
FTR_HIGHER_SAFE,/* Bigger value is safe */
|
||||
};
|
||||
|
||||
#define FTR_STRICT (1) /* SANITY check strict matching required */
|
||||
#define FTR_NONSTRICT (0) /* SANITY check ignored */
|
||||
|
||||
#define FTR_SIGNED (1) /* Value should be treated as signed */
|
||||
#define FTR_UNSIGNED (0) /* Value should be treated as unsigned */
|
||||
|
||||
#define FTR_VISIBLE (1) /* Feature visible to the user space */
|
||||
#define FTR_HIDDEN (0) /* Feature is hidden from the user */
|
||||
|
||||
/* @ref.impl arch/arm64/include/asm/cpufeature.h */
|
||||
struct arm64_ftr_bits {
|
||||
int sign; /* Value is signed ? */
|
||||
int visible;
|
||||
int strict; /* CPU Sanity check: strict matching required ? */
|
||||
enum ftr_type type;
|
||||
uint8_t shift;
|
||||
uint8_t width;
|
||||
int64_t safe_val; /* safe value for FTR_EXACT features */
|
||||
};
|
||||
|
||||
/* @ref.impl arch/arm64/include/asm/cpufeature.h */
|
||||
/*
|
||||
* @arm64_ftr_reg - Feature register
|
||||
* @strict_mask Bits which should match across all CPUs for sanity.
|
||||
* @sys_val Safe value across the CPUs (system view)
|
||||
*/
|
||||
struct arm64_ftr_reg {
|
||||
const char *name;
|
||||
uint64_t strict_mask;
|
||||
uint64_t user_mask;
|
||||
uint64_t sys_val;
|
||||
uint64_t user_val;
|
||||
const struct arm64_ftr_bits *ftr_bits;
|
||||
};
|
||||
|
||||
/* @ref.impl arch/arm64/include/asm/cpufeature.h */
|
||||
extern struct arm64_ftr_reg arm64_ftr_reg_ctrel0;
|
||||
|
||||
/* @ref.impl arch/arm64/include/asm/cpufeature.h */
|
||||
/* scope of capability check */
|
||||
enum {
|
||||
SCOPE_SYSTEM,
|
||||
SCOPE_LOCAL_CPU,
|
||||
};
|
||||
|
||||
/* @ref.impl arch/arm64/include/asm/cpufeature.h */
|
||||
struct arm64_cpu_capabilities {
|
||||
const char *desc;
|
||||
uint16_t capability;
|
||||
int def_scope;/* default scope */
|
||||
int (*matches)(const struct arm64_cpu_capabilities *caps, int scope);
|
||||
int (*enable)(void *);/* Called on all active CPUs */
|
||||
uint32_t sys_reg;
|
||||
uint8_t field_pos;
|
||||
uint8_t min_field_value;
|
||||
uint8_t hwcap_type;
|
||||
int sign;
|
||||
unsigned long hwcap;
|
||||
};
|
||||
|
||||
/* @ref.impl include/linux/bitops.h */
|
||||
/*
|
||||
* Create a contiguous bitmask starting at bit position @l and ending at
|
||||
* position @h. For example
|
||||
* GENMASK_ULL(39, 21) gives us the 64bit vector 0x000000ffffe00000.
|
||||
*/
|
||||
#define GENMASK(h, l) \
|
||||
(((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
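A quick standalone check of the GENMASK() arithmetic, matching the 0x000000ffffe00000 example in the comment above (assumes BITS_PER_LONG is 64, as on arm64):

#include <assert.h>
#include <stdio.h>

#define BITS_PER_LONG	64
#define GENMASK(h, l) \
	(((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h))))

int main(void)
{
	assert(GENMASK(39, 21) == 0x000000ffffe00000UL);	/* bits 39..21 set */
	printf("GENMASK(39, 21) = %#lx\n", GENMASK(39, 21));
	return 0;
}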
|
||||
|
||||
/* @ref.impl arch/arm64/include/asm/cpufeature.h */
|
||||
static inline uint64_t arm64_ftr_mask(const struct arm64_ftr_bits *ftrp)
|
||||
{
|
||||
return (uint64_t)GENMASK(ftrp->shift + ftrp->width - 1, ftrp->shift);
|
||||
}
|
||||
|
||||
/* @ref.impl arch/arm64/include/asm/cpufeature.h */
|
||||
static inline int
|
||||
cpuid_feature_extract_signed_field_width(uint64_t features, int field, int width)
|
||||
{
|
||||
return (int64_t)(features << (64 - width - field)) >> (64 - width);
|
||||
}
|
||||
|
||||
/* @ref.impl arch/arm64/include/asm/cpufeature.h */
|
||||
static inline int
|
||||
cpuid_feature_extract_signed_field(uint64_t features, int field)
|
||||
{
|
||||
return cpuid_feature_extract_signed_field_width(features, field, 4);
|
||||
}
|
||||
|
||||
/* @ref.impl arch/arm64/include/asm/cpufeature.h */
|
||||
static inline unsigned int
|
||||
cpuid_feature_extract_unsigned_field_width(uint64_t features, int field, int width)
|
||||
{
|
||||
return (uint64_t)(features << (64 - width - field)) >> (64 - width);
|
||||
}
|
||||
|
||||
/* @ref.impl arch/arm64/include/asm/cpufeature.h */
|
||||
static inline unsigned int
|
||||
cpuid_feature_extract_unsigned_field(uint64_t features, int field)
|
||||
{
|
||||
return cpuid_feature_extract_unsigned_field_width(features, field, 4);
|
||||
}
|
||||
|
||||
/* @ref.impl arch/arm64/include/asm/cpufeature.h */
|
||||
static inline uint64_t arm64_ftr_reg_user_value(const struct arm64_ftr_reg *reg)
|
||||
{
|
||||
return (reg->user_val | (reg->sys_val & reg->user_mask));
|
||||
}
|
||||
|
||||
/* @ref.impl arch/arm64/include/asm/cpufeature.h */
|
||||
static inline int
|
||||
cpuid_feature_extract_field_width(uint64_t features, int field, int width, int sign)
|
||||
{
|
||||
return (sign) ?
|
||||
cpuid_feature_extract_signed_field_width(features, field, width) :
|
||||
cpuid_feature_extract_unsigned_field_width(features, field, width);
|
||||
}
|
||||
|
||||
/* @ref.impl arch/arm64/include/asm/cpufeature.h */
|
||||
static inline int
|
||||
cpuid_feature_extract_field(uint64_t features, int field, int sign)
|
||||
{
|
||||
return cpuid_feature_extract_field_width(features, field, 4, sign);
|
||||
}
|
||||
|
||||
/* @ref.impl arch/arm64/include/asm/cpufeature.h */
|
||||
static inline int64_t arm64_ftr_value(const struct arm64_ftr_bits *ftrp, uint64_t val)
|
||||
{
|
||||
return (int64_t)cpuid_feature_extract_field_width(val, ftrp->shift, ftrp->width, ftrp->sign);
|
||||
}
|
||||
|
||||
/* @ref.impl arch/arm64/include/asm/cpufeature.h */
|
||||
static inline int id_aa64pfr0_32bit_el0(uint64_t pfr0)
|
||||
{
|
||||
uint32_t val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL0_SHIFT);
|
||||
|
||||
return val == ID_AA64PFR0_EL0_32BIT_64BIT;
|
||||
}
|
||||
|
||||
/* @ref.impl arch/arm64/include/asm/cpufeature.h */
|
||||
static inline int id_aa64pfr0_sve(uint64_t pfr0)
|
||||
{
|
||||
uint32_t val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_SVE_SHIFT);
|
||||
|
||||
return val > 0;
|
||||
}
|
||||
|
||||
void setup_cpu_features(void);
|
||||
void update_cpu_features(int cpu,
|
||||
struct cpuinfo_arm64 *info,
|
||||
struct cpuinfo_arm64 *boot);
|
||||
uint64_t read_system_reg(uint32_t id);
|
||||
void init_cpu_features(struct cpuinfo_arm64 *info);
|
||||
int enable_mrs_emulation(void);
|
||||
|
||||
/* @ref.impl arch/arm64/include/asm/hwcap.h */
|
||||
enum {
|
||||
CAP_HWCAP = 1,
|
||||
#ifdef CONFIG_COMPAT
|
||||
CAP_COMPAT_HWCAP,
|
||||
CAP_COMPAT_HWCAP2,
|
||||
#endif
|
||||
};
|
||||
|
||||
#endif /* __ASM_CPUFEATURE_H */
|
||||
arch/arm64/kernel/include/cpuinfo.h (new file, 34 lines)
@@ -0,0 +1,34 @@
|
||||
/* cpuinfo.h COPYRIGHT FUJITSU LIMITED 2016-2017 */
|
||||
#ifndef __HEADER_ARM64_COMMON_CPUINFO_H
|
||||
#define __HEADER_ARM64_COMMON_CPUINFO_H
|
||||
|
||||
#include <types.h>
|
||||
|
||||
/* @ref.impl arch/arm64/include/cpu.h */
|
||||
/*
|
||||
* Records attributes of an individual CPU.
|
||||
*/
|
||||
struct cpuinfo_arm64 {
|
||||
uint32_t reg_midr;
|
||||
unsigned int hwid; /* McKernel Original. */
|
||||
|
||||
uint32_t reg_ctr;
|
||||
uint32_t reg_cntfrq;
|
||||
uint32_t reg_dczid;
|
||||
uint32_t reg_revidr;
|
||||
|
||||
uint64_t reg_id_aa64dfr0;
|
||||
uint64_t reg_id_aa64dfr1;
|
||||
uint64_t reg_id_aa64isar0;
|
||||
uint64_t reg_id_aa64isar1;
|
||||
uint64_t reg_id_aa64mmfr0;
|
||||
uint64_t reg_id_aa64mmfr1;
|
||||
uint64_t reg_id_aa64mmfr2;
|
||||
uint64_t reg_id_aa64pfr0;
|
||||
uint64_t reg_id_aa64pfr1;
|
||||
uint64_t reg_id_aa64zfr0;
|
||||
|
||||
uint64_t reg_zcr;
|
||||
};
|
||||
|
||||
#endif /* !__HEADER_ARM64_COMMON_CPUINFO_H */
|
||||
arch/arm64/kernel/include/cpulocal.h (new file, 13 lines)
@@ -0,0 +1,13 @@
|
||||
/* cpulocal.h COPYRIGHT FUJITSU LIMITED 2015 */
|
||||
#ifndef __HEADER_ARM64_COMMON_CPULOCAL_H
|
||||
#define __HEADER_ARM64_COMMON_CPULOCAL_H
|
||||
|
||||
#include <types.h>
|
||||
#include <registers.h>
|
||||
#include <thread_info.h>
|
||||
|
||||
union arm64_cpu_local_variables *get_arm64_cpu_local_variable(int id);
|
||||
union arm64_cpu_local_variables *get_arm64_this_cpu_local(void);
|
||||
void *get_arm64_this_cpu_kstack(void);
|
||||
|
||||
#endif /* !__HEADER_ARM64_COMMON_CPULOCAL_H */
|
||||
arch/arm64/kernel/include/cputable.h (new file, 12 lines)
@@ -0,0 +1,12 @@
|
||||
/* cputable.h COPYRIGHT FUJITSU LIMITED 2015 */
|
||||
#ifndef __HEADER_ARM64_COMMON_CPUTABLE_H
|
||||
#define __HEADER_ARM64_COMMON_CPUTABLE_H
|
||||
|
||||
struct cpu_info {
|
||||
unsigned int cpu_id_val;
|
||||
unsigned int cpu_id_mask;
|
||||
const char *cpu_name;
|
||||
unsigned long (*cpu_setup)(void);
|
||||
};
|
||||
|
||||
#endif /* !__HEADER_ARM64_COMMON_CPUTABLE_H */
|
||||
arch/arm64/kernel/include/cputype.h (new file, 110 lines)
@@ -0,0 +1,110 @@
|
||||
/* cputype.h COPYRIGHT FUJITSU LIMITED 2015-2017 */
|
||||
/* @ref.impl arch/arm64/include/asm/cputype.h */
|
||||
#ifndef __HEADER_ARM64_COMMON_CPUTYPE_H
|
||||
#define __HEADER_ARM64_COMMON_CPUTYPE_H
|
||||
|
||||
#include <sysreg.h>
|
||||
|
||||
#define MPIDR_LEVEL_BITS_SHIFT 3
|
||||
#define MPIDR_LEVEL_BITS (1 << MPIDR_LEVEL_BITS_SHIFT)
|
||||
#define MPIDR_LEVEL_MASK ((1 << MPIDR_LEVEL_BITS) - 1)
|
||||
|
||||
#define MPIDR_LEVEL_SHIFT(level) \
|
||||
(((1 << level) >> 1) << MPIDR_LEVEL_BITS_SHIFT)
|
||||
|
||||
#define MPIDR_AFFINITY_LEVEL(mpidr, level) \
|
||||
((mpidr >> MPIDR_LEVEL_SHIFT(level)) & MPIDR_LEVEL_MASK)
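A hedged worked example of the MPIDR helpers above, using an illustrative MPIDR value rather than one read from hardware:

#include <stdio.h>
#include <stdint.h>

#define MPIDR_LEVEL_BITS_SHIFT	3
#define MPIDR_LEVEL_BITS	(1 << MPIDR_LEVEL_BITS_SHIFT)
#define MPIDR_LEVEL_MASK	((1 << MPIDR_LEVEL_BITS) - 1)
#define MPIDR_LEVEL_SHIFT(level) \
	(((1 << level) >> 1) << MPIDR_LEVEL_BITS_SHIFT)
#define MPIDR_AFFINITY_LEVEL(mpidr, level) \
	((mpidr >> MPIDR_LEVEL_SHIFT(level)) & MPIDR_LEVEL_MASK)

int main(void)
{
	uint64_t mpidr = 0x0000000081000203ULL;	/* illustrative value */
	int level;

	for (level = 0; level < 4; level++)
		printf("Aff%d = %#llx\n", level,
		       (unsigned long long)MPIDR_AFFINITY_LEVEL(mpidr, level));
	/* prints Aff0 = 0x3, Aff1 = 0x2, Aff2 = 0, Aff3 = 0 */
	return 0;
}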
|
||||
|
||||
#define read_cpuid(reg) read_sysreg_s(SYS_ ## reg)
|
||||
|
||||
#define MIDR_REVISION_MASK 0xf
|
||||
#define MIDR_REVISION(midr) ((midr) & MIDR_REVISION_MASK)
|
||||
|
||||
#define MIDR_PARTNUM_SHIFT 4
|
||||
#define MIDR_PARTNUM_MASK (0xfff << MIDR_PARTNUM_SHIFT)
|
||||
#define MIDR_PARTNUM(midr) \
|
||||
(((midr) & MIDR_PARTNUM_MASK) >> MIDR_PARTNUM_SHIFT)
|
||||
|
||||
#define MIDR_ARCHITECTURE_SHIFT 16
|
||||
#define MIDR_ARCHITECTURE_MASK (0xf << MIDR_ARCHITECTURE_SHIFT)
|
||||
#define MIDR_ARCHITECTURE(midr) \
|
||||
(((midr) & MIDR_ARCHITECTURE_MASK) >> MIDR_ARCHITECTURE_SHIFT)
|
||||
|
||||
#define MIDR_VARIANT_SHIFT 20
|
||||
#define MIDR_VARIANT_MASK (0xf << MIDR_VARIANT_SHIFT)
|
||||
#define MIDR_VARIANT(midr) \
|
||||
(((midr) & MIDR_VARIANT_MASK) >> MIDR_VARIANT_SHIFT)
|
||||
|
||||
#define MIDR_IMPLEMENTOR_SHIFT 24
|
||||
#define MIDR_IMPLEMENTOR_MASK (0xffU << MIDR_IMPLEMENTOR_SHIFT)
|
||||
#define MIDR_IMPLEMENTOR(midr) \
|
||||
(((midr) & MIDR_IMPLEMENTOR_MASK) >> MIDR_IMPLEMENTOR_SHIFT)
|
||||
|
||||
#define MIDR_CPU_MODEL(imp, partnum) \
|
||||
(((imp) << MIDR_IMPLEMENTOR_SHIFT) | \
|
||||
(0xf << MIDR_ARCHITECTURE_SHIFT) | \
|
||||
((partnum) << MIDR_PARTNUM_SHIFT))
|
||||
|
||||
#define MIDR_CPU_VAR_REV(var, rev) \
|
||||
(((var) << MIDR_VARIANT_SHIFT) | (rev))
|
||||
|
||||
#define MIDR_CPU_MODEL_MASK (MIDR_IMPLEMENTOR_MASK | MIDR_PARTNUM_MASK | \
|
||||
MIDR_ARCHITECTURE_MASK)
|
||||
|
||||
#define MIDR_IS_CPU_MODEL_RANGE(midr, model, rv_min, rv_max) \
|
||||
({ \
|
||||
u32 _model = (midr) & MIDR_CPU_MODEL_MASK; \
|
||||
u32 rv = (midr) & (MIDR_REVISION_MASK | MIDR_VARIANT_MASK); \
|
||||
\
|
||||
_model == (model) && rv >= (rv_min) && rv <= (rv_max); \
|
||||
})
|
||||
|
||||
#define ARM_CPU_IMP_ARM 0x41
|
||||
#define ARM_CPU_IMP_APM 0x50
|
||||
#define ARM_CPU_IMP_CAVIUM 0x43
|
||||
#define ARM_CPU_IMP_BRCM 0x42
|
||||
#define ARM_CPU_IMP_QCOM 0x51
|
||||
|
||||
#define ARM_CPU_PART_AEM_V8 0xD0F
|
||||
#define ARM_CPU_PART_FOUNDATION 0xD00
|
||||
#define ARM_CPU_PART_CORTEX_A57 0xD07
|
||||
#define ARM_CPU_PART_CORTEX_A72 0xD08
|
||||
#define ARM_CPU_PART_CORTEX_A53 0xD03
|
||||
#define ARM_CPU_PART_CORTEX_A73 0xD09
|
||||
#define ARM_CPU_PART_CORTEX_A75 0xD0A
|
||||
|
||||
#define APM_CPU_PART_POTENZA 0x000
|
||||
|
||||
#define CAVIUM_CPU_PART_THUNDERX 0x0A1
|
||||
#define CAVIUM_CPU_PART_THUNDERX_81XX 0x0A2
|
||||
#define CAVIUM_CPU_PART_THUNDERX_83XX 0x0A3
|
||||
#define CAVIUM_CPU_PART_THUNDERX2 0x0AF
|
||||
|
||||
#define BRCM_CPU_PART_VULCAN 0x516
|
||||
|
||||
#define QCOM_CPU_PART_FALKOR_V1 0x800
|
||||
#define QCOM_CPU_PART_FALKOR 0xC00
|
||||
|
||||
#define MIDR_CORTEX_A53 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53)
|
||||
#define MIDR_CORTEX_A57 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57)
|
||||
#define MIDR_CORTEX_A72 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A72)
|
||||
#define MIDR_CORTEX_A73 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A73)
|
||||
#define MIDR_CORTEX_A75 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A75)
|
||||
#define MIDR_THUNDERX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX)
|
||||
#define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX)
|
||||
#define MIDR_THUNDERX_83XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_83XX)
|
||||
#define MIDR_CAVIUM_THUNDERX2 MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX2)
|
||||
#define MIDR_BRCM_VULCAN MIDR_CPU_MODEL(ARM_CPU_IMP_BRCM, BRCM_CPU_PART_VULCAN)
|
||||
#define MIDR_QCOM_FALKOR_V1 MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_FALKOR_V1)
|
||||
#define MIDR_QCOM_FALKOR MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_FALKOR)
|
||||
|
||||
#ifndef __ASSEMBLY__
|
||||
|
||||
static unsigned int read_cpuid_id(void)
|
||||
{
|
||||
return read_cpuid(MIDR_EL1);
|
||||
}
|
||||
|
||||
#endif /* !__ASSEMBLY__ */
|
||||
|
||||
#endif /* !__HEADER_ARM64_COMMON_CPUTYPE_H */
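As a minimal sketch of how the MIDR accessors above compose, the snippet below decodes the running CPU's MIDR_EL1 and tests a model/revision range. It assumes a kernel kprintf() is available; neither the function nor the output format is part of this patch.

/* Sketch: decode MIDR_EL1 using the helpers above (assumes kprintf exists). */
static void report_cpu_model(void)
{
	unsigned int midr = read_cpuid_id();

	kprintf("implementor=0x%x part=0x%x var=%u rev=%u\n",
		MIDR_IMPLEMENTOR(midr), MIDR_PARTNUM(midr),
		MIDR_VARIANT(midr), MIDR_REVISION(midr));

	/* True for Cortex-A57 r0p0 through r1p2, for example. */
	if (MIDR_IS_CPU_MODEL_RANGE(midr, MIDR_CORTEX_A57,
				    MIDR_CPU_VAR_REV(0, 0),
				    MIDR_CPU_VAR_REV(1, 2))) {
		kprintf("Cortex-A57 r0p0-r1p2 detected\n");
	}
}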
35  arch/arm64/kernel/include/debug-monitors.h  Normal file
@@ -0,0 +1,35 @@
/* debug-monitors.h COPYRIGHT FUJITSU LIMITED 2016-2017 */
#ifndef __HEADER_ARM64_COMMON_DEBUG_MONITORS_H
#define __HEADER_ARM64_COMMON_DEBUG_MONITORS_H

/* Low-level stepping controls. */
#define DBG_MDSCR_SS		(1 << 0)
#define DBG_SPSR_SS		(1 << 21)

/* MDSCR_EL1 enabling bits */
#define DBG_MDSCR_KDE		(1 << 13)
#define DBG_MDSCR_MDE		(1 << 15)
#define DBG_MDSCR_MASK		~(DBG_MDSCR_KDE | DBG_MDSCR_MDE)

#define DBG_ESR_EVT(x)		(((x) >> 27) & 0x7)

/* AArch64 */
#define DBG_ESR_EVT_HWBP	0x0
#define DBG_ESR_EVT_HWSS	0x1
#define DBG_ESR_EVT_HWWP	0x2
#define DBG_ESR_EVT_BRK		0x6

#ifndef __ASSEMBLY__

unsigned char debug_monitors_arch(void);
void mdscr_write(unsigned int mdscr);
unsigned int mdscr_read(void);
void debug_monitors_init(void);

struct pt_regs;
void set_regs_spsr_ss(struct pt_regs *regs);
void clear_regs_spsr_ss(struct pt_regs *regs);

#endif /* !__ASSEMBLY__ */

#endif /* !__HEADER_ARM64_COMMON_DEBUG_MONITORS_H */
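The MDSCR_EL1 helpers declared above are typically used to toggle monitor debug mode around hardware-breakpoint updates. A minimal sketch, assuming only the functions and bits in this header:

/* Sketch: enable monitor debug events, preserving the other MDSCR_EL1 bits. */
static void enable_monitor_debug(void)
{
	unsigned int mdscr = mdscr_read();

	mdscr_write(mdscr | DBG_MDSCR_MDE);	/* breakpoint/watchpoint exceptions */
}

/* Single-stepping a thread combines DBG_SPSR_SS in the saved pstate
 * (set_regs_spsr_ss()) with DBG_MDSCR_SS in MDSCR_EL1. */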
28  arch/arm64/kernel/include/elf.h  Normal file
@@ -0,0 +1,28 @@
/* elf.h COPYRIGHT FUJITSU LIMITED 2015-2017 */
#ifndef __HEADER_ARM64_COMMON_ELF_H
#define __HEADER_ARM64_COMMON_ELF_H

#include <ihk/context.h>

/* ELF target machines defined */
#define EM_AARCH64	183

/* ELF header defined */
#define ELF_CLASS	ELFCLASS64
#define ELF_DATA	ELFDATA2LSB
#define ELF_OSABI	ELFOSABI_NONE
#define ELF_ABIVERSION	El_ABIVERSION_NONE
#define ELF_ARCH	EM_AARCH64

#define ELF_NGREG64 (sizeof (struct user_pt_regs) / sizeof(elf_greg64_t))

/* PTRACE_GETREGSET and PTRACE_SETREGSET requests. */
#define NT_ARM_TLS		0x401	/* ARM TLS register */
#define NT_ARM_HW_BREAK		0x402	/* ARM hardware breakpoint registers */
#define NT_ARM_HW_WATCH		0x403	/* ARM hardware watchpoint registers */
#define NT_ARM_SYSTEM_CALL	0x404	/* ARM system call number */
#define NT_ARM_SVE		0x405	/* ARM Scalable Vector Extension registers */

typedef elf_greg64_t elf_gregset64_t[ELF_NGREG64];

#endif /* __HEADER_ARM64_COMMON_ELF_H */
60  arch/arm64/kernel/include/elfnote.h  Normal file
@@ -0,0 +1,60 @@
|
||||
/* elfnote.h COPYRIGHT FUJITSU LIMITED 2016 */
|
||||
/* @ref.impl include/linux/elfnote.h */
|
||||
/*
|
||||
* Helper macros to generate ELF Note structures, which are put into a
|
||||
* PT_NOTE segment of the final vmlinux image. These are useful for
|
||||
* including name-value pairs of metadata into the kernel binary (or
|
||||
* modules?) for use by external programs.
|
||||
*
|
||||
* Each note has three parts: a name, a type and a desc. The name is
|
||||
* intended to distinguish the note's originator, so it would be a
|
||||
* company, project, subsystem, etc; it must be in a suitable form for
|
||||
* use in a section name. The type is an integer which is used to tag
|
||||
* the data, and is considered to be within the "name" namespace (so
|
||||
* "FooCo"'s type 42 is distinct from "BarProj"'s type 42). The
|
||||
* "desc" field is the actual data. There are no constraints on the
|
||||
* desc field's contents, though typically they're fairly small.
|
||||
*
|
||||
* All notes from a given NAME are put into a section named
|
||||
* .note.NAME. When the kernel image is finally linked, all the notes
|
||||
* are packed into a single .notes section, which is mapped into the
|
||||
* PT_NOTE segment. Because notes for a given name are grouped into
|
||||
* the same section, they'll all be adjacent in the output file.
|
||||
*
|
||||
* This file defines macros for both C and assembler use. Their
|
||||
* syntax is slightly different, but they're semantically similar.
|
||||
*
|
||||
* See the ELF specification for more detail about ELF notes.
|
||||
*/
|
||||
#ifndef __HEADER_ARM64_COMMON_ELFNOTE_H
|
||||
#define __HEADER_ARM64_COMMON_ELFNOTE_H
|
||||
|
||||
#ifdef __ASSEMBLER__
|
||||
|
||||
/*
|
||||
* Generate a structure with the same shape as Elf{32,64}_Nhdr (which
|
||||
* turn out to be the same size and shape), followed by the name and
|
||||
* desc data with appropriate padding. The 'desctype' argument is the
|
||||
* assembler pseudo op defining the type of the data e.g. .asciz while
|
||||
* 'descdata' is the data itself e.g. "hello, world".
|
||||
*
|
||||
* e.g. ELFNOTE(XYZCo, 42, .asciz, "forty-two")
|
||||
* ELFNOTE(XYZCo, 12, .long, 0xdeadbeef)
|
||||
*/
|
||||
#define ELFNOTE_START(name, type, flags) \
|
||||
.pushsection .note.name, flags,@note ; \
|
||||
.balign 4 ; \
|
||||
.long 2f - 1f /* namesz */ ; \
|
||||
.long 4484f - 3f /* descsz */ ; \
|
||||
.long type ; \
|
||||
1:.asciz #name ; \
|
||||
2:.balign 4 ; \
|
||||
3:
|
||||
|
||||
#define ELFNOTE_END \
|
||||
4484:.balign 4 ; \
|
||||
.popsection ;
|
||||
|
||||
#endif /* __ASSEMBLER__ */
|
||||
|
||||
#endif /* !__HEADER_ARM64_COMMON_ELFNOTE_H */
|
||||
112  arch/arm64/kernel/include/errno.h  Normal file
@@ -0,0 +1,112 @@
|
||||
/* errno.h COPYRIGHT FUJITSU LIMITED 2016 */
|
||||
#ifndef __HEADER_ARM64_COMMON_ERRNO_H
|
||||
#define __HEADER_ARM64_COMMON_ERRNO_H
|
||||
|
||||
#include <generic-errno.h>
|
||||
|
||||
#define EDEADLK 35 /* Resource deadlock would occur */
|
||||
#define ENAMETOOLONG 36 /* File name too long */
|
||||
#define ENOLCK 37 /* No record locks available */
|
||||
#define ENOSYS 38 /* Function not implemented */
|
||||
#define ENOTEMPTY 39 /* Directory not empty */
|
||||
#define ELOOP 40 /* Too many symbolic links encountered */
|
||||
#define EWOULDBLOCK EAGAIN /* Operation would block */
|
||||
#define ENOMSG 42 /* No message of desired type */
|
||||
#define EIDRM 43 /* Identifier removed */
|
||||
#define ECHRNG 44 /* Channel number out of range */
|
||||
#define EL2NSYNC 45 /* Level 2 not synchronized */
|
||||
#define EL3HLT 46 /* Level 3 halted */
|
||||
#define EL3RST 47 /* Level 3 reset */
|
||||
#define ELNRNG 48 /* Link number out of range */
|
||||
#define EUNATCH 49 /* Protocol driver not attached */
|
||||
#define ENOCSI 50 /* No CSI structure available */
|
||||
#define EL2HLT 51 /* Level 2 halted */
|
||||
#define EBADE 52 /* Invalid exchange */
|
||||
#define EBADR 53 /* Invalid request descriptor */
|
||||
#define EXFULL 54 /* Exchange full */
|
||||
#define ENOANO 55 /* No anode */
|
||||
#define EBADRQC 56 /* Invalid request code */
|
||||
#define EBADSLT 57 /* Invalid slot */
|
||||
|
||||
#define EDEADLOCK EDEADLK
|
||||
|
||||
#define EBFONT 59 /* Bad font file format */
|
||||
#define ENOSTR 60 /* Device not a stream */
|
||||
#define ENODATA 61 /* No data available */
|
||||
#define ETIME 62 /* Timer expired */
|
||||
#define ENOSR 63 /* Out of streams resources */
|
||||
#define ENONET 64 /* Machine is not on the network */
|
||||
#define ENOPKG 65 /* Package not installed */
|
||||
#define EREMOTE 66 /* Object is remote */
|
||||
#define ENOLINK 67 /* Link has been severed */
|
||||
#define EADV 68 /* Advertise error */
|
||||
#define ESRMNT 69 /* Srmount error */
|
||||
#define ECOMM 70 /* Communication error on send */
|
||||
#define EPROTO 71 /* Protocol error */
|
||||
#define EMULTIHOP 72 /* Multihop attempted */
|
||||
#define EDOTDOT 73 /* RFS specific error */
|
||||
#define EBADMSG 74 /* Not a data message */
|
||||
#define EOVERFLOW 75 /* Value too large for defined data type */
|
||||
#define ENOTUNIQ 76 /* Name not unique on network */
|
||||
#define EBADFD 77 /* File descriptor in bad state */
|
||||
#define EREMCHG 78 /* Remote address changed */
|
||||
#define ELIBACC 79 /* Can not access a needed shared library */
|
||||
#define ELIBBAD 80 /* Accessing a corrupted shared library */
|
||||
#define ELIBSCN 81 /* .lib section in a.out corrupted */
|
||||
#define ELIBMAX 82 /* Attempting to link in too many shared libraries */
|
||||
#define ELIBEXEC 83 /* Cannot exec a shared library directly */
|
||||
#define EILSEQ 84 /* Illegal byte sequence */
|
||||
#define ERESTART 85 /* Interrupted system call should be restarted */
|
||||
#define ESTRPIPE 86 /* Streams pipe error */
|
||||
#define EUSERS 87 /* Too many users */
|
||||
#define ENOTSOCK 88 /* Socket operation on non-socket */
|
||||
#define EDESTADDRREQ 89 /* Destination address required */
|
||||
#define EMSGSIZE 90 /* Message too long */
|
||||
#define EPROTOTYPE 91 /* Protocol wrong type for socket */
|
||||
#define ENOPROTOOPT 92 /* Protocol not available */
|
||||
#define EPROTONOSUPPORT 93 /* Protocol not supported */
|
||||
#define ESOCKTNOSUPPORT 94 /* Socket type not supported */
|
||||
#define EOPNOTSUPP 95 /* Operation not supported on transport endpoint */
|
||||
#define EPFNOSUPPORT 96 /* Protocol family not supported */
|
||||
#define EAFNOSUPPORT 97 /* Address family not supported by protocol */
|
||||
#define EADDRINUSE 98 /* Address already in use */
|
||||
#define EADDRNOTAVAIL 99 /* Cannot assign requested address */
|
||||
#define ENETDOWN 100 /* Network is down */
|
||||
#define ENETUNREACH 101 /* Network is unreachable */
|
||||
#define ENETRESET 102 /* Network dropped connection because of reset */
|
||||
#define ECONNABORTED 103 /* Software caused connection abort */
|
||||
#define ECONNRESET 104 /* Connection reset by peer */
|
||||
#define ENOBUFS 105 /* No buffer space available */
|
||||
#define EISCONN 106 /* Transport endpoint is already connected */
|
||||
#define ENOTCONN 107 /* Transport endpoint is not connected */
|
||||
#define ESHUTDOWN 108 /* Cannot send after transport endpoint shutdown */
|
||||
#define ETOOMANYREFS 109 /* Too many references: cannot splice */
|
||||
#define ETIMEDOUT 110 /* Connection timed out */
|
||||
#define ECONNREFUSED 111 /* Connection refused */
|
||||
#define EHOSTDOWN 112 /* Host is down */
|
||||
#define EHOSTUNREACH 113 /* No route to host */
|
||||
#define EALREADY 114 /* Operation already in progress */
|
||||
#define EINPROGRESS 115 /* Operation now in progress */
|
||||
#define ESTALE 116 /* Stale NFS file handle */
|
||||
#define EUCLEAN 117 /* Structure needs cleaning */
|
||||
#define ENOTNAM 118 /* Not a XENIX named type file */
|
||||
#define ENAVAIL 119 /* No XENIX semaphores available */
|
||||
#define EISNAM 120 /* Is a named type file */
|
||||
#define EREMOTEIO 121 /* Remote I/O error */
|
||||
#define EDQUOT 122 /* Quota exceeded */
|
||||
|
||||
#define ENOMEDIUM 123 /* No medium found */
|
||||
#define EMEDIUMTYPE 124 /* Wrong medium type */
|
||||
#define ECANCELED 125 /* Operation Canceled */
|
||||
#define ENOKEY 126 /* Required key not available */
|
||||
#define EKEYEXPIRED 127 /* Key has expired */
|
||||
#define EKEYREVOKED 128 /* Key has been revoked */
|
||||
#define EKEYREJECTED 129 /* Key was rejected by service */
|
||||
|
||||
/* for robust mutexes */
|
||||
#define EOWNERDEAD 130 /* Owner died */
|
||||
#define ENOTRECOVERABLE 131 /* State not recoverable */
|
||||
|
||||
#define ERFKILL 132 /* Operation not possible due to RF-kill */
|
||||
|
||||
#endif
|
||||
180  arch/arm64/kernel/include/esr.h  Normal file
@@ -0,0 +1,180 @@
|
||||
/* esr.h COPYRIGHT FUJITSU LIMITED 2015-2017 */
|
||||
/*
|
||||
* Copyright (C) 2013 - ARM Ltd
|
||||
* Author: Marc Zyngier <marc.zyngier@arm.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#ifndef __ASM_ESR_H
|
||||
#define __ASM_ESR_H
|
||||
|
||||
#include <const.h>
|
||||
|
||||
#define ESR_ELx_EC_UNKNOWN (0x00)
|
||||
#define ESR_ELx_EC_WFx (0x01)
|
||||
/* Unallocated EC: 0x02 */
|
||||
#define ESR_ELx_EC_CP15_32 (0x03)
|
||||
#define ESR_ELx_EC_CP15_64 (0x04)
|
||||
#define ESR_ELx_EC_CP14_MR (0x05)
|
||||
#define ESR_ELx_EC_CP14_LS (0x06)
|
||||
#define ESR_ELx_EC_FP_ASIMD (0x07)
|
||||
#define ESR_ELx_EC_CP10_ID (0x08)
|
||||
/* Unallocated EC: 0x09 - 0x0B */
|
||||
#define ESR_ELx_EC_CP14_64 (0x0C)
|
||||
/* Unallocated EC: 0x0d */
|
||||
#define ESR_ELx_EC_ILL (0x0E)
|
||||
/* Unallocated EC: 0x0F - 0x10 */
|
||||
#define ESR_ELx_EC_SVC32 (0x11)
|
||||
#define ESR_ELx_EC_HVC32 (0x12)
|
||||
#define ESR_ELx_EC_SMC32 (0x13)
|
||||
/* Unallocated EC: 0x14 */
|
||||
#define ESR_ELx_EC_SVC64 (0x15)
|
||||
#define ESR_ELx_EC_HVC64 (0x16)
|
||||
#define ESR_ELx_EC_SMC64 (0x17)
|
||||
#define ESR_ELx_EC_SYS64 (0x18)
|
||||
#define ESR_ELx_EC_SVE (0x19)
|
||||
/* Unallocated EC: 0x1A - 0x1E */
|
||||
#define ESR_ELx_EC_IMP_DEF (0x1f)
|
||||
#define ESR_ELx_EC_IABT_LOW (0x20)
|
||||
#define ESR_ELx_EC_IABT_CUR (0x21)
|
||||
#define ESR_ELx_EC_PC_ALIGN (0x22)
|
||||
/* Unallocated EC: 0x23 */
|
||||
#define ESR_ELx_EC_DABT_LOW (0x24)
|
||||
#define ESR_ELx_EC_DABT_CUR (0x25)
|
||||
#define ESR_ELx_EC_SP_ALIGN (0x26)
|
||||
/* Unallocated EC: 0x27 */
|
||||
#define ESR_ELx_EC_FP_EXC32 (0x28)
|
||||
/* Unallocated EC: 0x29 - 0x2B */
|
||||
#define ESR_ELx_EC_FP_EXC64 (0x2C)
|
||||
/* Unallocated EC: 0x2D - 0x2E */
|
||||
#define ESR_ELx_EC_SERROR (0x2F)
|
||||
#define ESR_ELx_EC_BREAKPT_LOW (0x30)
|
||||
#define ESR_ELx_EC_BREAKPT_CUR (0x31)
|
||||
#define ESR_ELx_EC_SOFTSTP_LOW (0x32)
|
||||
#define ESR_ELx_EC_SOFTSTP_CUR (0x33)
|
||||
#define ESR_ELx_EC_WATCHPT_LOW (0x34)
|
||||
#define ESR_ELx_EC_WATCHPT_CUR (0x35)
|
||||
/* Unallocated EC: 0x36 - 0x37 */
|
||||
#define ESR_ELx_EC_BKPT32 (0x38)
|
||||
/* Unallocated EC: 0x39 */
|
||||
#define ESR_ELx_EC_VECTOR32 (0x3A)
|
||||
/* Unallocated EC: 0x3B */
|
||||
#define ESR_ELx_EC_BRK64 (0x3C)
|
||||
/* Unallocated EC: 0x3D - 0x3F */
|
||||
#define ESR_ELx_EC_MAX (0x3F)
|
||||
|
||||
#define ESR_ELx_EC_SHIFT (26)
|
||||
#define ESR_ELx_EC_MASK (UL(0x3F) << ESR_ELx_EC_SHIFT)
|
||||
#define ESR_ELx_EC(esr) (((esr) & ESR_ELx_EC_MASK) >> ESR_ELx_EC_SHIFT)
|
||||
|
||||
#define ESR_ELx_IL (UL(1) << 25)
|
||||
#define ESR_ELx_ISS_MASK (ESR_ELx_IL - 1)
|
||||
|
||||
/* ISS field definitions shared by different classes */
|
||||
#define ESR_ELx_WNR (UL(1) << 6)
|
||||
|
||||
/* Shared ISS field definitions for Data/Instruction aborts */
|
||||
#define ESR_ELx_EA (UL(1) << 9)
|
||||
#define ESR_ELx_S1PTW (UL(1) << 7)
|
||||
|
||||
/* Shared ISS fault status code(IFSC/DFSC) for Data/Instruction aborts */
|
||||
#define ESR_ELx_FSC (0x3F)
|
||||
#define ESR_ELx_FSC_TYPE (0x3C)
|
||||
#define ESR_ELx_FSC_EXTABT (0x10)
|
||||
#define ESR_ELx_FSC_ACCESS (0x08)
|
||||
#define ESR_ELx_FSC_FAULT (0x04)
|
||||
#define ESR_ELx_FSC_PERM (0x0C)
|
||||
|
||||
/* ISS field definitions for Data Aborts */
|
||||
#define ESR_ELx_ISV (UL(1) << 24)
|
||||
#define ESR_ELx_SAS_SHIFT (22)
|
||||
#define ESR_ELx_SAS (UL(3) << ESR_ELx_SAS_SHIFT)
|
||||
#define ESR_ELx_SSE (UL(1) << 21)
|
||||
#define ESR_ELx_SRT_SHIFT (16)
|
||||
#define ESR_ELx_SRT_MASK (UL(0x1F) << ESR_ELx_SRT_SHIFT)
|
||||
#define ESR_ELx_SF (UL(1) << 15)
|
||||
#define ESR_ELx_AR (UL(1) << 14)
|
||||
#define ESR_ELx_CM (UL(1) << 8)
|
||||
|
||||
/* ISS field definitions for exceptions taken in to Hyp */
|
||||
#define ESR_ELx_CV (UL(1) << 24)
|
||||
#define ESR_ELx_COND_SHIFT (20)
|
||||
#define ESR_ELx_COND_MASK (UL(0xF) << ESR_ELx_COND_SHIFT)
|
||||
#define ESR_ELx_WFx_ISS_WFE (UL(1) << 0)
|
||||
#define ESR_ELx_xVC_IMM_MASK ((1UL << 16) - 1)
|
||||
|
||||
/* ESR value templates for specific events */
|
||||
|
||||
/* BRK instruction trap from AArch64 state */
|
||||
#define ESR_ELx_VAL_BRK64(imm) \
|
||||
((ESR_ELx_EC_BRK64 << ESR_ELx_EC_SHIFT) | ESR_ELx_IL | \
|
||||
((imm) & 0xffff))
|
||||
|
||||
/* ISS field definitions for System instruction traps */
|
||||
#define ESR_ELx_SYS64_ISS_RES0_SHIFT 22
|
||||
#define ESR_ELx_SYS64_ISS_RES0_MASK (UL(0x7) << ESR_ELx_SYS64_ISS_RES0_SHIFT)
|
||||
#define ESR_ELx_SYS64_ISS_DIR_MASK 0x1
|
||||
#define ESR_ELx_SYS64_ISS_DIR_READ 0x1
|
||||
#define ESR_ELx_SYS64_ISS_DIR_WRITE 0x0
|
||||
|
||||
#define ESR_ELx_SYS64_ISS_RT_SHIFT 5
|
||||
#define ESR_ELx_SYS64_ISS_RT_MASK (UL(0x1f) << ESR_ELx_SYS64_ISS_RT_SHIFT)
|
||||
#define ESR_ELx_SYS64_ISS_CRM_SHIFT 1
|
||||
#define ESR_ELx_SYS64_ISS_CRM_MASK (UL(0xf) << ESR_ELx_SYS64_ISS_CRM_SHIFT)
|
||||
#define ESR_ELx_SYS64_ISS_CRN_SHIFT 10
|
||||
#define ESR_ELx_SYS64_ISS_CRN_MASK (UL(0xf) << ESR_ELx_SYS64_ISS_CRN_SHIFT)
|
||||
#define ESR_ELx_SYS64_ISS_OP1_SHIFT 14
|
||||
#define ESR_ELx_SYS64_ISS_OP1_MASK (UL(0x7) << ESR_ELx_SYS64_ISS_OP1_SHIFT)
|
||||
#define ESR_ELx_SYS64_ISS_OP2_SHIFT 17
|
||||
#define ESR_ELx_SYS64_ISS_OP2_MASK (UL(0x7) << ESR_ELx_SYS64_ISS_OP2_SHIFT)
|
||||
#define ESR_ELx_SYS64_ISS_OP0_SHIFT 20
|
||||
#define ESR_ELx_SYS64_ISS_OP0_MASK (UL(0x3) << ESR_ELx_SYS64_ISS_OP0_SHIFT)
|
||||
#define ESR_ELx_SYS64_ISS_SYS_MASK (ESR_ELx_SYS64_ISS_OP0_MASK | \
|
||||
ESR_ELx_SYS64_ISS_OP1_MASK | \
|
||||
ESR_ELx_SYS64_ISS_OP2_MASK | \
|
||||
ESR_ELx_SYS64_ISS_CRN_MASK | \
|
||||
ESR_ELx_SYS64_ISS_CRM_MASK)
|
||||
#define ESR_ELx_SYS64_ISS_SYS_VAL(op0, op1, op2, crn, crm) \
|
||||
(((op0) << ESR_ELx_SYS64_ISS_OP0_SHIFT) | \
|
||||
((op1) << ESR_ELx_SYS64_ISS_OP1_SHIFT) | \
|
||||
((op2) << ESR_ELx_SYS64_ISS_OP2_SHIFT) | \
|
||||
((crn) << ESR_ELx_SYS64_ISS_CRN_SHIFT) | \
|
||||
((crm) << ESR_ELx_SYS64_ISS_CRM_SHIFT))
|
||||
|
||||
#define ESR_ELx_SYS64_ISS_SYS_OP_MASK (ESR_ELx_SYS64_ISS_SYS_MASK | \
|
||||
ESR_ELx_SYS64_ISS_DIR_MASK)
|
||||
/*
|
||||
* User space cache operations have the following sysreg encoding
|
||||
* in System instructions.
|
||||
* op0=1, op1=3, op2=1, crn=7, crm={ 5, 10, 11, 14 }, WRITE (L=0)
|
||||
*/
|
||||
#define ESR_ELx_SYS64_ISS_CRM_DC_CIVAC 14
|
||||
#define ESR_ELx_SYS64_ISS_CRM_DC_CVAU 11
|
||||
#define ESR_ELx_SYS64_ISS_CRM_DC_CVAC 10
|
||||
#define ESR_ELx_SYS64_ISS_CRM_IC_IVAU 5
|
||||
|
||||
#define ESR_ELx_SYS64_ISS_EL0_CACHE_OP_MASK (ESR_ELx_SYS64_ISS_OP0_MASK | \
|
||||
ESR_ELx_SYS64_ISS_OP1_MASK | \
|
||||
ESR_ELx_SYS64_ISS_OP2_MASK | \
|
||||
ESR_ELx_SYS64_ISS_CRN_MASK | \
|
||||
ESR_ELx_SYS64_ISS_DIR_MASK)
|
||||
#define ESR_ELx_SYS64_ISS_EL0_CACHE_OP_VAL \
|
||||
(ESR_ELx_SYS64_ISS_SYS_VAL(1, 3, 1, 7, 0) | \
|
||||
ESR_ELx_SYS64_ISS_DIR_WRITE)
|
||||
|
||||
#define ESR_ELx_SYS64_ISS_SYS_CTR ESR_ELx_SYS64_ISS_SYS_VAL(3, 3, 1, 0, 0)
|
||||
#define ESR_ELx_SYS64_ISS_SYS_CTR_READ (ESR_ELx_SYS64_ISS_SYS_CTR | \
|
||||
ESR_ELx_SYS64_ISS_DIR_READ)
|
||||
|
||||
#endif /* __ASM_ESR_H */
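A hedged sketch of how a fault handler might classify a synchronous exception using the ESR fields above; the function name and the surrounding handler logic are assumptions, not part of this patch.

/* Sketch: classify a synchronous exception from its ESR_ELx value. */
static void classify_sync_exception(unsigned long esr)
{
	unsigned long ec = ESR_ELx_EC(esr);

	if (ec == ESR_ELx_EC_DABT_LOW || ec == ESR_ELx_EC_DABT_CUR) {
		unsigned long fsc = esr & ESR_ELx_FSC_TYPE;

		if (fsc == ESR_ELx_FSC_FAULT) {
			/* translation fault: candidate for demand paging */
		} else if (fsc == ESR_ELx_FSC_PERM) {
			/* permission fault: candidate for copy-on-write */
		}
	} else if (ec == ESR_ELx_EC_SVC64) {
		unsigned int imm = esr & ESR_ELx_xVC_IMM_MASK;	/* SVC immediate */
		(void)imm;
	}
}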
102  arch/arm64/kernel/include/fpsimd.h  Normal file
@@ -0,0 +1,102 @@
|
||||
/* fpsimd.h COPYRIGHT FUJITSU LIMITED 2016-2019 */
|
||||
#ifndef __HEADER_ARM64_COMMON_FPSIMD_H
|
||||
#define __HEADER_ARM64_COMMON_FPSIMD_H
|
||||
|
||||
#include <ptrace.h>
|
||||
|
||||
#ifndef __ASSEMBLY__
|
||||
|
||||
/*
|
||||
* FP/SIMD storage area has:
|
||||
* - FPSR and FPCR
|
||||
* - 32 128-bit data registers
|
||||
*
|
||||
* Note that user_fpsimd forms a prefix of this structure, which is
|
||||
* relied upon in the ptrace FP/SIMD accessors.
|
||||
*/
|
||||
/* @ref.impl arch/arm64/include/asm/fpsimd.h::struct fpsimd_state */
|
||||
struct fpsimd_state {
|
||||
union {
|
||||
struct user_fpsimd_state user_fpsimd;
|
||||
struct {
|
||||
__uint128_t vregs[32];
|
||||
unsigned int fpsr;
|
||||
unsigned int fpcr;
|
||||
/*
|
||||
* For ptrace compatibility, pad to next 128-bit
|
||||
* boundary here if extending this struct.
|
||||
*/
|
||||
};
|
||||
};
|
||||
/* the id of the last cpu to have restored this state */
|
||||
unsigned int cpu;
|
||||
};
|
||||
|
||||
/* need for struct process */
|
||||
typedef struct fpsimd_state fp_regs_struct;
|
||||
|
||||
extern void thread_fpsimd_to_sve(struct thread *thread, fp_regs_struct *fp_regs);
|
||||
extern void thread_sve_to_fpsimd(struct thread *thread, fp_regs_struct *fp_regs);
|
||||
|
||||
#ifdef CONFIG_ARM64_SVE
|
||||
|
||||
extern size_t sve_state_size(struct thread const *thread);
|
||||
extern void sve_free(struct thread *thread);
|
||||
extern int sve_alloc(struct thread *thread);
|
||||
extern void sve_save_state(void *state, unsigned int *pfpsr);
|
||||
extern void sve_load_state(void const *state, unsigned int const *pfpsr, unsigned long vq_minus_1);
|
||||
extern unsigned int sve_get_vl(void);
|
||||
extern int sve_set_thread_vl(unsigned long arg);
|
||||
extern int sve_get_thread_vl(void);
|
||||
extern int sve_set_vector_length(struct thread *thread, unsigned long vl, unsigned long flags);
|
||||
|
||||
#define SVE_SET_VL(arg) sve_set_thread_vl(arg)
|
||||
#define SVE_GET_VL() sve_get_thread_vl()
|
||||
|
||||
/* Maximum VL that SVE VL-agnostic software can transparently support */
|
||||
#define SVE_VL_ARCH_MAX 0x100
|
||||
|
||||
#else /* CONFIG_ARM64_SVE */
|
||||
|
||||
#include <ihk/debug.h>
|
||||
#include <errno.h>
|
||||
|
||||
static void sve_save_state(void *state, unsigned int *pfpsr)
|
||||
{
|
||||
panic("PANIC:sve_save_state() was called CONFIG_ARM64_SVE off.\n");
|
||||
}
|
||||
|
||||
static void sve_load_state(void const *state, unsigned int const *pfpsr, unsigned long vq_minus_1)
|
||||
{
|
||||
panic("PANIC:sve_load_state() was called CONFIG_ARM64_SVE off.\n");
|
||||
}
|
||||
|
||||
static unsigned int sve_get_vl(void)
|
||||
{
|
||||
panic("PANIC:sve_get_vl() was called CONFIG_ARM64_SVE off.\n");
|
||||
return (unsigned int)-1;
|
||||
}
|
||||
|
||||
static int sve_set_vector_length(struct thread *thread, unsigned long vl, unsigned long flags)
|
||||
{
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* for prctl syscall */
|
||||
#define SVE_SET_VL(a) (-EINVAL)
|
||||
#define SVE_GET_VL() (-EINVAL)
|
||||
|
||||
#endif /* CONFIG_ARM64_SVE */
|
||||
|
||||
extern void sve_setup(void);
|
||||
extern void fpsimd_save_state(struct fpsimd_state *state);
|
||||
extern void fpsimd_load_state(struct fpsimd_state *state);
|
||||
extern void thread_fpsimd_save(struct thread *thread);
|
||||
extern void thread_fpsimd_load(struct thread *thread);
|
||||
|
||||
extern int sve_max_vl;
|
||||
extern int sve_default_vl;
|
||||
|
||||
#endif /* !__ASSEMBLY__ */
|
||||
|
||||
#endif /* !__HEADER_ARM64_COMMON_FPSIMD_H */
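For context, a hedged sketch of how a context switch might use the save/restore entry points declared above. The prev/next naming and the wrapper function are illustrative; they only assume that struct thread carries an fp_regs_struct, as this port's headers indicate.

/* Sketch: swap FP/SIMD state at context-switch time (illustrative only). */
static void switch_fpsimd(struct thread *prev, struct thread *next)
{
	thread_fpsimd_save(prev);	/* spill prev's V0-V31/FPSR/FPCR (or SVE state) */
	thread_fpsimd_load(next);	/* reload next's register image */
}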
151  arch/arm64/kernel/include/fpsimdmacros.h  Normal file
@@ -0,0 +1,151 @@
|
||||
/* fpsimdmacros.h COPYRIGHT FUJITSU LIMITED 2016-2017 */
|
||||
|
||||
.macro _check_reg nr
|
||||
.if (\nr) < 0 || (\nr) > 31
|
||||
.error "Bad register number \nr."
|
||||
.endif
|
||||
.endm
|
||||
|
||||
.macro _check_zreg znr
|
||||
.if (\znr) < 0 || (\znr) > 31
|
||||
.error "Bad Scalable Vector Extension vector register number \znr."
|
||||
.endif
|
||||
.endm
|
||||
|
||||
.macro _check_preg pnr
|
||||
.if (\pnr) < 0 || (\pnr) > 15
|
||||
.error "Bad Scalable Vector Extension predicate register number \pnr."
|
||||
.endif
|
||||
.endm
|
||||
|
||||
.macro _check_num n, min, max
|
||||
.if (\n) < (\min) || (\n) > (\max)
|
||||
.error "Number \n out of range [\min,\max]"
|
||||
.endif
|
||||
.endm
|
||||
|
||||
.macro _zstrv znt, nspb, ioff=0
|
||||
_check_zreg \znt
|
||||
_check_reg \nspb
|
||||
_check_num (\ioff), -0x100, 0xff
|
||||
.inst 0xe5804000 \
|
||||
| (\znt) \
|
||||
| ((\nspb) << 5) \
|
||||
| (((\ioff) & 7) << 10) \
|
||||
| (((\ioff) & 0x1f8) << 13)
|
||||
.endm
|
||||
|
||||
.macro _zldrv znt, nspb, ioff=0
|
||||
_check_zreg \znt
|
||||
_check_reg \nspb
|
||||
_check_num (\ioff), -0x100, 0xff
|
||||
.inst 0x85804000 \
|
||||
| (\znt) \
|
||||
| ((\nspb) << 5) \
|
||||
| (((\ioff) & 7) << 10) \
|
||||
| (((\ioff) & 0x1f8) << 13)
|
||||
.endm
|
||||
|
||||
.macro _zstrp pnt, nspb, ioff=0
|
||||
_check_preg \pnt
|
||||
_check_reg \nspb
|
||||
_check_num (\ioff), -0x100, 0xff
|
||||
.inst 0xe5800000 \
|
||||
| (\pnt) \
|
||||
| ((\nspb) << 5) \
|
||||
| (((\ioff) & 7) << 10) \
|
||||
| (((\ioff) & 0x1f8) << 13)
|
||||
.endm
|
||||
|
||||
.macro _zldrp pnt, nspb, ioff=0
|
||||
_check_preg \pnt
|
||||
_check_reg \nspb
|
||||
_check_num (\ioff), -0x100, 0xff
|
||||
.inst 0x85800000 \
|
||||
| (\pnt) \
|
||||
| ((\nspb) << 5) \
|
||||
| (((\ioff) & 7) << 10) \
|
||||
| (((\ioff) & 0x1f8) << 13)
|
||||
.endm
|
||||
|
||||
.macro _zrdvl nspd, is1
|
||||
_check_reg \nspd
|
||||
_check_num (\is1), -0x20, 0x1f
|
||||
.inst 0x04bf5000 \
|
||||
| (\nspd) \
|
||||
| (((\is1) & 0x3f) << 5)
|
||||
.endm
|
||||
|
||||
.macro _zrdffr pnd
|
||||
_check_preg \pnd
|
||||
.inst 0x2519f000 \
|
||||
| (\pnd)
|
||||
.endm
|
||||
|
||||
.macro _zwrffr pnd
|
||||
_check_preg \pnd
|
||||
.inst 0x25289000 \
|
||||
| ((\pnd) << 5)
|
||||
.endm
|
||||
|
||||
.macro for from, to, insn
|
||||
.if (\from) >= (\to)
|
||||
\insn (\from)
|
||||
.exitm
|
||||
.endif
|
||||
|
||||
for \from, ((\from) + (\to)) / 2, \insn
|
||||
for ((\from) + (\to)) / 2 + 1, \to, \insn
|
||||
.endm
|
||||
|
||||
.macro sve_save nb, xpfpsr, ntmp
|
||||
.macro savez n
|
||||
_zstrv \n, \nb, (\n) - 34
|
||||
.endm
|
||||
|
||||
.macro savep n
|
||||
_zstrp \n, \nb, (\n) - 16
|
||||
.endm
|
||||
|
||||
for 0, 31, savez
|
||||
for 0, 15, savep
|
||||
_zrdffr 0
|
||||
_zstrp 0, \nb
|
||||
_zldrp 0, \nb, -16
|
||||
|
||||
mrs x\ntmp, fpsr
|
||||
str w\ntmp, [\xpfpsr]
|
||||
mrs x\ntmp, fpcr
|
||||
str w\ntmp, [\xpfpsr, #4]
|
||||
|
||||
.purgem savez
|
||||
.purgem savep
|
||||
.endm
|
||||
|
||||
.macro sve_load nb, xpfpsr, xvqminus1 ntmp
|
||||
mrs_s x\ntmp, SYS_ZCR_EL1
|
||||
bic x\ntmp, x\ntmp, ZCR_EL1_LEN_MASK
|
||||
orr x\ntmp, x\ntmp, \xvqminus1
|
||||
msr_s SYS_ZCR_EL1, x\ntmp // self-synchronising
|
||||
|
||||
.macro loadz n
|
||||
_zldrv \n, \nb, (\n) - 34
|
||||
.endm
|
||||
|
||||
.macro loadp n
|
||||
_zldrp \n, \nb, (\n) - 16
|
||||
.endm
|
||||
|
||||
for 0, 31, loadz
|
||||
_zldrp 0, \nb
|
||||
_zwrffr 0
|
||||
for 0, 15, loadp
|
||||
|
||||
ldr w\ntmp, [\xpfpsr]
|
||||
msr fpsr, x\ntmp
|
||||
ldr w\ntmp, [\xpfpsr, #4]
|
||||
msr fpcr, x\ntmp
|
||||
|
||||
.purgem loadz
|
||||
.purgem loadp
|
||||
.endm
|
||||
92  arch/arm64/kernel/include/hw_breakpoint.h  Normal file
@@ -0,0 +1,92 @@
|
||||
/* hw_breakpoint.h COPYRIGHT FUJITSU LIMITED 2016 */
|
||||
#ifndef __HEADER_ARM64_COMMON_HW_BREAKPOINT_H
|
||||
#define __HEADER_ARM64_COMMON_HW_BREAKPOINT_H
|
||||
|
||||
#include <ihk/types.h>
|
||||
|
||||
int hw_breakpoint_slots(int type);
|
||||
unsigned long read_wb_reg(int reg, int n);
|
||||
void write_wb_reg(int reg, int n, unsigned long val);
|
||||
void hw_breakpoint_reset(void);
|
||||
void arch_hw_breakpoint_init(void);
|
||||
|
||||
struct user_hwdebug_state;
|
||||
int arch_validate_hwbkpt_settings(long note_type, struct user_hwdebug_state *hws, size_t len);
|
||||
|
||||
extern int core_num_brps;
|
||||
extern int core_num_wrps;
|
||||
|
||||
/* @ref.impl include/uapi/linux/hw_breakpoint.h::HW_BREAKPOINT_LEN_n, HW_BREAKPOINT_xxx, bp_type_idx */
|
||||
enum {
|
||||
HW_BREAKPOINT_LEN_1 = 1,
|
||||
HW_BREAKPOINT_LEN_2 = 2,
|
||||
HW_BREAKPOINT_LEN_4 = 4,
|
||||
HW_BREAKPOINT_LEN_8 = 8,
|
||||
};
|
||||
|
||||
enum {
|
||||
HW_BREAKPOINT_EMPTY = 0,
|
||||
HW_BREAKPOINT_R = 1,
|
||||
HW_BREAKPOINT_W = 2,
|
||||
HW_BREAKPOINT_RW = HW_BREAKPOINT_R | HW_BREAKPOINT_W,
|
||||
HW_BREAKPOINT_X = 4,
|
||||
HW_BREAKPOINT_INVALID = HW_BREAKPOINT_RW | HW_BREAKPOINT_X,
|
||||
};
|
||||
|
||||
enum bp_type_idx {
|
||||
TYPE_INST = 0,
|
||||
TYPE_DATA = 1,
|
||||
TYPE_MAX
|
||||
};
|
||||
|
||||
/* Breakpoint */
|
||||
#define ARM_BREAKPOINT_EXECUTE 0
|
||||
|
||||
/* Watchpoints */
|
||||
#define ARM_BREAKPOINT_LOAD 1
|
||||
#define ARM_BREAKPOINT_STORE 2
|
||||
#define AARCH64_ESR_ACCESS_MASK (1 << 6)
|
||||
|
||||
/* Privilege Levels */
|
||||
#define AARCH64_BREAKPOINT_EL1 1
|
||||
#define AARCH64_BREAKPOINT_EL0 2
|
||||
|
||||
/* Lengths */
|
||||
#define ARM_BREAKPOINT_LEN_1 0x1
|
||||
#define ARM_BREAKPOINT_LEN_2 0x3
|
||||
#define ARM_BREAKPOINT_LEN_4 0xf
|
||||
#define ARM_BREAKPOINT_LEN_8 0xff
|
||||
|
||||
/* @ref.impl arch/arm64/include/asm/hw_breakpoint.h::ARM_MAX_[BRP|WRP] */
|
||||
/*
|
||||
* Limits.
|
||||
* Changing these will require modifications to the register accessors.
|
||||
*/
|
||||
#define ARM_MAX_BRP 16
|
||||
#define ARM_MAX_WRP 16
|
||||
|
||||
/* @ref.impl arch/arm64/include/asm/hw_breakpoint.h::AARCH64_DBG_REG_xxx */
|
||||
/* Virtual debug register bases. */
|
||||
#define AARCH64_DBG_REG_BVR 0
|
||||
#define AARCH64_DBG_REG_BCR (AARCH64_DBG_REG_BVR + ARM_MAX_BRP)
|
||||
#define AARCH64_DBG_REG_WVR (AARCH64_DBG_REG_BCR + ARM_MAX_BRP)
|
||||
#define AARCH64_DBG_REG_WCR (AARCH64_DBG_REG_WVR + ARM_MAX_WRP)
|
||||
|
||||
/* @ref.impl arch/arm64/include/asm/hw_breakpoint.h::AARCH64_DBG_REG_NAME_xxx */
|
||||
/* Debug register names. */
|
||||
#define AARCH64_DBG_REG_NAME_BVR "bvr"
|
||||
#define AARCH64_DBG_REG_NAME_BCR "bcr"
|
||||
#define AARCH64_DBG_REG_NAME_WVR "wvr"
|
||||
#define AARCH64_DBG_REG_NAME_WCR "wcr"
|
||||
|
||||
/* @ref.impl arch/arm64/include/asm/hw_breakpoint.h::AARCH64_DBG_[READ|WRITE] */
|
||||
/* Accessor macros for the debug registers. */
|
||||
#define AARCH64_DBG_READ(N, REG, VAL) do {\
|
||||
asm volatile("mrs %0, dbg" REG #N "_el1" : "=r" (VAL));\
|
||||
} while (0)
|
||||
|
||||
#define AARCH64_DBG_WRITE(N, REG, VAL) do {\
|
||||
asm volatile("msr dbg" REG #N "_el1, %0" :: "r" (VAL));\
|
||||
} while (0)
|
||||
|
||||
#endif /* !__HEADER_ARM64_COMMON_HW_BREAKPOINT_H */
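Because AARCH64_DBG_READ/WRITE paste the index into the system-register name, the index must be a compile-time literal. A minimal sketch of reading and re-arming breakpoint 0 with these macros; the helper names are illustrative.

/* Sketch: access DBGBVR0_EL1 via the accessor macros (index must be a literal). */
static unsigned long get_bvr0(void)
{
	unsigned long val;

	AARCH64_DBG_READ(0, AARCH64_DBG_REG_NAME_BVR, val);	/* mrs dbgbvr0_el1 */
	return val;
}

static void set_bvr0(unsigned long addr)
{
	AARCH64_DBG_WRITE(0, AARCH64_DBG_REG_NAME_BVR, addr);	/* msr dbgbvr0_el1 */
}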
26  arch/arm64/kernel/include/hwcap.h  Normal file
@@ -0,0 +1,26 @@
/* hwcap.h COPYRIGHT FUJITSU LIMITED 2017 */
#ifndef _UAPI__ASM_HWCAP_H
#define _UAPI__ASM_HWCAP_H

/*
 * HWCAP flags - for elf_hwcap (in kernel) and AT_HWCAP
 */
#define HWCAP_FP		(1 << 0)
#define HWCAP_ASIMD		(1 << 1)
#define HWCAP_EVTSTRM		(1 << 2)
#define HWCAP_AES		(1 << 3)
#define HWCAP_PMULL		(1 << 4)
#define HWCAP_SHA1		(1 << 5)
#define HWCAP_SHA2		(1 << 6)
#define HWCAP_CRC32		(1 << 7)
#define HWCAP_ATOMICS		(1 << 8)
#define HWCAP_FPHP		(1 << 9)
#define HWCAP_ASIMDHP		(1 << 10)
#define HWCAP_CPUID		(1 << 11)
#define HWCAP_ASIMDRDM		(1 << 12)
#define HWCAP_SVE		(1 << 13)

unsigned long arch_get_hwcap(void);
extern unsigned long elf_hwcap;

#endif /* _UAPI__ASM_HWCAP_H */
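A small sketch of testing a capability bit from the value returned by arch_get_hwcap(); the helper is purely illustrative. The same bit pattern is what user space sees in AT_HWCAP.

/* Sketch: report whether SVE is advertised via the HWCAP flags above. */
static int cpu_has_sve(void)
{
	return (arch_get_hwcap() & HWCAP_SVE) != 0;
}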
365  arch/arm64/kernel/include/ihk/atomic.h  Normal file
@@ -0,0 +1,365 @@
|
||||
/* atomic.h COPYRIGHT FUJITSU LIMITED 2015-2016 */
|
||||
#ifndef __HEADER_ARM64_IHK_ATOMIC_H
|
||||
#define __HEADER_ARM64_IHK_ATOMIC_H
|
||||
|
||||
#include <arch/cpu.h>
|
||||
|
||||
/***********************************************************************
|
||||
* ihk_atomic_t
|
||||
*/
|
||||
|
||||
typedef struct {
|
||||
int counter;
|
||||
} ihk_atomic_t;
|
||||
|
||||
#define IHK_ATOMIC_INIT(i) { (i) }
|
||||
|
||||
static inline int ihk_atomic_read(const ihk_atomic_t *v)
|
||||
{
|
||||
return (*(volatile int *)&(v)->counter);
|
||||
}
|
||||
|
||||
static inline void ihk_atomic_set(ihk_atomic_t *v, int i)
|
||||
{
|
||||
v->counter = i;
|
||||
}
|
||||
|
||||
/* @ref.impl arch/arm64/include/asm/atomic.h::atomic_add (atomic_##op) */
|
||||
static inline void ihk_atomic_add(int i, ihk_atomic_t *v)
|
||||
{
|
||||
unsigned long tmp;
|
||||
int result;
|
||||
|
||||
asm volatile("// atomic_add\n"
|
||||
"1: ldxr %w0, %2\n"
|
||||
" add %w0, %w0, %w3\n"
|
||||
" stxr %w1, %w0, %2\n"
|
||||
" cbnz %w1, 1b"
|
||||
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
|
||||
: "Ir" (i));
|
||||
}
|
||||
|
||||
/* @ref.impl arch/arm64/include/asm/atomic.h::atomic_sub (atomic_##op) */
|
||||
static inline void ihk_atomic_sub(int i, ihk_atomic_t *v)
|
||||
{
|
||||
unsigned long tmp;
|
||||
int result;
|
||||
|
||||
asm volatile("// atomic_sub\n"
|
||||
"1: ldxr %w0, %2\n"
|
||||
" sub %w0, %w0, %w3\n"
|
||||
" stxr %w1, %w0, %2\n"
|
||||
" cbnz %w1, 1b"
|
||||
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
|
||||
: "Ir" (i));
|
||||
}
|
||||
|
||||
/* @ref.impl arch/arm64/include/asm/atomic.h::atomic_inc */
|
||||
#define ihk_atomic_inc(v) ihk_atomic_add(1, v)
|
||||
|
||||
/* @ref.impl arch/arm64/include/asm/atomic.h::atomic_dec */
|
||||
#define ihk_atomic_dec(v) ihk_atomic_sub(1, v)
|
||||
|
||||
/* @ref.impl arch/arm64/include/asm/atomic.h::atomic_add_return (atomic_##op##_return) */
|
||||
static inline int ihk_atomic_add_return(int i, ihk_atomic_t *v)
|
||||
{
|
||||
unsigned long tmp;
|
||||
int result;
|
||||
|
||||
asm volatile("// atomic_add_return\n"
|
||||
"1: ldxr %w0, %2\n"
|
||||
" add %w0, %w0, %w3\n"
|
||||
" stlxr %w1, %w0, %2\n"
|
||||
" cbnz %w1, 1b"
|
||||
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
|
||||
: "Ir" (i)
|
||||
: "memory");
|
||||
|
||||
smp_mb();
|
||||
return result;
|
||||
}
|
||||
|
||||
/* @ref.impl arch/arm64/include/asm/atomic.h::atomic_sub_return (atomic_##op##_return) */
|
||||
static inline int ihk_atomic_sub_return(int i, ihk_atomic_t *v)
|
||||
{
|
||||
unsigned long tmp;
|
||||
int result;
|
||||
|
||||
asm volatile("// atomic_sub_return\n"
|
||||
"1: ldxr %w0, %2\n"
|
||||
" sub %w0, %w0, %w3\n"
|
||||
" stlxr %w1, %w0, %2\n"
|
||||
" cbnz %w1, 1b"
|
||||
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
|
||||
: "Ir" (i)
|
||||
: "memory");
|
||||
|
||||
smp_mb();
|
||||
return result;
|
||||
}
|
||||
|
||||
/* @ref.impl arch/arm64/include/asm/atomic.h::atomic_inc_and_test */
|
||||
#define ihk_atomic_inc_and_test(v) (ihk_atomic_add_return(1, v) == 0)
|
||||
|
||||
/* @ref.impl arch/arm64/include/asm/atomic.h::atomic_dec_and_test */
|
||||
#define ihk_atomic_dec_and_test(v) (ihk_atomic_sub_return(1, v) == 0)
|
||||
|
||||
/* @ref.impl arch/arm64/include/asm/atomic.h::atomic_inc_return */
|
||||
#define ihk_atomic_inc_return(v) (ihk_atomic_add_return(1, v))
|
||||
|
||||
/* @ref.impl arch/arm64/include/asm/atomic.h::atomic_dec_return */
|
||||
#define ihk_atomic_dec_return(v) (ihk_atomic_sub_return(1, v))
|
||||
|
||||
/***********************************************************************
|
||||
* ihk_atomic64_t
|
||||
*/
|
||||
typedef struct {
|
||||
long counter64;
|
||||
} ihk_atomic64_t;
|
||||
|
||||
#define IHK_ATOMIC64_INIT(i) { .counter64 = (i) }
|
||||
|
||||
static inline long ihk_atomic64_read(const ihk_atomic64_t *v)
|
||||
{
|
||||
return *(volatile long *)&(v)->counter64;
|
||||
}
|
||||
|
||||
static inline void ihk_atomic64_set(ihk_atomic64_t *v, long i)
|
||||
{
|
||||
v->counter64 = i;
|
||||
}
|
||||
|
||||
/* @ref.impl arch/arm64/include/asm/atomic.h::atomic64_add (atomic64_##op) */
|
||||
static inline void ihk_atomic64_add(long i, ihk_atomic64_t *v)
|
||||
{
|
||||
long result;
|
||||
unsigned long tmp;
|
||||
|
||||
asm volatile("// atomic64_add\n"
|
||||
"1: ldxr %0, %2\n"
|
||||
" add %0, %0, %3\n"
|
||||
" stxr %w1, %0, %2\n"
|
||||
" cbnz %w1, 1b"
|
||||
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter64)
|
||||
: "Ir" (i));
|
||||
}
|
||||
|
||||
/* @ref.impl arch/arm64/include/asm/atomic.h::atomic64_inc */
|
||||
#define ihk_atomic64_inc(v) ihk_atomic64_add(1LL, (v))
|
||||
|
||||
#define ihk_atomic64_cmpxchg(p, o, n) cmpxchg(&((p)->counter64), o, n)
|
||||
|
||||
/***********************************************************************
|
||||
* others
|
||||
*/
|
||||
/* @ref.impl arch/arm64/include/asm/cmpxchg.h::__xchg */
|
||||
static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
|
||||
{
|
||||
unsigned long ret = 0, tmp;
|
||||
|
||||
switch (size) {
|
||||
case 1:
|
||||
asm volatile("// __xchg1\n"
|
||||
"1: ldxrb %w0, %2\n"
|
||||
" stlxrb %w1, %w3, %2\n"
|
||||
" cbnz %w1, 1b\n"
|
||||
: "=&r" (ret), "=&r" (tmp), "+Q" (*(unsigned char *)ptr)
|
||||
: "r" (x)
|
||||
: "memory");
|
||||
break;
|
||||
case 2:
|
||||
asm volatile("// __xchg2\n"
|
||||
"1: ldxrh %w0, %2\n"
|
||||
" stlxrh %w1, %w3, %2\n"
|
||||
" cbnz %w1, 1b\n"
|
||||
: "=&r" (ret), "=&r" (tmp), "+Q" (*(unsigned short *)ptr)
|
||||
: "r" (x)
|
||||
: "memory");
|
||||
break;
|
||||
case 4:
|
||||
asm volatile("// __xchg4\n"
|
||||
"1: ldxr %w0, %2\n"
|
||||
" stlxr %w1, %w3, %2\n"
|
||||
" cbnz %w1, 1b\n"
|
||||
: "=&r" (ret), "=&r" (tmp), "+Q" (*(unsigned int *)ptr)
|
||||
: "r" (x)
|
||||
: "memory");
|
||||
break;
|
||||
case 8:
|
||||
asm volatile("// __xchg8\n"
|
||||
"1: ldxr %0, %2\n"
|
||||
" stlxr %w1, %3, %2\n"
|
||||
" cbnz %w1, 1b\n"
|
||||
: "=&r" (ret), "=&r" (tmp), "+Q" (*(unsigned long *)ptr)
|
||||
: "r" (x)
|
||||
: "memory");
|
||||
break;
|
||||
/*
|
||||
default:
|
||||
BUILD_BUG();
|
||||
*/
|
||||
}
|
||||
|
||||
smp_mb();
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* @ref.impl arch/arm64/include/asm/cmpxchg.h::xchg */
|
||||
#define xchg(ptr,x) \
|
||||
({ \
|
||||
__typeof__(*(ptr)) __ret; \
|
||||
__ret = (__typeof__(*(ptr))) \
|
||||
__xchg((unsigned long)(x), (ptr), sizeof(*(ptr))); \
|
||||
__ret; \
|
||||
})
|
||||
|
||||
#define xchg4(ptr, x) xchg(ptr,x)
|
||||
#define xchg8(ptr, x) xchg(ptr,x)
|
||||
|
||||
/* @ref.impl arch/arm64/include/asm/cmpxchg.h::__cmpxchg */
|
||||
static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
|
||||
unsigned long new, int size)
|
||||
{
|
||||
unsigned long oldval = 0, res;
|
||||
|
||||
switch (size) {
|
||||
case 1:
|
||||
do {
|
||||
asm volatile("// __cmpxchg1\n"
|
||||
" ldxrb %w1, %2\n"
|
||||
" mov %w0, #0\n"
|
||||
" cmp %w1, %w3\n"
|
||||
" b.ne 1f\n"
|
||||
" stxrb %w0, %w4, %2\n"
|
||||
"1:\n"
|
||||
: "=&r" (res), "=&r" (oldval), "+Q" (*(unsigned char *)ptr)
|
||||
: "Ir" (old), "r" (new) : "cc");
|
||||
} while (res);
|
||||
break;
|
||||
|
||||
case 2:
|
||||
do {
|
||||
asm volatile("// __cmpxchg2\n"
|
||||
" ldxrh %w1, %2\n"
|
||||
" mov %w0, #0\n"
|
||||
" cmp %w1, %w3\n"
|
||||
" b.ne 1f\n"
|
||||
" stxrh %w0, %w4, %2\n"
|
||||
"1:\n"
|
||||
: "=&r" (res), "=&r" (oldval), "+Q" (*(unsigned short *)ptr)
|
||||
: "Ir" (old), "r" (new)
|
||||
: "cc");
|
||||
} while (res);
|
||||
break;
|
||||
|
||||
case 4:
|
||||
do {
|
||||
asm volatile("// __cmpxchg4\n"
|
||||
" ldxr %w1, %2\n"
|
||||
" mov %w0, #0\n"
|
||||
" cmp %w1, %w3\n"
|
||||
" b.ne 1f\n"
|
||||
" stxr %w0, %w4, %2\n"
|
||||
"1:\n"
|
||||
: "=&r" (res), "=&r" (oldval), "+Q" (*(unsigned int *)ptr)
|
||||
: "Ir" (old), "r" (new)
|
||||
: "cc");
|
||||
} while (res);
|
||||
break;
|
||||
|
||||
case 8:
|
||||
do {
|
||||
asm volatile("// __cmpxchg8\n"
|
||||
" ldxr %1, %2\n"
|
||||
" mov %w0, #0\n"
|
||||
" cmp %1, %3\n"
|
||||
" b.ne 1f\n"
|
||||
" stxr %w0, %4, %2\n"
|
||||
"1:\n"
|
||||
: "=&r" (res), "=&r" (oldval), "+Q" (*(unsigned long *)ptr)
|
||||
: "Ir" (old), "r" (new)
|
||||
: "cc");
|
||||
} while (res);
|
||||
break;
|
||||
/*
|
||||
default:
|
||||
BUILD_BUG();
|
||||
*/
|
||||
}
|
||||
|
||||
return oldval;
|
||||
}
|
||||
|
||||
/* @ref.impl arch/arm64/include/asm/cmpxchg.h::__cmpxchg_mb */
|
||||
static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old,
|
||||
unsigned long new, int size)
|
||||
{
|
||||
unsigned long ret;
|
||||
|
||||
smp_mb();
|
||||
ret = __cmpxchg(ptr, old, new, size);
|
||||
smp_mb();
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* @ref.impl arch/arm64/include/asm/cmpxchg.h::cmpxchg */
|
||||
#define cmpxchg(ptr, o, n) \
|
||||
({ \
|
||||
__typeof__(*(ptr)) __ret; \
|
||||
__ret = (__typeof__(*(ptr))) \
|
||||
__cmpxchg_mb((ptr), (unsigned long)(o), (unsigned long)(n), \
|
||||
sizeof(*(ptr))); \
|
||||
__ret; \
|
||||
})
|
||||
|
||||
#define atomic_cmpxchg4(ptr, o, n) cmpxchg(ptr,o,n)
|
||||
#define atomic_cmpxchg8(ptr, o, n) cmpxchg(ptr,o,n)
|
||||
|
||||
static inline void ihk_atomic_add_long(long i, long *v)
|
||||
{
|
||||
long result;
|
||||
unsigned long tmp;
|
||||
|
||||
asm volatile("// atomic64_add\n"
|
||||
"1: ldxr %0, %2\n"
|
||||
" add %0, %0, %3\n"
|
||||
" stxr %w1, %0, %2\n"
|
||||
" cbnz %w1, 1b"
|
||||
: "=&r" (result), "=&r" (tmp), "+Q" (*v)
|
||||
: "Ir" (i));
|
||||
}
|
||||
|
||||
static inline void ihk_atomic_add_ulong(long i, unsigned long *v)
|
||||
{
|
||||
long result;
|
||||
unsigned long tmp;
|
||||
|
||||
asm volatile("// atomic64_add\n"
|
||||
"1: ldxr %0, %2\n"
|
||||
" add %0, %0, %3\n"
|
||||
" stxr %w1, %0, %2\n"
|
||||
" cbnz %w1, 1b"
|
||||
: "=&r" (result), "=&r" (tmp), "+Q" (*v)
|
||||
: "Ir" (i));
|
||||
}
|
||||
|
||||
static inline unsigned long ihk_atomic_add_long_return(long i, long *v)
|
||||
{
|
||||
unsigned long result;
|
||||
unsigned long tmp;
|
||||
|
||||
asm volatile("// atomic64_add_return\n"
|
||||
"1: ldxr %0, %2\n"
|
||||
" add %0, %0, %3\n"
|
||||
" stlxr %w1, %0, %2\n"
|
||||
" cbnz %w1, 1b"
|
||||
: "=&r" (result), "=&r" (tmp), "+Q" (*v)
|
||||
: "Ir" (i)
|
||||
: "memory");
|
||||
|
||||
smp_mb();
|
||||
return result;
|
||||
}
|
||||
|
||||
#endif /* !__HEADER_ARM64_COMMON_IHK_ATOMIC_H */
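A minimal sketch of the intended usage of ihk_atomic_t and cmpxchg(), built around a hypothetical reference-counted object; none of the names below are part of this patch.

/* Sketch: reference counting with ihk_atomic_t, plus a lock-free flag update. */
struct refobj {
	ihk_atomic_t refcount;
	unsigned long flags;
};

static void refobj_get(struct refobj *obj)
{
	ihk_atomic_inc(&obj->refcount);
}

static int refobj_put(struct refobj *obj)
{
	/* returns nonzero when the last reference was dropped */
	return ihk_atomic_dec_and_test(&obj->refcount);
}

static void refobj_set_flag(struct refobj *obj, unsigned long bit)
{
	unsigned long old, new;

	do {
		old = obj->flags;
		new = old | bit;
	} while (cmpxchg(&obj->flags, old, new) != old);
}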
83  arch/arm64/kernel/include/ihk/context.h  Normal file
@@ -0,0 +1,83 @@
|
||||
/* context.h COPYRIGHT FUJITSU LIMITED 2015-2018 */
|
||||
#ifndef __HEADER_ARM64_IHK_CONTEXT_H
|
||||
#define __HEADER_ARM64_IHK_CONTEXT_H
|
||||
|
||||
#include <registers.h>
|
||||
|
||||
struct thread_info;
|
||||
typedef struct {
|
||||
struct thread_info *thread;
|
||||
} ihk_mc_kernel_context_t;
|
||||
|
||||
struct user_pt_regs {
|
||||
unsigned long regs[31];
|
||||
unsigned long sp;
|
||||
unsigned long pc;
|
||||
unsigned long pstate;
|
||||
};
|
||||
|
||||
struct pt_regs {
|
||||
union {
|
||||
struct user_pt_regs user_regs;
|
||||
struct {
|
||||
unsigned long regs[31];
|
||||
unsigned long sp;
|
||||
unsigned long pc;
|
||||
unsigned long pstate;
|
||||
};
|
||||
};
|
||||
unsigned long orig_x0;
|
||||
unsigned long orig_pc;
|
||||
unsigned long syscallno;
|
||||
unsigned long __padding;
|
||||
};
|
||||
|
||||
typedef struct pt_regs ihk_mc_user_context_t;
|
||||
|
||||
/* @ref.impl arch/arm64/include/asm/ptrace.h */
|
||||
#define GET_IP(regs) ((unsigned long)(regs)->pc)
|
||||
#define SET_IP(regs, value) ((regs)->pc = ((uint64_t) (value)))
|
||||
|
||||
/* @ref.impl arch/arm64/include/asm/ptrace.h */
|
||||
/* AArch32 CPSR bits */
|
||||
#define COMPAT_PSR_MODE_MASK 0x0000001f
|
||||
|
||||
/* @ref.impl include/asm-generic/ptrace.h */
|
||||
static inline unsigned long instruction_pointer(struct pt_regs *regs)
|
||||
{
|
||||
return GET_IP(regs);
|
||||
}
|
||||
/* @ref.impl include/asm-generic/ptrace.h */
|
||||
static inline void instruction_pointer_set(struct pt_regs *regs,
|
||||
unsigned long val)
|
||||
{
|
||||
SET_IP(regs, val);
|
||||
}
|
||||
|
||||
/* @ref.impl arch/arm64/include/asm/ptrace.h */
|
||||
/*
|
||||
* Write a register given an architectural register index r.
|
||||
* This handles the common case where 31 means XZR, not SP.
|
||||
*/
|
||||
static inline void pt_regs_write_reg(struct pt_regs *regs, int r,
|
||||
unsigned long val)
|
||||
{
|
||||
if (r != 31)
|
||||
regs->regs[r] = val;
|
||||
}
|
||||
|
||||
/* temp */
|
||||
#define ihk_mc_syscall_arg0(uc) ((uc)->regs[0])
|
||||
#define ihk_mc_syscall_arg1(uc) ((uc)->regs[1])
|
||||
#define ihk_mc_syscall_arg2(uc) ((uc)->regs[2])
|
||||
#define ihk_mc_syscall_arg3(uc) ((uc)->regs[3])
|
||||
#define ihk_mc_syscall_arg4(uc) ((uc)->regs[4])
|
||||
#define ihk_mc_syscall_arg5(uc) ((uc)->regs[5])
|
||||
|
||||
#define ihk_mc_syscall_ret(uc) ((uc)->regs[0])
|
||||
#define ihk_mc_syscall_number(uc) ((uc)->regs[8])
|
||||
|
||||
#define ihk_mc_syscall_pc(uc) ((uc)->pc)
|
||||
#define ihk_mc_syscall_sp(uc) ((uc)->sp)
|
||||
|
||||
#endif /* !__HEADER_ARM64_IHK_CONTEXT_H */
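To show how the register layout above is consumed, here is a hedged sketch of a helper using the ihk_mc_syscall_* accessors; the function name and its use are illustrative only.

/* Sketch: pull the syscall number and first two arguments out of a user context. */
static void dump_syscall(ihk_mc_user_context_t *uc)
{
	unsigned long nr   = ihk_mc_syscall_number(uc);	/* x8 on AArch64 */
	unsigned long arg0 = ihk_mc_syscall_arg0(uc);	/* x0 */
	unsigned long arg1 = ihk_mc_syscall_arg1(uc);	/* x1 */

	(void)nr; (void)arg0; (void)arg1;	/* e.g. feed these to a tracer */
}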
14  arch/arm64/kernel/include/ihk/ikc.h  Normal file
@@ -0,0 +1,14 @@
/* ikc.h COPYRIGHT FUJITSU LIMITED 2015 */
#ifndef __HEADER_ARM64_IHK_IKC_H
#define __HEADER_ARM64_IHK_IKC_H

#include <ikc/ihk.h>

#define IKC_PORT_IKC2MCKERNEL 501
#define IKC_PORT_IKC2LINUX    503

/* manycore side */
int ihk_mc_ikc_init_first(struct ihk_ikc_channel_desc *,
			  ihk_ikc_ph_t handler);

#endif /* !__HEADER_ARM64_IHK_IKC_H */
33  arch/arm64/kernel/include/ihk/types.h  Normal file
@@ -0,0 +1,33 @@
/* types.h COPYRIGHT FUJITSU LIMITED 2015-2017 */
#ifndef __HEADER_ARM64_IHK_TYPES_H
#define __HEADER_ARM64_IHK_TYPES_H

#ifndef __ASSEMBLY__

typedef unsigned char uint8_t;
typedef unsigned short uint16_t;
typedef unsigned int uint32_t;
typedef unsigned long long uint64_t;
typedef signed char int8_t;
typedef signed short int16_t;
typedef signed int int32_t;
typedef signed long long int64_t;

typedef int64_t ptrdiff_t;
typedef int64_t intptr_t;
typedef uint64_t uintptr_t;
typedef uint64_t size_t;
typedef int64_t ssize_t;
typedef int64_t off_t;

typedef int32_t key_t;
typedef uint32_t uid_t;
typedef uint32_t gid_t;
typedef int64_t time_t;
typedef int32_t pid_t;

#endif /* __ASSEMBLY__ */

#define NULL ((void *)0)

#endif /* !__HEADER_ARM64_IHK_TYPES_H */
103  arch/arm64/kernel/include/imp-sysreg.h  Normal file
@@ -0,0 +1,103 @@
|
||||
/* imp-sysreg.h COPYRIGHT FUJITSU LIMITED 2016-2018 */
|
||||
#ifndef __ASM_IMP_SYSREG_H
|
||||
#define __ASM_IMP_SYSREG_H
|
||||
|
||||
#ifndef __ASSEMBLY__
|
||||
|
||||
/* register sys_reg list */
|
||||
#define IMP_FJ_TAG_ADDRESS_CTRL_EL1 sys_reg(3, 0, 11, 2, 0)
|
||||
#define IMP_SCCR_CTRL_EL1 sys_reg(3, 0, 11, 8, 0)
|
||||
#define IMP_SCCR_ASSIGN_EL1 sys_reg(3, 0, 11, 8, 1)
|
||||
#define IMP_SCCR_SET0_L2_EL1 sys_reg(3, 0, 15, 8, 2)
|
||||
#define IMP_SCCR_SET1_L2_EL1 sys_reg(3, 0, 15, 8, 3)
|
||||
#define IMP_SCCR_L1_EL0 sys_reg(3, 3, 11, 8, 2)
|
||||
#define IMP_PF_CTRL_EL1 sys_reg(3, 0, 11, 4, 0)
|
||||
#define IMP_PF_STREAM_DETECT_CTRL_EL0 sys_reg(3, 3, 11, 4, 0)
|
||||
#define IMP_PF_INJECTION_CTRL0_EL0 sys_reg(3, 3, 11, 6, 0)
|
||||
#define IMP_PF_INJECTION_CTRL1_EL0 sys_reg(3, 3, 11, 6, 1)
|
||||
#define IMP_PF_INJECTION_CTRL2_EL0 sys_reg(3, 3, 11, 6, 2)
|
||||
#define IMP_PF_INJECTION_CTRL3_EL0 sys_reg(3, 3, 11, 6, 3)
|
||||
#define IMP_PF_INJECTION_CTRL4_EL0 sys_reg(3, 3, 11, 6, 4)
|
||||
#define IMP_PF_INJECTION_CTRL5_EL0 sys_reg(3, 3, 11, 6, 5)
|
||||
#define IMP_PF_INJECTION_CTRL6_EL0 sys_reg(3, 3, 11, 6, 6)
|
||||
#define IMP_PF_INJECTION_CTRL7_EL0 sys_reg(3, 3, 11, 6, 7)
|
||||
#define IMP_PF_INJECTION_DISTANCE0_EL0 sys_reg(3, 3, 11, 7, 0)
|
||||
#define IMP_PF_INJECTION_DISTANCE1_EL0 sys_reg(3, 3, 11, 7, 1)
|
||||
#define IMP_PF_INJECTION_DISTANCE2_EL0 sys_reg(3, 3, 11, 7, 2)
|
||||
#define IMP_PF_INJECTION_DISTANCE3_EL0 sys_reg(3, 3, 11, 7, 3)
|
||||
#define IMP_PF_INJECTION_DISTANCE4_EL0 sys_reg(3, 3, 11, 7, 4)
|
||||
#define IMP_PF_INJECTION_DISTANCE5_EL0 sys_reg(3, 3, 11, 7, 5)
|
||||
#define IMP_PF_INJECTION_DISTANCE6_EL0 sys_reg(3, 3, 11, 7, 6)
|
||||
#define IMP_PF_INJECTION_DISTANCE7_EL0 sys_reg(3, 3, 11, 7, 7)
|
||||
#define IMP_PF_PMUSERENR_EL0 sys_reg(3, 3, 9, 14, 0)
|
||||
#define IMP_BARRIER_CTRL_EL1 sys_reg(3, 0, 11, 12, 0)
|
||||
#define IMP_BARRIER_BST_BIT_EL1 sys_reg(3, 0, 11, 12, 4)
|
||||
#define IMP_BARRIER_INIT_SYNC_BB0_EL1 sys_reg(3, 0, 15, 13, 0)
|
||||
#define IMP_BARRIER_INIT_SYNC_BB1_EL1 sys_reg(3, 0, 15, 13, 1)
|
||||
#define IMP_BARRIER_INIT_SYNC_BB2_EL1 sys_reg(3, 0, 15, 13, 2)
|
||||
#define IMP_BARRIER_INIT_SYNC_BB3_EL1 sys_reg(3, 0, 15, 13, 3)
|
||||
#define IMP_BARRIER_INIT_SYNC_BB4_EL1 sys_reg(3, 0, 15, 13, 4)
|
||||
#define IMP_BARRIER_INIT_SYNC_BB5_EL1 sys_reg(3, 0, 15, 13, 5)
|
||||
#define IMP_BARRIER_ASSIGN_SYNC_W0_EL1 sys_reg(3, 0, 15, 15, 0)
|
||||
#define IMP_BARRIER_ASSIGN_SYNC_W1_EL1 sys_reg(3, 0, 15, 15, 1)
|
||||
#define IMP_BARRIER_ASSIGN_SYNC_W2_EL1 sys_reg(3, 0, 15, 15, 2)
|
||||
#define IMP_BARRIER_ASSIGN_SYNC_W3_EL1 sys_reg(3, 0, 15, 15, 3)
|
||||
#define IMP_SOC_STANDBY_CTRL_EL1 sys_reg(3, 0, 11, 0, 0)
|
||||
#define IMP_FJ_CORE_UARCH_CTRL_EL2 sys_reg(3, 4, 11, 0, 4)
|
||||
#define IMP_FJ_CORE_UARCH_RESTRECTION_EL1 sys_reg(3, 0, 11, 0, 5)
|
||||
|
||||
/* macros */
|
||||
#define PWR_REG_MASK(reg, feild) (((UL(1) << ((reg##_##feild##_MSB) - (reg##_##feild##_LSB) + 1)) - 1) << (reg##_##feild##_LSB))
|
||||
|
||||
/* IMP_FJ_TAG_ADDRESS_CTRL_EL1 */
|
||||
#define IMP_FJ_TAG_ADDRESS_CTRL_EL1_TBO0_SHIFT (0)
|
||||
#define IMP_FJ_TAG_ADDRESS_CTRL_EL1_SEC0_SHIFT (8)
|
||||
#define IMP_FJ_TAG_ADDRESS_CTRL_EL1_PFE0_SHIFT (9)
|
||||
#define IMP_FJ_TAG_ADDRESS_CTRL_EL1_TBO0_MASK (1UL << IMP_FJ_TAG_ADDRESS_CTRL_EL1_TBO0_SHIFT)
|
||||
#define IMP_FJ_TAG_ADDRESS_CTRL_EL1_SEC0_MASK (1UL << IMP_FJ_TAG_ADDRESS_CTRL_EL1_SEC0_SHIFT)
|
||||
#define IMP_FJ_TAG_ADDRESS_CTRL_EL1_PFE0_MASK (1UL << IMP_FJ_TAG_ADDRESS_CTRL_EL1_PFE0_SHIFT)
|
||||
|
||||
/* IMP_SCCR_CTRL_EL1 */
|
||||
#define IMP_SCCR_CTRL_EL1_EL1AE_SHIFT (63)
|
||||
#define IMP_SCCR_CTRL_EL1_EL1AE_MASK (1UL << IMP_SCCR_CTRL_EL1_EL1AE_SHIFT)
|
||||
|
||||
/* IMP_SCCR_SET0_L2_EL1 */
|
||||
#define IMP_SCCR_SET0_L2_EL1_L2_SEC0_SHIFT (0)
|
||||
|
||||
/* IMP_PF_CTRL_EL1 */
|
||||
#define IMP_PF_CTRL_EL1_EL1AE_ENABLE (1UL << 63)
|
||||
#define IMP_PF_CTRL_EL1_EL0AE_ENABLE (1UL << 62)
|
||||
|
||||
/* IMP_BARRIER_CTRL_EL1 */
|
||||
#define IMP_BARRIER_CTRL_EL1_EL1AE_ENABLE (1UL << 63)
|
||||
#define IMP_BARRIER_CTRL_EL1_EL0AE_ENABLE (1UL << 62)
|
||||
|
||||
/* IMP_SOC_STANDBY_CTRL_EL1 */
|
||||
#define IMP_SOC_STANDBY_CTRL_EL1_ECO_MODE_MSB 2
|
||||
#define IMP_SOC_STANDBY_CTRL_EL1_ECO_MODE_LSB 2
|
||||
#define IMP_SOC_STANDBY_CTRL_EL1_MODE_CHANGE_MSB 1
|
||||
#define IMP_SOC_STANDBY_CTRL_EL1_MODE_CHANGE_LSB 1
|
||||
#define IMP_SOC_STANDBY_CTRL_EL1_RETENTION_MSB 0
|
||||
#define IMP_SOC_STANDBY_CTRL_EL1_RETENTION_LSB 0
|
||||
#define IMP_SOC_STANDBY_CTRL_EL1_ECO_MODE PWR_REG_MASK(IMP_SOC_STANDBY_CTRL_EL1, ECO_MODE)
|
||||
#define IMP_SOC_STANDBY_CTRL_EL1_MODE_CHANGE PWR_REG_MASK(IMP_SOC_STANDBY_CTRL_EL1, MODE_CHANGE)
|
||||
#define IMP_SOC_STANDBY_CTRL_EL1_RETENTION PWR_REG_MASK(IMP_SOC_STANDBY_CTRL_EL1, RETENTION)
|
||||
|
||||
/* IMP_FJ_CORE_UARCH_RESTRECTION_EL1 */
|
||||
#define IMP_FJ_CORE_UARCH_RESTRECTION_EL1_FL_RESTRICT_TRANS_MSB 33
|
||||
#define IMP_FJ_CORE_UARCH_RESTRECTION_EL1_FL_RESTRICT_TRANS_LSB 33
|
||||
#define IMP_FJ_CORE_UARCH_RESTRECTION_EL1_ISSUE_RESTRICTION_MSB 9
|
||||
#define IMP_FJ_CORE_UARCH_RESTRECTION_EL1_ISSUE_RESTRICTION_LSB 8
|
||||
#define IMP_FJ_CORE_UARCH_RESTRECTION_EL1_EX_RESTRICTION_MSB 0
|
||||
#define IMP_FJ_CORE_UARCH_RESTRECTION_EL1_EX_RESTRICTION_LSB 0
|
||||
#define IMP_FJ_CORE_UARCH_RESTRECTION_EL1_FL_RESTRICT_TRANS PWR_REG_MASK(IMP_FJ_CORE_UARCH_RESTRECTION_EL1, FL_RESTRICT_TRANS)
|
||||
#define IMP_FJ_CORE_UARCH_RESTRECTION_EL1_ISSUE_RESTRICTION PWR_REG_MASK(IMP_FJ_CORE_UARCH_RESTRECTION_EL1, ISSUE_RESTRICTION)
|
||||
#define IMP_FJ_CORE_UARCH_RESTRECTION_EL1_EX_RESTRICTION PWR_REG_MASK(IMP_FJ_CORE_UARCH_RESTRECTION_EL1, EX_RESTRICTION)
|
||||
|
||||
void scdrv_registers_init(void);
|
||||
void hpc_registers_init(void);
|
||||
void vhbm_barrier_registers_init(void);
|
||||
|
||||
#endif /* __ASSEMBLY__ */
|
||||
|
||||
#endif /* __ASM_IMP_SYSREG_H */
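To make the PWR_REG_MASK() convention concrete, here is how two of the masks above expand; this is a worked example of the existing macro, not new functionality.

/*
 * PWR_REG_MASK(IMP_SOC_STANDBY_CTRL_EL1, ECO_MODE) expands to
 *   ((1UL << (2 - 2 + 1)) - 1) << 2  ==  0x4,
 * a single-bit mask at bit 2, because the ECO_MODE MSB and LSB are both 2.
 * Multi-bit fields expand the same way: ISSUE_RESTRICTION (MSB 9, LSB 8)
 * becomes ((1UL << 2) - 1) << 8 == 0x300.
 */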
99  arch/arm64/kernel/include/io.h  Normal file
@@ -0,0 +1,99 @@
|
||||
/* io.h COPYRIGHT FUJITSU LIMITED 2015 */
|
||||
/*
|
||||
* Based on arch/arm/include/asm/io.h
|
||||
*
|
||||
* Copyright (C) 1996-2000 Russell King
|
||||
* Copyright (C) 2012 ARM Ltd.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
#ifndef __ASM_IO_H
|
||||
#define __ASM_IO_H
|
||||
|
||||
#include <ihk/types.h>
|
||||
|
||||
#ifdef __KERNEL__
|
||||
|
||||
/*
|
||||
* Generic IO read/write. These perform native-endian accesses.
|
||||
*/
|
||||
static inline void __raw_writeb(uint8_t val, volatile void *addr)
|
||||
{
|
||||
asm volatile("strb %w0, [%1]" : : "r" (val), "r" (addr));
|
||||
}
|
||||
|
||||
static inline void __raw_writew(uint16_t val, volatile void *addr)
|
||||
{
|
||||
asm volatile("strh %w0, [%1]" : : "r" (val), "r" (addr));
|
||||
}
|
||||
|
||||
static inline void __raw_writel(uint32_t val, volatile void *addr)
|
||||
{
|
||||
asm volatile("str %w0, [%1]" : : "r" (val), "r" (addr));
|
||||
}
|
||||
|
||||
static inline void __raw_writeq(uint64_t val, volatile void *addr)
|
||||
{
|
||||
asm volatile("str %0, [%1]" : : "r" (val), "r" (addr));
|
||||
}
|
||||
|
||||
static inline uint8_t __raw_readb(const volatile void *addr)
|
||||
{
|
||||
uint8_t val;
|
||||
asm volatile("ldarb %w0, [%1]"
|
||||
: "=r" (val) : "r" (addr));
|
||||
return val;
|
||||
}
|
||||
|
||||
static inline uint16_t __raw_readw(const volatile void *addr)
|
||||
{
|
||||
uint16_t val;
|
||||
|
||||
asm volatile("ldarh %w0, [%1]"
|
||||
: "=r" (val) : "r" (addr));
|
||||
return val;
|
||||
}
|
||||
|
||||
static inline uint32_t __raw_readl(const volatile void *addr)
|
||||
{
|
||||
uint32_t val;
|
||||
asm volatile("ldar %w0, [%1]"
|
||||
: "=r" (val) : "r" (addr));
|
||||
return val;
|
||||
}
|
||||
|
||||
static inline uint64_t __raw_readq(const volatile void *addr)
|
||||
{
|
||||
uint64_t val;
|
||||
asm volatile("ldar %0, [%1]"
|
||||
: "=r" (val) : "r" (addr));
|
||||
return val;
|
||||
}
|
||||
|
||||
/*
|
||||
* Relaxed I/O memory access primitives. These follow the Device memory
|
||||
* ordering rules but do not guarantee any ordering relative to Normal memory
|
||||
* accesses.
|
||||
*/
|
||||
#define readb_relaxed(c) ({ uint8_t __v = (uint8_t)__raw_readb(c); __v; })
|
||||
#define readw_relaxed(c) ({ uint16_t __v = (uint16_t)__raw_readw(c); __v; })
|
||||
#define readl_relaxed(c) ({ uint32_t __v = (uint32_t)__raw_readl(c); __v; })
|
||||
#define readq_relaxed(c) ({ uint64_t __v = (uint64_t)__raw_readq(c); __v; })
|
||||
|
||||
#define writeb_relaxed(v,c) ((void)__raw_writeb((uint8_t)(v),(c)))
|
||||
#define writew_relaxed(v,c) ((void)__raw_writew((uint16_t)(v),(c)))
|
||||
#define writel_relaxed(v,c) ((void)__raw_writel((uint32_t)(v),(c)))
|
||||
#define writeq_relaxed(v,c) ((void)__raw_writeq((uint64_t)(v),(c)))
|
||||
|
||||
#endif /* __KERNEL__ */
|
||||
#endif /* __ASM_IO_H */
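A minimal usage sketch for the relaxed accessors above. The device base pointer, the register offsets and the mapping step are all hypothetical; only readl_relaxed()/writel_relaxed() come from this header.

    static void example_poke_device(volatile void *dev_base)
    {
        /* dev_base is assumed to already be a kernel-virtual mapping of the
         * device's MMIO region; the 0x00/0x04 offsets are illustrative only. */
        uint32_t status = readl_relaxed(dev_base + 0x04);

        if (!(status & 0x1)) {
            /* no ordering is implied against Normal memory accesses */
            writel_relaxed(0x1, dev_base + 0x00);
        }
    }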
|
||||
46  arch/arm64/kernel/include/irq.h  Normal file
@@ -0,0 +1,46 @@
|
||||
/* irq.h COPYRIGHT FUJITSU LIMITED 2015-2019 */
|
||||
|
||||
#ifndef __HEADER_ARM64_IRQ_H
|
||||
#define __HEADER_ARM64_IRQ_H
|
||||
|
||||
#include <ihk/debug.h>
|
||||
#include <ihk/context.h>
|
||||
#include <sysreg.h>
|
||||
#include <cputype.h>
|
||||
|
||||
/* use SGI interrupt number */
|
||||
#define INTRID_CPU_NOTIFY 0
|
||||
#define INTRID_IKC 1
|
||||
#define INTRID_QUERY_FREE_MEM 2
|
||||
#define INTRID_CPU_STOP 3
|
||||
#define INTRID_TLB_FLUSH 4
|
||||
#define INTRID_STACK_TRACE 5
|
||||
#define INTRID_MULTI_INTR 6
|
||||
#define INTRID_MULTI_NMI 7
|
||||
|
||||
/* use PPI interrupt number */
|
||||
#define INTRID_PERF_OVF 23
|
||||
#define INTRID_HYP_PHYS_TIMER 26 /* cnthp */
|
||||
#define INTRID_VIRT_TIMER 27 /* cntv */
|
||||
#define INTRID_HYP_VIRT_TIMER 28 /* cnthv */
|
||||
#define INTRID_PHYS_TIMER 30 /* cntp */
|
||||
|
||||
/* Functions for GICv2 */
|
||||
extern void gic_dist_init_gicv2(unsigned long dist_base_pa, unsigned long size);
|
||||
extern void gic_cpu_init_gicv2(unsigned long cpu_base_pa, unsigned long size);
|
||||
extern void gic_enable_gicv2(void);
|
||||
extern void arm64_issue_ipi_gicv2(unsigned int cpuid, unsigned int vector);
|
||||
extern void arm64_issue_host_ipi_gicv2(uint32_t cpuid, uint32_t vector);
|
||||
extern void handle_interrupt_gicv2(struct pt_regs *regs);
|
||||
|
||||
/* Functions for GICv3 */
|
||||
extern void gic_dist_init_gicv3(unsigned long dist_base_pa, unsigned long size);
|
||||
extern void gic_cpu_init_gicv3(unsigned long cpu_base_pa, unsigned long size);
|
||||
extern void gic_enable_gicv3(void);
|
||||
extern void arm64_issue_ipi_gicv3(unsigned int cpuid, unsigned int vector);
|
||||
extern void arm64_issue_host_ipi_gicv3(uint32_t cpuid, uint32_t vector);
|
||||
extern void handle_interrupt_gicv3(struct pt_regs *regs);
|
||||
|
||||
void handle_IPI(unsigned int vector, struct pt_regs *regs);
|
||||
|
||||
#endif /* __HEADER_ARM64_IRQ_H */
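Illustrative only, not the actual McKernel handler: a sketch of how an IPI entry point such as handle_IPI() could dispatch on the SGI numbers defined above.

    static void example_dispatch_sgi(unsigned int vector, struct pt_regs *regs)
    {
        (void)regs;

        switch (vector) {
        case INTRID_CPU_STOP:
            /* park this CPU */
            break;
        case INTRID_TLB_FLUSH:
            /* invalidate local TLB entries */
            break;
        case INTRID_IKC:
            /* kick the inter-kernel communication layer */
            break;
        default:
            /* INTRID_CPU_NOTIFY, INTRID_STACK_TRACE, ... */
            break;
        }
    }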
|
||||
31  arch/arm64/kernel/include/irqflags.h  Normal file
@@ -0,0 +1,31 @@
|
||||
/* irqflags.h COPYRIGHT FUJITSU LIMITED 2015-2017 */
|
||||
#ifndef __HEADER_ARM64_COMMON_IRQFLAGS_H
|
||||
#define __HEADER_ARM64_COMMON_IRQFLAGS_H
|
||||
|
||||
#include <ptrace.h>
|
||||
|
||||
/*
|
||||
* save and restore debug state
|
||||
*/
|
||||
static inline unsigned long local_dbg_save(void)
|
||||
{
|
||||
unsigned long flags;
|
||||
asm volatile(
|
||||
"mrs %0, daif // local_dbg_save\n"
|
||||
"msr daifset, #8"
|
||||
: "=r" (flags)
|
||||
:
|
||||
: "memory");
|
||||
return flags;
|
||||
}
|
||||
|
||||
static inline void local_dbg_restore(unsigned long flags)
|
||||
{
|
||||
asm volatile(
|
||||
"msr daif, %0 // local_dbg_restore"
|
||||
:
|
||||
: "r" (flags)
|
||||
: "memory");
|
||||
}
|
||||
|
||||
#endif /* !__HEADER_ARM64_COMMON_IRQFLAGS_H */
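A minimal usage sketch for the pair above: debug exceptions are masked (DAIF.D set) around a critical section and the previous state is restored afterwards.

    static void example_dbg_critical_section(void)
    {
        unsigned long flags = local_dbg_save();

        /* code that must not be interrupted by debug exceptions */

        local_dbg_restore(flags);
    }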
|
||||
25  arch/arm64/kernel/include/linkage.h  Normal file
@@ -0,0 +1,25 @@
|
||||
/* linkage.h COPYRIGHT FUJITSU LIMITED 2015-2016 */
|
||||
#ifndef __HEADER_ARM64_COMMON_LINKAGE_H
|
||||
#define __HEADER_ARM64_COMMON_LINKAGE_H
|
||||
|
||||
#include <arch-memory.h>
|
||||
#include <compiler.h>
|
||||
|
||||
#define ASM_NL ;
|
||||
|
||||
#define __ALIGN .align 4
|
||||
#define __ALIGN_STR ".align 4"
|
||||
|
||||
#define ENTRY(name) \
|
||||
.globl name ASM_NL \
|
||||
__ALIGN ASM_NL \
|
||||
name:
|
||||
|
||||
#define END(name) \
|
||||
.size name, .-name
|
||||
|
||||
#define ENDPROC(name) \
|
||||
.type name, @function ASM_NL \
|
||||
END(name)
|
||||
|
||||
#endif /* !__HEADER_ARM64_COMMON_LINKAGE_H */
|
||||
22  arch/arm64/kernel/include/mmu_context.h  Normal file
@@ -0,0 +1,22 @@
|
||||
/* mmu_context.h COPYRIGHT FUJITSU LIMITED 2015 */
|
||||
#ifndef __HEADER_ARM64_COMMON_MMU_CONTEXT_H
|
||||
#define __HEADER_ARM64_COMMON_MMU_CONTEXT_H
|
||||
|
||||
#include <pgtable.h>
|
||||
#include <memory.h>
|
||||
|
||||
/*
|
||||
* Set TTBR0 to empty_zero_page. No translations will be possible via TTBR0.
|
||||
*/
|
||||
static inline void cpu_set_reserved_ttbr0(void)
|
||||
{
|
||||
unsigned long ttbr = virt_to_phys(empty_zero_page);
|
||||
|
||||
asm(
|
||||
" msr ttbr0_el1, %0 // set TTBR0\n"
|
||||
" isb"
|
||||
:
|
||||
: "r" (ttbr));
|
||||
}
|
||||
|
||||
#endif /* !__HEADER_ARM64_COMMON_MMU_CONTEXT_H */
|
||||
198  arch/arm64/kernel/include/pgtable-hwdef.h  Normal file
@@ -0,0 +1,198 @@
|
||||
/* pgtable-hwdef.h COPYRIGHT FUJITSU LIMITED 2015 */
|
||||
#ifndef __HEADER_ARM64_COMMON_PGTABLE_HWDEF_H
|
||||
#define __HEADER_ARM64_COMMON_PGTABLE_HWDEF_H
|
||||
|
||||
#ifndef __HEADER_ARM64_COMMON_ARCH_MEMORY_H
|
||||
# error arch-memory.h
|
||||
#endif
|
||||
|
||||
#define PTRS_PER_PTE (1 << (PAGE_SHIFT - 3))
|
||||
|
||||
/*
|
||||
* PMD_SHIFT determines the size a level 2 page table entry can map.
|
||||
*/
|
||||
#if CONFIG_ARM64_PGTABLE_LEVELS > 2
|
||||
# define PMD_SHIFT ((PAGE_SHIFT - 3) * 2 + 3)
|
||||
# define PMD_SIZE (1UL << PMD_SHIFT)
|
||||
# define PMD_MASK (~(PMD_SIZE-1))
|
||||
# define PTRS_PER_PMD PTRS_PER_PTE
|
||||
#endif
|
||||
|
||||
/*
|
||||
* PUD_SHIFT determines the size a level 1 page table entry can map.
|
||||
*/
|
||||
#if CONFIG_ARM64_PGTABLE_LEVELS > 3
|
||||
# define PUD_SHIFT ((PAGE_SHIFT - 3) * 3 + 3)
|
||||
# define PUD_SIZE (1UL << PUD_SHIFT)
|
||||
# define PUD_MASK (~(PUD_SIZE-1))
|
||||
# define PTRS_PER_PUD PTRS_PER_PTE
|
||||
#endif
|
||||
|
||||
/*
|
||||
* PGDIR_SHIFT determines the size a top-level page table entry can map
|
||||
* (depending on the configuration, this level can be 0, 1 or 2).
|
||||
*/
|
||||
#define PGDIR_SHIFT ((PAGE_SHIFT - 3) * CONFIG_ARM64_PGTABLE_LEVELS + 3)
|
||||
#define PGDIR_SIZE (_AC(1, UL) << PGDIR_SHIFT)
|
||||
#define PGDIR_MASK (~(PGDIR_SIZE-1))
|
||||
#define PTRS_PER_PGD (1 << (VA_BITS - PGDIR_SHIFT))
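A worked example of the shift arithmetic above, assuming a 4 KB granule (PAGE_SHIFT == 12), VA_BITS == 48 and CONFIG_ARM64_PGTABLE_LEVELS == 4:

    /*
     * PMD_SHIFT   = (12 - 3) * 2 + 3 = 21  ->  PMD_SIZE   =   2 MiB
     * PUD_SHIFT   = (12 - 3) * 3 + 3 = 30  ->  PUD_SIZE   =   1 GiB
     * PGDIR_SHIFT = (12 - 3) * 4 + 3 = 39  ->  PGDIR_SIZE = 512 GiB
     * PTRS_PER_PGD = 1 << (48 - 39) = 512 top-level entries
     */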
|
||||
|
||||
/*
|
||||
* Section address mask and size definitions.
|
||||
*/
|
||||
#define SECTION_SHIFT PMD_SHIFT
|
||||
#define SECTION_SIZE (UL(1) << SECTION_SHIFT)
|
||||
#define SECTION_MASK (~(SECTION_SIZE-1))
|
||||
|
||||
/*
|
||||
* Level 2 descriptor (PMD).
|
||||
*/
|
||||
#define PMD_TYPE_MASK (UL(3) << 0)
|
||||
#define PMD_TYPE_FAULT (UL(0) << 0)
|
||||
#define PMD_TYPE_TABLE (UL(3) << 0)
|
||||
#define PMD_TYPE_SECT (UL(1) << 0)
|
||||
#define PMD_TABLE_BIT (UL(1) << 1)
|
||||
|
||||
/*
|
||||
* Table (D_Block)
|
||||
*/
|
||||
#define PMD_TBL_PXNT (UL(1) << 59)
|
||||
#define PMD_TBL_UXNT (UL(1) << 60)
|
||||
#define PMD_TBL_APT_USER (UL(1) << 61) /* 0:Access at EL0 permitted, 1:Access at EL0 not permitted */
|
||||
#define PMD_TBL_APT_RDONLY (UL(2) << 61) /* 0:read/write (EL0-3), 1:read only (EL0-3) */
|
||||
#define PMD_TBL_NST (UL(1) << 63) /* 0:secure, 1:non-secure */
|
||||
|
||||
/*
|
||||
* Section (D_Page)
|
||||
*/
|
||||
#define PMD_SECT_VALID (UL(1) << 0)
|
||||
#define PMD_SECT_PROT_NONE (UL(1) << 58)
|
||||
#define PMD_SECT_USER (UL(1) << 6) /* AP[1] */
|
||||
#define PMD_SECT_RDONLY (UL(1) << 7) /* AP[2] */
|
||||
#define PMD_SECT_S (UL(3) << 8)
|
||||
#define PMD_SECT_AF (UL(1) << 10)
|
||||
#define PMD_SECT_NG (UL(1) << 11)
|
||||
#define PMD_SECT_CONT (UL(1) << 52)
|
||||
#define PMD_SECT_PXN (UL(1) << 53)
|
||||
#define PMD_SECT_UXN (UL(1) << 54)
|
||||
|
||||
/*
|
||||
* AttrIndx[2:0] encoding (mapping attributes defined in the MAIR* registers).
|
||||
*/
|
||||
#define PMD_ATTRINDX(t) (UL(t) << 2)
|
||||
#define PMD_ATTRINDX_MASK (UL(7) << 2)
|
||||
|
||||
/*
|
||||
* Level 3 descriptor (PTE).
|
||||
*/
|
||||
#define PTE_TYPE_MASK (UL(3) << 0)
|
||||
#define PTE_TYPE_FAULT (UL(0) << 0)
|
||||
#define PTE_TYPE_PAGE (UL(3) << 0)
|
||||
#define PTE_TABLE_BIT (UL(1) << 1)
|
||||
#define PTE_USER (UL(1) << 6) /* AP[1] */
|
||||
#define PTE_RDONLY (UL(1) << 7) /* AP[2] */
|
||||
#define PTE_SHARED (UL(3) << 8) /* SH[1:0], inner shareable */
|
||||
#define PTE_AF (UL(1) << 10) /* Access Flag */
|
||||
#define PTE_NG (UL(1) << 11) /* nG */
|
||||
#define PTE_CONT (UL(1) << 52) /* Contiguous range */
|
||||
#define PTE_PXN (UL(1) << 53) /* Privileged XN */
|
||||
#define PTE_UXN (UL(1) << 54) /* User XN */
|
||||
/* Software defined PTE bits definition.*/
|
||||
#define PTE_VALID (UL(1) << 0)
|
||||
#define PTE_FILE (UL(1) << 2) /* only when !pte_present() */
|
||||
#define PTE_DIRTY (UL(1) << 55)
|
||||
#define PTE_SPECIAL (UL(1) << 56)
|
||||
#define PTE_WRITE (UL(1) << 57)
|
||||
#define PTE_PROT_NONE (UL(1) << 58) /* only when !PTE_VALID */
|
||||
|
||||
/*
|
||||
* AttrIndx[2:0] encoding (mapping attributes defined in the MAIR* registers).
|
||||
*/
|
||||
#define PTE_ATTRINDX(t) (UL(t) << 2)
|
||||
#define PTE_ATTRINDX_MASK (UL(7) << 2)
|
||||
|
||||
/*
|
||||
* Highest possible physical address supported.
|
||||
*/
|
||||
#define PHYS_MASK_SHIFT (48)
|
||||
#define PHYS_MASK (((UL(1) << PHYS_MASK_SHIFT) - 1) & PAGE_MASK)
|
||||
|
||||
/*
|
||||
* TCR flags.
|
||||
*/
|
||||
#define TCR_TxSZ(x) (((UL(64) - (x)) << 16) | ((UL(64) - (x)) << 0))
|
||||
#define TCR_IRGN_NC ((UL(0) << 8) | (UL(0) << 24))
|
||||
#define TCR_IRGN_WBWA ((UL(1) << 8) | (UL(1) << 24))
|
||||
#define TCR_IRGN_WT ((UL(2) << 8) | (UL(2) << 24))
|
||||
#define TCR_IRGN_WBnWA ((UL(3) << 8) | (UL(3) << 24))
|
||||
#define TCR_IRGN_MASK ((UL(3) << 8) | (UL(3) << 24))
|
||||
#define TCR_ORGN_NC ((UL(0) << 10) | (UL(0) << 26))
|
||||
#define TCR_ORGN_WBWA ((UL(1) << 10) | (UL(1) << 26))
|
||||
#define TCR_ORGN_WT ((UL(2) << 10) | (UL(2) << 26))
|
||||
#define TCR_ORGN_WBnWA ((UL(3) << 10) | (UL(3) << 26))
|
||||
#define TCR_ORGN_MASK ((UL(3) << 10) | (UL(3) << 26))
|
||||
#define TCR_SHARED ((UL(3) << 12) | (UL(3) << 28))
|
||||
#define TCR_TG0_4K (UL(0) << 14)
|
||||
#define TCR_TG0_64K (UL(1) << 14)
|
||||
#define TCR_TG0_16K (UL(2) << 14)
|
||||
#define TCR_TG1_16K (UL(1) << 30)
|
||||
#define TCR_TG1_4K (UL(2) << 30)
|
||||
#define TCR_TG1_64K (UL(3) << 30)
|
||||
#define TCR_ASID16 (UL(1) << 36)
|
||||
#define TCR_TBI0 (UL(1) << 37)
|
||||
|
||||
/*
|
||||
* Memory types available.
|
||||
*/
|
||||
#define MT_DEVICE_nGnRnE 0
|
||||
#define MT_DEVICE_nGnRE 1
|
||||
#define MT_DEVICE_GRE 2
|
||||
#define MT_NORMAL_NC 3
|
||||
#define MT_NORMAL 4
|
||||
|
||||
/*
|
||||
* page table entry attribute set.
|
||||
*/
|
||||
#define PROT_DEFAULT (PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
|
||||
#define PROT_SECT_DEFAULT (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)
|
||||
|
||||
#define PROT_DEVICE_nGnRE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_DEVICE_nGnRE))
|
||||
#define PROT_NORMAL_NC (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_NORMAL_NC))
|
||||
#define PROT_NORMAL (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_NORMAL))
|
||||
|
||||
#define PROT_SECT_DEVICE_nGnRE (PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_DEVICE_nGnRE))
|
||||
#define PROT_SECT_NORMAL (PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL))
|
||||
#define PROT_SECT_NORMAL_EXEC (PROT_SECT_DEFAULT | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL))
|
||||
|
||||
#define _PAGE_DEFAULT (PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL))
|
||||
|
||||
#define PAGE_KERNEL (_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE)
|
||||
#define PAGE_KERNEL_EXEC (_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE)
|
||||
|
||||
#define PAGE_NONE (((_PAGE_DEFAULT) & ~PTE_TYPE_MASK) | PTE_PROT_NONE | PTE_PXN | PTE_UXN)
|
||||
#define PAGE_SHARED (_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE)
|
||||
#define PAGE_SHARED_EXEC (_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_WRITE)
|
||||
#define PAGE_COPY (_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
|
||||
#define PAGE_COPY_EXEC (_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN)
|
||||
#define PAGE_READONLY (_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
|
||||
#define PAGE_READONLY_EXEC (_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN)
|
||||
|
||||
#define __P000 PAGE_NONE
|
||||
#define __P001 PAGE_READONLY
|
||||
#define __P010 PAGE_COPY
|
||||
#define __P011 PAGE_COPY
|
||||
#define __P100 PAGE_READONLY_EXEC
|
||||
#define __P101 PAGE_READONLY_EXEC
|
||||
#define __P110 PAGE_COPY_EXEC
|
||||
#define __P111 PAGE_COPY_EXEC
|
||||
|
||||
#define __S000 PAGE_NONE
|
||||
#define __S001 PAGE_READONLY
|
||||
#define __S010 PAGE_SHARED
|
||||
#define __S011 PAGE_SHARED
|
||||
#define __S100 PAGE_READONLY_EXEC
|
||||
#define __S101 PAGE_READONLY_EXEC
|
||||
#define __S110 PAGE_SHARED_EXEC
|
||||
#define __S111 PAGE_SHARED_EXEC
|
||||
|
||||
#endif /* !__HEADER_ARM64_COMMON_PGTABLE_HWDEF_H */
|
||||
7  arch/arm64/kernel/include/pgtable.h  Normal file
@@ -0,0 +1,7 @@
|
||||
/* pgtable.h COPYRIGHT FUJITSU LIMITED 2015 */
|
||||
#ifndef __HEADER_ARM64_COMMON_PGTABLE_H
|
||||
#define __HEADER_ARM64_COMMON_PGTABLE_H
|
||||
|
||||
extern char empty_zero_page[];
|
||||
|
||||
#endif /* !__HEADER_ARM64_COMMON_PGTABLE_H */
|
||||
17  arch/arm64/kernel/include/prctl.h  Normal file
@@ -0,0 +1,17 @@
|
||||
/* prctl.h COPYRIGHT FUJITSU LIMITED 2017-2019 */
|
||||
#ifndef __HEADER_ARM64_COMMON_PRCTL_H
|
||||
#define __HEADER_ARM64_COMMON_PRCTL_H
|
||||
|
||||
#define PR_SET_THP_DISABLE 41
|
||||
#define PR_GET_THP_DISABLE 42
|
||||
|
||||
/* arm64 Scalable Vector Extension controls */
|
||||
/* Flag values must be kept in sync with ptrace NT_ARM_SVE interface */
|
||||
#define PR_SVE_SET_VL 50 /* set task vector length */
|
||||
# define PR_SVE_SET_VL_ONEXEC (1 << 18) /* defer effect until exec */
|
||||
#define PR_SVE_GET_VL 51 /* get task vector length */
|
||||
/* Bits common to PR_SVE_SET_VL and PR_SVE_GET_VL */
|
||||
# define PR_SVE_VL_LEN_MASK 0xffff
|
||||
# define PR_SVE_VL_INHERIT (1 << 17) /* inherit across exec */
|
||||
|
||||
#endif /* !__HEADER_ARM64_COMMON_PRCTL_H */
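A user-space sketch of how these SVE controls are typically exercised, assuming glibc's prctl() wrapper; the 32-byte (256-bit) vector length is only an example value.

    #include <sys/prctl.h>

    int example_set_sve_vl(void)
    {
        /* request a 32-byte VL, deferred until the next execve() */
        if (prctl(PR_SVE_SET_VL, 32 | PR_SVE_SET_VL_ONEXEC, 0, 0, 0) < 0)
            return -1;

        /* read back the current VL (the low 16 bits carry the length) */
        return prctl(PR_SVE_GET_VL, 0, 0, 0, 0) & PR_SVE_VL_LEN_MASK;
    }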
|
||||
68  arch/arm64/kernel/include/psci.h  Normal file
@@ -0,0 +1,68 @@
|
||||
/* psci.h COPYRIGHT FUJITSU LIMITED 2015-2016 */
|
||||
/* @ref.impl include/uapi/linux/psci.h */
|
||||
/*
|
||||
* ARM Power State and Coordination Interface (PSCI) header
|
||||
*
|
||||
* This header holds common PSCI defines and macros shared
|
||||
* by: ARM kernel, ARM64 kernel, KVM ARM/ARM64 and user space.
|
||||
*
|
||||
* Copyright (C) 2014 Linaro Ltd.
|
||||
* Author: Anup Patel <anup.patel@linaro.org>
|
||||
*/
|
||||
|
||||
#ifndef __HEADER_ARM64_PSCI_H
|
||||
#define __HEADER_ARM64_PSCI_H
|
||||
|
||||
/*
|
||||
* PSCI v0.1 interface
|
||||
*
|
||||
* The PSCI v0.1 function numbers are implementation defined.
|
||||
*
|
||||
* Only PSCI return values such as: SUCCESS, NOT_SUPPORTED,
|
||||
* INVALID_PARAMS, and DENIED defined below are applicable
|
||||
* to PSCI v0.1.
|
||||
*/
|
||||
|
||||
/* PSCI v0.2 interface */
|
||||
#define PSCI_0_2_FN_BASE 0x84000000
|
||||
#define PSCI_0_2_FN(n) (PSCI_0_2_FN_BASE + (n))
|
||||
#define PSCI_0_2_64BIT 0x40000000
|
||||
#define PSCI_0_2_FN64_BASE (PSCI_0_2_FN_BASE + PSCI_0_2_64BIT)
|
||||
#define PSCI_0_2_FN64(n) (PSCI_0_2_FN64_BASE + (n))
|
||||
|
||||
#define PSCI_0_2_FN_PSCI_VERSION PSCI_0_2_FN(0)
|
||||
#define PSCI_0_2_FN_CPU_OFF PSCI_0_2_FN(2)
|
||||
#define PSCI_0_2_FN64_CPU_ON PSCI_0_2_FN64(3)
|
||||
#define PSCI_0_2_FN64_AFFINITY_INFO PSCI_0_2_FN64(4)
|
||||
|
||||
/* PSCI v0.2 power state encoding for CPU_SUSPEND function */
|
||||
#define PSCI_0_2_POWER_STATE_ID_MASK 0xffff
|
||||
#define PSCI_0_2_POWER_STATE_ID_SHIFT 0
|
||||
#define PSCI_0_2_POWER_STATE_TYPE_SHIFT 16
|
||||
#define PSCI_0_2_POWER_STATE_TYPE_MASK \
|
||||
(0x1 << PSCI_0_2_POWER_STATE_TYPE_SHIFT)
|
||||
#define PSCI_0_2_POWER_STATE_AFFL_SHIFT 24
|
||||
#define PSCI_0_2_POWER_STATE_AFFL_MASK \
|
||||
(0x3 << PSCI_0_2_POWER_STATE_AFFL_SHIFT)
|
||||
|
||||
/* PSCI version decoding (independent of PSCI version) */
|
||||
#define PSCI_VERSION_MAJOR_SHIFT 16
|
||||
#define PSCI_VERSION_MINOR_MASK \
|
||||
((1U << PSCI_VERSION_MAJOR_SHIFT) - 1)
|
||||
#define PSCI_VERSION_MAJOR_MASK ~PSCI_VERSION_MINOR_MASK
|
||||
#define PSCI_VERSION_MAJOR(ver) \
|
||||
(((ver) & PSCI_VERSION_MAJOR_MASK) >> PSCI_VERSION_MAJOR_SHIFT)
|
||||
#define PSCI_VERSION_MINOR(ver) \
|
||||
((ver) & PSCI_VERSION_MINOR_MASK)
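A worked example of the version decoding above, for a hypothetical PSCI_VERSION return value of 0x00010001:

    /*
     * PSCI_VERSION_MAJOR(0x00010001) = (0x00010001 & ~0xffff) >> 16 = 1
     * PSCI_VERSION_MINOR(0x00010001) =  0x00010001 &  0xffff        = 1
     * i.e. the firmware implements PSCI 1.1.
     */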
|
||||
|
||||
/* PSCI return values (inclusive of all PSCI versions) */
|
||||
#define PSCI_RET_SUCCESS 0
|
||||
#define PSCI_RET_NOT_SUPPORTED -1
|
||||
#define PSCI_RET_INVALID_PARAMS -2
|
||||
#define PSCI_RET_DENIED -3
|
||||
|
||||
int psci_init(void);
|
||||
int psci_cpu_off(void);
|
||||
int cpu_psci_cpu_boot(unsigned int cpu, unsigned long pc);
|
||||
|
||||
#endif /* __HEADER_ARM64_PSCI_H */
|
||||
268  arch/arm64/kernel/include/ptrace.h  Normal file
@@ -0,0 +1,268 @@
|
||||
/* ptrace.h COPYRIGHT FUJITSU LIMITED 2015-2019 */
|
||||
#ifndef __HEADER_ARM64_COMMON_PTRACE_H
|
||||
#define __HEADER_ARM64_COMMON_PTRACE_H
|
||||
|
||||
/*
|
||||
* PSR bits
|
||||
*/
|
||||
#define PSR_MODE_EL0t 0x00000000
|
||||
#define PSR_MODE_EL1t 0x00000004
|
||||
#define PSR_MODE_EL1h 0x00000005
|
||||
#define PSR_MODE_EL2t 0x00000008
|
||||
#define PSR_MODE_EL2h 0x00000009
|
||||
#define PSR_MODE_EL3t 0x0000000c
|
||||
#define PSR_MODE_EL3h 0x0000000d
|
||||
#define PSR_MODE_MASK 0x0000000f
|
||||
|
||||
/* AArch32 CPSR bits */
|
||||
#define PSR_MODE32_BIT 0x00000010
|
||||
|
||||
/* AArch64 SPSR bits */
|
||||
#define PSR_F_BIT 0x00000040
|
||||
#define PSR_I_BIT 0x00000080
|
||||
#define PSR_A_BIT 0x00000100
|
||||
#define PSR_D_BIT 0x00000200
|
||||
#define PSR_Q_BIT 0x08000000
|
||||
#define PSR_V_BIT 0x10000000
|
||||
#define PSR_C_BIT 0x20000000
|
||||
#define PSR_Z_BIT 0x40000000
|
||||
#define PSR_N_BIT 0x80000000
|
||||
|
||||
/*
|
||||
* Groups of PSR bits
|
||||
*/
|
||||
#define PSR_f 0xff000000 /* Flags */
|
||||
#define PSR_s 0x00ff0000 /* Status */
|
||||
#define PSR_x 0x0000ff00 /* Extension */
|
||||
#define PSR_c 0x000000ff /* Control */
|
||||
|
||||
/* Current Exception Level values, as contained in CurrentEL */
|
||||
#define CurrentEL_EL1 (1 << 2)
|
||||
#define CurrentEL_EL2 (2 << 2)
|
||||
|
||||
/* thread->ptrace_debugreg lower-area and higher-area */
|
||||
#define HWS_BREAK 0
|
||||
#define HWS_WATCH 1
|
||||
|
||||
#ifndef __ASSEMBLY__
|
||||
|
||||
#include <lwk/compiler.h>
|
||||
#include <ihk/types.h>
|
||||
|
||||
struct user_hwdebug_state {
|
||||
uint32_t dbg_info;
|
||||
uint32_t pad;
|
||||
struct {
|
||||
uint64_t addr;
|
||||
uint32_t ctrl;
|
||||
uint32_t pad;
|
||||
} dbg_regs[16];
|
||||
};
|
||||
|
||||
struct user_fpsimd_state {
|
||||
__uint128_t vregs[32];
|
||||
uint32_t fpsr;
|
||||
uint32_t fpcr;
|
||||
uint32_t __reserved[2];
|
||||
};
|
||||
|
||||
extern unsigned int ptrace_hbp_get_resource_info(unsigned int note_type);
|
||||
|
||||
/* SVE/FP/SIMD state (NT_ARM_SVE) */
|
||||
|
||||
struct user_sve_header {
|
||||
uint32_t size; /* total meaningful regset content in bytes */
|
||||
uint32_t max_size; /* maximum possible size for this thread */
|
||||
uint16_t vl; /* current vector length */
|
||||
uint16_t max_vl; /* maximum possible vector length */
|
||||
uint16_t flags;
|
||||
uint16_t __reserved;
|
||||
};
|
||||
|
||||
enum aarch64_regset {
|
||||
REGSET_GPR,
|
||||
REGSET_FPR,
|
||||
REGSET_TLS,
|
||||
REGSET_HW_BREAK,
|
||||
REGSET_HW_WATCH,
|
||||
REGSET_SYSTEM_CALL,
|
||||
#ifdef CONFIG_ARM64_SVE
|
||||
REGSET_SVE,
|
||||
#endif /* CONFIG_ARM64_SVE */
|
||||
};
|
||||
|
||||
struct thread;
|
||||
struct user_regset;
|
||||
|
||||
typedef int user_regset_active_fn(struct thread *target,
|
||||
const struct user_regset *regset);
|
||||
|
||||
typedef long user_regset_get_fn(struct thread *target,
|
||||
const struct user_regset *regset,
|
||||
unsigned int pos, unsigned int count,
|
||||
void *kbuf, void __user *ubuf);
|
||||
|
||||
typedef long user_regset_set_fn(struct thread *target,
|
||||
const struct user_regset *regset,
|
||||
unsigned int pos, unsigned int count,
|
||||
const void *kbuf, const void __user *ubuf);
|
||||
|
||||
typedef int user_regset_writeback_fn(struct thread *target,
|
||||
const struct user_regset *regset,
|
||||
int immediate);
|
||||
|
||||
typedef unsigned int user_regset_get_size_fn(struct thread *target,
|
||||
const struct user_regset *regset);
|
||||
|
||||
struct user_regset {
|
||||
user_regset_get_fn *get;
|
||||
user_regset_set_fn *set;
|
||||
user_regset_active_fn *active;
|
||||
user_regset_writeback_fn *writeback;
|
||||
user_regset_get_size_fn *get_size;
|
||||
unsigned int n;
|
||||
unsigned int size;
|
||||
unsigned int align;
|
||||
unsigned int bias;
|
||||
unsigned int core_note_type;
|
||||
};
|
||||
|
||||
struct user_regset_view {
|
||||
const char *name;
|
||||
const struct user_regset *regsets;
|
||||
unsigned int n;
|
||||
uint32_t e_flags;
|
||||
uint16_t e_machine;
|
||||
uint8_t ei_osabi;
|
||||
};
|
||||
|
||||
extern const struct user_regset_view *current_user_regset_view(void);
|
||||
extern const struct user_regset *find_regset(
|
||||
const struct user_regset_view *view,
|
||||
unsigned int type);
|
||||
extern unsigned int regset_size(struct thread *target,
|
||||
const struct user_regset *regset);
|
||||
|
||||
/* Definitions for user_sve_header.flags: */
|
||||
#define SVE_PT_REGS_MASK (1 << 0)
|
||||
|
||||
#define SVE_PT_REGS_FPSIMD 0
|
||||
#define SVE_PT_REGS_SVE SVE_PT_REGS_MASK
|
||||
|
||||
#define SVE_PT_VL_THREAD PR_SVE_SET_VL_THREAD
|
||||
#define SVE_PT_VL_INHERIT PR_SVE_VL_INHERIT
|
||||
#define SVE_PT_VL_ONEXEC PR_SVE_SET_VL_ONEXEC
|
||||
|
||||
/*
|
||||
* The remainder of the SVE state follows struct user_sve_header. The
|
||||
* total size of the SVE state (including header) depends on the
|
||||
* metadata in the header: SVE_PT_SIZE(vq, flags) gives the total size
|
||||
* of the state in bytes, including the header.
|
||||
*
|
||||
* Refer to <asm/sigcontext.h> for details of how to pass the correct
|
||||
* "vq" argument to these macros.
|
||||
*/
|
||||
|
||||
/* Offset from the start of struct user_sve_header to the register data */
|
||||
#define SVE_PT_REGS_OFFSET \
|
||||
((sizeof(struct sve_context) + (SVE_VQ_BYTES - 1)) \
|
||||
/ SVE_VQ_BYTES * SVE_VQ_BYTES)
|
||||
|
||||
/*
|
||||
* The register data content and layout depends on the value of the
|
||||
* flags field.
|
||||
*/
|
||||
|
||||
/*
|
||||
* (flags & SVE_PT_REGS_MASK) == SVE_PT_REGS_FPSIMD case:
|
||||
*
|
||||
* The payload starts at offset SVE_PT_FPSIMD_OFFSET, and is of type
|
||||
* struct user_fpsimd_state. Additional data might be appended in the
|
||||
* future: use SVE_PT_FPSIMD_SIZE(vq, flags) to compute the total size.
|
||||
* SVE_PT_FPSIMD_SIZE(vq, flags) will never be less than
|
||||
* sizeof(struct user_fpsimd_state).
|
||||
*/
|
||||
|
||||
#define SVE_PT_FPSIMD_OFFSET SVE_PT_REGS_OFFSET
|
||||
|
||||
#define SVE_PT_FPSIMD_SIZE(vq, flags) (sizeof(struct user_fpsimd_state))
|
||||
|
||||
/*
|
||||
* (flags & SVE_PT_REGS_MASK) == SVE_PT_REGS_SVE case:
|
||||
*
|
||||
* The payload starts at offset SVE_PT_SVE_OFFSET, and is of size
|
||||
* SVE_PT_SVE_SIZE(vq, flags).
|
||||
*
|
||||
* Additional macros describe the contents and layout of the payload.
|
||||
* For each, SVE_PT_SVE_x_OFFSET(args) is the start offset relative to
|
||||
* the start of struct user_sve_header, and SVE_PT_SVE_x_SIZE(args) is
|
||||
* the size in bytes:
|
||||
*
|
||||
* x type description
|
||||
* - ---- -----------
|
||||
* ZREGS \
|
||||
* ZREG |
|
||||
* PREGS | refer to <asm/sigcontext.h>
|
||||
* PREG |
|
||||
* FFR /
|
||||
*
|
||||
* FPSR uint32_t FPSR
|
||||
* FPCR uint32_t FPCR
|
||||
*
|
||||
* Additional data might be appended in the future.
|
||||
*/
|
||||
|
||||
#define SVE_PT_SVE_ZREG_SIZE(vq) SVE_SIG_ZREG_SIZE(vq)
|
||||
#define SVE_PT_SVE_PREG_SIZE(vq) SVE_SIG_PREG_SIZE(vq)
|
||||
#define SVE_PT_SVE_FFR_SIZE(vq) SVE_SIG_FFR_SIZE(vq)
|
||||
#define SVE_PT_SVE_FPSR_SIZE sizeof(uint32_t)
|
||||
#define SVE_PT_SVE_FPCR_SIZE sizeof(uint32_t)
|
||||
|
||||
#define __SVE_SIG_TO_PT(offset) \
|
||||
((offset) - SVE_SIG_REGS_OFFSET + SVE_PT_REGS_OFFSET)
|
||||
|
||||
#define SVE_PT_SVE_OFFSET SVE_PT_REGS_OFFSET
|
||||
|
||||
#define SVE_PT_SVE_ZREGS_OFFSET \
|
||||
__SVE_SIG_TO_PT(SVE_SIG_ZREGS_OFFSET)
|
||||
#define SVE_PT_SVE_ZREG_OFFSET(vq, n) \
|
||||
__SVE_SIG_TO_PT(SVE_SIG_ZREG_OFFSET(vq, n))
|
||||
#define SVE_PT_SVE_ZREGS_SIZE(vq) \
|
||||
(SVE_PT_SVE_ZREG_OFFSET(vq, SVE_NUM_ZREGS) - SVE_PT_SVE_ZREGS_OFFSET)
|
||||
|
||||
#define SVE_PT_SVE_PREGS_OFFSET(vq) \
|
||||
__SVE_SIG_TO_PT(SVE_SIG_PREGS_OFFSET(vq))
|
||||
#define SVE_PT_SVE_PREG_OFFSET(vq, n) \
|
||||
__SVE_SIG_TO_PT(SVE_SIG_PREG_OFFSET(vq, n))
|
||||
#define SVE_PT_SVE_PREGS_SIZE(vq) \
|
||||
(SVE_PT_SVE_PREG_OFFSET(vq, SVE_NUM_PREGS) - \
|
||||
SVE_PT_SVE_PREGS_OFFSET(vq))
|
||||
|
||||
#define SVE_PT_SVE_FFR_OFFSET(vq) \
|
||||
__SVE_SIG_TO_PT(SVE_SIG_FFR_OFFSET(vq))
|
||||
|
||||
#define SVE_PT_SVE_FPSR_OFFSET(vq) \
|
||||
((SVE_PT_SVE_FFR_OFFSET(vq) + SVE_PT_SVE_FFR_SIZE(vq) + \
|
||||
(SVE_VQ_BYTES - 1)) \
|
||||
/ SVE_VQ_BYTES * SVE_VQ_BYTES)
|
||||
#define SVE_PT_SVE_FPCR_OFFSET(vq) \
|
||||
(SVE_PT_SVE_FPSR_OFFSET(vq) + SVE_PT_SVE_FPSR_SIZE)
|
||||
|
||||
/*
|
||||
* Any future extension appended after FPCR must be aligned to the next
|
||||
* 128-bit boundary.
|
||||
*/
|
||||
|
||||
#define SVE_PT_SVE_SIZE(vq, flags) \
|
||||
((SVE_PT_SVE_FPCR_OFFSET(vq) + SVE_PT_SVE_FPCR_SIZE \
|
||||
- SVE_PT_SVE_OFFSET + (SVE_VQ_BYTES - 1)) \
|
||||
/ SVE_VQ_BYTES * SVE_VQ_BYTES)
|
||||
|
||||
#define SVE_PT_SIZE(vq, flags) \
|
||||
(((flags) & SVE_PT_REGS_MASK) == SVE_PT_REGS_SVE ? \
|
||||
SVE_PT_SVE_OFFSET + SVE_PT_SVE_SIZE(vq, flags) \
|
||||
: SVE_PT_FPSIMD_OFFSET + SVE_PT_FPSIMD_SIZE(vq, flags))
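A sketch of how a consumer could size the NT_ARM_SVE regset from the header alone; fetching the header first (e.g. via a short regset read) is assumed, and sve_vq_from_vl() comes from the sigcontext definitions.

    static size_t example_sve_regset_size(const struct user_sve_header *hdr)
    {
        unsigned int vq = sve_vq_from_vl(hdr->vl);  /* quadwords per vector */

        return SVE_PT_SIZE(vq, hdr->flags);         /* header + payload, 16-byte aligned */
    }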
|
||||
|
||||
#endif /* !__ASSEMBLY__ */
|
||||
|
||||
#endif /* !__HEADER_ARM64_COMMON_PTRACE_H */
|
||||
127  arch/arm64/kernel/include/registers.h  Normal file
@@ -0,0 +1,127 @@
|
||||
/* registers.h COPYRIGHT FUJITSU LIMITED 2015-2018 */
|
||||
#ifndef __HEADER_ARM64_COMMON_REGISTERS_H
|
||||
#define __HEADER_ARM64_COMMON_REGISTERS_H
|
||||
|
||||
#include <types.h>
|
||||
#include <arch/cpu.h>
|
||||
#include <sysreg.h>
|
||||
|
||||
#define RFLAGS_CF (1 << 0)
|
||||
#define RFLAGS_PF (1 << 2)
|
||||
#define RFLAGS_AF (1 << 4)
|
||||
#define RFLAGS_ZF (1 << 6)
|
||||
#define RFLAGS_SF (1 << 7)
|
||||
#define RFLAGS_TF (1 << 8)
|
||||
#define RFLAGS_IF (1 << 9)
|
||||
#define RFLAGS_DF (1 << 10)
|
||||
#define RFLAGS_OF (1 << 11)
|
||||
#define RFLAGS_IOPL (3 << 12)
|
||||
#define RFLAGS_NT (1 << 14)
|
||||
#define RFLAGS_RF (1 << 16)
|
||||
#define RFLAGS_VM (1 << 17)
|
||||
#define RFLAGS_AC (1 << 18)
|
||||
#define RFLAGS_VIF (1 << 19)
|
||||
#define RFLAGS_VIP (1 << 20)
|
||||
#define RFLAGS_ID (1 << 21)
|
||||
|
||||
#define DB6_B0 (1 << 0)
|
||||
#define DB6_B1 (1 << 1)
|
||||
#define DB6_B2 (1 << 2)
|
||||
#define DB6_B3 (1 << 3)
|
||||
#define DB6_BD (1 << 13)
|
||||
#define DB6_BS (1 << 14)
|
||||
#define DB6_BT (1 << 15)
|
||||
|
||||
#define MSR_EFER 0xc0000080
|
||||
#define MSR_STAR 0xc0000081
|
||||
#define MSR_LSTAR 0xc0000082
|
||||
#define MSR_FMASK 0xc0000084
|
||||
#define MSR_FS_BASE 0xc0000100
|
||||
#define MSR_GS_BASE 0xc0000101
|
||||
|
||||
#define MSR_IA32_APIC_BASE 0x000000001b
|
||||
#define MSR_PLATFORM_INFO 0x000000ce
|
||||
#define MSR_IA32_PERF_CTL 0x00000199
|
||||
#define MSR_IA32_MISC_ENABLE 0x000001a0
|
||||
#define MSR_IA32_ENERGY_PERF_BIAS 0x000001b0
|
||||
#define MSR_NHM_TURBO_RATIO_LIMIT 0x000001ad
|
||||
#define MSR_IA32_CR_PAT 0x00000277
|
||||
|
||||
|
||||
#define CVAL(event, mask) \
|
||||
((((event) & 0xf00) << 24) | ((mask) << 8) | ((event) & 0xff))
|
||||
#define CVAL2(event, mask, inv, count) \
|
||||
((((event) & 0xf00) << 24) | ((mask) << 8) | ((event) & 0xff) | \
|
||||
((inv & 1) << 23) | ((count & 0xff) << 24))
|
||||
|
||||
/* AMD */
|
||||
#define MSR_PERF_CTL_0 0xc0010000
|
||||
#define MSR_PERF_CTR_0 0xc0010004
|
||||
|
||||
static unsigned long xgetbv(unsigned int index)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void xsetbv(unsigned int index, unsigned long val)
|
||||
{
|
||||
}
|
||||
|
||||
static unsigned long rdpmc(unsigned int counter)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static unsigned long rdmsr(unsigned int index)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* @ref.impl linux4.10.16 */
|
||||
/* arch/arm64/include/asm/arch_timer.h:arch_counter_get_cntvct() */
|
||||
static inline unsigned long rdtsc(void)
|
||||
{
|
||||
isb();
|
||||
return read_sysreg(cntvct_el0);
|
||||
}
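A minimal sketch of using the counter read above to time an interval; the workload is hypothetical, and converting ticks to seconds (which needs the frequency from cntfrq_el0) is outside the scope of this header.

    static unsigned long example_time_in_ticks(void (*workload)(void))
    {
        unsigned long start = rdtsc();

        workload();                     /* function under measurement */

        return rdtsc() - start;         /* elapsed generic-timer ticks */
    }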
|
||||
|
||||
static void set_perfctl(int counter, int event, int mask)
|
||||
{
|
||||
}
|
||||
|
||||
static void start_perfctr(int counter)
|
||||
{
|
||||
}
|
||||
static void stop_perfctr(int counter)
|
||||
{
|
||||
}
|
||||
|
||||
static void clear_perfctl(int counter)
|
||||
{
|
||||
}
|
||||
|
||||
static void set_perfctr(int counter, unsigned long value)
|
||||
{
|
||||
}
|
||||
|
||||
static unsigned long read_perfctr(int counter)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
#define ihk_mc_mb() do {} while(0);
|
||||
|
||||
#define REGS_GET_STACK_POINTER(regs) (((struct pt_regs *)regs)->sp)
|
||||
|
||||
enum arm64_pf_error_code {
|
||||
PF_PROT = 1 << 0,
|
||||
PF_WRITE = 1 << 1,
|
||||
PF_USER = 1 << 2,
|
||||
PF_RSVD = 1 << 3,
|
||||
PF_INSTR = 1 << 4,
|
||||
|
||||
PF_PATCH = 1 << 29,
|
||||
PF_POPULATE = 1 << 30,
|
||||
};
|
||||
|
||||
#endif /* !__HEADER_ARM64_COMMON_REGISTERS_H */
|
||||
100  arch/arm64/kernel/include/rlimit.h  Normal file
@@ -0,0 +1,100 @@
|
||||
/* rlimit.h COPYRIGHT FUJITSU LIMITED 2016 */
|
||||
/**
|
||||
* \file rlimit.h
|
||||
* License details are found in the file LICENSE.
|
||||
* \brief
|
||||
* Kinds of resource limit
|
||||
* \author Taku Shimosawa <shimosawa@is.s.u-tokyo.ac.jp> \par
|
||||
* Copyright (C) 2011 - 2012 Taku Shimosawa
|
||||
*/
|
||||
/*
|
||||
* HISTORY
|
||||
*/
|
||||
|
||||
#ifndef __HEADER_ARM64_COMMON_RLIMIT_H
|
||||
#define __HEADER_ARM64_COMMON_RLIMIT_H
|
||||
|
||||
/* Kinds of resource limit. */
|
||||
enum __rlimit_resource
|
||||
{
|
||||
/* Per-process CPU limit, in seconds. */
|
||||
RLIMIT_CPU = 0,
|
||||
#define RLIMIT_CPU RLIMIT_CPU
|
||||
|
||||
/* Largest file that can be created, in bytes. */
|
||||
RLIMIT_FSIZE = 1,
|
||||
#define RLIMIT_FSIZE RLIMIT_FSIZE
|
||||
|
||||
/* Maximum size of data segment, in bytes. */
|
||||
RLIMIT_DATA = 2,
|
||||
#define RLIMIT_DATA RLIMIT_DATA
|
||||
|
||||
/* Maximum size of stack segment, in bytes. */
|
||||
RLIMIT_STACK = 3,
|
||||
#define RLIMIT_STACK RLIMIT_STACK
|
||||
|
||||
/* Largest core file that can be created, in bytes. */
|
||||
RLIMIT_CORE = 4,
|
||||
#define RLIMIT_CORE RLIMIT_CORE
|
||||
|
||||
/* Largest resident set size, in bytes.
|
||||
This affects swapping; processes that are exceeding their
|
||||
resident set size will be more likely to have physical memory
|
||||
taken from them. */
|
||||
__RLIMIT_RSS = 5,
|
||||
#define RLIMIT_RSS __RLIMIT_RSS
|
||||
|
||||
/* Number of open files. */
|
||||
RLIMIT_NOFILE = 7,
|
||||
__RLIMIT_OFILE = RLIMIT_NOFILE, /* BSD name for same. */
|
||||
#define RLIMIT_NOFILE RLIMIT_NOFILE
|
||||
#define RLIMIT_OFILE __RLIMIT_OFILE
|
||||
|
||||
/* Address space limit. */
|
||||
RLIMIT_AS = 9,
|
||||
#define RLIMIT_AS RLIMIT_AS
|
||||
|
||||
/* Number of processes. */
|
||||
__RLIMIT_NPROC = 6,
|
||||
#define RLIMIT_NPROC __RLIMIT_NPROC
|
||||
|
||||
/* Locked-in-memory address space. */
|
||||
__RLIMIT_MEMLOCK = 8,
|
||||
#define RLIMIT_MEMLOCK __RLIMIT_MEMLOCK
|
||||
|
||||
/* Maximum number of file locks. */
|
||||
__RLIMIT_LOCKS = 10,
|
||||
#define RLIMIT_LOCKS __RLIMIT_LOCKS
|
||||
|
||||
/* Maximum number of pending signals. */
|
||||
__RLIMIT_SIGPENDING = 11,
|
||||
#define RLIMIT_SIGPENDING __RLIMIT_SIGPENDING
|
||||
|
||||
/* Maximum bytes in POSIX message queues. */
|
||||
__RLIMIT_MSGQUEUE = 12,
|
||||
#define RLIMIT_MSGQUEUE __RLIMIT_MSGQUEUE
|
||||
|
||||
/* Maximum nice priority allowed to raise to.
|
||||
Nice levels 19 .. -20 correspond to 0 .. 39
|
||||
values of this resource limit. */
|
||||
__RLIMIT_NICE = 13,
|
||||
#define RLIMIT_NICE __RLIMIT_NICE
|
||||
|
||||
/* Maximum realtime priority allowed for non-privileged
|
||||
processes. */
|
||||
__RLIMIT_RTPRIO = 14,
|
||||
#define RLIMIT_RTPRIO __RLIMIT_RTPRIO
|
||||
|
||||
/* timeout for RT tasks in us */
|
||||
__RLIMIT_RTTIME = 15,
|
||||
#define RLIMIT_RTTIME __RLIMIT_RTTIME
|
||||
|
||||
__RLIMIT_NLIMITS = 16,
|
||||
__RLIM_NLIMITS = __RLIMIT_NLIMITS
|
||||
#define RLIMIT_NLIMITS __RLIMIT_NLIMITS
|
||||
#define RLIM_NLIMITS __RLIM_NLIMITS
|
||||
};
|
||||
|
||||
#include <generic-rlimit.h>
|
||||
|
||||
#endif
|
||||
421  arch/arm64/kernel/include/signal.h  Normal file
@@ -0,0 +1,421 @@
|
||||
/* signal.h COPYRIGHT FUJITSU LIMITED 2015-2019 */
|
||||
#ifndef __HEADER_ARM64_COMMON_SIGNAL_H
|
||||
#define __HEADER_ARM64_COMMON_SIGNAL_H
|
||||
|
||||
#include <fpsimd.h>
|
||||
#include <ihk/types.h>
|
||||
|
||||
#define _NSIG 64
|
||||
#define _NSIG_BPW 64
|
||||
#define _NSIG_WORDS (_NSIG / _NSIG_BPW)
|
||||
|
||||
static inline int valid_signal(unsigned long sig)
|
||||
{
|
||||
return sig <= _NSIG ? 1 : 0;
|
||||
}
|
||||
|
||||
typedef unsigned long int __sigset_t;
|
||||
#define __sigmask(sig) (((__sigset_t) 1) << ((sig) - 1))
|
||||
|
||||
typedef struct {
|
||||
__sigset_t __val[_NSIG_WORDS];
|
||||
} sigset_t;
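A minimal sketch of how __sigmask() composes a sigset_t; SIGUSR1 and SIGTERM are defined further down in this header, and installing the mask (e.g. with SIG_BLOCK) is left out.

    static void example_build_mask(sigset_t *set)
    {
        set->__val[0] = __sigmask(SIGUSR1) | __sigmask(SIGTERM);
    }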
|
||||
|
||||
#define SIG_BLOCK 0
|
||||
#define SIG_UNBLOCK 1
|
||||
#define SIG_SETMASK 2
|
||||
|
||||
struct sigaction {
|
||||
void (*sa_handler)(int);
|
||||
unsigned long sa_flags;
|
||||
void (*sa_restorer)(int);
|
||||
sigset_t sa_mask;
|
||||
};
|
||||
|
||||
typedef void __sig_fn_t(int);
|
||||
typedef __sig_fn_t *__sig_handler_t;
|
||||
#define SIG_DFL (__sig_handler_t)0
|
||||
#define SIG_IGN (__sig_handler_t)1
|
||||
#define SIG_ERR (__sig_handler_t)-1
|
||||
|
||||
#define SA_NOCLDSTOP 0x00000001U
|
||||
#define SA_NOCLDWAIT 0x00000002U
|
||||
#define SA_NODEFER 0x40000000U
|
||||
#define SA_ONSTACK 0x08000000U
|
||||
#define SA_RESETHAND 0x80000000U
|
||||
#define SA_RESTART 0x10000000U
|
||||
#define SA_SIGINFO 0x00000004U
|
||||
|
||||
/* Required for AArch32 compatibility. */
|
||||
#define SA_RESTORER 0x04000000U
|
||||
|
||||
struct k_sigaction {
|
||||
struct sigaction sa;
|
||||
};
|
||||
|
||||
typedef struct sigaltstack {
|
||||
void *ss_sp;
|
||||
int ss_flags;
|
||||
size_t ss_size;
|
||||
} stack_t;
|
||||
|
||||
#define MINSIGSTKSZ 5120
|
||||
#define SS_ONSTACK 1
|
||||
#define SS_DISABLE 2
|
||||
|
||||
typedef union sigval {
|
||||
int sival_int;
|
||||
void *sival_ptr;
|
||||
} sigval_t;
|
||||
|
||||
#define __SI_MAX_SIZE 128
|
||||
#define __SI_PAD_SIZE ((__SI_MAX_SIZE / sizeof (int)) - 4)
|
||||
|
||||
typedef struct siginfo {
|
||||
int si_signo; /* Signal number. */
|
||||
int si_errno; /* If non-zero, an errno value associated with
|
||||
this signal, as defined in <errno.h>. */
|
||||
int si_code; /* Signal code. */
|
||||
#define SI_USER 0 /* sent by kill, sigsend, raise */
|
||||
#define SI_KERNEL 0x80 /* sent by the kernel from somewhere */
|
||||
#define SI_QUEUE -1 /* sent by sigqueue */
|
||||
#define SI_TIMER __SI_CODE(__SI_TIMER,-2) /* sent by timer expiration */
|
||||
#define SI_MESGQ __SI_CODE(__SI_MESGQ,-3) /* sent by real time mesq state change
|
||||
*/
|
||||
#define SI_ASYNCIO -4 /* sent by AIO completion */
|
||||
#define SI_SIGIO -5 /* sent by queued SIGIO */
|
||||
#define SI_TKILL -6 /* sent by tkill system call */
|
||||
#define SI_DETHREAD -7 /* sent by execve() killing subsidiary threads */
|
||||
|
||||
#define ILL_ILLOPC 1 /* illegal opcode */
|
||||
#define ILL_ILLOPN 2 /* illegal operand */
|
||||
#define ILL_ILLADR 3 /* illegal addressing mode */
|
||||
#define ILL_ILLTRP 4 /* illegal trap */
|
||||
#define ILL_PRVOPC 5 /* privileged opcode */
|
||||
#define ILL_PRVREG 6 /* privileged register */
|
||||
#define ILL_COPROC 7 /* coprocessor error */
|
||||
#define ILL_BADSTK 8 /* internal stack error */
|
||||
|
||||
#define FPE_INTDIV 1 /* integer divide by zero */
|
||||
#define FPE_INTOVF 2 /* integer overflow */
|
||||
#define FPE_FLTDIV 3 /* floating point divide by zero */
|
||||
#define FPE_FLTOVF 4 /* floating point overflow */
|
||||
#define FPE_FLTUND 5 /* floating point underflow */
|
||||
#define FPE_FLTRES 6 /* floating point inexact result */
|
||||
#define FPE_FLTINV 7 /* floating point invalid operation */
|
||||
#define FPE_FLTSUB 8 /* subscript out of range */
|
||||
|
||||
#define SEGV_MAPERR 1 /* address not mapped to object */
|
||||
#define SEGV_ACCERR 2 /* invalid permissions for mapped object */
|
||||
|
||||
#define BUS_ADRALN 1 /* invalid address alignment */
|
||||
#define BUS_ADRERR 2 /* non-existent physical address */
|
||||
#define BUS_OBJERR 3 /* object specific hardware error */
|
||||
/* hardware memory error consumed on a machine check: action required */
|
||||
#define BUS_MCEERR_AR 4
|
||||
/* hardware memory error detected in process but not consumed: action optional*/
|
||||
#define BUS_MCEERR_AO 5
|
||||
|
||||
#define TRAP_BRKPT 1 /* process breakpoint */
|
||||
#define TRAP_TRACE 2 /* process trace trap */
|
||||
#define TRAP_BRANCH 3 /* process taken branch trap */
|
||||
#define TRAP_HWBKPT 4 /* hardware breakpoint/watchpoint */
|
||||
|
||||
#define CLD_EXITED 1 /* child has exited */
|
||||
#define CLD_KILLED 2 /* child was killed */
|
||||
#define CLD_DUMPED 3 /* child terminated abnormally */
|
||||
#define CLD_TRAPPED 4 /* traced child has trapped */
|
||||
#define CLD_STOPPED 5 /* child has stopped */
|
||||
#define CLD_CONTINUED 6 /* stopped child has continued */
|
||||
|
||||
#define POLL_IN 1 /* data input available */
|
||||
#define POLL_OUT 2 /* output buffers available */
|
||||
#define POLL_MSG 3 /* input message available */
|
||||
#define POLL_ERR 4 /* i/o error */
|
||||
#define POLL_PRI 5 /* high priority input available */
|
||||
#define POLL_HUP 6 /* device disconnected */
|
||||
|
||||
#define SIGEV_SIGNAL 0 /* notify via signal */
|
||||
#define SIGEV_NONE 1 /* other notification: meaningless */
|
||||
#define SIGEV_THREAD 2 /* deliver via thread creation */
|
||||
#define SIGEV_THREAD_ID 4 /* deliver to thread */
|
||||
|
||||
union {
|
||||
int _pad[__SI_PAD_SIZE];
|
||||
|
||||
/* kill(). */
|
||||
struct {
|
||||
int si_pid;/* Sending process ID. */
|
||||
int si_uid;/* Real user ID of sending process. */
|
||||
} _kill;
|
||||
|
||||
/* POSIX.1b timers. */
|
||||
struct {
|
||||
int si_tid; /* Timer ID. */
|
||||
int si_overrun; /* Overrun count. */
|
||||
sigval_t si_sigval; /* Signal value. */
|
||||
} _timer;
|
||||
|
||||
/* POSIX.1b signals. */
|
||||
struct {
|
||||
int si_pid; /* Sending process ID. */
|
||||
int si_uid; /* Real user ID of sending process. */
|
||||
sigval_t si_sigval; /* Signal value. */
|
||||
} _rt;
|
||||
|
||||
/* SIGCHLD. */
|
||||
struct {
|
||||
int si_pid; /* Which child. */
|
||||
int si_uid; /* Real user ID of sending process. */
|
||||
int si_status; /* Exit value or signal. */
|
||||
long si_utime;
|
||||
long si_stime;
|
||||
} _sigchld;
|
||||
|
||||
/* SIGILL, SIGFPE, SIGSEGV, SIGBUS. */
|
||||
struct {
|
||||
void *si_addr; /* Faulting insn/memory ref. */
|
||||
} _sigfault;
|
||||
|
||||
/* SIGPOLL. */
|
||||
struct {
|
||||
long int si_band; /* Band event for SIGPOLL. */
|
||||
int si_fd;
|
||||
} _sigpoll;
|
||||
} _sifields;
|
||||
} siginfo_t;
|
||||
|
||||
struct signalfd_siginfo {
|
||||
unsigned int ssi_signo;
|
||||
int ssi_errno;
|
||||
int ssi_code;
|
||||
unsigned int ssi_pid;
|
||||
unsigned int ssi_uid;
|
||||
int ssi_fd;
|
||||
unsigned int ssi_tid;
|
||||
unsigned int ssi_band;
|
||||
unsigned int ssi_overrun;
|
||||
unsigned int ssi_trapno;
|
||||
int ssi_status;
|
||||
int ssi_int;
|
||||
unsigned long ssi_ptr;
|
||||
unsigned long ssi_utime;
|
||||
unsigned long ssi_stime;
|
||||
unsigned long ssi_addr;
|
||||
unsigned short ssi_addr_lsb;
|
||||
|
||||
char __pad[46];
|
||||
};
|
||||
|
||||
|
||||
#define SIGHUP 1
|
||||
#define SIGINT 2
|
||||
#define SIGQUIT 3
|
||||
#define SIGILL 4
|
||||
#define SIGTRAP 5
|
||||
#define SIGABRT 6
|
||||
#define SIGIOT 6
|
||||
#define SIGBUS 7
|
||||
#define SIGFPE 8
|
||||
#define SIGKILL 9
|
||||
#define SIGUSR1 10
|
||||
#define SIGSEGV 11
|
||||
#define SIGUSR2 12
|
||||
#define SIGPIPE 13
|
||||
#define SIGALRM 14
|
||||
#define SIGTERM 15
|
||||
#define SIGSTKFLT 16
|
||||
#define SIGCHLD 17
|
||||
#define SIGCONT 18
|
||||
#define SIGSTOP 19
|
||||
#define SIGTSTP 20
|
||||
#define SIGTTIN 21
|
||||
#define SIGTTOU 22
|
||||
#define SIGURG 23
|
||||
#define SIGXCPU 24
|
||||
#define SIGXFSZ 25
|
||||
#define SIGVTALRM 26
|
||||
#define SIGPROF 27
|
||||
#define SIGWINCH 28
|
||||
#define SIGIO 29
|
||||
#define SIGPOLL SIGIO
|
||||
#define SIGPWR 30
|
||||
#define SIGSYS 31
|
||||
#define SIGUNUSED 31
|
||||
#define SIGRTMIN 32
|
||||
|
||||
#ifndef SIGRTMAX
|
||||
#define SIGRTMAX _NSIG
|
||||
#endif
|
||||
|
||||
#define PTRACE_EVENT_EXEC 4
|
||||
|
||||
/*
|
||||
* @ref.impl linux-linaro/arch/arm64/include/uapi/asm/sigcontext.h
|
||||
*/
|
||||
struct sigcontext {
|
||||
unsigned long fault_address;
|
||||
/* AArch64 registers */
|
||||
unsigned long regs[31];
|
||||
unsigned long sp;
|
||||
unsigned long pc;
|
||||
unsigned long pstate;
|
||||
/* 4K reserved for FP/SIMD state and future expansion */
|
||||
unsigned char __reserved[4096] /*__attribute__((__aligned__(16)))*/;
|
||||
};
|
||||
|
||||
/*
|
||||
* Header to be used at the beginning of structures extending the user
|
||||
* context. Such structures must be placed after the rt_sigframe on the stack
|
||||
* and be 16-byte aligned. The last structure must be a dummy one with the
|
||||
* magic and size set to 0.
|
||||
*/
|
||||
struct _aarch64_ctx {
|
||||
unsigned int magic;
|
||||
unsigned int size;
|
||||
};
|
||||
|
||||
#define FPSIMD_MAGIC 0x46508001
|
||||
|
||||
struct fpsimd_context {
|
||||
struct _aarch64_ctx head;
|
||||
unsigned int fpsr;
|
||||
unsigned int fpcr;
|
||||
__uint128_t vregs[32];
|
||||
};
|
||||
|
||||
/* ESR_EL1 context */
|
||||
#define ESR_MAGIC 0x45535201
|
||||
|
||||
struct esr_context {
|
||||
struct _aarch64_ctx head;
|
||||
unsigned long esr;
|
||||
};
|
||||
|
||||
#define EXTRA_MAGIC 0x45585401
|
||||
|
||||
struct extra_context {
|
||||
struct _aarch64_ctx head;
|
||||
void *data; /* 16-byte aligned pointer to the extra space */
|
||||
uint32_t size; /* size in bytes of the extra space */
|
||||
uint32_t __reserved[3];
|
||||
};
|
||||
|
||||
#define SVE_MAGIC 0x53564501
|
||||
|
||||
#define fpsimd_sve_state(vq) { \
|
||||
__uint128_t zregs[32][vq]; \
|
||||
uint16_t pregs[16][vq]; \
|
||||
uint16_t ffr[vq]; \
|
||||
}
|
||||
|
||||
struct sve_context {
|
||||
struct _aarch64_ctx head;
|
||||
uint16_t vl;
|
||||
uint16_t __reserved[3];
|
||||
};
|
||||
|
||||
/*
|
||||
* The SVE architecture leaves space for future expansion of the
|
||||
* vector length beyond its initial architectural limit of 2048 bits
|
||||
* (16 quadwords).
|
||||
*
|
||||
* See linux/Documentation/arm64/sve.txt for a description of the VL/VQ
|
||||
* terminology.
|
||||
*/
|
||||
#define SVE_VQ_BYTES 16 /* number of bytes per quadword */
|
||||
|
||||
#define SVE_VQ_MIN 1
|
||||
#define SVE_VQ_MAX 512
|
||||
|
||||
#define SVE_VL_MIN (SVE_VQ_MIN * SVE_VQ_BYTES)
|
||||
#define SVE_VL_MAX (SVE_VQ_MAX * SVE_VQ_BYTES)
|
||||
|
||||
#define SVE_NUM_ZREGS 32
|
||||
#define SVE_NUM_PREGS 16
|
||||
|
||||
#define sve_vl_valid(vl) \
|
||||
((vl) % SVE_VQ_BYTES == 0 && (vl) >= SVE_VL_MIN && (vl) <= SVE_VL_MAX)
|
||||
#define sve_vq_from_vl(vl) ((vl) / SVE_VQ_BYTES)
|
||||
#define sve_vl_from_vq(vq) ((vq) * SVE_VQ_BYTES)
|
||||
|
||||
/*
|
||||
* The total size of meaningful data in the SVE context in bytes,
|
||||
* including the header, is given by SVE_SIG_CONTEXT_SIZE(vq).
|
||||
*
|
||||
* Note: for all these macros, the "vq" argument denotes the SVE
|
||||
* vector length in quadwords (i.e., units of 128 bits).
|
||||
*
|
||||
* The correct way to obtain vq is to use sve_vq_from_vl(vl). The
|
||||
* result is valid if and only if sve_vl_valid(vl) is true. This is
|
||||
* guaranteed for a struct sve_context written by the kernel.
|
||||
*
|
||||
*
|
||||
* Additional macros describe the contents and layout of the payload.
|
||||
* For each, SVE_SIG_x_OFFSET(args) is the start offset relative to
|
||||
* the start of struct sve_context, and SVE_SIG_x_SIZE(args) is the
|
||||
* size in bytes:
|
||||
*
|
||||
*
|
||||
* x type description
|
||||
* - ---- -----------
|
||||
* REGS the entire SVE context
|
||||
*
|
||||
* ZREGS __uint128_t[SVE_NUM_ZREGS][vq] all Z-registers
|
||||
* ZREG __uint128_t[vq] individual Z-register Zn
|
||||
*
|
||||
* PREGS uint16_t[SVE_NUM_PREGS][vq] all P-registers
|
||||
* PREG uint16_t[vq] individual P-register Pn
|
||||
*
|
||||
* FFR uint16_t[vq] first-fault status register
|
||||
*
|
||||
* Additional data might be appended in the future.
|
||||
*/
|
||||
|
||||
#define SVE_SIG_ZREG_SIZE(vq) ((uint32_t)(vq) * SVE_VQ_BYTES)
|
||||
#define SVE_SIG_PREG_SIZE(vq) ((uint32_t)(vq) * (SVE_VQ_BYTES / 8))
|
||||
#define SVE_SIG_FFR_SIZE(vq) SVE_SIG_PREG_SIZE(vq)
|
||||
|
||||
#define SVE_SIG_REGS_OFFSET \
|
||||
((sizeof(struct sve_context) + (SVE_VQ_BYTES - 1)) \
|
||||
/ SVE_VQ_BYTES * SVE_VQ_BYTES)
|
||||
|
||||
#define SVE_SIG_ZREGS_OFFSET SVE_SIG_REGS_OFFSET
|
||||
#define SVE_SIG_ZREG_OFFSET(vq, n) \
|
||||
(SVE_SIG_ZREGS_OFFSET + SVE_SIG_ZREG_SIZE(vq) * (n))
|
||||
#define SVE_SIG_ZREGS_SIZE(vq) \
|
||||
(SVE_SIG_ZREG_OFFSET(vq, SVE_NUM_ZREGS) - SVE_SIG_ZREGS_OFFSET)
|
||||
|
||||
#define SVE_SIG_PREGS_OFFSET(vq) \
|
||||
(SVE_SIG_ZREGS_OFFSET + SVE_SIG_ZREGS_SIZE(vq))
|
||||
#define SVE_SIG_PREG_OFFSET(vq, n) \
|
||||
(SVE_SIG_PREGS_OFFSET(vq) + SVE_SIG_PREG_SIZE(vq) * (n))
|
||||
#define SVE_SIG_PREGS_SIZE(vq) \
|
||||
(SVE_SIG_PREG_OFFSET(vq, SVE_NUM_PREGS) - SVE_SIG_PREGS_OFFSET(vq))
|
||||
|
||||
#define SVE_SIG_FFR_OFFSET(vq) \
|
||||
(SVE_SIG_PREGS_OFFSET(vq) + SVE_SIG_PREGS_SIZE(vq))
|
||||
|
||||
#define SVE_SIG_REGS_SIZE(vq) \
|
||||
(SVE_SIG_FFR_OFFSET(vq) + SVE_SIG_FFR_SIZE(vq) - SVE_SIG_REGS_OFFSET)
|
||||
|
||||
#define SVE_SIG_CONTEXT_SIZE(vq) (SVE_SIG_REGS_OFFSET + SVE_SIG_REGS_SIZE(vq))
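A worked example of the layout above for vq == 4 (vl == 64 bytes, i.e. 512-bit vectors); sizeof(struct sve_context) is 16, so SVE_SIG_REGS_OFFSET also rounds to 16:

    /*
     * SVE_SIG_ZREGS_SIZE(4)   = 32 * 64 = 2048
     * SVE_SIG_PREGS_SIZE(4)   = 16 *  8 =  128
     * SVE_SIG_FFR_SIZE(4)     =             8
     * SVE_SIG_REGS_SIZE(4)    = 2048 + 128 + 8 = 2184
     * SVE_SIG_CONTEXT_SIZE(4) = 16 + 2184      = 2200 bytes
     */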
|
||||
|
||||
/*
|
||||
* @ref.impl linux-linaro/arch/arm64/include/asm/ucontext.h
|
||||
*/
|
||||
struct ucontext {
|
||||
unsigned long uc_flags;
|
||||
struct ucontext *uc_link;
|
||||
stack_t uc_stack;
|
||||
sigset_t uc_sigmask;
|
||||
/* glibc uses a 1024-bit sigset_t */
|
||||
unsigned char __unused[1024 / 8 - sizeof(sigset_t)];
|
||||
/* last for future expansion */
|
||||
struct sigcontext uc_mcontext;
|
||||
};
|
||||
|
||||
void arm64_notify_die(const char *str, struct pt_regs *regs, struct siginfo *info, int err);
|
||||
void check_signal_irq_disabled(unsigned long rc, void *regs, int num);
|
||||
|
||||
#endif /* __HEADER_ARM64_COMMON_SIGNAL_H */
|
||||
23  arch/arm64/kernel/include/smp.h  Normal file
@@ -0,0 +1,23 @@
|
||||
/* smp.h COPYRIGHT FUJITSU LIMITED 2015 */
|
||||
#ifndef __HEADER_ARM64_COMMON_SMP_H
|
||||
#define __HEADER_ARM64_COMMON_SMP_H
|
||||
|
||||
#ifndef __ASSEMBLY__
|
||||
/*
|
||||
* Initial data for bringing up a secondary CPU.
|
||||
*/
|
||||
struct secondary_data {
|
||||
void *stack;
|
||||
unsigned long next_pc;
|
||||
unsigned long arg;
|
||||
};
|
||||
extern struct secondary_data secondary_data;
|
||||
|
||||
#endif /* __ASSEMBLY__ */
|
||||
|
||||
/* struct secondary_data offsets */
|
||||
#define SECONDARY_DATA_STACK 0x00
|
||||
#define SECONDARY_DATA_NEXT_PC 0x08
|
||||
#define SECONDARY_DATA_ARG 0x10
|
||||
|
||||
#endif /* !__HEADER_ARM64_COMMON_SMP_H */
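A sketch of how the numeric offsets could be checked against the C struct at compile time; the <stddef.h>/_Static_assert usage assumes a C11 toolchain and is not part of the original header.

    #include <stddef.h>

    _Static_assert(offsetof(struct secondary_data, stack)   == SECONDARY_DATA_STACK,   "stack offset");
    _Static_assert(offsetof(struct secondary_data, next_pc) == SECONDARY_DATA_NEXT_PC, "next_pc offset");
    _Static_assert(offsetof(struct secondary_data, arg)     == SECONDARY_DATA_ARG,     "arg offset");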
|
||||
17  arch/arm64/kernel/include/stringify.h  Normal file
@@ -0,0 +1,17 @@
|
||||
/* stringify.h COPYRIGHT FUJITSU LIMITED 2017 */
|
||||
|
||||
/**
|
||||
* @ref.impl host-kernel/include/linux/stringify.h
|
||||
*/
|
||||
#ifndef __LINUX_STRINGIFY_H
|
||||
#define __LINUX_STRINGIFY_H
|
||||
|
||||
/* Indirect stringification. Doing two levels allows the parameter to be a
|
||||
* macro itself. For example, compile with -DFOO=bar, __stringify(FOO)
|
||||
* converts to "bar".
|
||||
*/
|
||||
|
||||
#define __stringify_1(x...) #x
|
||||
#define __stringify(x...) __stringify_1(x)
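A small illustration of the two-level expansion; HYPOTHETICAL_REG and the asm usage are made-up examples, not part of this header.

    /*
     * #define HYPOTHETICAL_REG s3_3_c4_c2_3
     * __stringify(HYPOTHETICAL_REG)   -> "s3_3_c4_c2_3"     (argument expanded first)
     * __stringify_1(HYPOTHETICAL_REG) -> "HYPOTHETICAL_REG" (no expansion)
     *
     * Typical use: asm volatile("mrs %0, " __stringify(HYPOTHETICAL_REG) : "=r"(v));
     */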
|
||||
|
||||
#endif /* !__LINUX_STRINGIFY_H */
|
||||
152  arch/arm64/kernel/include/syscall_list.h  Normal file
@@ -0,0 +1,152 @@
|
||||
/* syscall_list.h COPYRIGHT FUJITSU LIMITED 2015-2018 */
|
||||
|
||||
SYSCALL_DELEGATED(4, io_getevents)
|
||||
SYSCALL_DELEGATED(17, getcwd)
|
||||
SYSCALL_HANDLED(22, epoll_pwait)
|
||||
SYSCALL_DELEGATED(25, fcntl)
|
||||
SYSCALL_HANDLED(29, ioctl)
|
||||
SYSCALL_DELEGATED(35, unlinkat)
|
||||
SYSCALL_DELEGATED(43, statfs)
|
||||
SYSCALL_DELEGATED(44, fstatfs)
|
||||
SYSCALL_HANDLED(56, openat)
|
||||
SYSCALL_HANDLED(57, close)
|
||||
SYSCALL_DELEGATED(61, getdents64)
|
||||
SYSCALL_DELEGATED(62, lseek)
|
||||
SYSCALL_HANDLED(63, read)
|
||||
SYSCALL_DELEGATED(64, write)
|
||||
SYSCALL_DELEGATED(66, writev)
|
||||
SYSCALL_DELEGATED(67, pread64)
|
||||
SYSCALL_DELEGATED(68, pwrite64)
|
||||
SYSCALL_HANDLED(72, pselect6)
|
||||
SYSCALL_HANDLED(73, ppoll)
|
||||
SYSCALL_HANDLED(74, signalfd4)
|
||||
SYSCALL_DELEGATED(78, readlinkat)
|
||||
SYSCALL_DELEGATED(80, fstat)
|
||||
SYSCALL_HANDLED(93, exit)
|
||||
SYSCALL_HANDLED(94, exit_group)
|
||||
SYSCALL_HANDLED(95, waitid)
|
||||
SYSCALL_HANDLED(96, set_tid_address)
|
||||
SYSCALL_HANDLED(98, futex)
|
||||
SYSCALL_HANDLED(99, set_robust_list)
|
||||
SYSCALL_HANDLED(101, nanosleep)
|
||||
SYSCALL_HANDLED(102, getitimer)
|
||||
SYSCALL_HANDLED(103, setitimer)
|
||||
SYSCALL_HANDLED(113, clock_gettime)
|
||||
SYSCALL_DELEGATED(114, clock_getres)
|
||||
SYSCALL_DELEGATED(115, clock_nanosleep)
|
||||
SYSCALL_HANDLED(117, ptrace)
|
||||
SYSCALL_HANDLED(118, sched_setparam)
|
||||
SYSCALL_HANDLED(119, sched_setscheduler)
|
||||
SYSCALL_HANDLED(120, sched_getscheduler)
|
||||
SYSCALL_HANDLED(121, sched_getparam)
|
||||
SYSCALL_HANDLED(122, sched_setaffinity)
|
||||
SYSCALL_HANDLED(123, sched_getaffinity)
|
||||
SYSCALL_HANDLED(124, sched_yield)
|
||||
SYSCALL_HANDLED(125, sched_get_priority_max)
|
||||
SYSCALL_HANDLED(126, sched_get_priority_min)
|
||||
SYSCALL_HANDLED(127, sched_rr_get_interval)
|
||||
SYSCALL_HANDLED(129, kill)
|
||||
SYSCALL_HANDLED(130, tkill)
|
||||
SYSCALL_HANDLED(131, tgkill)
|
||||
SYSCALL_HANDLED(132, sigaltstack)
|
||||
SYSCALL_HANDLED(133, rt_sigsuspend)
|
||||
SYSCALL_HANDLED(134, rt_sigaction)
|
||||
SYSCALL_HANDLED(135, rt_sigprocmask)
|
||||
SYSCALL_HANDLED(136, rt_sigpending)
|
||||
SYSCALL_HANDLED(137, rt_sigtimedwait)
|
||||
SYSCALL_HANDLED(138, rt_sigqueueinfo)
|
||||
SYSCALL_HANDLED(139, rt_sigreturn)
|
||||
SYSCALL_HANDLED(143, setregid)
|
||||
SYSCALL_HANDLED(144, setgid)
|
||||
SYSCALL_HANDLED(145, setreuid)
|
||||
SYSCALL_HANDLED(146, setuid)
|
||||
SYSCALL_HANDLED(147, setresuid)
|
||||
SYSCALL_HANDLED(148, getresuid)
|
||||
SYSCALL_HANDLED(149, setresgid)
|
||||
SYSCALL_HANDLED(150, getresgid)
|
||||
SYSCALL_HANDLED(151, setfsuid)
|
||||
SYSCALL_HANDLED(152, setfsgid)
|
||||
SYSCALL_HANDLED(153, times)
|
||||
SYSCALL_HANDLED(154, setpgid)
|
||||
SYSCALL_DELEGATED(160, uname)
|
||||
SYSCALL_HANDLED(163, getrlimit)
|
||||
SYSCALL_HANDLED(164, setrlimit)
|
||||
SYSCALL_HANDLED(165, getrusage)
|
||||
SYSCALL_HANDLED(167, prctl)
|
||||
SYSCALL_HANDLED(168, getcpu)
|
||||
SYSCALL_HANDLED(169, gettimeofday)
|
||||
SYSCALL_HANDLED(170, settimeofday)
|
||||
SYSCALL_HANDLED(172, getpid)
|
||||
SYSCALL_HANDLED(173, getppid)
|
||||
SYSCALL_HANDLED(174, getuid)
|
||||
SYSCALL_HANDLED(175, geteuid)
|
||||
SYSCALL_HANDLED(176, getgid)
|
||||
SYSCALL_HANDLED(177, getegid)
|
||||
SYSCALL_HANDLED(178, gettid)
|
||||
SYSCALL_HANDLED(179, sysinfo)
|
||||
SYSCALL_DELEGATED(188, msgrcv)
|
||||
SYSCALL_DELEGATED(189, msgsnd)
|
||||
SYSCALL_DELEGATED(192, semtimedop)
|
||||
SYSCALL_DELEGATED(193, semop)
|
||||
SYSCALL_HANDLED(194, shmget)
|
||||
SYSCALL_HANDLED(195, shmctl)
|
||||
SYSCALL_HANDLED(196, shmat)
|
||||
SYSCALL_HANDLED(197, shmdt)
|
||||
SYSCALL_HANDLED(214, brk)
|
||||
SYSCALL_HANDLED(215, munmap)
|
||||
SYSCALL_HANDLED(216, mremap)
|
||||
SYSCALL_HANDLED(220, clone)
|
||||
SYSCALL_HANDLED(221, execve)
|
||||
SYSCALL_HANDLED(222, mmap)
|
||||
SYSCALL_HANDLED(226, mprotect)
|
||||
SYSCALL_HANDLED(227, msync)
|
||||
SYSCALL_HANDLED(228, mlock)
|
||||
SYSCALL_HANDLED(229, munlock)
|
||||
SYSCALL_HANDLED(230, mlockall)
|
||||
SYSCALL_HANDLED(231, munlockall)
|
||||
SYSCALL_HANDLED(232, mincore)
|
||||
SYSCALL_HANDLED(233, madvise)
|
||||
SYSCALL_HANDLED(234, remap_file_pages)
|
||||
SYSCALL_HANDLED(235, mbind)
|
||||
SYSCALL_HANDLED(236, get_mempolicy)
|
||||
SYSCALL_HANDLED(237, set_mempolicy)
|
||||
SYSCALL_HANDLED(238, migrate_pages)
|
||||
SYSCALL_HANDLED(239, move_pages)
|
||||
#ifdef ENABLE_PERF
|
||||
SYSCALL_HANDLED(241, perf_event_open)
|
||||
#else // ENABLE_PERF
|
||||
SYSCALL_DELEGATED(241, perf_event_open)
|
||||
#endif // ENABLE_PERF
|
||||
SYSCALL_HANDLED(260, wait4)
|
||||
SYSCALL_HANDLED(261, prlimit64)
|
||||
SYSCALL_HANDLED(270, process_vm_readv)
|
||||
SYSCALL_HANDLED(271, process_vm_writev)
|
||||
SYSCALL_HANDLED(281, execveat)
|
||||
SYSCALL_HANDLED(700, get_cpu_id)
|
||||
#ifdef PROFILE_ENABLE
|
||||
SYSCALL_HANDLED(__NR_profile, profile)
|
||||
#endif // PROFILE_ENABLE
|
||||
SYSCALL_HANDLED(730, util_migrate_inter_kernel)
|
||||
SYSCALL_HANDLED(731, util_indicate_clone)
|
||||
SYSCALL_HANDLED(732, get_system)
|
||||
SYSCALL_HANDLED(733, util_register_desc)
|
||||
|
||||
/* McKernel Specific */
|
||||
SYSCALL_HANDLED(801, swapout)
|
||||
SYSCALL_HANDLED(802, linux_mlock)
|
||||
SYSCALL_HANDLED(803, suspend_threads)
|
||||
SYSCALL_HANDLED(804, resume_threads)
|
||||
SYSCALL_HANDLED(811, linux_spawn)
|
||||
|
||||
SYSCALL_DELEGATED(1024, open)
|
||||
SYSCALL_DELEGATED(1035, readlink)
|
||||
SYSCALL_HANDLED(1045, signalfd)
|
||||
SYSCALL_DELEGATED(1049, stat)
|
||||
SYSCALL_DELEGATED(1060, getpgrp)
|
||||
SYSCALL_HANDLED(1062, time)
|
||||
SYSCALL_DELEGATED(1069, epoll_wait)
|
||||
|
||||
/* Do not edit the lines including this comment and
|
||||
* EOF just after it because those are used as a
|
||||
* robust marker for the autotest patch.
|
||||
*/
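Illustrative only: one common way such an X-macro list is consumed is to define the two macros and then include the list; the table name and its bound are hypothetical, and the real McKernel consumer may differ.

    #define SYSCALL_HANDLED(num, name)   [num] = #name,
    #define SYSCALL_DELEGATED(num, name) [num] = #name " (delegated)",

    static const char *example_syscall_names[1100] = {
    #include "syscall_list.h"
    };

    #undef SYSCALL_HANDLED
    #undef SYSCALL_DELEGATED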
|
||||
403  arch/arm64/kernel/include/sysreg.h  Normal file
@@ -0,0 +1,403 @@
|
||||
/* sysreg.h COPYRIGHT FUJITSU LIMITED 2016-2018 */
|
||||
/*
|
||||
* Macros for accessing system registers with older binutils.
|
||||
*
|
||||
* Copyright (C) 2014 ARM Ltd.
|
||||
* Author: Catalin Marinas <catalin.marinas@arm.com>
|
||||
*
|
||||
* This program is free software: you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#ifndef __ASM_SYSREG_H
|
||||
#define __ASM_SYSREG_H
|
||||
|
||||
#include <types.h>
|
||||
#include <stringify.h>
|
||||
#include <ihk/types.h>
|
||||
|
||||
/*
|
||||
* ARMv8 ARM reserves the following encoding for system registers:
|
||||
* (Ref: ARMv8 ARM, Section: "System instruction class encoding overview",
|
||||
* C5.2, version:ARM DDI 0487A.f)
|
||||
* [20-19] : Op0
|
||||
* [18-16] : Op1
|
||||
* [15-12] : CRn
|
||||
* [11-8] : CRm
|
||||
* [7-5] : Op2
|
||||
*/
|
||||
#define Op0_shift 19
|
||||
#define Op0_mask 0x3
|
||||
#define Op1_shift 16
|
||||
#define Op1_mask 0x7
|
||||
#define CRn_shift 12
|
||||
#define CRn_mask 0xf
|
||||
#define CRm_shift 8
|
||||
#define CRm_mask 0xf
|
||||
#define Op2_shift 5
|
||||
#define Op2_mask 0x7
|
||||
|
||||
#define sys_reg(op0, op1, crn, crm, op2) \
|
||||
(((op0) << Op0_shift) | ((op1) << Op1_shift) | \
|
||||
((crn) << CRn_shift) | ((crm) << CRm_shift) | \
|
||||
((op2) << Op2_shift))
|
||||
|
||||
#define sys_reg_Op0(id) (((id) >> Op0_shift) & Op0_mask)
|
||||
#define sys_reg_Op1(id) (((id) >> Op1_shift) & Op1_mask)
|
||||
#define sys_reg_CRn(id) (((id) >> CRn_shift) & CRn_mask)
|
||||
#define sys_reg_CRm(id) (((id) >> CRm_shift) & CRm_mask)
|
||||
#define sys_reg_Op2(id) (((id) >> Op2_shift) & Op2_mask)
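A worked example of the encoding above (not part of the diff): MPIDR_EL1 is op0=3, op1=0, CRn=0, CRm=0, op2=5, so:

/* sys_reg(3, 0, 0, 0, 5)
 *   = (3 << 19) | (0 << 16) | (0 << 12) | (0 << 8) | (5 << 5)
 *   = 0x180000 | 0xa0
 *   = 0x1800a0
 * and the extractors invert it: sys_reg_Op0(0x1800a0) == 3,
 * sys_reg_Op2(0x1800a0) == 5.
 */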
|
||||
|
||||
#define SYS_MIDR_EL1 sys_reg(3, 0, 0, 0, 0)
|
||||
#define SYS_MPIDR_EL1 sys_reg(3, 0, 0, 0, 5)
|
||||
#define SYS_REVIDR_EL1 sys_reg(3, 0, 0, 0, 6)
|
||||
|
||||
#define SYS_ID_PFR0_EL1 sys_reg(3, 0, 0, 1, 0)
|
||||
#define SYS_ID_PFR1_EL1 sys_reg(3, 0, 0, 1, 1)
|
||||
#define SYS_ID_DFR0_EL1 sys_reg(3, 0, 0, 1, 2)
|
||||
#define SYS_ID_MMFR0_EL1 sys_reg(3, 0, 0, 1, 4)
|
||||
#define SYS_ID_MMFR1_EL1 sys_reg(3, 0, 0, 1, 5)
|
||||
#define SYS_ID_MMFR2_EL1 sys_reg(3, 0, 0, 1, 6)
|
||||
#define SYS_ID_MMFR3_EL1 sys_reg(3, 0, 0, 1, 7)
|
||||
|
||||
#define SYS_ID_ISAR0_EL1 sys_reg(3, 0, 0, 2, 0)
|
||||
#define SYS_ID_ISAR1_EL1 sys_reg(3, 0, 0, 2, 1)
|
||||
#define SYS_ID_ISAR2_EL1 sys_reg(3, 0, 0, 2, 2)
|
||||
#define SYS_ID_ISAR3_EL1 sys_reg(3, 0, 0, 2, 3)
|
||||
#define SYS_ID_ISAR4_EL1 sys_reg(3, 0, 0, 2, 4)
|
||||
#define SYS_ID_ISAR5_EL1 sys_reg(3, 0, 0, 2, 5)
|
||||
#define SYS_ID_MMFR4_EL1 sys_reg(3, 0, 0, 2, 6)
|
||||
|
||||
#define SYS_MVFR0_EL1 sys_reg(3, 0, 0, 3, 0)
|
||||
#define SYS_MVFR1_EL1 sys_reg(3, 0, 0, 3, 1)
|
||||
#define SYS_MVFR2_EL1 sys_reg(3, 0, 0, 3, 2)
|
||||
|
||||
#define SYS_ID_AA64PFR0_EL1 sys_reg(3, 0, 0, 4, 0)
|
||||
#define SYS_ID_AA64PFR1_EL1 sys_reg(3, 0, 0, 4, 1)
|
||||
#define SYS_ID_AA64ZFR0_EL1 sys_reg(3, 0, 0, 4, 4)
|
||||
|
||||
#define SYS_ID_AA64DFR0_EL1 sys_reg(3, 0, 0, 5, 0)
|
||||
#define SYS_ID_AA64DFR1_EL1 sys_reg(3, 0, 0, 5, 1)
|
||||
|
||||
#define SYS_ID_AA64ISAR0_EL1 sys_reg(3, 0, 0, 6, 0)
|
||||
#define SYS_ID_AA64ISAR1_EL1 sys_reg(3, 0, 0, 6, 1)
|
||||
|
||||
#define SYS_ID_AA64MMFR0_EL1 sys_reg(3, 0, 0, 7, 0)
|
||||
#define SYS_ID_AA64MMFR1_EL1 sys_reg(3, 0, 0, 7, 1)
|
||||
#define SYS_ID_AA64MMFR2_EL1 sys_reg(3, 0, 0, 7, 2)
|
||||
|
||||
#define SYS_ZCR_EL1 sys_reg(3, 0, 1, 2, 0)
|
||||
#define SYS_ZCR_EL2 sys_reg(3, 4, 1, 2, 0)
|
||||
|
||||
#define SYS_CNTFRQ_EL0 sys_reg(3, 3, 14, 0, 0)
|
||||
#define SYS_CTR_EL0 sys_reg(3, 3, 0, 0, 1)
|
||||
#define SYS_DCZID_EL0 sys_reg(3, 3, 0, 0, 7)
|
||||
|
||||
#define REG_PSTATE_PAN_IMM sys_reg(0, 0, 4, 0, 4)
|
||||
#define REG_PSTATE_UAO_IMM sys_reg(0, 0, 4, 0, 3)
|
||||
|
||||
/*
|
||||
#define SET_PSTATE_PAN(x) __inst_arm(0xd5000000 | REG_PSTATE_PAN_IMM | \
|
||||
(!!x)<<8 | 0x1f)
|
||||
#define SET_PSTATE_UAO(x) __inst_arm(0xd5000000 | REG_PSTATE_UAO_IMM | \
|
||||
(!!x)<<8 | 0x1f)
|
||||
*/
|
||||
|
||||
/* Common SCTLR_ELx flags. */
|
||||
#define SCTLR_ELx_EE (1 << 25)
|
||||
#define SCTLR_ELx_I (1 << 12)
|
||||
#define SCTLR_ELx_SA (1 << 3)
|
||||
#define SCTLR_ELx_C (1 << 2)
|
||||
#define SCTLR_ELx_A (1 << 1)
|
||||
#define SCTLR_ELx_M 1
|
||||
|
||||
#define SCTLR_ELx_FLAGS (SCTLR_ELx_M | SCTLR_ELx_A | SCTLR_ELx_C | \
|
||||
SCTLR_ELx_SA | SCTLR_ELx_I)
|
||||
|
||||
/* SCTLR_EL1 specific flags. */
|
||||
#define SCTLR_EL1_UCI (1 << 26)
|
||||
#define SCTLR_EL1_SPAN (1 << 23)
|
||||
#define SCTLR_EL1_UCT (1 << 15)
|
||||
#define SCTLR_EL1_SED (1 << 8)
|
||||
#define SCTLR_EL1_CP15BEN (1 << 5)
|
||||
|
||||
/* id_aa64isar0 */
|
||||
#define ID_AA64ISAR0_RDM_SHIFT 28
|
||||
#define ID_AA64ISAR0_ATOMICS_SHIFT 20
|
||||
#define ID_AA64ISAR0_CRC32_SHIFT 16
|
||||
#define ID_AA64ISAR0_SHA2_SHIFT 12
|
||||
#define ID_AA64ISAR0_SHA1_SHIFT 8
|
||||
#define ID_AA64ISAR0_AES_SHIFT 4
|
||||
|
||||
/* id_aa64isar1 */
|
||||
#define ID_AA64ISAR1_LRCPC_SHIFT 20
|
||||
#define ID_AA64ISAR1_FCMA_SHIFT 16
|
||||
#define ID_AA64ISAR1_JSCVT_SHIFT 12
|
||||
#define ID_AA64ISAR1_DPB_SHIFT 0
|
||||
|
||||
/* id_aa64pfr0 */
|
||||
#define ID_AA64PFR0_SVE_SHIFT 32
|
||||
#define ID_AA64PFR0_GIC_SHIFT 24
|
||||
#define ID_AA64PFR0_ASIMD_SHIFT 20
|
||||
#define ID_AA64PFR0_FP_SHIFT 16
|
||||
#define ID_AA64PFR0_EL3_SHIFT 12
|
||||
#define ID_AA64PFR0_EL2_SHIFT 8
|
||||
#define ID_AA64PFR0_EL1_SHIFT 4
|
||||
#define ID_AA64PFR0_EL0_SHIFT 0
|
||||
|
||||
#define ID_AA64PFR0_SVE 0x1
|
||||
#define ID_AA64PFR0_FP_NI 0xf
|
||||
#define ID_AA64PFR0_FP_SUPPORTED 0x0
|
||||
#define ID_AA64PFR0_ASIMD_NI 0xf
|
||||
#define ID_AA64PFR0_ASIMD_SUPPORTED 0x0
|
||||
#define ID_AA64PFR0_EL1_64BIT_ONLY 0x1
|
||||
#define ID_AA64PFR0_EL0_64BIT_ONLY 0x1
|
||||
#define ID_AA64PFR0_EL0_32BIT_64BIT 0x2
|
||||
|
||||
/* id_aa64mmfr0 */
|
||||
#define ID_AA64MMFR0_TGRAN4_SHIFT 28
|
||||
#define ID_AA64MMFR0_TGRAN64_SHIFT 24
|
||||
#define ID_AA64MMFR0_TGRAN16_SHIFT 20
|
||||
#define ID_AA64MMFR0_BIGENDEL0_SHIFT 16
|
||||
#define ID_AA64MMFR0_SNSMEM_SHIFT 12
|
||||
#define ID_AA64MMFR0_BIGENDEL_SHIFT 8
|
||||
#define ID_AA64MMFR0_ASID_SHIFT 4
|
||||
#define ID_AA64MMFR0_PARANGE_SHIFT 0
|
||||
|
||||
#define ID_AA64MMFR0_TGRAN4_NI 0xf
|
||||
#define ID_AA64MMFR0_TGRAN4_SUPPORTED 0x0
|
||||
#define ID_AA64MMFR0_TGRAN64_NI 0xf
|
||||
#define ID_AA64MMFR0_TGRAN64_SUPPORTED 0x0
|
||||
#define ID_AA64MMFR0_TGRAN16_NI 0x0
|
||||
#define ID_AA64MMFR0_TGRAN16_SUPPORTED 0x1
|
||||
#define ID_AA64MMFR0_PARANGE_48 0x5
|
||||
#define ID_AA64MMFR0_PARANGE_52 0x6
|
||||
|
||||
#ifdef CONFIG_ARM64_PA_BITS_52
|
||||
#define ID_AA64MMFR0_PARANGE_MAX ID_AA64MMFR0_PARANGE_52
|
||||
#else
|
||||
#define ID_AA64MMFR0_PARANGE_MAX ID_AA64MMFR0_PARANGE_48
|
||||
#endif
|
||||
|
||||
/* id_aa64mmfr1 */
|
||||
#define ID_AA64MMFR1_PAN_SHIFT 20
|
||||
#define ID_AA64MMFR1_LOR_SHIFT 16
|
||||
#define ID_AA64MMFR1_HPD_SHIFT 12
|
||||
#define ID_AA64MMFR1_VHE_SHIFT 8
|
||||
#define ID_AA64MMFR1_VMIDBITS_SHIFT 4
|
||||
#define ID_AA64MMFR1_HADBS_SHIFT 0
|
||||
|
||||
#define ID_AA64MMFR1_VMIDBITS_8 0
|
||||
#define ID_AA64MMFR1_VMIDBITS_16 2
|
||||
|
||||
/* id_aa64mmfr2 */
|
||||
#define ID_AA64MMFR2_LVA_SHIFT 16
|
||||
#define ID_AA64MMFR2_IESB_SHIFT 12
|
||||
#define ID_AA64MMFR2_LSM_SHIFT 8
|
||||
#define ID_AA64MMFR2_UAO_SHIFT 4
|
||||
#define ID_AA64MMFR2_CNP_SHIFT 0
|
||||
|
||||
/* id_aa64dfr0 */
|
||||
#define ID_AA64DFR0_PMSVER_SHIFT 32
|
||||
#define ID_AA64DFR0_CTX_CMPS_SHIFT 28
|
||||
#define ID_AA64DFR0_WRPS_SHIFT 20
|
||||
#define ID_AA64DFR0_BRPS_SHIFT 12
|
||||
#define ID_AA64DFR0_PMUVER_SHIFT 8
|
||||
#define ID_AA64DFR0_TRACEVER_SHIFT 4
|
||||
#define ID_AA64DFR0_DEBUGVER_SHIFT 0
|
||||
|
||||
#define ID_ISAR5_RDM_SHIFT 24
|
||||
#define ID_ISAR5_CRC32_SHIFT 16
|
||||
#define ID_ISAR5_SHA2_SHIFT 12
|
||||
#define ID_ISAR5_SHA1_SHIFT 8
|
||||
#define ID_ISAR5_AES_SHIFT 4
|
||||
#define ID_ISAR5_SEVL_SHIFT 0
|
||||
|
||||
#define MVFR0_FPROUND_SHIFT 28
|
||||
#define MVFR0_FPSHVEC_SHIFT 24
|
||||
#define MVFR0_FPSQRT_SHIFT 20
|
||||
#define MVFR0_FPDIVIDE_SHIFT 16
|
||||
#define MVFR0_FPTRAP_SHIFT 12
|
||||
#define MVFR0_FPDP_SHIFT 8
|
||||
#define MVFR0_FPSP_SHIFT 4
|
||||
#define MVFR0_SIMD_SHIFT 0
|
||||
|
||||
#define MVFR1_SIMDFMAC_SHIFT 28
|
||||
#define MVFR1_FPHP_SHIFT 24
|
||||
#define MVFR1_SIMDHP_SHIFT 20
|
||||
#define MVFR1_SIMDSP_SHIFT 16
|
||||
#define MVFR1_SIMDINT_SHIFT 12
|
||||
#define MVFR1_SIMDLS_SHIFT 8
|
||||
#define MVFR1_FPDNAN_SHIFT 4
|
||||
#define MVFR1_FPFTZ_SHIFT 0
|
||||
|
||||
#define ID_AA64MMFR0_TGRAN4_SHIFT 28
|
||||
#define ID_AA64MMFR0_TGRAN64_SHIFT 24
|
||||
#define ID_AA64MMFR0_TGRAN16_SHIFT 20
|
||||
|
||||
#define ID_AA64MMFR0_TGRAN4_NI 0xf
|
||||
#define ID_AA64MMFR0_TGRAN4_SUPPORTED 0x0
|
||||
#define ID_AA64MMFR0_TGRAN64_NI 0xf
|
||||
#define ID_AA64MMFR0_TGRAN64_SUPPORTED 0x0
|
||||
#define ID_AA64MMFR0_TGRAN16_NI 0x0
|
||||
#define ID_AA64MMFR0_TGRAN16_SUPPORTED 0x1
|
||||
|
||||
#if defined(CONFIG_ARM64_4K_PAGES)
|
||||
#define ID_AA64MMFR0_TGRAN_SHIFT ID_AA64MMFR0_TGRAN4_SHIFT
|
||||
#define ID_AA64MMFR0_TGRAN_SUPPORTED ID_AA64MMFR0_TGRAN4_SUPPORTED
|
||||
#elif defined(CONFIG_ARM64_16K_PAGES)
|
||||
#define ID_AA64MMFR0_TGRAN_SHIFT ID_AA64MMFR0_TGRAN16_SHIFT
|
||||
#define ID_AA64MMFR0_TGRAN_SUPPORTED ID_AA64MMFR0_TGRAN16_SUPPORTED
|
||||
#elif defined(CONFIG_ARM64_64K_PAGES)
|
||||
#define ID_AA64MMFR0_TGRAN_SHIFT ID_AA64MMFR0_TGRAN64_SHIFT
|
||||
#define ID_AA64MMFR0_TGRAN_SUPPORTED ID_AA64MMFR0_TGRAN64_SUPPORTED
|
||||
#endif
|
||||
|
||||
#define ZCR_EL1_LEN_SHIFT 0
|
||||
#define ZCR_EL1_LEN_SIZE 9
|
||||
#define ZCR_EL1_LEN_MASK 0x1ff
|
||||
|
||||
#define CPACR_EL1_ZEN_EL1EN (1 << 16)
|
||||
#define CPACR_EL1_ZEN_EL0EN (1 << 17)
|
||||
#define CPACR_EL1_ZEN (CPACR_EL1_ZEN_EL1EN | CPACR_EL1_ZEN_EL0EN)
|
||||
|
||||
/* Safe value for MPIDR_EL1: Bit31:RES1, Bit30:U:0, Bit24:MT:0 */
|
||||
#define SYS_MPIDR_SAFE_VAL (1UL << 31)
|
||||
|
||||
/* SYS_MIDR_EL1 */
|
||||
//mask
|
||||
#define SYS_MIDR_EL1_IMPLEMENTER_MASK (0xFFUL)
|
||||
#define SYS_MIDR_EL1_PPNUM_MASK (0xFFFUL)
|
||||
//shift
|
||||
#define SYS_MIDR_EL1_IMPLEMENTER_SHIFT (24)
|
||||
#define SYS_MIDR_EL1_PPNUM_SHIFT (0x4)
|
||||
//val
|
||||
#define SYS_MIDR_EL1_IMPLEMENTER_FJ (0x46)
|
||||
#define SYS_MIDR_EL1_PPNUM_TCHIP (0x1)
|
||||
|
||||
#define READ_ACCESS (0)
|
||||
#define WRITE_ACCESS (1)
|
||||
#define ACCESS_REG_FUNC(name, reg) \
|
||||
static void xos_access_##name(uint8_t flag, uint64_t *reg_value) \
|
||||
{ \
|
||||
if (flag == READ_ACCESS) { \
|
||||
__asm__ __volatile__("mrs_s %0," __stringify(reg) "\n\t" \
|
||||
:"=&r"(*reg_value)::); \
|
||||
} \
|
||||
else if (flag == WRITE_ACCESS) { \
|
||||
__asm__ __volatile__("msr_s" __stringify(reg) ", %0\n\t" \
|
||||
::"r"(*reg_value):); \
|
||||
} else { \
|
||||
; \
|
||||
} \
|
||||
}
|
||||
|
||||
#define XOS_FALSE (0)
|
||||
#define XOS_TRUE (1)
|
||||
|
||||
#ifdef __ASSEMBLY__
|
||||
#define __emit_inst(x) .inst (x)
|
||||
.irp num,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30
|
||||
.equ .L__reg_num_x\num, \num
|
||||
.endr
|
||||
.equ .L__reg_num_xzr, 31
|
||||
|
||||
.macro mrs_s, rt, sreg
|
||||
__emit_inst(0xd5200000|(\sreg)|(.L__reg_num_\rt))
|
||||
.endm
|
||||
|
||||
.macro msr_s, sreg, rt
|
||||
__emit_inst(0xd5000000|(\sreg)|(.L__reg_num_\rt))
|
||||
.endm
|
||||
|
||||
#else
|
||||
#define __emit_inst(x)".inst " __stringify((x)) "\n\t"
|
||||
asm(
|
||||
" .irp num,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30\n"
|
||||
" .equ .L__reg_num_x\\num, \\num\n"
|
||||
" .endr\n"
|
||||
" .equ .L__reg_num_xzr, 31\n"
|
||||
"\n"
|
||||
" .macro mrs_s, rt, sreg\n"
|
||||
__emit_inst(0xd5200000|(\\sreg)|(.L__reg_num_\\rt))
|
||||
" .endm\n"
|
||||
"\n"
|
||||
" .macro msr_s, sreg, rt\n"
|
||||
__emit_inst(0xd5000000|(\\sreg)|(.L__reg_num_\\rt))
|
||||
" .endm\n"
|
||||
);
|
||||
|
||||
ACCESS_REG_FUNC(midr_el1, SYS_MIDR_EL1);
|
||||
static int xos_is_tchip(void)
|
||||
{
|
||||
uint64_t reg = 0;
|
||||
int ret = 0, impl = 0, part = 0;
|
||||
|
||||
xos_access_midr_el1(READ_ACCESS, &reg);
|
||||
|
||||
impl = (reg >> SYS_MIDR_EL1_IMPLEMENTER_SHIFT) &
|
||||
SYS_MIDR_EL1_IMPLEMENTER_MASK;
|
||||
part = (reg >> SYS_MIDR_EL1_PPNUM_SHIFT) & SYS_MIDR_EL1_PPNUM_MASK;
|
||||
|
||||
if ((impl == SYS_MIDR_EL1_IMPLEMENTER_FJ) &&
|
||||
(part == SYS_MIDR_EL1_PPNUM_TCHIP)) {
|
||||
ret = XOS_TRUE;
|
||||
}
|
||||
else {
|
||||
ret = XOS_FALSE;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Unlike read_cpuid, calls to read_sysreg are never expected to be
|
||||
* optimized away or replaced with synthetic values.
|
||||
*/
|
||||
#define read_sysreg(r) ({ \
|
||||
uint64_t __val; \
|
||||
asm volatile("mrs %0, " __stringify(r) : "=r" (__val)); \
|
||||
__val; \
|
||||
})
|
||||
|
||||
/*
|
||||
* The "Z" constraint normally means a zero immediate, but when combined with
|
||||
* the "%x0" template means XZR.
|
||||
*/
|
||||
#define write_sysreg(v, r) do { \
|
||||
uint64_t __val = (uint64_t)v; \
|
||||
asm volatile("msr " __stringify(r) ", %x0" \
|
||||
: : "rZ" (__val)); \
|
||||
} while (0)
|
||||
|
||||
/*
|
||||
* For registers without architectural names, or simply unsupported by
|
||||
* GAS.
|
||||
*/
|
||||
#define read_sysreg_s(r) ({ \
|
||||
uint64_t __val; \
|
||||
asm volatile("mrs_s %0, " __stringify(r) : "=r" (__val)); \
|
||||
__val; \
|
||||
})
|
||||
|
||||
#define write_sysreg_s(v, r) do { \
|
||||
uint64_t __val = (uint64_t)v; \
|
||||
asm volatile("msr_s " __stringify(r) ", %x0" : : "rZ" (__val)); \
|
||||
} while (0)
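A small usage sketch of the accessors above, assuming a C translation unit that includes this header (so read_sysreg()/read_sysreg_s() and the mrs_s assembler macro are in scope); the function name is illustrative:

static inline uint64_t example_read_cpu_ids(void)
{
	/* Architecturally named register: plain mrs via read_sysreg(). */
	uint64_t midr = read_sysreg(midr_el1);

	/* Register given by its sys_reg() encoding: read_sysreg_s(),
	 * which goes through the mrs_s macro emitted above. */
	uint64_t mpidr = read_sysreg_s(SYS_MPIDR_EL1);

	return midr ^ mpidr;
}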
|
||||
|
||||
/* @ref.impl arch/arm64/include/asm/kvm_arm.h */
|
||||
#define CPTR_EL2_TZ (1 << 8)
|
||||
|
||||
#include "imp-sysreg.h"
|
||||
|
||||
#endif /* __ASM_SYSREG_H */
107  arch/arm64/kernel/include/thread_info.h  Normal file
@ -0,0 +1,107 @@
/* thread_info.h COPYRIGHT FUJITSU LIMITED 2015-2019 */
|
||||
#ifndef __HEADER_ARM64_COMMON_THREAD_INFO_H
|
||||
#define __HEADER_ARM64_COMMON_THREAD_INFO_H
|
||||
|
||||
#define MIN_KERNEL_STACK_SHIFT 15
|
||||
|
||||
#include <arch-memory.h>
|
||||
|
||||
#if (MIN_KERNEL_STACK_SHIFT < PAGE_SHIFT)
|
||||
#define KERNEL_STACK_SHIFT PAGE_SHIFT
|
||||
#else
|
||||
#define KERNEL_STACK_SHIFT MIN_KERNEL_STACK_SHIFT
|
||||
#endif
|
||||
|
||||
#define KERNEL_STACK_SIZE (UL(1) << KERNEL_STACK_SHIFT)
|
||||
#define THREAD_START_SP KERNEL_STACK_SIZE - 16
|
||||
|
||||
#ifndef __ASSEMBLY__
|
||||
|
||||
#include <process.h>
|
||||
#include <prctl.h>
|
||||
|
||||
struct cpu_context {
|
||||
unsigned long x19;
|
||||
unsigned long x20;
|
||||
unsigned long x21;
|
||||
unsigned long x22;
|
||||
unsigned long x23;
|
||||
unsigned long x24;
|
||||
unsigned long x25;
|
||||
unsigned long x26;
|
||||
unsigned long x27;
|
||||
unsigned long x28;
|
||||
unsigned long fp;
|
||||
unsigned long sp;
|
||||
unsigned long pc;
|
||||
};
|
||||
|
||||
struct thread_info {
|
||||
unsigned long flags; /* low level flags */
|
||||
// mm_segment_t addr_limit; /* address limit */
|
||||
// struct task_struct *task; /* main task structure */
|
||||
// struct exec_domain *exec_domain; /* execution domain */
|
||||
// struct restart_block restart_block;
|
||||
// int preempt_count; /* 0 => preemptable, <0 => bug */
|
||||
int cpu; /* cpu */
|
||||
struct cpu_context cpu_context; /* kernel_context */
|
||||
void *sve_state; /* SVE registers, if any */
|
||||
unsigned int sve_vl; /* SVE vector length */
|
||||
unsigned int sve_vl_onexec; /* SVE vl after next exec */
|
||||
unsigned long sve_flags; /* SVE related flags */
|
||||
unsigned long fault_address; /* fault info */
|
||||
unsigned long fault_code; /* ESR_EL1 value */
|
||||
};
|
||||
|
||||
/* Flags for sve_flags (intentionally defined to match the prctl flags) */
|
||||
|
||||
/* Inherit sve_vl and sve_flags across execve(): */
|
||||
#define THREAD_VL_INHERIT PR_SVE_VL_INHERIT
|
||||
|
||||
struct arm64_cpu_local_thread {
|
||||
struct thread_info thread_info;
|
||||
unsigned long paniced;
|
||||
uint64_t panic_regs[34];
|
||||
};
|
||||
|
||||
union arm64_cpu_local_variables {
|
||||
struct arm64_cpu_local_thread arm64_cpu_local_thread;
|
||||
unsigned long stack[KERNEL_STACK_SIZE / sizeof(unsigned long)];
|
||||
};
|
||||
extern union arm64_cpu_local_variables init_thread_info;
|
||||
|
||||
/*
|
||||
* how to get the current stack pointer from C
|
||||
*/
|
||||
register unsigned long current_stack_pointer asm ("sp");
|
||||
|
||||
/*
|
||||
* how to get the thread information struct from C
|
||||
*/
|
||||
static inline struct thread_info *current_thread_info(void)
|
||||
{
|
||||
unsigned long ti = 0;
|
||||
|
||||
ti = ALIGN_DOWN(current_stack_pointer, KERNEL_STACK_SIZE);
|
||||
|
||||
return (struct thread_info *)ti;
|
||||
}
|
||||
|
||||
/*
|
||||
* how to get the pt_regs struct from C
|
||||
*/
|
||||
static inline struct pt_regs *current_pt_regs(void)
|
||||
{
|
||||
unsigned long regs = 0;
|
||||
|
||||
regs = ALIGN_DOWN(current_stack_pointer, KERNEL_STACK_SIZE);
|
||||
regs += THREAD_START_SP - sizeof(struct pt_regs);
|
||||
|
||||
return (struct pt_regs *)regs;
|
||||
}
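A worked example of the stack-masking trick used by current_thread_info() and current_pt_regs() above, assuming MIN_KERNEL_STACK_SHIFT = 15 (32 KiB stacks) and an arbitrary in-stack sp value:

/* KERNEL_STACK_SIZE = 1UL << 15 = 0x8000, THREAD_START_SP = 0x7ff0
 *   sp                        = 0xffff00000123f9c0
 *   ALIGN_DOWN(sp, 0x8000)    = 0xffff000001238000  -> struct thread_info
 *   base + THREAD_START_SP    = 0xffff00000123fff0
 *   - sizeof(struct pt_regs)  -> pt_regs saved at the top of the stack
 * This only works because every kernel stack is allocated with
 * KERNEL_STACK_SIZE alignment (see init_thread_info in local.c below).
 */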
|
||||
|
||||
#endif /* !__ASSEMBLY__ */
|
||||
|
||||
#define TIF_SINGLESTEP 21
|
||||
|
||||
#endif /* !__HEADER_ARM64_COMMON_THREAD_INFO_H */
28  arch/arm64/kernel/include/traps.h  Normal file
@ -0,0 +1,28 @@
/* traps.h COPYRIGHT FUJITSU LIMITED 2017 */
|
||||
|
||||
#ifndef __ASM_TRAP_H
|
||||
#define __ASM_TRAP_H
|
||||
|
||||
#include <types.h>
|
||||
#include <arch-lock.h>
|
||||
|
||||
struct pt_regs;
|
||||
|
||||
/* @ref.impl arch/arm64/include/asm/traps.h */
|
||||
struct undef_hook {
|
||||
struct list_head node;
|
||||
uint32_t instr_mask;
|
||||
uint32_t instr_val;
|
||||
uint64_t pstate_mask;
|
||||
uint64_t pstate_val;
|
||||
int (*fn)(struct pt_regs *regs, uint32_t instr);
|
||||
};
|
||||
|
||||
/* @ref.impl arch/arm64/include/asm/traps.h */
|
||||
void register_undef_hook(struct undef_hook *hook);
|
||||
|
||||
/* @ref.impl arch/arm64/include/asm/traps.h */
|
||||
void unregister_undef_hook(struct undef_hook *hook);
|
||||
|
||||
#endif /* __ASM_TRAP_H */
30  arch/arm64/kernel/include/vdso.h  Normal file
@ -0,0 +1,30 @@
/* vdso.h COPYRIGHT FUJITSU LIMITED 2016 */
|
||||
#ifndef __HEADER_ARM64_COMMON_VDSO_H
|
||||
#define __HEADER_ARM64_COMMON_VDSO_H
|
||||
|
||||
#ifdef __KERNEL__
|
||||
|
||||
/* @ref.impl arch/arm64/include/asm/vdso.h::VDSO_LBASE */
|
||||
/*
|
||||
* Default link address for the vDSO.
|
||||
* Since we randomise the VDSO mapping, there's little point in trying
|
||||
* to prelink this.
|
||||
*/
|
||||
#define VDSO_LBASE 0x0
|
||||
|
||||
#ifndef __ASSEMBLY__
|
||||
|
||||
#include <vdso-offsets.h>
|
||||
|
||||
/* @ref.impl arch/arm64/include/asm/vdso.h::VDSO_SYMBOL */
|
||||
#define VDSO_SYMBOL(base, name) vdso_symbol_##name((unsigned long)(base))
|
||||
void* vdso_symbol_sigtramp(unsigned long base);
|
||||
|
||||
int add_vdso_pages(struct thread *thread);
|
||||
|
||||
#endif /* !__ASSEMBLY__ */
|
||||
|
||||
#endif /* __KERNEL__ */
|
||||
|
||||
#endif /* !__HEADER_ARM64_COMMON_VDSO_H */
22  arch/arm64/kernel/include/virt.h  Normal file
@ -0,0 +1,22 @@
/* virt.h COPYRIGHT FUJITSU LIMITED 2015-2017 */
|
||||
#ifndef __HEADER_ARM64_COMMON_VIRT_H
|
||||
#define __HEADER_ARM64_COMMON_VIRT_H
|
||||
|
||||
/* @ref.impl linux-v4.15-rc3 arch/arm64/include/asm/virt.h */
|
||||
#define BOOT_CPU_MODE_EL1 (0xe11)
|
||||
#define BOOT_CPU_MODE_EL2 (0xe12)
|
||||
|
||||
#ifndef __ASSEMBLY__
|
||||
|
||||
#include <sysreg.h>
|
||||
#include <ptrace.h>
|
||||
|
||||
/* @ref.impl linux-v4.15-rc3 arch/arm64/include/asm/virt.h */
|
||||
static inline int is_kernel_in_hyp_mode(void)
|
||||
{
|
||||
return read_sysreg(CurrentEL) == CurrentEL_EL2;
|
||||
}
|
||||
|
||||
#endif /* !__ASSEMBLY__ */
|
||||
|
||||
#endif /* !__HEADER_ARM64_COMMON_VIRT_H */
176  arch/arm64/kernel/irq-gic-v2.c  Normal file
@ -0,0 +1,176 @@
/* irq-gic-v2.c COPYRIGHT FUJITSU LIMITED 2015-2018 */
|
||||
#include <ihk/cpu.h>
|
||||
#include <irq.h>
|
||||
#include <arm-gic-v2.h>
|
||||
#include <io.h>
|
||||
#include <arch/cpu.h>
|
||||
#include <memory.h>
|
||||
#include <affinity.h>
|
||||
#include <syscall.h>
|
||||
#include <ihk/debug.h>
|
||||
#include <arch-timer.h>
|
||||
#include <cls.h>
|
||||
|
||||
// #define DEBUG_GICV2
|
||||
|
||||
#ifdef DEBUG_GICV2
|
||||
#undef DDEBUG_DEFAULT
|
||||
#define DDEBUG_DEFAULT DDEBUG_PRINT
|
||||
#endif
|
||||
|
||||
void *dist_base;
|
||||
void *cpu_base;
|
||||
|
||||
#define gic_hwid_to_affinity(hw_cpuid) (1UL << hw_cpuid)
|
||||
|
||||
/**
|
||||
* arm64_raise_sgi_gicv2
|
||||
* @ref.impl drivers/irqchip/irq-gic.c:gic_raise_softirq
|
||||
*
|
||||
* @note Because interrupt control is performed by a higher-level
* function, this function does not need to disable/enable
* interrupts the way gic_raise_softirq() does.
|
||||
*/
|
||||
static void __arm64_raise_sgi_gicv2(unsigned int hw_cpuid, unsigned int vector)
|
||||
{
|
||||
/* Build interrupt destination of the target cpu */
|
||||
uint8_t cpu_target_list = gic_hwid_to_affinity(hw_cpuid);
|
||||
|
||||
/*
|
||||
* Ensure that stores to Normal memory are visible to the
|
||||
* other CPUs before they observe us issuing the IPI.
|
||||
*/
|
||||
dmb(ishst);
|
||||
|
||||
/* write to GICD_SGIR */
|
||||
writel_relaxed(
|
||||
cpu_target_list << 16 | vector,
|
||||
(void *)(dist_base + GIC_DIST_SOFTINT)
|
||||
);
|
||||
}
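For reference, GICD_SGIR packs the CPU target list into bits [23:16] and the SGI number into bits [3:0], with TargetListFilter (bits [25:24]) left at 0, meaning "deliver to the listed CPUs". A worked example of the value written above (illustrative, not part of the diff):

/* Raise SGI 7 on hardware CPU 2:
 *   cpu_target_list = 1 << 2          = 0x04
 *   GICD_SGIR value = 0x04 << 16 | 7  = 0x00040007
 * written to dist_base + GIC_DIST_SOFTINT.
 */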
|
||||
|
||||
static void arm64_raise_sgi_gicv2(uint32_t cpuid, uint32_t vector)
|
||||
{
|
||||
/* Build interrupt destination of the target CPU */
|
||||
uint32_t hw_cpuid = ihk_mc_get_cpu_info()->hw_ids[cpuid];
|
||||
|
||||
__arm64_raise_sgi_gicv2(hw_cpuid, vector);
|
||||
}
|
||||
|
||||
static void arm64_raise_sgi_to_host_gicv2(uint32_t cpuid, uint32_t vector)
|
||||
{
|
||||
/* Build interrupt destination of the target Linux/host CPU */
|
||||
uint32_t hw_cpuid = ihk_mc_get_apicid(cpuid);
|
||||
|
||||
__arm64_raise_sgi_gicv2(hw_cpuid, vector);
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* arm64_raise_spi_gicv2
|
||||
* @ref.impl nothing.
|
||||
*/
|
||||
static void arm64_raise_spi_gicv2(unsigned int cpuid, unsigned int vector)
|
||||
{
|
||||
uint64_t spi_reg_offset;
|
||||
uint32_t spi_set_pending_bitpos;
|
||||
|
||||
/**
|
||||
* calculates register offset and bit position corresponding to the numbers.
|
||||
*
|
||||
* For interrupt vector m,
|
||||
* - the corresponding GICD_ISPENDR number, n, is given by n = m / 32
|
||||
* - the offset of the required GICD_ISPENDR is (0x200 + (4*n))
|
||||
* - the bit number of the required Set-pending bit in this register is m % 32.
|
||||
*/
|
||||
spi_reg_offset = vector / 32 * 4;
|
||||
spi_set_pending_bitpos = vector % 32;
|
||||
|
||||
/* write to GICD_ISPENDR */
|
||||
writel_relaxed(
|
||||
1 << spi_set_pending_bitpos,
|
||||
(void *)(dist_base + GIC_DIST_PENDING_SET + spi_reg_offset)
|
||||
);
|
||||
}
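A worked example of the GICD_ISPENDR arithmetic in the function above:

/* For SPI vector 45:
 *   n                      = 45 / 32 = 1   (register GICD_ISPENDR1)
 *   spi_reg_offset         = 1 * 4   = 4
 *   spi_set_pending_bitpos = 45 % 32 = 13
 * so bit 13 is written at dist_base + GIC_DIST_PENDING_SET + 4.
 */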
|
||||
|
||||
void arm64_issue_host_ipi_gicv2(uint32_t cpuid, uint32_t vector)
|
||||
{
|
||||
arm64_raise_sgi_to_host_gicv2(cpuid, vector);
|
||||
}
|
||||
|
||||
/**
|
||||
* arm64_issue_ipi_gicv2
|
||||
* @param cpuid : hardware cpu id
|
||||
* @param vector : interrupt vector number
|
||||
*/
|
||||
void arm64_issue_ipi_gicv2(unsigned int cpuid, unsigned int vector)
|
||||
{
|
||||
dkprintf("Send irq#%d to cpuid=%d\n", vector, cpuid);
|
||||
|
||||
if(vector < 16){
|
||||
// send SGI
|
||||
arm64_raise_sgi_gicv2(cpuid, vector);
|
||||
} else if (32 <= vector && vector < 1020) {
|
||||
// send SPI (allow only to host)
|
||||
arm64_raise_spi_gicv2(cpuid, vector);
|
||||
} else {
|
||||
ekprintf("#%d is bad irq number.", vector);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* handle_interrupt_gicv2
|
||||
* @ref.impl drivers/irqchip/irq-gic.c:gic_handle_irq
|
||||
*/
|
||||
extern int interrupt_from_user(void *);
|
||||
void handle_interrupt_gicv2(struct pt_regs *regs)
|
||||
{
|
||||
unsigned int irqstat, irqnr;
|
||||
const int from_user = interrupt_from_user(regs);
|
||||
|
||||
set_cputime(from_user ? CPUTIME_MODE_U2K : CPUTIME_MODE_K2K_IN);
|
||||
do {
|
||||
// get GICC_IAR.InterruptID
|
||||
irqstat = readl_relaxed(cpu_base + GIC_CPU_INTACK);
|
||||
irqnr = irqstat & GICC_IAR_INT_ID_MASK;
|
||||
|
||||
if (irqnr < 32) {
|
||||
writel_relaxed(irqstat, cpu_base + GIC_CPU_EOI);
|
||||
handle_IPI(irqnr, regs);
|
||||
continue;
|
||||
} else if (irqnr != 1023) {
|
||||
panic("PANIC: handle_interrupt_gicv2(): catch invalid interrupt.");
|
||||
}
|
||||
|
||||
/*
|
||||
* If another interrupt is not pending, GICC_IAR.InterruptID
|
||||
* returns 1023 (see GICv2 spec., Chap. 4.4.4).
|
||||
*/
|
||||
break;
|
||||
} while (1);
|
||||
set_cputime(from_user ? CPUTIME_MODE_K2U : CPUTIME_MODE_K2K_OUT);
|
||||
|
||||
/* for migration by IPI */
|
||||
if (get_this_cpu_local_var()->flags & CPU_FLAG_NEED_MIGRATE) {
|
||||
schedule();
|
||||
check_signal(0, regs, 0);
|
||||
}
|
||||
}
|
||||
|
||||
void gic_dist_init_gicv2(unsigned long dist_base_pa, unsigned long size)
|
||||
{
|
||||
dist_base = map_fixed_area(dist_base_pa, size, 1 /* non cacheable */);
|
||||
}
|
||||
|
||||
void gic_cpu_init_gicv2(unsigned long cpu_base_pa, unsigned long size)
|
||||
{
|
||||
cpu_base = map_fixed_area(cpu_base_pa, size, 1 /* non cacheable */);
|
||||
}
|
||||
|
||||
void gic_enable_gicv2(void)
|
||||
{
|
||||
unsigned int enable_ppi_sgi = 0;
|
||||
|
||||
enable_ppi_sgi |= GICD_ENABLE << get_timer_intrid();
|
||||
writel_relaxed(enable_ppi_sgi, dist_base + GIC_DIST_ENABLE_SET);
|
||||
}
514  arch/arm64/kernel/irq-gic-v3.c  Normal file
@ -0,0 +1,514 @@
/* irq-gic-v3.c COPYRIGHT FUJITSU LIMITED 2015-2018 */
|
||||
#include <irq.h>
|
||||
#include <arm-gic-v2.h>
|
||||
#include <arm-gic-v3.h>
|
||||
#include <io.h>
|
||||
#include <cputype.h>
|
||||
#include <process.h>
|
||||
#include <syscall.h>
|
||||
#include <ihk/debug.h>
|
||||
#include <arch-timer.h>
|
||||
#include <cls.h>
|
||||
|
||||
//#define DEBUG_GICV3
|
||||
|
||||
#define USE_CAVIUM_THUNDER_X
|
||||
|
||||
#ifdef DEBUG_GICV3
|
||||
#undef DDEBUG_DEFAULT
|
||||
#define DDEBUG_DEFAULT DDEBUG_PRINT
|
||||
#endif
|
||||
|
||||
#ifdef USE_CAVIUM_THUNDER_X
|
||||
static char is_cavium_thunderx = 0;
|
||||
#endif
|
||||
|
||||
void *dist_base;
|
||||
void *rdist_base[NR_CPUS];
|
||||
|
||||
extern uint64_t ihk_param_cpu_logical_map;
|
||||
static uint64_t *__cpu_logical_map = &ihk_param_cpu_logical_map;
|
||||
|
||||
extern uint64_t ihk_param_gic_rdist_base_pa[NR_CPUS];
|
||||
|
||||
#define cpu_logical_map(cpu) __cpu_logical_map[cpu]
|
||||
|
||||
/* Our default, arbitrary priority value. Linux only uses one anyway. */
|
||||
#define DEFAULT_PMR_VALUE 0xf0
|
||||
|
||||
/**
|
||||
* Low level accessors
|
||||
* @ref.impl host-kernel/drivers/irqchip/irq-gic-v3.c
|
||||
*/
|
||||
static uint64_t gic_read_iar_common(void)
|
||||
{
|
||||
uint64_t irqstat;
|
||||
|
||||
#ifdef CONFIG_HAS_NMI
|
||||
uint64_t daif;
|
||||
uint64_t pmr;
|
||||
uint64_t default_pmr_value = DEFAULT_PMR_VALUE;
|
||||
|
||||
/*
|
||||
* The PMR may be configured to mask interrupts when this code is
|
||||
* called, thus in order to acknowledge interrupts we must set the
|
||||
* PMR to its default value before reading from the IAR.
|
||||
*
|
||||
* To do this without taking an interrupt we also ensure the I bit
|
||||
* is set whilst we are interfering with the value of the PMR.
|
||||
*/
|
||||
asm volatile(
|
||||
"mrs %1, daif\n\t" /* save I bit */
|
||||
"msr daifset, #2\n\t" /* set I bit */
|
||||
"mrs_s %2, " __stringify(ICC_PMR_EL1) "\n\t" /* save PMR */
|
||||
"msr_s " __stringify(ICC_PMR_EL1) ",%3\n\t" /* set PMR */
|
||||
"mrs_s %0, " __stringify(ICC_IAR1_EL1) "\n\t" /* ack int */
|
||||
"msr_s " __stringify(ICC_PMR_EL1) ",%2\n\t" /* restore PMR */
|
||||
"isb\n\t"
|
||||
"msr daif, %1" /* restore I */
|
||||
: "=r" (irqstat), "=&r" (daif), "=&r" (pmr)
|
||||
: "r" (default_pmr_value));
|
||||
#else /* CONFIG_HAS_NMI */
|
||||
asm volatile("mrs_s %0, " __stringify(ICC_IAR1_EL1) : "=r" (irqstat));
|
||||
#endif /* CONFIG_HAS_NMI */
|
||||
|
||||
return irqstat;
|
||||
}
|
||||
|
||||
#ifdef USE_CAVIUM_THUNDER_X
|
||||
/* Cavium ThunderX erratum 23154 */
|
||||
static uint64_t gic_read_iar_cavium_thunderx(void)
|
||||
{
|
||||
uint64_t irqstat;
|
||||
|
||||
#ifdef CONFIG_HAS_NMI
|
||||
uint64_t daif;
|
||||
uint64_t pmr;
|
||||
uint64_t default_pmr_value = DEFAULT_PMR_VALUE;
|
||||
|
||||
/*
|
||||
* The PMR may be configured to mask interrupts when this code is
|
||||
* called, thus in order to acknowledge interrupts we must set the
|
||||
* PMR to its default value before reading from the IAR.
|
||||
*
|
||||
* To do this without taking an interrupt we also ensure the I bit
|
||||
* is set whilst we are interfering with the value of the PMR.
|
||||
*/
|
||||
asm volatile(
|
||||
"mrs %1, daif\n\t" /* save I bit */
|
||||
"msr daifset, #2\n\t" /* set I bit */
|
||||
"mrs_s %2, " __stringify(ICC_PMR_EL1) "\n\t" /* save PMR */
|
||||
"msr_s " __stringify(ICC_PMR_EL1) ",%3\n\t" /* set PMR */
|
||||
"nop;nop;nop;nop\n\t"
|
||||
"nop;nop;nop;nop\n\t"
|
||||
"mrs_s %0, " __stringify(ICC_IAR1_EL1) "\n\t" /* ack int */
|
||||
"nop;nop;nop;nop\n\t"
|
||||
"msr_s " __stringify(ICC_PMR_EL1) ",%2\n\t" /* restore PMR */
|
||||
"isb\n\t"
|
||||
"msr daif, %1" /* restore I */
|
||||
: "=r" (irqstat), "=&r" (daif), "=&r" (pmr)
|
||||
: "r" (default_pmr_value));
|
||||
#else /* CONFIG_HAS_NMI */
|
||||
asm volatile("nop;nop;nop;nop;");
|
||||
asm volatile("nop;nop;nop;nop;");
|
||||
asm volatile("mrs_s %0, " __stringify(ICC_IAR1_EL1) : "=r" (irqstat));
|
||||
asm volatile("nop;nop;nop;nop;");
|
||||
#endif /* CONFIG_HAS_NMI */
|
||||
mb();
|
||||
|
||||
return irqstat;
|
||||
}
|
||||
#endif
|
||||
|
||||
static uint64_t gic_read_iar(void)
|
||||
{
|
||||
#ifdef USE_CAVIUM_THUNDER_X
|
||||
if (is_cavium_thunderx)
|
||||
return gic_read_iar_cavium_thunderx();
|
||||
else
|
||||
#endif
|
||||
return gic_read_iar_common();
|
||||
|
||||
}
|
||||
|
||||
static void gic_write_pmr(uint64_t val)
|
||||
{
|
||||
asm volatile("msr_s " __stringify(ICC_PMR_EL1) ", %0" : : "r" (val));
|
||||
}
|
||||
|
||||
static void gic_write_ctlr(uint64_t val)
|
||||
{
|
||||
asm volatile("msr_s " __stringify(ICC_CTLR_EL1) ", %0" : : "r" (val));
|
||||
isb();
|
||||
}
|
||||
|
||||
static void gic_write_grpen1(uint64_t val)
|
||||
{
|
||||
asm volatile("msr_s " __stringify(ICC_GRPEN1_EL1) ", %0" : : "r" (val));
|
||||
isb();
|
||||
}
|
||||
|
||||
static inline void gic_write_eoir(uint64_t irq)
|
||||
{
|
||||
asm volatile("msr_s " __stringify(ICC_EOIR1_EL1) ", %0" : : "r" (irq));
|
||||
isb();
|
||||
}
|
||||
|
||||
static void gic_write_sgi1r(uint64_t val)
|
||||
{
|
||||
asm volatile("msr_s " __stringify(ICC_SGI1R_EL1) ", %0" : : "r" (val));
|
||||
}
|
||||
|
||||
static inline uint32_t gic_read_sre(void)
|
||||
{
|
||||
uint64_t val;
|
||||
|
||||
asm volatile("mrs_s %0, " __stringify(ICC_SRE_EL1) : "=r" (val));
|
||||
return val;
|
||||
}
|
||||
|
||||
static inline void gic_write_sre(uint32_t val)
|
||||
{
|
||||
asm volatile("msr_s " __stringify(ICC_SRE_EL1) ", %0" : : "r" ((uint64_t)val));
|
||||
isb();
|
||||
}
|
||||
|
||||
static uint32_t gic_enable_sre(void)
|
||||
{
|
||||
uint32_t val;
|
||||
|
||||
val = gic_read_sre();
|
||||
if (val & ICC_SRE_EL1_SRE)
|
||||
return 1; /*ok*/
|
||||
|
||||
val |= ICC_SRE_EL1_SRE;
|
||||
gic_write_sre(val);
|
||||
val = gic_read_sre();
|
||||
|
||||
return !!(val & ICC_SRE_EL1_SRE);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_HAS_NMI
|
||||
static inline void gic_write_bpr1(uint32_t val)
|
||||
{
|
||||
asm volatile("msr_s " __stringify(ICC_BPR1_EL1) ", %0" : : "r" (val));
|
||||
}
|
||||
#endif
|
||||
|
||||
static void __arm64_raise_sgi_gicv3(uint32_t hw_cpuid, uint32_t vector)
|
||||
{
|
||||
uint64_t mpidr, cluster_id;
|
||||
uint16_t tlist;
|
||||
uint64_t val;
|
||||
|
||||
/*
|
||||
* Ensure that stores to Normal memory are visible to the
|
||||
* other CPUs before issuing the IPI.
|
||||
*/
|
||||
smp_wmb();
|
||||
|
||||
mpidr = cpu_logical_map(hw_cpuid);
|
||||
if((mpidr & 0xffUL) < 16) {
|
||||
cluster_id = cpu_logical_map(hw_cpuid) & ~0xffUL;
|
||||
tlist = (uint16_t)(1 << (mpidr & 0xf));
|
||||
|
||||
#define MPIDR_TO_SGI_AFFINITY(cluster_id, level) \
|
||||
(MPIDR_AFFINITY_LEVEL(cluster_id, level) \
|
||||
<< ICC_SGI1R_AFFINITY_## level ##_SHIFT)
|
||||
|
||||
val = (MPIDR_TO_SGI_AFFINITY(cluster_id, 3) |
|
||||
MPIDR_TO_SGI_AFFINITY(cluster_id, 2) |
|
||||
vector << ICC_SGI1R_SGI_ID_SHIFT |
|
||||
MPIDR_TO_SGI_AFFINITY(cluster_id, 1) |
|
||||
tlist << ICC_SGI1R_TARGET_LIST_SHIFT);
|
||||
|
||||
dkprintf("CPU%d: ICC_SGI1R_EL1 %llx\n", ihk_mc_get_processor_id(), val);
|
||||
gic_write_sgi1r(val);
|
||||
|
||||
/* Force the above writes to ICC_SGI1R_EL1 to be executed */
|
||||
isb();
|
||||
} else {
|
||||
/*
|
||||
* If we ever get a cluster of more than 16 CPUs, just
|
||||
* scream and skip that CPU.
|
||||
*/
|
||||
ekprintf("GICv3 can't send SGI for TargetList=%d\n", (mpidr & 0xffUL));
|
||||
}
|
||||
}
|
||||
|
||||
static void arm64_raise_sgi_gicv3(uint32_t cpuid, uint32_t vector)
|
||||
{
|
||||
/* Build interrupt destination of the target CPU */
|
||||
uint32_t hw_cpuid = ihk_mc_get_cpu_info()->hw_ids[cpuid];
|
||||
|
||||
__arm64_raise_sgi_gicv3(hw_cpuid, vector);
|
||||
}
|
||||
|
||||
static void arm64_raise_sgi_to_host_gicv3(uint32_t cpuid, uint32_t vector)
|
||||
{
|
||||
/* Build interrupt destination of the target Linux/host CPU */
|
||||
uint32_t hw_cpuid = ihk_mc_get_apicid(cpuid);
|
||||
|
||||
__arm64_raise_sgi_gicv3(hw_cpuid, vector);
|
||||
}
|
||||
|
||||
static void arm64_raise_spi_gicv3(uint32_t cpuid, uint32_t vector)
|
||||
{
|
||||
uint64_t spi_reg_offset;
|
||||
uint32_t spi_set_pending_bitpos;
|
||||
|
||||
/**
|
||||
* calculates register offset and bit position corresponding to the numbers.
|
||||
*
|
||||
* For interrupt vector m,
|
||||
* - the corresponding GICD_ISPENDR number, n, is given by n = m / 32
|
||||
* - the offset of the required GICD_ISPENDR is (0x200 + (4*n))
|
||||
* - the bit number of the required Set-pending bit in this register is m % 32.
|
||||
*/
|
||||
spi_reg_offset = vector / 32 * 4;
|
||||
spi_set_pending_bitpos = vector % 32;
|
||||
|
||||
/* write to GICD_ISPENDR */
|
||||
writel_relaxed(
|
||||
1 << spi_set_pending_bitpos,
|
||||
(void *)(dist_base + GICD_ISPENDR + spi_reg_offset)
|
||||
);
|
||||
}
|
||||
|
||||
static void arm64_raise_lpi_gicv3(uint32_t cpuid, uint32_t vector)
|
||||
{
|
||||
// @todo.impl
|
||||
ekprintf("%s called.\n", __func__);
|
||||
}
|
||||
|
||||
void arm64_issue_host_ipi_gicv3(uint32_t cpuid, uint32_t vector)
|
||||
{
|
||||
arm64_raise_sgi_to_host_gicv3(cpuid, vector);
|
||||
}
|
||||
|
||||
void arm64_issue_ipi_gicv3(uint32_t cpuid, uint32_t vector)
|
||||
{
|
||||
dkprintf("Send irq#%d to cpuid=%d\n", vector, cpuid);
|
||||
|
||||
barrier();
|
||||
if(vector < 16){
|
||||
// send SGI
|
||||
arm64_raise_sgi_gicv3(cpuid, vector);
|
||||
} else if (32 <= vector && vector < 1020) {
|
||||
// send SPI (allow only to host)
|
||||
arm64_raise_spi_gicv3(cpuid, vector);
|
||||
} else if (8192 <= vector) {
|
||||
// send LPI (allow only to host)
|
||||
arm64_raise_lpi_gicv3(cpuid, vector);
|
||||
} else {
|
||||
ekprintf("#%d is bad irq number.\n", vector);
|
||||
}
|
||||
}
|
||||
|
||||
extern int interrupt_from_user(void *);
|
||||
void handle_interrupt_gicv3(struct pt_regs *regs)
|
||||
{
|
||||
uint64_t irqnr;
|
||||
const int from_user = interrupt_from_user(regs);
|
||||
struct cpu_local_var *v = get_this_cpu_local_var();
|
||||
//unsigned long irqflags;
|
||||
int do_check = 0;
|
||||
|
||||
irqnr = gic_read_iar();
|
||||
cpu_enable_nmi();
|
||||
set_cputime(from_user ? CPUTIME_MODE_U2K : CPUTIME_MODE_K2K_IN);
|
||||
while (irqnr != ICC_IAR1_EL1_SPURIOUS) {
|
||||
if ((irqnr < 1020) || (irqnr >= 8192)) {
|
||||
gic_write_eoir(irqnr);
|
||||
handle_IPI(irqnr, regs);
|
||||
}
|
||||
irqnr = gic_read_iar();
|
||||
}
|
||||
set_cputime(from_user ? CPUTIME_MODE_K2U : CPUTIME_MODE_K2K_OUT);
|
||||
|
||||
//irqflags = ihk_mc_spinlock_lock(&v->runq_lock);
|
||||
/* For migration by IPI or by timesharing */
|
||||
if (v->flags &
|
||||
(CPU_FLAG_NEED_MIGRATE | CPU_FLAG_NEED_RESCHED)) {
|
||||
v->flags &= ~CPU_FLAG_NEED_RESCHED;
|
||||
do_check = 1;
|
||||
}
|
||||
//ihk_mc_spinlock_unlock(&v->runq_lock, irqflags);
|
||||
|
||||
if (do_check) {
|
||||
check_signal(0, regs, 0);
|
||||
schedule();
|
||||
}
|
||||
}
|
||||
|
||||
static uint64_t gic_mpidr_to_affinity(unsigned long mpidr)
|
||||
{
|
||||
uint64_t aff;
|
||||
|
||||
aff = ((uint64_t)MPIDR_AFFINITY_LEVEL(mpidr, 3) << 32 |
|
||||
MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
|
||||
MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 |
|
||||
MPIDR_AFFINITY_LEVEL(mpidr, 0));
|
||||
return aff;
|
||||
}
|
||||
|
||||
static void init_spi_routing(uint32_t irq, uint32_t linux_cpu)
|
||||
{
|
||||
uint64_t spi_route_reg_val, spi_route_reg_offset;
|
||||
|
||||
if (irq < 32 || 1020 <= irq) {
|
||||
ekprintf("%s: irq is not spi number. (irq=%d)\n",
|
||||
__func__, irq);
|
||||
return;
|
||||
}
|
||||
|
||||
/* write to GICD_IROUTER */
|
||||
spi_route_reg_offset = irq * 8;
|
||||
spi_route_reg_val = gic_mpidr_to_affinity(cpu_logical_map(linux_cpu));
|
||||
|
||||
writeq_relaxed(spi_route_reg_val,
|
||||
(void *)(dist_base + GICD_IROUTER +
|
||||
spi_route_reg_offset));
|
||||
}
|
||||
|
||||
void gic_dist_init_gicv3(unsigned long dist_base_pa, unsigned long size)
|
||||
{
|
||||
#ifndef IHK_IKC_USE_LINUX_WORK_IRQ
|
||||
extern int spi_table[];
|
||||
extern int nr_spi_table;
|
||||
int i;
|
||||
#endif // !IHK_IKC_USE_LINUX_WORK_IRQ
|
||||
|
||||
dist_base = map_fixed_area(dist_base_pa, size, 1 /* non cacheable */);
|
||||
|
||||
#ifdef USE_CAVIUM_THUNDER_X
|
||||
/* Cavium ThunderX erratum 23154 */
|
||||
if (MIDR_IMPLEMENTOR(read_cpuid_id()) == ARM_CPU_IMP_CAVIUM) {
|
||||
is_cavium_thunderx = 1;
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifndef IHK_IKC_USE_LINUX_WORK_IRQ
|
||||
/* initialize spi routing */
|
||||
for (i = 0; i < nr_spi_table; i++) {
|
||||
if (spi_table[i] == -1) {
|
||||
continue;
|
||||
}
|
||||
init_spi_routing(spi_table[i], i);
|
||||
}
|
||||
#endif // !IHK_IKC_USE_LINUX_WORK_IRQ
|
||||
}
|
||||
|
||||
void gic_cpu_init_gicv3(unsigned long cpu_base_pa, unsigned long size)
|
||||
{
|
||||
int32_t cpuid, hw_cpuid;
|
||||
struct ihk_mc_cpu_info *cpu_info = ihk_mc_get_cpu_info();
|
||||
|
||||
for(cpuid = 0; cpuid < cpu_info->ncpus; cpuid++) {
|
||||
hw_cpuid = cpu_info->hw_ids[cpuid];
|
||||
if(ihk_param_gic_rdist_base_pa[hw_cpuid] != 0) {
|
||||
rdist_base[hw_cpuid] =
|
||||
map_fixed_area(ihk_param_gic_rdist_base_pa[hw_cpuid], size, 1 /* non cacheable */);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static void gic_do_wait_for_rwp(void *base)
|
||||
{
|
||||
uint32_t count = 1000000; /* 1s! */
|
||||
|
||||
while (readl_relaxed(base + GICD_CTLR) & GICD_CTLR_RWP) {
|
||||
count--;
|
||||
if (!count) {
|
||||
ekprintf("RWP timeout, gone fishing\n");
|
||||
return;
|
||||
}
|
||||
cpu_pause();
|
||||
};
|
||||
}
|
||||
|
||||
void gic_enable_gicv3(void)
|
||||
{
|
||||
void *rbase = rdist_base[ihk_mc_get_hardware_processor_id()];
|
||||
void *rd_sgi_base = rbase + 0x10000 /* SZ_64K */;
|
||||
int i;
|
||||
unsigned int enable_ppi_sgi = GICD_INT_EN_SET_SGI;
|
||||
extern int ihk_param_nr_pmu_irq_affi;
|
||||
extern int ihk_param_pmu_irq_affi[CONFIG_SMP_MAX_CORES];
|
||||
|
||||
enable_ppi_sgi |= GICD_ENABLE << get_timer_intrid();
|
||||
|
||||
if (0 < ihk_param_nr_pmu_irq_affi) {
|
||||
for (i = 0; i < ihk_param_nr_pmu_irq_affi; i++) {
|
||||
if ((0 <= ihk_param_pmu_irq_affi[i]) &&
|
||||
(ihk_param_pmu_irq_affi[i] <
|
||||
sizeof(enable_ppi_sgi) * BITS_PER_BYTE)) {
|
||||
enable_ppi_sgi |= GICD_ENABLE <<
|
||||
ihk_param_pmu_irq_affi[i];
|
||||
}
|
||||
}
|
||||
}
|
||||
else {
|
||||
enable_ppi_sgi |= GICD_ENABLE << INTRID_PERF_OVF;
|
||||
}
|
||||
|
||||
/*
|
||||
* Deal with the banked PPI and SGI interrupts - disable all
|
||||
* PPI interrupts, ensure all SGI interrupts are enabled.
|
||||
*/
|
||||
writel_relaxed(~enable_ppi_sgi, rd_sgi_base + GIC_DIST_ENABLE_CLEAR);
|
||||
writel_relaxed(enable_ppi_sgi, rd_sgi_base + GIC_DIST_ENABLE_SET);
|
||||
|
||||
/*
|
||||
* Set priority on PPI and SGI interrupts
|
||||
*/
|
||||
for (i = 0; i < 32; i += 4) {
|
||||
writel_relaxed(GICD_INT_DEF_PRI_X4,
|
||||
rd_sgi_base + GIC_DIST_PRI + i);
|
||||
}
|
||||
|
||||
/* sync wait */
|
||||
gic_do_wait_for_rwp(rbase);
|
||||
|
||||
/*
|
||||
* Need to check that the SRE bit has actually been set. If
|
||||
* not, it means that SRE is disabled at EL2. We're going to
|
||||
* die painfully, and there is nothing we can do about it.
|
||||
*
|
||||
* Kindly inform the luser.
|
||||
*/
|
||||
if (!gic_enable_sre())
|
||||
panic("GIC: unable to set SRE (disabled at EL2), panic ahead\n");
|
||||
|
||||
#ifndef CONFIG_HAS_NMI
|
||||
/* Set priority mask register */
|
||||
gic_write_pmr(DEFAULT_PMR_VALUE);
|
||||
#endif
|
||||
|
||||
/* EOI deactivates interrupt too (mode 0) */
|
||||
gic_write_ctlr(ICC_CTLR_EL1_EOImode_drop_dir);
|
||||
|
||||
/* ... and let's hit the road... */
|
||||
gic_write_grpen1(1);
|
||||
|
||||
#ifdef CONFIG_HAS_NMI
|
||||
/*
|
||||
* Some firmwares hand over to the kernel with the BPR changed from
|
||||
* its reset value (and with a value large enough to prevent
|
||||
* any pre-emptive interrupts from working at all). Writing a zero
|
||||
* to BPR restores its reset value.
|
||||
*/
|
||||
gic_write_bpr1(0);
|
||||
|
||||
/* Set specific IPI to NMI */
|
||||
writeb_relaxed(GICD_INT_NMI_PRI,
|
||||
rd_sgi_base + GIC_DIST_PRI + INTRID_CPU_STOP);
|
||||
writeb_relaxed(GICD_INT_NMI_PRI,
|
||||
rd_sgi_base + GIC_DIST_PRI + INTRID_MULTI_NMI);
|
||||
writeb_relaxed(GICD_INT_NMI_PRI,
|
||||
rd_sgi_base + GIC_DIST_PRI + INTRID_STACK_TRACE);
|
||||
|
||||
/* sync wait */
|
||||
gic_do_wait_for_rwp(rbase);
|
||||
#endif /* CONFIG_HAS_NMI */
|
||||
}
95  arch/arm64/kernel/local.c  Normal file
@ -0,0 +1,95 @@
/* local.c COPYRIGHT FUJITSU LIMITED 2015-2018 */
|
||||
#include <cpulocal.h>
|
||||
#include <ihk/atomic.h>
|
||||
#include <ihk/mm.h>
|
||||
#include <ihk/cpu.h>
|
||||
#include <ihk/debug.h>
|
||||
#include <registers.h>
|
||||
#include <string.h>
|
||||
|
||||
/* BSP initialized stack area */
|
||||
union arm64_cpu_local_variables init_thread_info __attribute__((aligned(KERNEL_STACK_SIZE)));
|
||||
|
||||
/* BSP/AP idle stack pointer head */
|
||||
static union arm64_cpu_local_variables *locals;
|
||||
size_t arm64_cpu_local_variables_span = KERNEL_STACK_SIZE; /* for debugger */
|
||||
|
||||
/* allocate & initialize BSP/AP idle stack */
|
||||
void init_processors_local(int max_id)
|
||||
{
|
||||
int i = 0;
|
||||
union arm64_cpu_local_variables *tmp;
|
||||
const int npages = ((max_id + 1) *
|
||||
(ALIGN_UP(KERNEL_STACK_SIZE, PAGE_SIZE) >>
|
||||
PAGE_SHIFT));
|
||||
|
||||
if (npages < 1) {
|
||||
panic("idle kernel stack allocation failed.");
|
||||
}
|
||||
|
||||
/* allocate one more for alignment */
|
||||
locals = ihk_mc_alloc_pages(npages, IHK_MC_AP_CRITICAL);
|
||||
if (locals == NULL) {
|
||||
panic("idle kernel stack allocation failed.");
|
||||
}
|
||||
locals = (union arm64_cpu_local_variables *)ALIGN_UP((unsigned long)locals, KERNEL_STACK_SIZE);
|
||||
|
||||
/* clear struct process, struct process_vm, struct thread_info area */
|
||||
for (i = 0, tmp = locals; i < max_id; i++, tmp++) {
|
||||
memset(tmp, 0, sizeof(struct thread_info));
|
||||
}
|
||||
kprintf("locals = %p\n", locals);
|
||||
}
|
||||
|
||||
/* get id (logical processor id) local variable address */
|
||||
union arm64_cpu_local_variables *get_arm64_cpu_local_variable(int id)
|
||||
{
|
||||
return locals + id;
|
||||
}
|
||||
|
||||
/* get id (logical processor id) kernel stack address */
|
||||
static void *get_arm64_cpu_local_kstack(int id)
|
||||
{
|
||||
return (char *)get_arm64_cpu_local_variable(id) + THREAD_START_SP;
|
||||
}
|
||||
|
||||
/* get current cpu local variable address */
|
||||
union arm64_cpu_local_variables *get_arm64_this_cpu_local(void)
|
||||
{
|
||||
int id = ihk_mc_get_processor_id();
|
||||
return get_arm64_cpu_local_variable(id);
|
||||
}
|
||||
|
||||
/* get current kernel stack address */
|
||||
void *get_arm64_this_cpu_kstack(void)
|
||||
{
|
||||
int id = ihk_mc_get_processor_id();
|
||||
return get_arm64_cpu_local_kstack(id);
|
||||
}
|
||||
|
||||
/* assign logical processor id for current_thread_info.cpu */
|
||||
/* logical processor id BSP:0, AP0:1, AP1:2, ... APn:n-1 */
|
||||
static ihk_atomic_t last_processor_id = IHK_ATOMIC_INIT(-1);
|
||||
void assign_processor_id(void)
|
||||
{
|
||||
int id;
|
||||
union arm64_cpu_local_variables *v;
|
||||
|
||||
id = ihk_atomic_inc_return(&last_processor_id);
|
||||
|
||||
v = get_arm64_cpu_local_variable(id);
|
||||
v->arm64_cpu_local_thread.thread_info.cpu = id;
|
||||
}
|
||||
|
||||
/** IHK **/
|
||||
/* get current logical processor id */
|
||||
int ihk_mc_get_processor_id(void)
|
||||
{
|
||||
return current_thread_info()->cpu;
|
||||
}
|
||||
|
||||
/* get current physical processor id (not equal AFFINITY !!) */
|
||||
int ihk_mc_get_hardware_processor_id(void)
|
||||
{
|
||||
return ihk_mc_get_cpu_info()->hw_ids[ihk_mc_get_processor_id()];
|
||||
}
78  arch/arm64/kernel/memcpy.S  Normal file
@ -0,0 +1,78 @@
/* memcpy.S COPYRIGHT FUJITSU LIMITED 2017 */
|
||||
/*
|
||||
* Copyright (C) 2013 ARM Ltd.
|
||||
* Copyright (C) 2013 Linaro.
|
||||
*
|
||||
* This code is based on glibc cortex strings work originally authored by Linaro
|
||||
* and re-licensed under GPLv2 for the Linux kernel. The original code can
|
||||
* be found @
|
||||
*
|
||||
* http://bazaar.launchpad.net/~linaro-toolchain-dev/cortex-strings/trunk/
|
||||
* files/head:/src/aarch64/
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#include <linkage.h>
|
||||
#include <assembler.h>
|
||||
#include <cache.h>
|
||||
|
||||
/*
|
||||
* Copy a buffer from src to dest (alignment handled by the hardware)
|
||||
*
|
||||
* Parameters:
|
||||
* x0 - dest
|
||||
* x1 - src
|
||||
* x2 - n
|
||||
* Returns:
|
||||
* x0 - dest
|
||||
*/
|
||||
.macro ldrb1 ptr, regB, val
|
||||
ldrb \ptr, [\regB], \val
|
||||
.endm
|
||||
|
||||
.macro strb1 ptr, regB, val
|
||||
strb \ptr, [\regB], \val
|
||||
.endm
|
||||
|
||||
.macro ldrh1 ptr, regB, val
|
||||
ldrh \ptr, [\regB], \val
|
||||
.endm
|
||||
|
||||
.macro strh1 ptr, regB, val
|
||||
strh \ptr, [\regB], \val
|
||||
.endm
|
||||
|
||||
.macro ldr1 ptr, regB, val
|
||||
ldr \ptr, [\regB], \val
|
||||
.endm
|
||||
|
||||
.macro str1 ptr, regB, val
|
||||
str \ptr, [\regB], \val
|
||||
.endm
|
||||
|
||||
.macro ldp1 ptr, regB, regC, val
|
||||
ldp \ptr, \regB, [\regC], \val
|
||||
.endm
|
||||
|
||||
.macro stp1 ptr, regB, regC, val
|
||||
stp \ptr, \regB, [\regC], \val
|
||||
.endm
|
||||
|
||||
.weak memcpy
|
||||
ENTRY(____inline_memcpy)
|
||||
ENTRY(__inline_memcpy)
|
||||
#include "copy_template.S"
|
||||
ret
|
||||
ENDPIPROC(__inline_memcpy)
|
||||
ENDPROC(____inline_memcpy)
3810  arch/arm64/kernel/memory.c  Normal file
File diff suppressed because it is too large

220  arch/arm64/kernel/memset.S  Normal file
@ -0,0 +1,220 @@
/* memset.S COPYRIGHT FUJITSU LIMITED 2017 */
|
||||
/*
|
||||
* Copyright (C) 2013 ARM Ltd.
|
||||
* Copyright (C) 2013 Linaro.
|
||||
*
|
||||
* This code is based on glibc cortex strings work originally authored by Linaro
|
||||
* and re-licensed under GPLv2 for the Linux kernel. The original code can
|
||||
* be found @
|
||||
*
|
||||
* http://bazaar.launchpad.net/~linaro-toolchain-dev/cortex-strings/trunk/
|
||||
* files/head:/src/aarch64/
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#include <linkage.h>
|
||||
#include <assembler.h>
|
||||
#include <cache.h>
|
||||
|
||||
/*
|
||||
* Fill in the buffer with character c (alignment handled by the hardware)
|
||||
*
|
||||
* Parameters:
|
||||
* x0 - buf
|
||||
* x1 - c
|
||||
* x2 - n
|
||||
* Returns:
|
||||
* x0 - buf
|
||||
*/
|
||||
|
||||
dstin .req x0
|
||||
val .req w1
|
||||
count .req x2
|
||||
tmp1 .req x3
|
||||
tmp1w .req w3
|
||||
tmp2 .req x4
|
||||
tmp2w .req w4
|
||||
zva_len_x .req x5
|
||||
zva_len .req w5
|
||||
zva_bits_x .req x6
|
||||
|
||||
A_l .req x7
|
||||
A_lw .req w7
|
||||
dst .req x8
|
||||
tmp3w .req w9
|
||||
tmp3 .req x9
|
||||
|
||||
.weak memset
|
||||
ENTRY(____inline_memset)
|
||||
ENTRY(__inline_memset)
|
||||
mov dst, dstin /* Preserve return value. */
|
||||
and A_lw, val, #255
|
||||
orr A_lw, A_lw, A_lw, lsl #8
|
||||
orr A_lw, A_lw, A_lw, lsl #16
|
||||
orr A_l, A_l, A_l, lsl #32
|
||||
|
||||
cmp count, #15
|
||||
b.hi .Lover16_proc
|
||||
/* All stores may be unaligned. */
|
||||
tbz count, #3, 1f
|
||||
str A_l, [dst], #8
|
||||
1:
|
||||
tbz count, #2, 2f
|
||||
str A_lw, [dst], #4
|
||||
2:
|
||||
tbz count, #1, 3f
|
||||
strh A_lw, [dst], #2
|
||||
3:
|
||||
tbz count, #0, 4f
|
||||
strb A_lw, [dst]
|
||||
4:
|
||||
ret
|
||||
|
||||
.Lover16_proc:
|
||||
/* Check whether the start address is 16-byte aligned. */
|
||||
neg tmp2, dst
|
||||
ands tmp2, tmp2, #15
|
||||
b.eq .Laligned
|
||||
/*
|
||||
* The count is not less than 16, so we can use stp to store the first
* 16 bytes, then adjust dst to be 16-byte aligned. This leaves the
* current memory address on an alignment boundary.
|
||||
*/
|
||||
stp A_l, A_l, [dst] /*non-aligned store..*/
|
||||
/*make the dst aligned..*/
|
||||
sub count, count, tmp2
|
||||
add dst, dst, tmp2
|
||||
|
||||
.Laligned:
|
||||
cbz A_l, .Lzero_mem
|
||||
|
||||
.Ltail_maybe_long:
|
||||
cmp count, #64
|
||||
b.ge .Lnot_short
|
||||
.Ltail63:
|
||||
ands tmp1, count, #0x30
|
||||
b.eq 3f
|
||||
cmp tmp1w, #0x20
|
||||
b.eq 1f
|
||||
b.lt 2f
|
||||
stp A_l, A_l, [dst], #16
|
||||
1:
|
||||
stp A_l, A_l, [dst], #16
|
||||
2:
|
||||
stp A_l, A_l, [dst], #16
|
||||
/*
|
||||
* The last store length is less than 16,use stp to write last 16 bytes.
|
||||
* It will lead some bytes written twice and the access is non-aligned.
|
||||
*/
|
||||
3:
|
||||
ands count, count, #15
|
||||
cbz count, 4f
|
||||
add dst, dst, count
|
||||
stp A_l, A_l, [dst, #-16] /* Repeat some/all of last store. */
|
||||
4:
|
||||
ret
|
||||
|
||||
/*
|
||||
* Critical loop. Start at a new cache line boundary. Assuming
|
||||
* 64 bytes per line, this ensures the entire loop is in one line.
|
||||
*/
|
||||
.p2align L1_CACHE_SHIFT
|
||||
.Lnot_short:
|
||||
sub dst, dst, #16/* Pre-bias. */
|
||||
sub count, count, #64
|
||||
1:
|
||||
stp A_l, A_l, [dst, #16]
|
||||
stp A_l, A_l, [dst, #32]
|
||||
stp A_l, A_l, [dst, #48]
|
||||
stp A_l, A_l, [dst, #64]!
|
||||
subs count, count, #64
|
||||
b.ge 1b
|
||||
tst count, #0x3f
|
||||
add dst, dst, #16
|
||||
b.ne .Ltail63
|
||||
.Lexitfunc:
|
||||
ret
|
||||
|
||||
/*
|
||||
* For zeroing memory, check to see if we can use the ZVA feature to
|
||||
* zero entire 'cache' lines.
|
||||
*/
|
||||
.Lzero_mem:
|
||||
cmp count, #63
|
||||
b.le .Ltail63
|
||||
/*
|
||||
* For zeroing small amounts of memory, it's not worth setting up
|
||||
* the line-clear code.
|
||||
*/
|
||||
cmp count, #128
|
||||
b.lt .Lnot_short /*count is at least 128 bytes*/
|
||||
|
||||
mrs tmp1, dczid_el0
|
||||
tbnz tmp1, #4, .Lnot_short
|
||||
mov tmp3w, #4
|
||||
and zva_len, tmp1w, #15 /* Safety: other bits reserved. */
|
||||
lsl zva_len, tmp3w, zva_len
|
||||
|
||||
ands tmp3w, zva_len, #63
|
||||
/*
|
||||
* ensure the zva_len is not less than 64.
|
||||
* It is not meaningful to use ZVA if the block size is less than 64.
|
||||
*/
|
||||
b.ne .Lnot_short
|
||||
.Lzero_by_line:
|
||||
/*
|
||||
* Compute how far we need to go to become suitably aligned. We're
|
||||
* already at quad-word alignment.
|
||||
*/
|
||||
cmp count, zva_len_x
|
||||
b.lt .Lnot_short /* Not enough to reach alignment. */
|
||||
sub zva_bits_x, zva_len_x, #1
|
||||
neg tmp2, dst
|
||||
ands tmp2, tmp2, zva_bits_x
|
||||
b.eq 2f /* Already aligned. */
|
||||
/* Not aligned, check that there's enough to copy after alignment.*/
|
||||
sub tmp1, count, tmp2
|
||||
/*
* Guarantee that the length remaining after alignment is at least 64
* bytes and at least one ZVA block, so that the code at 2f does not
* run past the end of the memory range.
*/
|
||||
cmp tmp1, #64
|
||||
ccmp tmp1, zva_len_x, #8, ge /* NZCV=0b1000 */
|
||||
b.lt .Lnot_short
|
||||
/*
|
||||
* We know that there's at least 64 bytes to zero and that it's safe
|
||||
* to overrun by 64 bytes.
|
||||
*/
|
||||
mov count, tmp1
|
||||
1:
|
||||
stp A_l, A_l, [dst]
|
||||
stp A_l, A_l, [dst, #16]
|
||||
stp A_l, A_l, [dst, #32]
|
||||
subs tmp2, tmp2, #64
|
||||
stp A_l, A_l, [dst, #48]
|
||||
add dst, dst, #64
|
||||
b.ge 1b
|
||||
/* We've overrun a bit, so adjust dst downwards.*/
|
||||
add dst, dst, tmp2
|
||||
2:
|
||||
sub count, count, zva_len_x
|
||||
3:
|
||||
dc zva, dst
|
||||
add dst, dst, zva_len_x
|
||||
subs count, count, zva_len_x
|
||||
b.ge 3b
|
||||
ands count, count, zva_bits_x
|
||||
b.ne .Ltail_maybe_long
|
||||
ret
|
||||
ENDPIPROC(__inline_memset)
|
||||
ENDPROC(____inline_memset)
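The .Lzero_mem path above sizes its DC ZVA loop from DCZID_EL0: the block length is 4 << DCZID_EL0.BS bytes, and bit 4 (DZP) disables ZVA entirely. A minimal C sketch of the same computation, assuming uint64_t is available as in the kernel headers above; the helper name is illustrative:

static inline uint64_t zva_block_bytes(void)
{
	uint64_t dczid;

	asm volatile("mrs %0, dczid_el0" : "=r" (dczid));
	if (dczid & (1UL << 4))		/* DZP: DC ZVA prohibited */
		return 0;
	return 4UL << (dczid & 0xf);	/* BS = log2(block size in words) */
}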
44  arch/arm64/kernel/mikc.c  Normal file
@ -0,0 +1,44 @@
/* mikc.c COPYRIGHT FUJITSU LIMITED 2015-2016 */
|
||||
#include <ihk/ikc.h>
|
||||
#include <ihk/lock.h>
|
||||
#include <ikc/msg.h>
|
||||
#include <memory.h>
|
||||
#include <string.h>
|
||||
|
||||
extern int num_processors;
|
||||
extern void arch_set_mikc_queue(void *r, void *w);
|
||||
ihk_ikc_ph_t arch_master_channel_packet_handler;
|
||||
|
||||
int ihk_mc_ikc_init_first_local(struct ihk_ikc_channel_desc *channel,
|
||||
ihk_ikc_ph_t packet_handler)
|
||||
{
|
||||
struct ihk_ikc_queue_head *rq, *wq;
|
||||
size_t mikc_queue_pages;
|
||||
|
||||
ihk_ikc_system_init(NULL);
|
||||
|
||||
memset(channel, 0, sizeof(struct ihk_ikc_channel_desc));
|
||||
|
||||
mikc_queue_pages = ((8 * num_processors * MASTER_IKCQ_PKTSIZE)
|
||||
+ (PAGE_SIZE - 1)) / PAGE_SIZE;
|
||||
|
||||
/* Place both sides in this side */
|
||||
rq = ihk_mc_alloc_pages(mikc_queue_pages, IHK_MC_AP_CRITICAL);
|
||||
wq = ihk_mc_alloc_pages(mikc_queue_pages, IHK_MC_AP_CRITICAL);
|
||||
|
||||
ihk_ikc_init_queue(rq, 0, 0,
|
||||
mikc_queue_pages * PAGE_SIZE, MASTER_IKCQ_PKTSIZE);
|
||||
ihk_ikc_init_queue(wq, 0, 0,
|
||||
mikc_queue_pages * PAGE_SIZE, MASTER_IKCQ_PKTSIZE);
|
||||
|
||||
arch_master_channel_packet_handler = packet_handler;
|
||||
|
||||
ihk_ikc_init_desc(channel, IKC_OS_HOST, 0, rq, wq,
|
||||
ihk_ikc_master_channel_packet_handler, channel);
|
||||
ihk_ikc_enable_channel(channel);
|
||||
|
||||
/* Set boot parameter */
|
||||
arch_set_mikc_queue(rq, wq);
|
||||
|
||||
return 0;
|
||||
}
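A worked example of the queue sizing above, under assumed values (MASTER_IKCQ_PKTSIZE and num_processors are not given in this diff):

/* Assume num_processors = 48, MASTER_IKCQ_PKTSIZE = 128, PAGE_SIZE = 4096:
 *   bytes            = 8 * 48 * 128          = 49152
 *   mikc_queue_pages = (49152 + 4095) / 4096 = 12
 * so rq and wq each get 12 pages, i.e. room for 8 packets per CPU.
 */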
297  arch/arm64/kernel/perfctr.c  Normal file
@ -0,0 +1,297 @@
/* perfctr.c COPYRIGHT FUJITSU LIMITED 2015-2018 */
|
||||
#include <arch-perfctr.h>
|
||||
#include <ihk/perfctr.h>
|
||||
#include <mc_perf_event.h>
|
||||
#include <errno.h>
|
||||
#include <ihk/debug.h>
|
||||
#include <registers.h>
|
||||
#include <string.h>
|
||||
#include <ihk/mm.h>
|
||||
#include <irq.h>
|
||||
#include <process.h>
|
||||
|
||||
/*
|
||||
* @ref.impl arch/arm64/kernel/perf_event.c
|
||||
* Set at runtime when we know what CPU type we are.
|
||||
*/
|
||||
struct arm_pmu cpu_pmu;
|
||||
extern int ihk_param_pmu_irq_affi[CONFIG_SMP_MAX_CORES];
|
||||
extern int ihk_param_nr_pmu_irq_affi;
|
||||
|
||||
int arm64_init_perfctr(void)
|
||||
{
|
||||
int ret;
|
||||
int i;
|
||||
int pages;
|
||||
const struct ihk_mc_cpu_info *cpu_info;
|
||||
|
||||
memset(&cpu_pmu, 0, sizeof(cpu_pmu));
|
||||
ret = armv8pmu_init(&cpu_pmu);
|
||||
if (ret) {
|
||||
return ret;
|
||||
}
|
||||
|
||||
cpu_info = ihk_mc_get_cpu_info();
|
||||
pages = (sizeof(struct per_cpu_arm_pmu) * cpu_info->ncpus +
|
||||
PAGE_SIZE - 1) >> PAGE_SHIFT;
|
||||
cpu_pmu.per_cpu = ihk_mc_alloc_pages(pages, IHK_MC_AP_NOWAIT);
|
||||
if (cpu_pmu.per_cpu == NULL) {
|
||||
return -ENOMEM;
|
||||
}
|
||||
memset(cpu_pmu.per_cpu, 0, pages * PAGE_SIZE);
|
||||
|
||||
if (0 < ihk_param_nr_pmu_irq_affi) {
|
||||
for (i = 0; i < ihk_param_nr_pmu_irq_affi; i++) {
|
||||
ret = ihk_mc_register_interrupt_handler(ihk_param_pmu_irq_affi[i],
|
||||
cpu_pmu.handler);
|
||||
if (ret) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
else {
|
||||
ret = ihk_mc_register_interrupt_handler(INTRID_PERF_OVF,
|
||||
cpu_pmu.handler);
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
void arm64_init_per_cpu_perfctr(void)
|
||||
{
|
||||
armv8pmu_per_cpu_init(&cpu_pmu.per_cpu[ihk_mc_get_processor_id()]);
|
||||
}
|
||||
|
||||
int arm64_enable_pmu(void)
|
||||
{
|
||||
int ret;
|
||||
if (cpu_pmu.reset) {
|
||||
cpu_pmu.reset(&cpu_pmu);
|
||||
}
|
||||
ret = cpu_pmu.enable_pmu();
|
||||
return ret;
|
||||
}
|
||||
|
||||
void arm64_disable_pmu(void)
|
||||
{
|
||||
cpu_pmu.disable_pmu();
|
||||
}
|
||||
|
||||
void arm64_enable_user_access_pmu_regs(void)
|
||||
{
|
||||
cpu_pmu.enable_user_access_pmu_regs();
|
||||
}
|
||||
|
||||
void arm64_disable_user_access_pmu_regs(void)
|
||||
{
|
||||
cpu_pmu.disable_user_access_pmu_regs();
|
||||
}
|
||||
|
||||
static int __ihk_mc_perfctr_init(int counter, uint32_t type, uint64_t config, int mode)
|
||||
{
|
||||
int ret = -1;
|
||||
unsigned long config_base = 0;
|
||||
|
||||
ret = cpu_pmu.disable_counter(1UL << counter);
|
||||
if (ret < 0) {
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = cpu_pmu.enable_intens(1UL << counter);
|
||||
if (ret < 0) {
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = cpu_pmu.set_event_filter(&config_base, mode);
|
||||
if (ret) {
|
||||
return ret;
|
||||
}
|
||||
config_base |= config;
|
||||
cpu_pmu.write_evtype(counter, config_base);
|
||||
return ret;
|
||||
}
|
||||
|
||||
int ihk_mc_perfctr_init_raw(int counter, uint64_t config, int mode)
|
||||
{
|
||||
int ret;
|
||||
ret = __ihk_mc_perfctr_init(counter, PERF_TYPE_RAW, config, mode);
|
||||
return ret;
|
||||
}
|
||||
|
||||
int ihk_mc_perfctr_start(unsigned long counter_mask)
|
||||
{
|
||||
return cpu_pmu.enable_counter(counter_mask);
|
||||
}
|
||||
|
||||
int ihk_mc_perfctr_stop(unsigned long counter_mask, int flags)
|
||||
{
|
||||
return cpu_pmu.disable_counter(counter_mask);
|
||||
}
|
||||
|
||||
int ihk_mc_perfctr_reset(int counter)
|
||||
{
|
||||
cpu_pmu.write_counter(counter, 0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int ihk_mc_perfctr_set(int counter, long val)
|
||||
{
|
||||
uint32_t v = val;
|
||||
cpu_pmu.write_counter(counter, v);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int ihk_mc_perfctr_read_mask(unsigned long counter_mask, unsigned long *value)
|
||||
{
|
||||
/* This function is not used yet. */
|
||||
panic("not implemented.");
|
||||
return 0;
|
||||
}
|
||||
|
||||
int ihk_mc_perfctr_alloc(struct thread *thread, struct mc_perf_event *event)
|
||||
{
|
||||
const int counters = ihk_mc_perf_get_num_counters();
|
||||
|
||||
return cpu_pmu.get_event_idx(counters,
|
||||
thread->pmc_alloc_map,
|
||||
event->hw_config);
|
||||
}
|
||||
|
||||
unsigned long ihk_mc_perfctr_read(int counter)
|
||||
{
|
||||
unsigned long count;
|
||||
count = cpu_pmu.read_counter(counter);
|
||||
return count;
|
||||
}
|
||||
|
||||
unsigned long ihk_mc_perfctr_value(int counter, unsigned long correction)
|
||||
{
|
||||
unsigned long count = ihk_mc_perfctr_read(counter) + correction;
|
||||
|
||||
count &= ((1UL << 32) - 1);
|
||||
return count;
|
||||
}
|
||||
|
||||
int ihk_mc_perfctr_alloc_counter(unsigned int *type, unsigned long *config,
|
||||
unsigned long pmc_status)
|
||||
{
|
||||
int ret;
|
||||
|
||||
if (*type == PERF_TYPE_HARDWARE) {
|
||||
switch (*config) {
|
||||
case PERF_COUNT_HW_INSTRUCTIONS:
|
||||
ret = cpu_pmu.map_event(*type, *config);
|
||||
if (ret < 0) {
|
||||
return -1;
|
||||
}
|
||||
*type = PERF_TYPE_RAW;
|
||||
break;
|
||||
default:
|
||||
// Unexpected config
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
else if (*type != PERF_TYPE_RAW) {
|
||||
return -1;
|
||||
}
|
||||
ret = cpu_pmu.get_event_idx(get_per_cpu_pmu()->num_events, pmc_status,
|
||||
*config);
|
||||
return ret;
|
||||
}
|
||||
|
||||
int ihk_mc_perf_counter_mask_check(unsigned long counter_mask)
|
||||
{
|
||||
return cpu_pmu.counter_mask_valid(counter_mask);
|
||||
}
|
||||
|
||||
int ihk_mc_perf_get_num_counters(void)
|
||||
{
|
||||
const struct per_cpu_arm_pmu *per_cpu_arm_pmu = get_per_cpu_pmu();
|
||||
|
||||
return per_cpu_arm_pmu->num_events;
|
||||
}
|
||||
|
||||
int ihk_mc_perfctr_set_extra(struct mc_perf_event *event)
|
||||
{
|
||||
/* Nothing to do. */
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline uint64_t arm_pmu_event_max_period(struct mc_perf_event *event)
|
||||
{
|
||||
return 0xFFFFFFFF;
|
||||
}
|
||||
|
||||
int hw_perf_event_init(struct mc_perf_event *event)
|
||||
{
|
||||
struct hw_perf_event *hwc = &event->hw;
|
||||
|
||||
if (!is_sampling_event(event)) {
|
||||
hwc->sample_period = arm_pmu_event_max_period(event) >> 1;
|
||||
hwc->last_period = hwc->sample_period;
|
||||
ihk_atomic64_set(&hwc->period_left, hwc->sample_period);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
int ihk_mc_event_set_period(struct mc_perf_event *event)
|
||||
{
|
||||
struct hw_perf_event *hwc = &event->hw;
|
||||
int64_t left = ihk_atomic64_read(&hwc->period_left);
|
||||
int64_t period = hwc->sample_period;
|
||||
uint64_t max_period;
|
||||
int ret = 0;
|
||||
|
||||
max_period = arm_pmu_event_max_period(event);
|
||||
if (unlikely(left <= -period)) {
|
||||
left = period;
|
||||
ihk_atomic64_set(&hwc->period_left, left);
|
||||
hwc->last_period = period;
|
||||
ret = 1;
|
||||
}
|
||||
|
||||
if (unlikely(left <= 0)) {
|
||||
left += period;
|
||||
ihk_atomic64_set(&hwc->period_left, left);
|
||||
hwc->last_period = period;
|
||||
ret = 1;
|
||||
}
|
||||
|
||||
/*
|
||||
* Limit the maximum period to prevent the counter value
|
||||
* from overtaking the one we are about to program. In
|
||||
* effect we are reducing max_period to account for
|
||||
* interrupt latency (and we are being very conservative).
|
||||
*/
|
||||
if (left > (max_period >> 1))
|
||||
left = (max_period >> 1);
|
||||
|
||||
ihk_atomic64_set(&hwc->prev_count, (uint64_t)-left);
|
||||
|
||||
cpu_pmu.write_counter(event->counter_id,
|
||||
(uint64_t)(-left) & max_period);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
uint64_t ihk_mc_event_update(struct mc_perf_event *event)
|
||||
{
|
||||
struct hw_perf_event *hwc = &event->hw;
|
||||
int64_t delta;
|
||||
uint64_t prev_raw_count, new_raw_count;
|
||||
uint64_t max_period = arm_pmu_event_max_period(event);
|
||||
|
||||
again:
|
||||
prev_raw_count = ihk_atomic64_read(&hwc->prev_count);
|
||||
new_raw_count = cpu_pmu.read_counter(event->counter_id);
|
||||
|
||||
if (ihk_atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
|
||||
new_raw_count) != prev_raw_count)
|
||||
goto again;
|
||||
|
||||
delta = (new_raw_count - prev_raw_count) & max_period;
|
||||
|
||||
ihk_atomic64_add(delta, &event->count);
|
||||
ihk_atomic64_add(-delta, &hwc->period_left);
|
||||
|
||||
return new_raw_count;
|
||||
}
|
||||
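ihk_mc_event_update() above accumulates the counter delta modulo the counter width, so a 32-bit hardware counter that wraps between two reads still yields the right increment. A small self-contained sketch of that masking; the 0xffffffff max period mirrors arm_pmu_event_max_period(), and the raw counter values are made up for illustration.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint64_t max_period = 0xffffffffULL;	/* 32-bit event counters */
	uint64_t prev_raw = 0xfffffff0ULL;		/* read just before wraparound */
	uint64_t new_raw  = 0x00000010ULL;		/* read after the counter wrapped */

	/* Same masking as ihk_mc_event_update(): the subtraction wraps modulo
	 * 2^32, so the counter overflow does not produce a huge bogus delta. */
	int64_t delta = (int64_t)((new_raw - prev_raw) & max_period);

	printf("delta = %lld\n", (long long)delta);	/* prints 32 */
	return 0;
}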
910 arch/arm64/kernel/perfctr_armv8pmu.c Normal file
@@ -0,0 +1,910 @@
|
||||
/* perfctr_armv8pmu.c COPYRIGHT FUJITSU LIMITED 2016-2018 */
|
||||
#include <arch-perfctr.h>
|
||||
#include <mc_perf_event.h>
|
||||
#include <ihk/perfctr.h>
|
||||
#include <errno.h>
|
||||
#include <ihk/debug.h>
|
||||
#include <sysreg.h>
|
||||
#include <virt.h>
|
||||
#include <bitops.h>
|
||||
#include <string.h>
|
||||
#include <signal.h>
|
||||
#include <cls.h>
|
||||
#include <process.h>
|
||||
|
||||
#define BIT(nr) (1UL << (nr))
|
||||
|
||||
//#define DEBUG_PRINT_PMU
|
||||
#ifdef DEBUG_PRINT_PMU
|
||||
#undef DDEBUG_DEFAULT
|
||||
#define DDEBUG_DEFAULT DDEBUG_PRINT
|
||||
#endif
|
||||
|
||||
/*
|
||||
* read pmevcntr<n>_el0 functions
|
||||
*/
|
||||
#define read_pmevcntrN_el0(N) \
|
||||
static uint32_t read_pmevcntr##N##_el0(void) \
|
||||
{ \
|
||||
return read_sysreg(pmevcntr##N##_el0); \
|
||||
}
|
||||
|
||||
read_pmevcntrN_el0(0)
|
||||
read_pmevcntrN_el0(1)
|
||||
read_pmevcntrN_el0(2)
|
||||
read_pmevcntrN_el0(3)
|
||||
read_pmevcntrN_el0(4)
|
||||
read_pmevcntrN_el0(5)
|
||||
read_pmevcntrN_el0(6)
|
||||
read_pmevcntrN_el0(7)
|
||||
read_pmevcntrN_el0(8)
|
||||
read_pmevcntrN_el0(9)
|
||||
read_pmevcntrN_el0(10)
|
||||
read_pmevcntrN_el0(11)
|
||||
read_pmevcntrN_el0(12)
|
||||
read_pmevcntrN_el0(13)
|
||||
read_pmevcntrN_el0(14)
|
||||
read_pmevcntrN_el0(15)
|
||||
read_pmevcntrN_el0(16)
|
||||
read_pmevcntrN_el0(17)
|
||||
read_pmevcntrN_el0(18)
|
||||
read_pmevcntrN_el0(19)
|
||||
read_pmevcntrN_el0(20)
|
||||
read_pmevcntrN_el0(21)
|
||||
read_pmevcntrN_el0(22)
|
||||
read_pmevcntrN_el0(23)
|
||||
read_pmevcntrN_el0(24)
|
||||
read_pmevcntrN_el0(25)
|
||||
read_pmevcntrN_el0(26)
|
||||
read_pmevcntrN_el0(27)
|
||||
read_pmevcntrN_el0(28)
|
||||
read_pmevcntrN_el0(29)
|
||||
read_pmevcntrN_el0(30)
|
||||
|
||||
static uint32_t (* const read_pmevcntr_el0[])(void) = {
|
||||
read_pmevcntr0_el0, read_pmevcntr1_el0, read_pmevcntr2_el0,
|
||||
read_pmevcntr3_el0, read_pmevcntr4_el0, read_pmevcntr5_el0,
|
||||
read_pmevcntr6_el0, read_pmevcntr7_el0, read_pmevcntr8_el0,
|
||||
read_pmevcntr9_el0, read_pmevcntr10_el0, read_pmevcntr11_el0,
|
||||
read_pmevcntr12_el0, read_pmevcntr13_el0, read_pmevcntr14_el0,
|
||||
read_pmevcntr15_el0, read_pmevcntr16_el0, read_pmevcntr17_el0,
|
||||
read_pmevcntr18_el0, read_pmevcntr19_el0, read_pmevcntr20_el0,
|
||||
read_pmevcntr21_el0, read_pmevcntr22_el0, read_pmevcntr23_el0,
|
||||
read_pmevcntr24_el0, read_pmevcntr25_el0, read_pmevcntr26_el0,
|
||||
read_pmevcntr27_el0, read_pmevcntr28_el0, read_pmevcntr29_el0,
|
||||
read_pmevcntr30_el0,
|
||||
};
|
||||
|
||||
|
||||
/*
|
||||
* write pmevcntr<n>_el0 functions
|
||||
*/
|
||||
#define write_pmevcntrN_el0(N) \
|
||||
static void write_pmevcntr##N##_el0(uint32_t v) \
|
||||
{ \
|
||||
write_sysreg(v, pmevcntr##N##_el0); \
|
||||
}
|
||||
|
||||
write_pmevcntrN_el0(0)
|
||||
write_pmevcntrN_el0(1)
|
||||
write_pmevcntrN_el0(2)
|
||||
write_pmevcntrN_el0(3)
|
||||
write_pmevcntrN_el0(4)
|
||||
write_pmevcntrN_el0(5)
|
||||
write_pmevcntrN_el0(6)
|
||||
write_pmevcntrN_el0(7)
|
||||
write_pmevcntrN_el0(8)
|
||||
write_pmevcntrN_el0(9)
|
||||
write_pmevcntrN_el0(10)
|
||||
write_pmevcntrN_el0(11)
|
||||
write_pmevcntrN_el0(12)
|
||||
write_pmevcntrN_el0(13)
|
||||
write_pmevcntrN_el0(14)
|
||||
write_pmevcntrN_el0(15)
|
||||
write_pmevcntrN_el0(16)
|
||||
write_pmevcntrN_el0(17)
|
||||
write_pmevcntrN_el0(18)
|
||||
write_pmevcntrN_el0(19)
|
||||
write_pmevcntrN_el0(20)
|
||||
write_pmevcntrN_el0(21)
|
||||
write_pmevcntrN_el0(22)
|
||||
write_pmevcntrN_el0(23)
|
||||
write_pmevcntrN_el0(24)
|
||||
write_pmevcntrN_el0(25)
|
||||
write_pmevcntrN_el0(26)
|
||||
write_pmevcntrN_el0(27)
|
||||
write_pmevcntrN_el0(28)
|
||||
write_pmevcntrN_el0(29)
|
||||
write_pmevcntrN_el0(30)
|
||||
|
||||
static void (* const write_pmevcntr_el0[])(uint32_t) = {
|
||||
write_pmevcntr0_el0, write_pmevcntr1_el0, write_pmevcntr2_el0,
|
||||
write_pmevcntr3_el0, write_pmevcntr4_el0, write_pmevcntr5_el0,
|
||||
write_pmevcntr6_el0, write_pmevcntr7_el0, write_pmevcntr8_el0,
|
||||
write_pmevcntr9_el0, write_pmevcntr10_el0, write_pmevcntr11_el0,
|
||||
write_pmevcntr12_el0, write_pmevcntr13_el0, write_pmevcntr14_el0,
|
||||
write_pmevcntr15_el0, write_pmevcntr16_el0, write_pmevcntr17_el0,
|
||||
write_pmevcntr18_el0, write_pmevcntr19_el0, write_pmevcntr20_el0,
|
||||
write_pmevcntr21_el0, write_pmevcntr22_el0, write_pmevcntr23_el0,
|
||||
write_pmevcntr24_el0, write_pmevcntr25_el0, write_pmevcntr26_el0,
|
||||
write_pmevcntr27_el0, write_pmevcntr28_el0, write_pmevcntr29_el0,
|
||||
write_pmevcntr30_el0,
|
||||
};
|
||||
|
||||
/*
|
||||
* write pmevtyper<n>_el0 functions
|
||||
*/
|
||||
#define write_pmevtyperN_el0(N) \
|
||||
static void write_pmevtyper##N##_el0(uint32_t v) \
|
||||
{ \
|
||||
write_sysreg(v, pmevtyper##N##_el0); \
|
||||
}
|
||||
|
||||
write_pmevtyperN_el0(0)
|
||||
write_pmevtyperN_el0(1)
|
||||
write_pmevtyperN_el0(2)
|
||||
write_pmevtyperN_el0(3)
|
||||
write_pmevtyperN_el0(4)
|
||||
write_pmevtyperN_el0(5)
|
||||
write_pmevtyperN_el0(6)
|
||||
write_pmevtyperN_el0(7)
|
||||
write_pmevtyperN_el0(8)
|
||||
write_pmevtyperN_el0(9)
|
||||
write_pmevtyperN_el0(10)
|
||||
write_pmevtyperN_el0(11)
|
||||
write_pmevtyperN_el0(12)
|
||||
write_pmevtyperN_el0(13)
|
||||
write_pmevtyperN_el0(14)
|
||||
write_pmevtyperN_el0(15)
|
||||
write_pmevtyperN_el0(16)
|
||||
write_pmevtyperN_el0(17)
|
||||
write_pmevtyperN_el0(18)
|
||||
write_pmevtyperN_el0(19)
|
||||
write_pmevtyperN_el0(20)
|
||||
write_pmevtyperN_el0(21)
|
||||
write_pmevtyperN_el0(22)
|
||||
write_pmevtyperN_el0(23)
|
||||
write_pmevtyperN_el0(24)
|
||||
write_pmevtyperN_el0(25)
|
||||
write_pmevtyperN_el0(26)
|
||||
write_pmevtyperN_el0(27)
|
||||
write_pmevtyperN_el0(28)
|
||||
write_pmevtyperN_el0(29)
|
||||
write_pmevtyperN_el0(30)
|
||||
|
||||
static void (* const write_pmevtyper_el0[])(uint32_t) = {
|
||||
write_pmevtyper0_el0, write_pmevtyper1_el0, write_pmevtyper2_el0,
|
||||
write_pmevtyper3_el0, write_pmevtyper4_el0, write_pmevtyper5_el0,
|
||||
write_pmevtyper6_el0, write_pmevtyper7_el0, write_pmevtyper8_el0,
|
||||
write_pmevtyper9_el0, write_pmevtyper10_el0, write_pmevtyper11_el0,
|
||||
write_pmevtyper12_el0, write_pmevtyper13_el0, write_pmevtyper14_el0,
|
||||
write_pmevtyper15_el0, write_pmevtyper16_el0, write_pmevtyper17_el0,
|
||||
write_pmevtyper18_el0, write_pmevtyper19_el0, write_pmevtyper20_el0,
|
||||
write_pmevtyper21_el0, write_pmevtyper22_el0, write_pmevtyper23_el0,
|
||||
write_pmevtyper24_el0, write_pmevtyper25_el0, write_pmevtyper26_el0,
|
||||
write_pmevtyper27_el0, write_pmevtyper28_el0, write_pmevtyper29_el0,
|
||||
write_pmevtyper30_el0,
|
||||
};
|
||||
|
||||
#define ARMV8_IDX_CYCLE_COUNTER 31
|
||||
#define ARMV8_IDX_COUNTER0 0
|
||||
|
||||
/*
|
||||
* @ref.impl linux-v4.15-rc3 arch/arm64/include/asm/perf_event.h
|
||||
* Per-CPU PMCR: config reg
|
||||
*/
|
||||
#define ARMV8_PMU_PMCR_E (1 << 0) /* Enable all counters */
|
||||
#define ARMV8_PMU_PMCR_P (1 << 1) /* Reset all counters */
|
||||
#define ARMV8_PMU_PMCR_C (1 << 2) /* Cycle counter reset */
|
||||
#define ARMV8_PMU_PMCR_D (1 << 3) /* CCNT counts every 64th cpu cycle */
|
||||
#define ARMV8_PMU_PMCR_X (1 << 4) /* Export to ETM */
|
||||
#define ARMV8_PMU_PMCR_DP (1 << 5) /* Disable CCNT if non-invasive debug*/
|
||||
#define ARMV8_PMU_PMCR_LC (1 << 6) /* Overflow on 64 bit cycle counter */
|
||||
#define ARMV8_PMU_PMCR_N_SHIFT 11 /* Number of counters supported */
|
||||
#define ARMV8_PMU_PMCR_N_MASK 0x1f
|
||||
#define ARMV8_PMU_PMCR_MASK 0x7f /* Mask for writable bits */
|
||||
|
||||
/*
|
||||
* @ref.impl linux-v4.15-rc3 arch/arm64/include/asm/perf_event.h
|
||||
* PMOVSR: counters overflow flag status reg
|
||||
*/
|
||||
#define ARMV8_PMU_OVSR_MASK 0xffffffff /* Mask for writable bits */
|
||||
#define ARMV8_PMU_OVERFLOWED_MASK ARMV8_PMU_OVSR_MASK
|
||||
|
||||
/*
|
||||
* @ref.impl linux-v4.15-rc3 arch/arm64/include/asm/perf_event.h
|
||||
* PMXEVTYPER: Event selection reg
|
||||
*/
|
||||
#define ARMV8_PMU_EVTYPE_MASK 0xc800ffff /* Mask for writable bits */
|
||||
#define ARMV8_PMU_EVTYPE_EVENT 0xffff /* Mask for EVENT bits */
|
||||
|
||||
/*
|
||||
* @ref.impl linux-v4.15-rc3 arch/arm64/include/asm/perf_event.h
|
||||
* Event filters for PMUv3
|
||||
*/
|
||||
#define ARMV8_PMU_EXCLUDE_EL1 (1 << 31)
|
||||
#define ARMV8_PMU_EXCLUDE_EL0 (1 << 30)
|
||||
#define ARMV8_PMU_INCLUDE_EL2 (1 << 27)
|
||||
|
||||
/*
|
||||
* @ref.impl linux-v4.15-rc3 arch/arm64/include/asm/perf_event.h
|
||||
* PMUSERENR: user enable reg
|
||||
*/
|
||||
#define ARMV8_PMU_USERENR_MASK 0xf /* Mask for writable bits */
|
||||
#define ARMV8_PMU_USERENR_EN (1 << 0) /* PMU regs can be accessed at EL0 */
|
||||
#define ARMV8_PMU_USERENR_SW (1 << 1) /* PMSWINC can be written at EL0 */
|
||||
#define ARMV8_PMU_USERENR_CR (1 << 2) /* Cycle counter can be read at EL0 */
|
||||
#define ARMV8_PMU_USERENR_ER (1 << 3) /* Event counter can be read at EL0 */
|
||||
|
||||
/*
|
||||
* @ref.impl linux-v4.15-rc3 arch/arm64/include/asm/perf_event.h
|
||||
* PMUv3 event types: required events
|
||||
*/
|
||||
#define ARMV8_PMUV3_PERFCTR_SW_INCR 0x00
|
||||
#define ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL 0x03
|
||||
#define ARMV8_PMUV3_PERFCTR_L1D_CACHE 0x04
|
||||
#define ARMV8_PMUV3_PERFCTR_BR_MIS_PRED 0x10
|
||||
#define ARMV8_PMUV3_PERFCTR_CPU_CYCLES 0x11
|
||||
#define ARMV8_PMUV3_PERFCTR_BR_PRED 0x12
|
||||
|
||||
/*
|
||||
* @ref.impl linux-v4.15-rc3 arch/arm64/kernel/perf_event.c
|
||||
* ARMv8 PMUv3 Performance Events handling code.
|
||||
* Common event types (some are defined in asm/perf_event.h).
|
||||
*/
|
||||
|
||||
/* At least one of the following is required. */
|
||||
#define ARMV8_PMUV3_PERFCTR_INST_RETIRED 0x08
|
||||
#define ARMV8_PMUV3_PERFCTR_INST_SPEC 0x1B
|
||||
|
||||
/* Common architectural events. */
|
||||
#define ARMV8_PMUV3_PERFCTR_LD_RETIRED 0x06
|
||||
#define ARMV8_PMUV3_PERFCTR_ST_RETIRED 0x07
|
||||
#define ARMV8_PMUV3_PERFCTR_EXC_TAKEN 0x09
|
||||
#define ARMV8_PMUV3_PERFCTR_EXC_RETURN 0x0A
|
||||
#define ARMV8_PMUV3_PERFCTR_CID_WRITE_RETIRED 0x0B
|
||||
#define ARMV8_PMUV3_PERFCTR_PC_WRITE_RETIRED 0x0C
|
||||
#define ARMV8_PMUV3_PERFCTR_BR_IMMED_RETIRED 0x0D
|
||||
#define ARMV8_PMUV3_PERFCTR_BR_RETURN_RETIRED 0x0E
|
||||
#define ARMV8_PMUV3_PERFCTR_UNALIGNED_LDST_RETIRED 0x0F
|
||||
#define ARMV8_PMUV3_PERFCTR_TTBR_WRITE_RETIRED 0x1C
|
||||
#define ARMV8_PMUV3_PERFCTR_CHAIN 0x1E
|
||||
#define ARMV8_PMUV3_PERFCTR_BR_RETIRED 0x21
|
||||
|
||||
/* Common microarchitectural events. */
|
||||
#define ARMV8_PMUV3_PERFCTR_L1I_CACHE_REFILL 0x01
|
||||
#define ARMV8_PMUV3_PERFCTR_L1I_TLB_REFILL 0x02
|
||||
#define ARMV8_PMUV3_PERFCTR_L1D_TLB_REFILL 0x05
|
||||
#define ARMV8_PMUV3_PERFCTR_MEM_ACCESS 0x13
|
||||
#define ARMV8_PMUV3_PERFCTR_L1I_CACHE 0x14
|
||||
#define ARMV8_PMUV3_PERFCTR_L1D_CACHE_WB 0x15
|
||||
#define ARMV8_PMUV3_PERFCTR_L2D_CACHE 0x16
|
||||
#define ARMV8_PMUV3_PERFCTR_L2D_CACHE_REFILL 0x17
|
||||
#define ARMV8_PMUV3_PERFCTR_L2D_CACHE_WB 0x18
|
||||
#define ARMV8_PMUV3_PERFCTR_BUS_ACCESS 0x19
|
||||
#define ARMV8_PMUV3_PERFCTR_MEMORY_ERROR 0x1A
|
||||
#define ARMV8_PMUV3_PERFCTR_BUS_CYCLES 0x1D
|
||||
#define ARMV8_PMUV3_PERFCTR_L1D_CACHE_ALLOCATE 0x1F
|
||||
#define ARMV8_PMUV3_PERFCTR_L2D_CACHE_ALLOCATE 0x20
|
||||
#define ARMV8_PMUV3_PERFCTR_BR_MIS_PRED_RETIRED 0x22
|
||||
#define ARMV8_PMUV3_PERFCTR_STALL_FRONTEND 0x23
|
||||
#define ARMV8_PMUV3_PERFCTR_STALL_BACKEND 0x24
|
||||
#define ARMV8_PMUV3_PERFCTR_L1D_TLB 0x25
|
||||
#define ARMV8_PMUV3_PERFCTR_L1I_TLB 0x26
|
||||
#define ARMV8_PMUV3_PERFCTR_L2I_CACHE 0x27
|
||||
#define ARMV8_PMUV3_PERFCTR_L2I_CACHE_REFILL 0x28
|
||||
#define ARMV8_PMUV3_PERFCTR_L3D_CACHE_ALLOCATE 0x29
|
||||
#define ARMV8_PMUV3_PERFCTR_L3D_CACHE_REFILL 0x2A
|
||||
#define ARMV8_PMUV3_PERFCTR_L3D_CACHE 0x2B
|
||||
#define ARMV8_PMUV3_PERFCTR_L3D_CACHE_WB 0x2C
|
||||
#define ARMV8_PMUV3_PERFCTR_L2D_TLB_REFILL 0x2D
|
||||
#define ARMV8_PMUV3_PERFCTR_L2I_TLB_REFILL 0x2E
|
||||
#define ARMV8_PMUV3_PERFCTR_L2D_TLB 0x2F
|
||||
#define ARMV8_PMUV3_PERFCTR_L2I_TLB 0x30
|
||||
|
||||
/* @ref.impl linux-v4.15-rc3 include/linux/perf/arm_pmu.h */
|
||||
#define HW_OP_UNSUPPORTED 0xFFFF
|
||||
#define C(_x) PERF_COUNT_HW_CACHE_##_x
|
||||
#define CACHE_OP_UNSUPPORTED 0xFFFF
|
||||
|
||||
#define PERF_MAP_ALL_UNSUPPORTED \
|
||||
[0 ... PERF_COUNT_HW_MAX - 1] = HW_OP_UNSUPPORTED
|
||||
|
||||
#define PERF_CACHE_MAP_ALL_UNSUPPORTED \
|
||||
[0 ... C(MAX) - 1] = { \
|
||||
[0 ... C(OP_MAX) - 1] = { \
|
||||
[0 ... C(RESULT_MAX) - 1] = CACHE_OP_UNSUPPORTED, \
|
||||
}, \
|
||||
}
|
||||
|
||||
/* PMUv3 HW events mapping. */
|
||||
|
||||
/* disable -Woverride-init for the following initializations */
|
||||
#pragma GCC diagnostic push
|
||||
#pragma GCC diagnostic ignored "-Woverride-init"
|
||||
|
||||
/*
|
||||
* @ref.impl linux-v4.15-rc3 arch/arm64/kernel/perf_event.c
|
||||
* ARMv8 Architectural defined events, not all of these may
|
||||
* be supported on any given implementation. Undefined events will
|
||||
* be disabled at run-time.
|
||||
*/
|
||||
static const unsigned armv8_pmuv3_perf_map[PERF_COUNT_HW_MAX] = {
|
||||
PERF_MAP_ALL_UNSUPPORTED,
|
||||
[PERF_COUNT_HW_CPU_CYCLES] = ARMV8_PMUV3_PERFCTR_CPU_CYCLES,
|
||||
[PERF_COUNT_HW_INSTRUCTIONS] = ARMV8_PMUV3_PERFCTR_INST_RETIRED,
|
||||
[PERF_COUNT_HW_CACHE_REFERENCES] = ARMV8_PMUV3_PERFCTR_L1D_CACHE,
|
||||
[PERF_COUNT_HW_CACHE_MISSES] = ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL,
|
||||
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV8_PMUV3_PERFCTR_PC_WRITE_RETIRED,
|
||||
[PERF_COUNT_HW_BRANCH_MISSES] = ARMV8_PMUV3_PERFCTR_BR_MIS_PRED,
|
||||
[PERF_COUNT_HW_BUS_CYCLES] = ARMV8_PMUV3_PERFCTR_BUS_CYCLES,
|
||||
[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = ARMV8_PMUV3_PERFCTR_STALL_FRONTEND,
|
||||
[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = ARMV8_PMUV3_PERFCTR_STALL_BACKEND,
|
||||
};
|
||||
|
||||
/* @ref.impl linux-v4.15-rc3 arch/arm64/kernel/perf_event.c */
|
||||
static const unsigned armv8_pmuv3_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
|
||||
[PERF_COUNT_HW_CACHE_OP_MAX]
|
||||
[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
|
||||
PERF_CACHE_MAP_ALL_UNSUPPORTED,
|
||||
|
||||
[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1D_CACHE,
|
||||
[C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL,
|
||||
[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1D_CACHE,
|
||||
[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL,
|
||||
|
||||
[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1I_CACHE,
|
||||
[C(L1I)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1I_CACHE_REFILL,
|
||||
|
||||
[C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1D_TLB_REFILL,
|
||||
[C(DTLB)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1D_TLB,
|
||||
|
||||
[C(ITLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1I_TLB_REFILL,
|
||||
[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1I_TLB,
|
||||
|
||||
[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_BR_PRED,
|
||||
[C(BPU)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_BR_MIS_PRED,
|
||||
[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_BR_PRED,
|
||||
[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_BR_MIS_PRED,
|
||||
};
|
||||
|
||||
/* restore warnings */
|
||||
#pragma GCC diagnostic pop
|
||||
|
||||
/* @ref.impl linux-v4.15-rc3 drivers/perf/arm_pmu.c */
|
||||
static int
|
||||
armpmu_map_cache_event(const unsigned (*cache_map)
|
||||
[PERF_COUNT_HW_CACHE_MAX]
|
||||
[PERF_COUNT_HW_CACHE_OP_MAX]
|
||||
[PERF_COUNT_HW_CACHE_RESULT_MAX],
|
||||
uint64_t config)
|
||||
{
|
||||
unsigned int cache_type, cache_op, cache_result, ret;
|
||||
|
||||
cache_type = (config >> 0) & 0xff;
|
||||
if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
|
||||
return -EINVAL;
|
||||
|
||||
cache_op = (config >> 8) & 0xff;
|
||||
if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
|
||||
return -EINVAL;
|
||||
|
||||
cache_result = (config >> 16) & 0xff;
|
||||
if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
|
||||
return -EINVAL;
|
||||
|
||||
if (!cache_map)
|
||||
return -ENOENT;
|
||||
|
||||
ret = (int)(*cache_map)[cache_type][cache_op][cache_result];
|
||||
|
||||
if (ret == CACHE_OP_UNSUPPORTED)
|
||||
return -ENOENT;
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* @ref.impl linux-v4.15-rc3 drivers/perf/arm_pmu.c */
|
||||
static int
|
||||
armpmu_map_hw_event(const unsigned int (*event_map)[PERF_COUNT_HW_MAX],
|
||||
uint64_t config)
|
||||
{
|
||||
int mapping;
|
||||
|
||||
if (config >= PERF_COUNT_HW_MAX)
|
||||
return -EINVAL;
|
||||
|
||||
if (!event_map)
|
||||
return -ENOENT;
|
||||
|
||||
mapping = (*event_map)[config];
|
||||
return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
|
||||
}
|
||||
|
||||
/* @ref.impl linux-v4.15-rc3 drivers/perf/arm_pmu.c */
|
||||
static int
|
||||
armpmu_map_raw_event(uint32_t raw_event_mask, uint64_t config)
|
||||
{
|
||||
return (int)(config & raw_event_mask);
|
||||
}
|
||||
|
||||
/* @ref.impl linux-v4.15-rc3 drivers/perf/arm_pmu.c */
|
||||
static int
|
||||
armpmu_map_event(uint32_t type, uint64_t config,
|
||||
const unsigned int (*event_map)[PERF_COUNT_HW_MAX],
|
||||
const unsigned int (*cache_map)
|
||||
[PERF_COUNT_HW_CACHE_MAX]
|
||||
[PERF_COUNT_HW_CACHE_OP_MAX]
|
||||
[PERF_COUNT_HW_CACHE_RESULT_MAX],
|
||||
uint32_t raw_event_mask)
|
||||
{
|
||||
switch (type) {
|
||||
case PERF_TYPE_HARDWARE:
|
||||
return armpmu_map_hw_event(event_map, config);
|
||||
case PERF_TYPE_HW_CACHE:
|
||||
return armpmu_map_cache_event(cache_map, config);
|
||||
case PERF_TYPE_RAW:
|
||||
return armpmu_map_raw_event(raw_event_mask, config);
|
||||
}
|
||||
return -ENOENT;
|
||||
}
|
||||
|
||||
static inline int armv8pmu_counter_mask_valid(unsigned long counter_mask)
|
||||
{
|
||||
int num;
|
||||
unsigned long event;
|
||||
unsigned long cycle;
|
||||
unsigned long invalid_mask;
|
||||
|
||||
num = get_per_cpu_pmu()->num_events;
|
||||
num--; /* Sub the CPU cycles counter */
|
||||
event = ((1UL << num) - 1) << ARMV8_IDX_COUNTER0;
|
||||
cycle = 1UL << ARMV8_IDX_CYCLE_COUNTER;
|
||||
invalid_mask = ~(event | cycle);
|
||||
|
||||
return !(counter_mask & invalid_mask);
|
||||
}
|
||||
|
||||
static inline int armv8pmu_counter_valid(int idx)
|
||||
{
|
||||
return armv8pmu_counter_mask_valid(1UL << idx);
|
||||
}
|
||||
|
||||
/* @ref.impl linux-v4.15-rc3 arch/arm64/kernel/perf_event.c */
|
||||
static inline uint32_t armv8pmu_getreset_flags(void)
|
||||
{
|
||||
uint32_t value;
|
||||
|
||||
/* Read */
|
||||
value = read_sysreg(pmovsclr_el0);
|
||||
|
||||
/* Write to clear flags */
|
||||
value &= ARMV8_PMU_OVSR_MASK;
|
||||
write_sysreg(value, pmovsclr_el0);
|
||||
|
||||
return value;
|
||||
}
|
||||
|
||||
/* @ref.impl linux-v4.15-rc3 arch/arm64/kernel/perf_event.c */
|
||||
static inline int armv8pmu_has_overflowed(uint32_t pmovsr)
|
||||
{
|
||||
return pmovsr & ARMV8_PMU_OVERFLOWED_MASK;
|
||||
}
|
||||
|
||||
static inline int armv8pmu_counter_has_overflowed(uint32_t pmnc, int idx)
|
||||
{
|
||||
return pmnc & BIT(idx);
|
||||
}
|
||||
|
||||
/* @ref.impl linux-v4.15-rc3 arch/arm64/kernel/perf_event.c */
|
||||
static int __armv8_pmuv3_map_event(uint32_t type, uint64_t config,
|
||||
const unsigned int (*extra_event_map)
|
||||
[PERF_COUNT_HW_MAX],
|
||||
const unsigned int (*extra_cache_map)
|
||||
[PERF_COUNT_HW_CACHE_MAX]
|
||||
[PERF_COUNT_HW_CACHE_OP_MAX]
|
||||
[PERF_COUNT_HW_CACHE_RESULT_MAX])
|
||||
{
|
||||
int hw_event_id;
|
||||
|
||||
hw_event_id = armpmu_map_event(type, config, &armv8_pmuv3_perf_map,
|
||||
&armv8_pmuv3_perf_cache_map,
|
||||
ARMV8_PMU_EVTYPE_EVENT);
|
||||
|
||||
/* Only expose micro/architectural events supported by this PMU */
|
||||
if ((hw_event_id > 0) && (hw_event_id < ARMV8_PMUV3_MAX_COMMON_EVENTS)
|
||||
&& test_bit(hw_event_id, get_per_cpu_pmu()->pmceid_bitmap)) {
|
||||
return hw_event_id;
|
||||
}
|
||||
|
||||
return armpmu_map_event(type, config, extra_event_map, extra_cache_map,
|
||||
ARMV8_PMU_EVTYPE_EVENT);
|
||||
}
|
||||
|
||||
/* @ref.impl linux-v4.15-rc3 arch/arm64/kernel/perf_event.c */
|
||||
static int armv8_pmuv3_map_event(uint32_t type, uint64_t config)
|
||||
{
|
||||
return __armv8_pmuv3_map_event(type, config, NULL, NULL);
|
||||
}
|
||||
|
||||
|
||||
static int armv8_pmuv3_map_hw_event(uint64_t config)
|
||||
{
|
||||
return __armv8_pmuv3_map_event(PERF_TYPE_HARDWARE, config, NULL, NULL);
|
||||
}
|
||||
|
||||
|
||||
static int armv8_pmuv3_map_cache_event(uint64_t config)
|
||||
{
|
||||
return __armv8_pmuv3_map_event(PERF_TYPE_HW_CACHE, config, NULL, NULL);
|
||||
}
|
||||
|
||||
static int armv8_pmuv3_map_raw_event(uint64_t config)
|
||||
{
|
||||
return __armv8_pmuv3_map_event(PERF_TYPE_RAW, config, NULL, NULL);
|
||||
}
|
||||
|
||||
/* @ref.impl linux-v4.15-rc3 arch/arm64/kernel/perf_event.c */
|
||||
static inline uint32_t armv8pmu_pmcr_read(void)
|
||||
{
|
||||
return read_sysreg(pmcr_el0);
|
||||
}
|
||||
|
||||
/* @ref.impl linux-v4.15-rc3 arch/arm64/kernel/perf_event.c */
|
||||
static inline void armv8pmu_pmcr_write(uint32_t val)
|
||||
{
|
||||
val &= ARMV8_PMU_PMCR_MASK;
|
||||
isb();
|
||||
write_sysreg(val, pmcr_el0);
|
||||
}
|
||||
|
||||
/* @ref.impl linux-v4.15-rc3 arch/arm64/kernel/perf_event.c */
|
||||
static inline uint32_t armv8pmu_read_counter(int idx)
|
||||
{
|
||||
uint32_t value = 0;
|
||||
|
||||
if (!armv8pmu_counter_valid(idx)) {
|
||||
ekprintf("%s: The count_register#%d is not implemented.\n",
|
||||
__func__, idx);
|
||||
}
|
||||
else if (idx == ARMV8_IDX_CYCLE_COUNTER) {
|
||||
value = read_sysreg(pmccntr_el0);
|
||||
}
|
||||
else {
|
||||
value = read_pmevcntr_el0[idx]();
|
||||
}
|
||||
|
||||
return value;
|
||||
}
|
||||
|
||||
/* @ref.impl linux-v4.15-rc3 arch/arm64/kernel/perf_event.c */
|
||||
static inline void armv8pmu_write_counter(int idx, uint32_t value)
|
||||
{
|
||||
if (!armv8pmu_counter_valid(idx)) {
|
||||
ekprintf("%s: The count_register#%d is not implemented.\n",
|
||||
__func__, idx);
|
||||
}
|
||||
else if (idx == ARMV8_IDX_CYCLE_COUNTER) {
|
||||
/*
|
||||
* Set the upper 32bits as this is a 64bit counter but we only
|
||||
* count using the lower 32bits and we want an interrupt when
|
||||
* it overflows.
|
||||
*/
|
||||
uint64_t value64 = (int32_t)value;
|
||||
|
||||
write_sysreg(value64, pmccntr_el0);
|
||||
}
|
||||
else {
|
||||
write_pmevcntr_el0[idx](value);
|
||||
}
|
||||
}
|
||||
|
||||
/* @ref.impl linux-v4.15-rc3 arch/arm64/kernel/perf_event.c */
|
||||
static inline int armv8pmu_enable_intens(unsigned long counter_mask)
|
||||
{
|
||||
if (!armv8pmu_counter_mask_valid(counter_mask)) {
|
||||
ekprintf("%s: invalid counter mask(%#lx)\n",
|
||||
__func__, counter_mask);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
write_sysreg(counter_mask, pmintenset_el1);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* @ref.impl linux-v4.15-rc3 arch/arm64/kernel/perf_event.c */
|
||||
static inline int armv8pmu_disable_intens(unsigned long counter_mask)
|
||||
{
|
||||
if (!armv8pmu_counter_mask_valid(counter_mask)) {
|
||||
ekprintf("%s: invalid counter mask(%#lx)\n",
|
||||
__func__, counter_mask);
|
||||
return -EINVAL;
|
||||
}
|
||||
write_sysreg(counter_mask, pmintenclr_el1);
|
||||
isb();
|
||||
/* Clear the overflow flag in case an interrupt is pending. */
|
||||
write_sysreg(counter_mask, pmovsclr_el0);
|
||||
isb();
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* @ref.impl linux-v4.15-rc3 arch/arm64/kernel/perf_event.c */
|
||||
static int armv8pmu_set_event_filter(unsigned long *config_base, int mode)
|
||||
{
|
||||
/* exclude_idle is unused mode, unsupported */
|
||||
// if (attr->exclude_idle)
|
||||
// return -EPERM;
|
||||
|
||||
/*
|
||||
* If we're running in hyp mode, then we *are* the hypervisor.
|
||||
* Therefore we ignore exclude_hv in this configuration, since
|
||||
* there's no hypervisor to sample anyway. This is consistent
|
||||
* with other architectures (x86 and Power).
|
||||
*/
|
||||
if (is_kernel_in_hyp_mode()) {
|
||||
if (mode & PERFCTR_KERNEL_MODE)
|
||||
*config_base |= ARMV8_PMU_INCLUDE_EL2;
|
||||
} else {
|
||||
if (!(mode & PERFCTR_KERNEL_MODE))
|
||||
*config_base |= ARMV8_PMU_EXCLUDE_EL1;
|
||||
/* exclude_hv is unused mode, unsupported */
|
||||
// if (!attr->exclude_hv)
|
||||
// config_base |= ARMV8_PMU_INCLUDE_EL2;
|
||||
}
|
||||
if (!(mode & PERFCTR_USER_MODE))
|
||||
*config_base |= ARMV8_PMU_EXCLUDE_EL0;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* @ref.impl linux-v4.15-rc3 arch/arm64/kernel/perf_event.c */
|
||||
static inline void armv8pmu_write_evtype(int idx, uint32_t val)
|
||||
{
|
||||
if (!armv8pmu_counter_valid(idx)) {
|
||||
ekprintf("%s: The count_register#%d is not implemented.\n",
|
||||
__func__, idx);
|
||||
return;
|
||||
} else if (idx != ARMV8_IDX_CYCLE_COUNTER) {
|
||||
write_pmevtyper_el0[idx](val);
|
||||
}
|
||||
}
|
||||
|
||||
/* @ref.impl linux-v4.15-rc3 arch/arm64/kernel/perf_event.c */
|
||||
static inline int armv8pmu_enable_counter(unsigned long counter_mask)
|
||||
{
|
||||
if (!armv8pmu_counter_mask_valid(counter_mask)) {
|
||||
ekprintf("%s: invalid counter mask 0x%lx.\n",
|
||||
__func__, counter_mask);
|
||||
return -EINVAL;
|
||||
}
|
||||
write_sysreg(counter_mask, pmcntenset_el0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* @ref.impl linux-v4.15-rc3 arch/arm64/kernel/perf_event.c */
|
||||
static inline int armv8pmu_disable_counter(unsigned long counter_mask)
|
||||
{
|
||||
if (!armv8pmu_counter_mask_valid(counter_mask)) {
|
||||
ekprintf("%s: invalid counter mask 0x%lx.\n",
|
||||
__func__, counter_mask);
|
||||
return -EINVAL;
|
||||
}
|
||||
write_sysreg(counter_mask, pmcntenclr_el0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* @ref.impl linux-v4.15-rc3 arch/arm64/kernel/perf_event.c */
|
||||
static ihk_spinlock_t pmu_lock = SPIN_LOCK_UNLOCKED;
|
||||
static int armv8pmu_start(void)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
flags = ihk_mc_spinlock_lock(&pmu_lock);
|
||||
/* Enable all counters */
|
||||
armv8pmu_pmcr_write(armv8pmu_pmcr_read() | ARMV8_PMU_PMCR_E);
|
||||
ihk_mc_spinlock_unlock(&pmu_lock, flags);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* @ref.impl linux-v4.15-rc3 arch/arm64/kernel/perf_event.c */
|
||||
static void armv8pmu_stop(void)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
flags = ihk_mc_spinlock_lock(&pmu_lock);
|
||||
/* Disable all counters */
|
||||
armv8pmu_pmcr_write(armv8pmu_pmcr_read() & ~ARMV8_PMU_PMCR_E);
|
||||
ihk_mc_spinlock_unlock(&pmu_lock, flags);
|
||||
}
|
||||
|
||||
/* @ref.impl linux-v4.15-rc3 arch/arm64/kernel/perf_event.c */
|
||||
static void armv8pmu_reset(void *info)
|
||||
{
|
||||
struct arm_pmu *cpu_pmu = (struct arm_pmu *)info;
|
||||
uint32_t nb_cnt =
|
||||
cpu_pmu->per_cpu[ihk_mc_get_processor_id()].num_events;
|
||||
nb_cnt--; /* Sub the CPU cycles counter */
|
||||
unsigned long event = ((1UL << nb_cnt) - 1) << ARMV8_IDX_COUNTER0;
|
||||
unsigned long cycle = 1UL << ARMV8_IDX_CYCLE_COUNTER;
|
||||
unsigned long valid_mask = event | cycle;
|
||||
|
||||
/* The counter and interrupt enable registers are unknown at reset. */
|
||||
armv8pmu_disable_counter(valid_mask);
|
||||
armv8pmu_disable_intens(valid_mask);
|
||||
|
||||
/*
|
||||
* Initialize & Reset PMNC. Request overflow interrupt for
|
||||
* 64 bit cycle counter but cheat in armv8pmu_write_counter().
|
||||
*/
|
||||
armv8pmu_pmcr_write(ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C |
|
||||
ARMV8_PMU_PMCR_LC);
|
||||
}
|
||||
|
||||
/* @ref.impl linux-v4.15-rc3 arch/arm64/kernel/perf_event.c */
|
||||
static int armv8pmu_get_event_idx(int num_events, unsigned long used_mask,
|
||||
unsigned long config)
|
||||
{
|
||||
int idx, end;
|
||||
unsigned long evtype = config & ARMV8_PMU_EVTYPE_EVENT;
|
||||
|
||||
/* Always prefer to place a cycle counter into the cycle counter. */
|
||||
if (evtype == ARMV8_PMUV3_PERFCTR_CPU_CYCLES) {
|
||||
if (!(used_mask & (1UL << ARMV8_IDX_CYCLE_COUNTER)))
|
||||
return ARMV8_IDX_CYCLE_COUNTER;
|
||||
}
|
||||
|
||||
/*
|
||||
* Otherwise use events counters
|
||||
*/
|
||||
end = ARMV8_IDX_COUNTER0 + num_events;
|
||||
end--; /* Sub the CPU cycles counter */
|
||||
for (idx = ARMV8_IDX_COUNTER0; idx < end; ++idx) {
|
||||
if (!(used_mask & (1UL << idx)))
|
||||
return idx;
|
||||
}
|
||||
|
||||
/* The counters are all in use. */
|
||||
return -EAGAIN;
|
||||
}
|
||||
|
||||
/* @ref.impl linux-v4.15-rc3 arch/arm64/kernel/perf_event.c:__armv8pmu_probe_pmu() */
|
||||
/* Extract get num_events processing. */
|
||||
static uint32_t armv8pmu_read_num_pmnc_events(void)
|
||||
{
|
||||
uint32_t num_events = 0;
|
||||
|
||||
/* Read the nb of CNTx counters supported from PMNC */
|
||||
num_events = (armv8pmu_pmcr_read() >> ARMV8_PMU_PMCR_N_SHIFT)
|
||||
& ARMV8_PMU_PMCR_N_MASK;
|
||||
|
||||
/* Add the CPU cycles counter */
|
||||
num_events += 1;
|
||||
|
||||
return num_events;
|
||||
}
|
||||
|
||||
static void armv8pmu_handle_irq(void *priv)
|
||||
{
|
||||
uint32_t pmovsr;
|
||||
struct thread *thread = cpu_local_var(current);
|
||||
struct process *proc = thread->proc;
|
||||
const struct per_cpu_arm_pmu *cpu_pmu = get_per_cpu_pmu();
|
||||
int idx;
|
||||
|
||||
/*
|
||||
* Get and reset the IRQ flags
|
||||
*/
|
||||
pmovsr = armv8pmu_getreset_flags();
|
||||
|
||||
/*
|
||||
* Did an overflow occur?
|
||||
*/
|
||||
if (!armv8pmu_has_overflowed(pmovsr))
|
||||
return;
|
||||
|
||||
if (!proc->monitoring_event) {
|
||||
return;
|
||||
}
|
||||
/*
|
||||
* Handle the counter(s) overflow(s)
|
||||
*/
|
||||
for (idx = 0; idx < cpu_pmu->num_events; idx++) {
|
||||
struct mc_perf_event *event = NULL;
|
||||
struct mc_perf_event *sub;
|
||||
|
||||
if (!armv8pmu_counter_has_overflowed(pmovsr, idx)) {
|
||||
continue;
|
||||
}
|
||||
|
||||
if (proc->monitoring_event->counter_id == idx) {
|
||||
event = proc->monitoring_event;
|
||||
} else {
|
||||
list_for_each_entry(sub,
|
||||
&proc->monitoring_event->sibling_list,
|
||||
group_entry) {
|
||||
if (sub->counter_id == idx) {
|
||||
event = sub;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (!event) {
|
||||
continue;
|
||||
}
|
||||
ihk_mc_event_update(event);
|
||||
ihk_mc_event_set_period(event);
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
static void armv8pmu_enable_user_access_pmu_regs(void)
|
||||
{
|
||||
uint32_t value = 0;
|
||||
|
||||
value = read_sysreg(pmuserenr_el0);
|
||||
write_sysreg(value | (ARMV8_PMU_USERENR_ER | ARMV8_PMU_USERENR_CR),
|
||||
pmuserenr_el0);
|
||||
}
|
||||
|
||||
static void armv8pmu_disable_user_access_pmu_regs(void)
|
||||
{
|
||||
uint32_t value = 0;
|
||||
|
||||
value = read_sysreg(pmuserenr_el0);
|
||||
write_sysreg(value & ~(ARMV8_PMU_USERENR_ER | ARMV8_PMU_USERENR_CR),
|
||||
pmuserenr_el0);
|
||||
}
|
||||
|
||||
static void armv8pmu_create_pmceid_bitmap(unsigned long *bitmap, uint32_t nbits)
|
||||
{
|
||||
uint32_t pmceid[2];
|
||||
|
||||
memset(bitmap, 0, BITS_TO_LONGS(nbits) * sizeof(unsigned long));
|
||||
|
||||
pmceid[0] = read_sysreg(pmceid0_el0);
|
||||
bitmap[0] = (unsigned long)pmceid[0];
|
||||
|
||||
pmceid[1] = read_sysreg(pmceid1_el0);
|
||||
bitmap[0] |= (unsigned long)pmceid[1] << 32;
|
||||
}
|
||||
|
||||
static struct ihk_mc_interrupt_handler armv8pmu_handler = {
|
||||
.func = armv8pmu_handle_irq,
|
||||
.priv = NULL,
|
||||
};
|
||||
|
||||
int armv8pmu_init(struct arm_pmu* cpu_pmu)
|
||||
{
|
||||
cpu_pmu->read_counter = armv8pmu_read_counter;
|
||||
cpu_pmu->write_counter = armv8pmu_write_counter;
|
||||
cpu_pmu->reset = armv8pmu_reset;
|
||||
cpu_pmu->enable_pmu = armv8pmu_start;
|
||||
cpu_pmu->disable_pmu = armv8pmu_stop;
|
||||
cpu_pmu->enable_counter = armv8pmu_enable_counter;
|
||||
cpu_pmu->disable_counter = armv8pmu_disable_counter;
|
||||
cpu_pmu->enable_intens = armv8pmu_enable_intens;
|
||||
cpu_pmu->disable_intens = armv8pmu_disable_intens;
|
||||
cpu_pmu->set_event_filter = armv8pmu_set_event_filter;
|
||||
cpu_pmu->write_evtype = armv8pmu_write_evtype;
|
||||
cpu_pmu->get_event_idx = armv8pmu_get_event_idx;
|
||||
cpu_pmu->map_event = armv8_pmuv3_map_event;
|
||||
cpu_pmu->map_hw_event = armv8_pmuv3_map_hw_event;
|
||||
cpu_pmu->map_cache_event = armv8_pmuv3_map_cache_event;
|
||||
cpu_pmu->map_raw_event = armv8_pmuv3_map_raw_event;
|
||||
cpu_pmu->enable_user_access_pmu_regs =
|
||||
armv8pmu_enable_user_access_pmu_regs;
|
||||
cpu_pmu->disable_user_access_pmu_regs =
|
||||
armv8pmu_disable_user_access_pmu_regs;
|
||||
cpu_pmu->handler = &armv8pmu_handler;
|
||||
cpu_pmu->counter_mask_valid = &armv8pmu_counter_mask_valid;
|
||||
return 0;
|
||||
}
|
||||
|
||||
void armv8pmu_per_cpu_init(struct per_cpu_arm_pmu *per_cpu)
|
||||
{
|
||||
per_cpu->num_events = armv8pmu_read_num_pmnc_events();
|
||||
armv8pmu_create_pmceid_bitmap(per_cpu->pmceid_bitmap,
|
||||
ARMV8_PMUV3_MAX_COMMON_EVENTS);
|
||||
}
|
||||
311 arch/arm64/kernel/postk_print_sysreg.c Normal file
@@ -0,0 +1,311 @@
|
||||
/* postk_print_sysreg.c COPYRIGHT FUJITSU LIMITED 2016 */
|
||||
/*
|
||||
* usage:
|
||||
* (gdb) call/x postk_debug_sysreg_ttbr1_el1()
|
||||
* $1 = 0x4e64f000
|
||||
*/
|
||||
#define postk_debug_sysreg(sysreg) __postk_debug_sysreg(sysreg, sysreg)
|
||||
|
||||
#define __postk_debug_sysreg(fname, regname) \
|
||||
unsigned long postk_debug_sysreg_ ## fname (void) \
|
||||
{ \
|
||||
unsigned long sysreg; \
|
||||
asm volatile( \
|
||||
"mrs %0, " # regname "\n" \
|
||||
: "=r" (sysreg) \
|
||||
: \
|
||||
: "memory"); \
|
||||
return sysreg; \
|
||||
}
|
||||
|
||||
/*
|
||||
* ARM Architecture Reference Manual ARMv8, for ARMv8-A architecture profile (errata markup, beta)
|
||||
* - Table J-5 Alphabetical index of AArch64 Registers
|
||||
*/
|
||||
postk_debug_sysreg(actlr_el1)
|
||||
postk_debug_sysreg(actlr_el2)
|
||||
postk_debug_sysreg(actlr_el3)
|
||||
postk_debug_sysreg(afsr0_el1)
|
||||
postk_debug_sysreg(afsr0_el2)
|
||||
postk_debug_sysreg(afsr0_el3)
|
||||
postk_debug_sysreg(afsr1_el1)
|
||||
postk_debug_sysreg(afsr1_el2)
|
||||
postk_debug_sysreg(afsr1_el3)
|
||||
postk_debug_sysreg(aidr_el1)
|
||||
postk_debug_sysreg(amair_el1)
|
||||
postk_debug_sysreg(amair_el2)
|
||||
postk_debug_sysreg(amair_el3)
|
||||
/*postk_debug_sysreg(at s12e0r)*/
|
||||
/*postk_debug_sysreg(at s12e0w)*/
|
||||
/*postk_debug_sysreg(at s12e1r)*/
|
||||
/*postk_debug_sysreg(at s12e1w)*/
|
||||
/*postk_debug_sysreg(at s1e0r)*/
|
||||
/*postk_debug_sysreg(at s1e0w)*/
|
||||
/*postk_debug_sysreg(at s1e1r)*/
|
||||
/*postk_debug_sysreg(at s1e1w)*/
|
||||
/*postk_debug_sysreg(at s1e2r)*/
|
||||
/*postk_debug_sysreg(at s1e2w)*/
|
||||
/*postk_debug_sysreg(at s1e3r)*/
|
||||
/*postk_debug_sysreg(at s1e3w)*/
|
||||
postk_debug_sysreg(ccsidr_el1)
|
||||
postk_debug_sysreg(clidr_el1)
|
||||
postk_debug_sysreg(cntfrq_el0)
|
||||
postk_debug_sysreg(cnthctl_el2)
|
||||
postk_debug_sysreg(cnthp_ctl_el2)
|
||||
postk_debug_sysreg(cnthp_cval_el2)
|
||||
postk_debug_sysreg(cnthp_tval_el2)
|
||||
postk_debug_sysreg(cntkctl_el1)
|
||||
postk_debug_sysreg(cntp_ctl_el0)
|
||||
postk_debug_sysreg(cntp_cval_el0)
|
||||
postk_debug_sysreg(cntp_tval_el0)
|
||||
postk_debug_sysreg(cntpct_el0)
|
||||
postk_debug_sysreg(cntps_ctl_el1)
|
||||
postk_debug_sysreg(cntps_cval_el1)
|
||||
postk_debug_sysreg(cntps_tval_el1)
|
||||
postk_debug_sysreg(cntv_ctl_el0)
|
||||
postk_debug_sysreg(cntv_cval_el0)
|
||||
postk_debug_sysreg(cntv_tval_el0)
|
||||
postk_debug_sysreg(cntvct_el0)
|
||||
postk_debug_sysreg(cntvoff_el2)
|
||||
postk_debug_sysreg(contextidr_el1)
|
||||
postk_debug_sysreg(cpacr_el1)
|
||||
postk_debug_sysreg(cptr_el2)
|
||||
postk_debug_sysreg(cptr_el3)
|
||||
postk_debug_sysreg(csselr_el1)
|
||||
postk_debug_sysreg(ctr_el0)
|
||||
postk_debug_sysreg(currentel)
|
||||
postk_debug_sysreg(dacr32_el2)
|
||||
postk_debug_sysreg(daif)
|
||||
postk_debug_sysreg(dbgauthstatus_el1)
|
||||
/*postk_debug_sysreg(dbgbcr<n>_el1)*/
|
||||
/*postk_debug_sysreg(dbgbvr<n>_el1)*/
|
||||
postk_debug_sysreg(dbgclaimclr_el1)
|
||||
postk_debug_sysreg(dbgclaimset_el1)
|
||||
postk_debug_sysreg(dbgdtr_el0)
|
||||
postk_debug_sysreg(dbgdtrrx_el0)
|
||||
postk_debug_sysreg(dbgdtrtx_el0)
|
||||
postk_debug_sysreg(dbgprcr_el1)
|
||||
postk_debug_sysreg(dbgvcr32_el2)
|
||||
/*postk_debug_sysreg(dbgwcr<n>_el1)*/
|
||||
/*postk_debug_sysreg(dbgwvr<n>_el1)*/
|
||||
/*postk_debug_sysreg(dc cisw)*/
|
||||
/*postk_debug_sysreg(dc civac)*/
|
||||
/*postk_debug_sysreg(dc csw)*/
|
||||
/*postk_debug_sysreg(dc cvac)*/
|
||||
/*postk_debug_sysreg(dc cvau)*/
|
||||
/*postk_debug_sysreg(dc isw)*/
|
||||
/*postk_debug_sysreg(dc ivac)*/
|
||||
/*postk_debug_sysreg(dc zva)*/
|
||||
postk_debug_sysreg(dczid_el0)
|
||||
postk_debug_sysreg(dlr_el0)
|
||||
postk_debug_sysreg(dspsr_el0)
|
||||
postk_debug_sysreg(elr_el1)
|
||||
postk_debug_sysreg(elr_el2)
|
||||
postk_debug_sysreg(elr_el3)
|
||||
postk_debug_sysreg(esr_el1)
|
||||
postk_debug_sysreg(esr_el2)
|
||||
postk_debug_sysreg(esr_el3)
|
||||
postk_debug_sysreg(far_el1)
|
||||
postk_debug_sysreg(far_el2)
|
||||
postk_debug_sysreg(far_el3)
|
||||
postk_debug_sysreg(fpcr)
|
||||
postk_debug_sysreg(fpexc32_el2)
|
||||
postk_debug_sysreg(fpsr)
|
||||
postk_debug_sysreg(hacr_el2)
|
||||
postk_debug_sysreg(hcr_el2)
|
||||
postk_debug_sysreg(hpfar_el2)
|
||||
postk_debug_sysreg(hstr_el2)
|
||||
/*postk_debug_sysreg(ic iallu)*/
|
||||
/*postk_debug_sysreg(ic ialluis)*/
|
||||
/*postk_debug_sysreg(ic ivau)*/
|
||||
/*postk_debug_sysreg(icc_ap0r0_el1)*/
|
||||
/*postk_debug_sysreg(icc_ap0r1_el1)*/
|
||||
/*postk_debug_sysreg(icc_ap0r2_el1)*/
|
||||
/*postk_debug_sysreg(icc_ap0r3_el1)*/
|
||||
/*postk_debug_sysreg(icc_ap1r0_el1)*/
|
||||
/*postk_debug_sysreg(icc_ap1r1_el1)*/
|
||||
/*postk_debug_sysreg(icc_ap1r2_el1)*/
|
||||
/*postk_debug_sysreg(icc_ap1r3_el1)*/
|
||||
/*postk_debug_sysreg(icc_asgi1r_el1)*/
|
||||
/*postk_debug_sysreg(icc_bpr0_el1)*/
|
||||
/*postk_debug_sysreg(icc_bpr1_el1)*/
|
||||
/*postk_debug_sysreg(icc_ctlr_el1)*/
|
||||
/*postk_debug_sysreg(icc_ctlr_el3)*/
|
||||
/*postk_debug_sysreg(icc_dir_el1)*/
|
||||
/*postk_debug_sysreg(icc_eoir0_el1)*/
|
||||
/*postk_debug_sysreg(icc_eoir1_el1)*/
|
||||
/*postk_debug_sysreg(icc_hppir0_el1)*/
|
||||
/*postk_debug_sysreg(icc_hppir1_el1)*/
|
||||
/*postk_debug_sysreg(icc_iar0_el1)*/
|
||||
/*postk_debug_sysreg(icc_iar1_el1)*/
|
||||
/*postk_debug_sysreg(icc_igrpen0_el1)*/
|
||||
/*postk_debug_sysreg(icc_igrpen1_el1)*/
|
||||
/*postk_debug_sysreg(icc_igrpen1_el3)*/
|
||||
/*postk_debug_sysreg(icc_pmr_el1)*/
|
||||
/*postk_debug_sysreg(icc_rpr_el1)*/
|
||||
/*postk_debug_sysreg(icc_seien_el1)*/
|
||||
/*postk_debug_sysreg(icc_sgi0r_el1)*/
|
||||
/*postk_debug_sysreg(icc_sgi1r_el1)*/
|
||||
/*postk_debug_sysreg(icc_sre_el1)*/
|
||||
/*postk_debug_sysreg(icc_sre_el2)*/
|
||||
/*postk_debug_sysreg(icc_sre_el3)*/
|
||||
/*postk_debug_sysreg(ich_ap0r0_el2)*/
|
||||
/*postk_debug_sysreg(ich_ap0r1_el2)*/
|
||||
/*postk_debug_sysreg(ich_ap0r2_el2)*/
|
||||
/*postk_debug_sysreg(ich_ap0r3_el2)*/
|
||||
/*postk_debug_sysreg(ich_ap1r0_el2)*/
|
||||
/*postk_debug_sysreg(ich_ap1r1_el2)*/
|
||||
/*postk_debug_sysreg(ich_ap1r2_el2)*/
|
||||
/*postk_debug_sysreg(ich_ap1r3_el2)*/
|
||||
/*postk_debug_sysreg(ich_eisr_el2)*/
|
||||
/*postk_debug_sysreg(ich_elsr_el2)*/
|
||||
/*postk_debug_sysreg(ich_hcr_el2)*/
|
||||
/*postk_debug_sysreg(ich_lr<n>_el2)*/
|
||||
/*postk_debug_sysreg(ich_misr_el2)*/
|
||||
/*postk_debug_sysreg(ich_vmcr_el2)*/
|
||||
/*postk_debug_sysreg(ich_vseir_el2)*/
|
||||
/*postk_debug_sysreg(ich_vtr_el2)*/
|
||||
postk_debug_sysreg(id_aa64afr0_el1)
|
||||
postk_debug_sysreg(id_aa64afr1_el1)
|
||||
postk_debug_sysreg(id_aa64dfr0_el1)
|
||||
postk_debug_sysreg(id_aa64dfr1_el1)
|
||||
postk_debug_sysreg(id_aa64isar0_el1)
|
||||
postk_debug_sysreg(id_aa64isar1_el1)
|
||||
postk_debug_sysreg(id_aa64mmfr0_el1)
|
||||
postk_debug_sysreg(id_aa64mmfr1_el1)
|
||||
postk_debug_sysreg(id_aa64pfr0_el1)
|
||||
postk_debug_sysreg(id_aa64pfr1_el1)
|
||||
postk_debug_sysreg(id_afr0_el1)
|
||||
postk_debug_sysreg(id_dfr0_el1)
|
||||
postk_debug_sysreg(id_isar0_el1)
|
||||
postk_debug_sysreg(id_isar1_el1)
|
||||
postk_debug_sysreg(id_isar2_el1)
|
||||
postk_debug_sysreg(id_isar3_el1)
|
||||
postk_debug_sysreg(id_isar4_el1)
|
||||
postk_debug_sysreg(id_isar5_el1)
|
||||
postk_debug_sysreg(id_mmfr0_el1)
|
||||
postk_debug_sysreg(id_mmfr1_el1)
|
||||
postk_debug_sysreg(id_mmfr2_el1)
|
||||
postk_debug_sysreg(id_mmfr3_el1)
|
||||
postk_debug_sysreg(id_pfr0_el1)
|
||||
postk_debug_sysreg(id_pfr1_el1)
|
||||
postk_debug_sysreg(ifsr32_el2)
|
||||
postk_debug_sysreg(isr_el1)
|
||||
postk_debug_sysreg(mair_el1)
|
||||
postk_debug_sysreg(mair_el2)
|
||||
postk_debug_sysreg(mair_el3)
|
||||
postk_debug_sysreg(mdccint_el1)
|
||||
postk_debug_sysreg(mdccsr_el0)
|
||||
postk_debug_sysreg(mdcr_el2)
|
||||
postk_debug_sysreg(mdcr_el3)
|
||||
postk_debug_sysreg(mdrar_el1)
|
||||
postk_debug_sysreg(mdscr_el1)
|
||||
postk_debug_sysreg(midr_el1)
|
||||
postk_debug_sysreg(mpidr_el1)
|
||||
postk_debug_sysreg(mvfr0_el1)
|
||||
postk_debug_sysreg(mvfr1_el1)
|
||||
postk_debug_sysreg(mvfr2_el1)
|
||||
postk_debug_sysreg(nzcv)
|
||||
postk_debug_sysreg(osdlr_el1)
|
||||
postk_debug_sysreg(osdtrrx_el1)
|
||||
postk_debug_sysreg(osdtrtx_el1)
|
||||
postk_debug_sysreg(oseccr_el1)
|
||||
postk_debug_sysreg(oslar_el1)
|
||||
postk_debug_sysreg(oslsr_el1)
|
||||
postk_debug_sysreg(par_el1)
|
||||
postk_debug_sysreg(pmccfiltr_el0)
|
||||
postk_debug_sysreg(pmccntr_el0)
|
||||
postk_debug_sysreg(pmceid0_el0)
|
||||
postk_debug_sysreg(pmceid1_el0)
|
||||
postk_debug_sysreg(pmcntenclr_el0)
|
||||
postk_debug_sysreg(pmcntenset_el0)
|
||||
postk_debug_sysreg(pmcr_el0)
|
||||
/*postk_debug_sysreg(pmevcntr<n>_el0)*/
|
||||
/*postk_debug_sysreg(pmevtyper<n>_el0)*/
|
||||
postk_debug_sysreg(pmintenclr_el1)
|
||||
postk_debug_sysreg(pmintenset_el1)
|
||||
postk_debug_sysreg(pmovsclr_el0)
|
||||
postk_debug_sysreg(pmovsset_el0)
|
||||
postk_debug_sysreg(pmselr_el0)
|
||||
postk_debug_sysreg(pmswinc_el0)
|
||||
postk_debug_sysreg(pmuserenr_el0)
|
||||
postk_debug_sysreg(pmxevcntr_el0)
|
||||
postk_debug_sysreg(pmxevtyper_el0)
|
||||
postk_debug_sysreg(revidr_el1)
|
||||
postk_debug_sysreg(rmr_el1)
|
||||
postk_debug_sysreg(rmr_el2)
|
||||
postk_debug_sysreg(rmr_el3)
|
||||
postk_debug_sysreg(rvbar_el1)
|
||||
postk_debug_sysreg(rvbar_el2)
|
||||
postk_debug_sysreg(rvbar_el3)
|
||||
/*postk_debug_sysreg(s3_<op1>_<cn>_<cm>_<op2>)*/
|
||||
postk_debug_sysreg(scr_el3)
|
||||
postk_debug_sysreg(sctlr_el1)
|
||||
postk_debug_sysreg(sctlr_el2)
|
||||
postk_debug_sysreg(sctlr_el3)
|
||||
postk_debug_sysreg(sder32_el3)
|
||||
postk_debug_sysreg(sp_el0)
|
||||
postk_debug_sysreg(sp_el1)
|
||||
postk_debug_sysreg(sp_el2)
|
||||
/*postk_debug_sysreg(sp_el3)*/
|
||||
postk_debug_sysreg(spsel)
|
||||
postk_debug_sysreg(spsr_abt)
|
||||
postk_debug_sysreg(spsr_el1)
|
||||
postk_debug_sysreg(spsr_el2)
|
||||
postk_debug_sysreg(spsr_el3)
|
||||
postk_debug_sysreg(spsr_fiq)
|
||||
postk_debug_sysreg(spsr_irq)
|
||||
postk_debug_sysreg(spsr_und)
|
||||
postk_debug_sysreg(tcr_el1)
|
||||
postk_debug_sysreg(tcr_el2)
|
||||
postk_debug_sysreg(tcr_el3)
|
||||
postk_debug_sysreg(teecr32_el1)
|
||||
postk_debug_sysreg(teehbr32_el1)
|
||||
/*postk_debug_sysreg(tlbi alle1)*/
|
||||
/*postk_debug_sysreg(tlbi alle1is)*/
|
||||
/*postk_debug_sysreg(tlbi alle2)*/
|
||||
/*postk_debug_sysreg(tlbi alle2is)*/
|
||||
/*postk_debug_sysreg(tlbi alle3)*/
|
||||
/*postk_debug_sysreg(tlbi alle3is)*/
|
||||
/*postk_debug_sysreg(tlbi aside1)*/
|
||||
/*postk_debug_sysreg(tlbi aside1is)*/
|
||||
/*postk_debug_sysreg(tlbi ipas2e1)*/
|
||||
/*postk_debug_sysreg(tlbi ipas2e1is)*/
|
||||
/*postk_debug_sysreg(tlbi ipas2le1)*/
|
||||
/*postk_debug_sysreg(tlbi ipas2le1is)*/
|
||||
/*postk_debug_sysreg(tlbi vaae1)*/
|
||||
/*postk_debug_sysreg(tlbi vaae1is)*/
|
||||
/*postk_debug_sysreg(tlbi vaale1)*/
|
||||
/*postk_debug_sysreg(tlbi vaale1is)*/
|
||||
/*postk_debug_sysreg(tlbi vae1)*/
|
||||
/*postk_debug_sysreg(tlbi vae1is)*/
|
||||
/*postk_debug_sysreg(tlbi vae2)*/
|
||||
/*postk_debug_sysreg(tlbi vae2is)*/
|
||||
/*postk_debug_sysreg(tlbi vae3)*/
|
||||
/*postk_debug_sysreg(tlbi vae3is)*/
|
||||
/*postk_debug_sysreg(tlbi vale1)*/
|
||||
/*postk_debug_sysreg(tlbi vale1is)*/
|
||||
/*postk_debug_sysreg(tlbi vale2)*/
|
||||
/*postk_debug_sysreg(tlbi vale2is)*/
|
||||
/*postk_debug_sysreg(tlbi vale3)*/
|
||||
/*postk_debug_sysreg(tlbi vale3is)*/
|
||||
/*postk_debug_sysreg(tlbi vmalle1)*/
|
||||
/*postk_debug_sysreg(tlbi vmalle1is)*/
|
||||
/*postk_debug_sysreg(tlbi vmalls12e1)*/
|
||||
/*postk_debug_sysreg(tlbi vmalls12e1is)*/
|
||||
postk_debug_sysreg(tpidr_el0)
|
||||
postk_debug_sysreg(tpidr_el1)
|
||||
postk_debug_sysreg(tpidr_el2)
|
||||
postk_debug_sysreg(tpidr_el3)
|
||||
postk_debug_sysreg(tpidrro_el0)
|
||||
postk_debug_sysreg(ttbr0_el1)
|
||||
postk_debug_sysreg(ttbr0_el2)
|
||||
postk_debug_sysreg(ttbr0_el3)
|
||||
postk_debug_sysreg(ttbr1_el1)
|
||||
postk_debug_sysreg(vbar_el1)
|
||||
postk_debug_sysreg(vbar_el2)
|
||||
postk_debug_sysreg(vbar_el3)
|
||||
postk_debug_sysreg(vmpidr_el2)
|
||||
postk_debug_sysreg(vpidr_el2)
|
||||
postk_debug_sysreg(vtcr_el2)
|
||||
postk_debug_sysreg(vttbr_el2)
|
||||
13 arch/arm64/kernel/proc-macros.S Normal file
@@ -0,0 +1,13 @@
/* proc-macros.S COPYRIGHT FUJITSU LIMITED 2015 */

#include <arch-memory.h>

/*
 * dcache_line_size - get the minimum D-cache line size from the CTR register.
 */
	.macro	dcache_line_size, reg, tmp
	mrs	\tmp, ctr_el0			// read CTR
	ubfm	\tmp, \tmp, #16, #19		// cache line size encoding
	mov	\reg, #4			// bytes per word
	lsl	\reg, \reg, \tmp		// actual cache line size
	.endm
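The dcache_line_size macro above decodes CTR_EL0: bits [19:16] (DminLine) hold log2 of the line size in words, so the line size in bytes is 4 << DminLine. A hedged C equivalent of that decode, using a made-up CTR_EL0 value purely for illustration:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t ctr_el0 = 0x0000000000040003ULL;	/* hypothetical CTR_EL0 value */

	/* Same decode as the dcache_line_size macro: DminLine = CTR_EL0[19:16],
	 * line size in bytes = (4 bytes per word) << DminLine. */
	unsigned int dminline = (unsigned int)((ctr_el0 >> 16) & 0xf);
	unsigned int line_size = 4U << dminline;

	printf("D-cache line size: %u bytes\n", line_size);	/* 4 << 4 = 64 */
	return 0;
}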
148 arch/arm64/kernel/proc.S Normal file
@@ -0,0 +1,148 @@
|
||||
/* proc.S COPYRIGHT FUJITSU LIMITED 2015-2017 */
|
||||
|
||||
#include <linkage.h>
|
||||
#include <arch-memory.h>
|
||||
#include <sysreg.h>
|
||||
#include <assembler.h>
|
||||
#include "proc-macros.S"
|
||||
|
||||
#ifdef CONFIG_ARM64_64K_PAGES
|
||||
# define TCR_TG_FLAGS TCR_TG0_64K | TCR_TG1_64K
|
||||
#else
|
||||
# define TCR_TG_FLAGS TCR_TG0_4K | TCR_TG1_4K
|
||||
#endif
|
||||
|
||||
//#ifdef CONFIG_SMP
|
||||
#define TCR_SMP_FLAGS TCR_SHARED
|
||||
//#else
|
||||
//#define TCR_SMP_FLAGS 0
|
||||
//#endif
|
||||
|
||||
/* PTWs cacheable, inner/outer WBWA */
|
||||
#define TCR_CACHE_FLAGS TCR_IRGN_WBWA | TCR_ORGN_WBWA
|
||||
|
||||
#define MAIR(attr, mt) ((attr) << ((mt) * 8))
|
||||
|
||||
/*
|
||||
* cpu_do_idle()
|
||||
*
|
||||
* Idle the processor (wait for interrupt).
|
||||
*/
|
||||
#if defined(CONFIG_HAS_NMI)
|
||||
#include <arm-gic-v3.h>
|
||||
ENTRY(__cpu_do_idle)
|
||||
mrs x0, daif // save I bit
|
||||
msr daifset, #2 // set I bit
|
||||
mrs_s x1, ICC_PMR_EL1 // save PMR
|
||||
mov x2, #ICC_PMR_EL1_UNMASKED
|
||||
msr_s ICC_PMR_EL1, x2 // unmask at PMR
|
||||
dsb sy // WFI may enter a low-power mode
|
||||
wfi
|
||||
msr_s ICC_PMR_EL1, x1 // restore PMR
|
||||
msr daif, x0 // restore I bit
|
||||
ret
|
||||
ENDPROC(__cpu_do_idle)
|
||||
#else /* defined(CONFIG_HAS_NMI) */
|
||||
ENTRY(__cpu_do_idle)
|
||||
dsb sy // WFI may enter a low-power mode
|
||||
wfi
|
||||
ret
|
||||
ENDPROC(__cpu_do_idle)
|
||||
#endif /* defined(CONFIG_HAS_NMI) */
|
||||
|
||||
/*
|
||||
* cpu_do_switch_mm(pgd_phys, tsk)
|
||||
*
|
||||
* Set the translation table base pointer to be pgd_phys.
|
||||
*
|
||||
* - pgd_phys - physical address of new TTB
|
||||
*/
|
||||
ENTRY(cpu_do_switch_mm)
|
||||
//mmid w1, x1 // get mm->context.id
|
||||
bfi x0, x1, #48, #16 // set the ASID
|
||||
msr ttbr0_el1, x0 // set TTBR0
|
||||
isb
|
||||
ret
|
||||
ENDPROC(cpu_do_switch_mm)
|
||||
|
||||
.section ".text.init", #alloc, #execinstr
|
||||
|
||||
/*
|
||||
* __cpu_setup
|
||||
*
|
||||
* Initialise the processor for turning the MMU on. Return in x0 the
|
||||
* value of the SCTLR_EL1 register.
|
||||
*/
|
||||
ENTRY(__cpu_setup)
|
||||
tlbi vmalle1 // Invalidate local TLB
|
||||
dsb nsh
|
||||
|
||||
mov x0, #3 << 20
|
||||
|
||||
/* SVE */
|
||||
mrs x5, id_aa64pfr0_el1
|
||||
ubfx x5, x5, #ID_AA64PFR0_SVE_SHIFT, #4
|
||||
cbz x5, 1f
|
||||
|
||||
orr x0, x0, #CPACR_EL1_ZEN // SVE: trap disabled EL1 and EL0
|
||||
1: msr cpacr_el1, x0 // Enable FP/ASIMD
|
||||
|
||||
mov x0, #1 << 12 // Reset mdscr_el1 and disable
|
||||
msr mdscr_el1, x0 // access to the DCC from EL0
|
||||
isb // Unmask debug exceptions now,
|
||||
enable_dbg // since this is per-cpu
|
||||
|
||||
/*
|
||||
* Memory region attributes for LPAE:
|
||||
*
|
||||
* n = AttrIndx[2:0]
|
||||
* n MAIR
|
||||
* DEVICE_nGnRnE 000 00000000
|
||||
* DEVICE_nGnRE 001 00000100
|
||||
* DEVICE_GRE 010 00001100
|
||||
* NORMAL_NC 011 01000100
|
||||
* NORMAL 100 11111111
|
||||
*/
|
||||
ldr x5, =MAIR(0x00, MT_DEVICE_nGnRnE) | \
|
||||
MAIR(0x04, MT_DEVICE_nGnRE) | \
|
||||
MAIR(0x0c, MT_DEVICE_GRE) | \
|
||||
MAIR(0x44, MT_NORMAL_NC) | \
|
||||
MAIR(0xff, MT_NORMAL)
|
||||
msr mair_el1, x5
|
||||
/*
|
||||
* Prepare SCTLR
|
||||
*/
|
||||
adr x5, crval
|
||||
ldp w5, w6, [x5]
|
||||
mrs x0, sctlr_el1
|
||||
bic x0, x0, x5 // clear bits
|
||||
orr x0, x0, x6 // set bits
|
||||
/*
|
||||
* Set/prepare TCR and TTBR. We use 512GB (39-bit) address range for
|
||||
* both user and kernel.
|
||||
*/
|
||||
ldr x10, =TCR_TxSZ(VA_BITS) | TCR_CACHE_FLAGS | TCR_SMP_FLAGS | \
|
||||
TCR_TG_FLAGS | TCR_ASID16 | TCR_TBI0
|
||||
/*
|
||||
* Read the PARange bits from ID_AA64MMFR0_EL1 and set the IPS bits in
|
||||
* TCR_EL1.
|
||||
*/
|
||||
mrs x9, ID_AA64MMFR0_EL1
|
||||
bfi x10, x9, #32, #3
|
||||
msr tcr_el1, x10
|
||||
ret // return to head.S
|
||||
ENDPROC(__cpu_setup)
|
||||
|
||||
/*
|
||||
* n n T
|
||||
* U E WT T UD US IHBS
|
||||
* CE0 XWHW CZ ME TEEA S
|
||||
* .... .IEE .... NEAI TE.I ..AD DEN0 ACAM
|
||||
* 0011 0... 1101 ..0. ..0. 10.. .... .... < hardware reserved
|
||||
* .... .1.. .... 01.1 11.1 ..01 0001 1101 < software settings
|
||||
*/
|
||||
.type crval, #object
|
||||
crval:
|
||||
.word 0x000802e2 // clear
|
||||
.word 0x0405d11d // set
|
||||
|
||||
Some files were not shown because too many files have changed in this diff.