From 5c9ba2d7f956d3a9c829c23d03fc6d5e87d972b4 Mon Sep 17 00:00:00 2001 From: Mohammad Iskandarany Date: Thu, 31 Oct 2024 14:19:38 +0300 Subject: [PATCH 1/2] Add markdown files --- tmp/t/10045.md | 207 +++++ tmp/t/10046.md | 44 + tmp/t/10047.md | 57 ++ tmp/t/10048.md | 76 ++ tmp/t/10049.md | 162 ++++ tmp/t/10050.md | 84 ++ tmp/t/10051.md | 65 ++ tmp/t/10052.md | 91 ++ tmp/t/10053.md | 42 + tmp/t/10054.md | 85 ++ tmp/t/10055.md | 48 + tmp/t/10056.md | 51 ++ tmp/t/10057.md | 127 +++ tmp/t/10058.md | 54 ++ tmp/t/10059.md | 38 + tmp/t/10060.md | 45 + tmp/t/10061.md | 41 + tmp/t/10062.md | 47 + tmp/t/10063.md | 30 + tmp/t/10064.md | 38 + tmp/t/10065.md | 70 ++ tmp/t/10066.md | 55 ++ tmp/t/10067.md | 62 ++ tmp/t/10068.md | 37 + tmp/t/10069.md | 41 + tmp/t/10070.md | 44 + tmp/t/10071.md | 150 ++++ tmp/t/10072.md | 36 + tmp/t/10073.md | 62 ++ tmp/t/10074.md | 308 +++++++ tmp/t/10075.md | 49 ++ tmp/t/10076.md | 54 ++ tmp/t/10077.md | 65 ++ tmp/t/10078.md | 39 + tmp/t/10079.md | 38 + tmp/t/10080.md | 40 + tmp/t/10081.md | 52 ++ tmp/t/10082.md | 41 + tmp/t/10083.md | 49 ++ tmp/t/10084.md | 45 + tmp/t/10085.md | 41 + tmp/t/10086.md | 31 + tmp/t/10087.md | 41 + tmp/t/10088.md | 41 + tmp/t/10089.md | 43 + tmp/t/10090.md | 42 + tmp/t/10091.md | 45 + tmp/t/10092.md | 42 + tmp/t/10093.md | 57 ++ tmp/t/10094.md | 89 ++ tmp/t/10095.md | 39 + tmp/t/10096.md | 376 ++++++++ tmp/t/10097.md | 45 + tmp/t/10098.md | 44 + tmp/t/10099.md | 50 ++ tmp/t/10100.md | 45 + tmp/t/10101.md | 57 ++ tmp/t/10102.md | 47 + tmp/t/10103.md | 48 + tmp/t/10104.md | 46 + tmp/t/10105.md | 54 ++ tmp/t/10106.md | 45 + tmp/t/10107.md | 43 + tmp/t/10108.md | 36 + tmp/t/10109.md | 92 ++ tmp/t/10110.md | 64 ++ tmp/t/10111.md | 110 +++ tmp/t/10112.md | 45 + tmp/t/10113.md | 87 ++ tmp/t/10114.md | 42 + tmp/t/10115.md | 42 + tmp/t/10116.md | 115 +++ tmp/t/10117.md | 37 + tmp/t/10118.md | 51 ++ tmp/t/10119.md | 42 + tmp/t/10120.md | 43 + tmp/t/10121.md | 68 ++ tmp/t/10122.md | 70 ++ tmp/t/10123.md | 38 + tmp/t/10124.md | 59 ++ tmp/t/10125.md | 84 ++ tmp/t/10126.md | 36 + tmp/t/10128.md | 130 +++ tmp/t/10129.md | 53 ++ tmp/t/10130.md | 39 + tmp/t/10131.md | 46 + tmp/t/10132.md | 682 +++++++++++++++ tmp/t/10133.md | 44 + tmp/t/10134.md | 84 ++ tmp/t/10135.md | 37 + tmp/t/10136.md | 104 +++ tmp/t/10137.md | 41 + tmp/t/10138.md | 50 ++ tmp/t/10139.md | 151 ++++ tmp/t/10140.md | 178 ++++ tmp/t/10141.md | 106 +++ tmp/t/10142.md | 58 ++ tmp/t/10143.md | 33 + tmp/t/10144.md | 36 + tmp/t/10145.md | 73 ++ tmp/t/10146.md | 37 + tmp/t/10147.md | 55 ++ tmp/t/10148.md | 32 + tmp/t/10149.md | 35 + tmp/t/10150.md | 86 ++ tmp/t/10151.md | 46 + tmp/t/10152.md | 37 + tmp/t/10153.md | 128 +++ tmp/t/10154.md | 50 ++ tmp/t/10155.md | 55 ++ tmp/t/10156.md | 39 + tmp/t/10157.md | 67 ++ tmp/t/10158.md | 58 ++ tmp/t/10159.md | 91 ++ tmp/t/10160.md | 61 ++ tmp/t/10161.md | 52 ++ tmp/t/10162.md | 115 +++ tmp/t/10163.md | 58 ++ tmp/t/10164.md | 37 + tmp/t/10165.md | 38 + tmp/t/10166.md | 38 + tmp/t/10167.md | 54 ++ tmp/t/10168.md | 56 ++ tmp/t/10169.md | 55 ++ tmp/t/10170.md | 66 ++ tmp/t/10171.md | 37 + tmp/t/10172.md | 50 ++ tmp/t/10173.md | 117 +++ tmp/t/10174.md | 47 + tmp/t/10175.md | 49 ++ tmp/t/10176.md | 54 ++ tmp/t/10177.md | 43 + tmp/t/10178.md | 128 +++ tmp/t/10179.md | 40 + tmp/t/10180.md | 49 ++ tmp/t/10181.md | 40 + tmp/t/10182.md | 66 ++ tmp/t/10183.md | 45 + tmp/t/10184.md | 41 + tmp/t/10186.md | 53 ++ tmp/t/10187.md | 41 + tmp/t/10188.md | 74 ++ tmp/t/10189.md | 132 +++ tmp/t/10190.md | 70 ++ tmp/t/10191.md | 29 + tmp/t/10192.md | 39 + tmp/t/10193.md | 45 + 
tmp/t/10194.md | 39 + tmp/t/10195.md | 114 +++ tmp/t/10196.md | 74 ++ tmp/t/10197.md | 51 ++ tmp/t/10198.md | 37 + tmp/t/10199.md | 67 ++ tmp/t/10200.md | 39 + tmp/t/10201.md | 57 ++ tmp/t/10202.md | 43 + tmp/t/10203.md | 57 ++ tmp/t/10204.md | 84 ++ tmp/t/10205.md | 109 +++ tmp/t/10206.md | 60 ++ tmp/t/10207.md | 190 ++++ tmp/t/10208.md | 41 + tmp/t/10209.md | 34 + tmp/t/10210.md | 46 + tmp/t/10211.md | 56 ++ tmp/t/10212.md | 40 + tmp/t/10213.md | 44 + tmp/t/10214.md | 35 + tmp/t/10215.md | 52 ++ tmp/t/10216.md | 44 + tmp/t/10217.md | 41 + tmp/t/10218.md | 50 ++ tmp/t/10219.md | 35 + tmp/t/10220.md | 88 ++ tmp/t/10221.md | 56 ++ tmp/t/10228.md | 55 ++ tmp/t/10229.md | 73 ++ tmp/t/10230.md | 76 ++ tmp/t/10231.md | 71 ++ tmp/t/10232.md | 42 + tmp/t/10233.md | 44 + tmp/t/10234.md | 51 ++ tmp/t/10235.md | 46 + tmp/t/10236.md | 44 + tmp/t/10237.md | 207 +++++ tmp/t/10238.md | 53 ++ tmp/t/10239.md | 61 ++ tmp/t/10240.md | 39 + tmp/t/10241.md | 34 + tmp/t/10242.md | 72 ++ tmp/t/10243.md | 42 + tmp/t/10244.md | 52 ++ tmp/t/1033.md | 323 +++++++ tmp/t/1041.md | 170 ++++ tmp/t/1051.md | 425 +++++++++ tmp/t/1058.md | 176 ++++ tmp/t/10583.md | 96 ++ tmp/t/10585.md | 81 ++ tmp/t/10586.md | 174 ++++ tmp/t/10621.md | 47 + tmp/t/10622.md | 113 +++ tmp/t/10623.md | 105 +++ tmp/t/1063.md | 189 ++++ tmp/t/10632.md | 28 + tmp/t/1065.md | 285 ++++++ tmp/t/1066.md | 33 + tmp/t/1073.md | 296 +++++++ tmp/t/10811.md | 13 + tmp/t/10813.md | 104 +++ tmp/t/1083.md | 645 ++++++++++++++ tmp/t/10835.md | 22 + tmp/t/10837.md | 33 + tmp/t/1084.md | 139 +++ tmp/t/1086.md | 484 ++++++++++ tmp/t/1087.md | 833 ++++++++++++++++++ tmp/t/1088.md | 83 ++ tmp/t/1093.md | 270 ++++++ tmp/t/1094.md | 124 +++ tmp/t/1095.md | 138 +++ tmp/t/1096.md | 73 ++ tmp/t/1097.md | 262 ++++++ tmp/t/1099.md | 338 +++++++ tmp/t/1100.md | 504 +++++++++++ tmp/t/11012.md | 207 +++++ tmp/t/1102.md | 93 ++ tmp/t/11102.md | 107 +++ tmp/t/1111.md | 1016 +++++++++++++++++++++ tmp/t/1112.md | 426 +++++++++ tmp/t/11125.md | 126 +++ tmp/t/11144.md | 52 ++ tmp/t/11148.md | 45 + tmp/t/11181.md | 59 ++ tmp/t/11182.md | 62 ++ tmp/t/11183.md | 50 ++ tmp/t/11184.md | 57 ++ tmp/t/112.md | 132 +++ tmp/t/11285.md | 40 + tmp/t/11290.md | 35 + tmp/t/11291.md | 35 + tmp/t/11298.md | 6 + tmp/t/11312.md | 138 +++ tmp/t/11313.md | 208 +++++ tmp/t/11351.md | 563 ++++++++++++ tmp/t/11413.md | 55 ++ tmp/t/11414.md | 36 + tmp/t/1145.md | 36 + tmp/t/1150.md | 820 +++++++++++++++++ tmp/t/1155.md | 1271 +++++++++++++++++++++++++++ tmp/t/1156.md | 574 ++++++++++++ tmp/t/1157.md | 175 ++++ tmp/t/1158.md | 78 ++ tmp/t/116.md | 82 ++ tmp/t/1162.md | 184 ++++ tmp/t/1163.md | 2282 ++++++++++++++++++++++++++++++++++++++++++++++++ tmp/t/11672.md | 6 + tmp/t/1168.md | 239 +++++ tmp/t/117.md | 41 + tmp/t/118.md | 142 +++ tmp/t/1184.md | 187 ++++ tmp/t/1187.md | 71 ++ tmp/t/1194.md | 246 ++++++ tmp/t/11961.md | 227 +++++ tmp/t/1199.md | 96 ++ tmp/t/11991.md | 268 ++++++ tmp/t/11998.md | 42 + tmp/t/12005.md | 152 ++++ tmp/t/12012.md | 143 +++ tmp/t/12042.md | 28 + tmp/t/12128.md | 248 ++++++ tmp/t/12244.md | 408 +++++++++ tmp/t/12246.md | 48 + tmp/t/12248.md | 94 ++ tmp/t/12281.md | 297 +++++++ tmp/t/12372.md | 55 ++ tmp/t/12435.md | 602 +++++++++++++ tmp/t/12562.md | 78 ++ tmp/t/12588.md | 75 ++ tmp/t/12659.md | 72 ++ tmp/t/12676.md | 19 + tmp/t/12689.md | 222 +++++ tmp/t/12690.md | 269 ++++++ tmp/t/12691.md | 45 + tmp/t/12731.md | 58 ++ tmp/t/12732.md | 99 +++ tmp/t/12733.md | 50 ++ tmp/t/12734.md | 454 ++++++++++ tmp/t/12818.md | 22 + tmp/t/13005.md | 126 +++ tmp/t/13019.md | 21 + 
tmp/t/13046.md | 51 ++ tmp/t/13070.md | 27 + tmp/t/13086.md | 15 + tmp/t/13089.md | 25 + tmp/t/13130.md | 11 + tmp/t/13132.md | 29 + tmp/t/13137.md | 20 + tmp/t/13146.md | 14 + tmp/t/13249.md | 109 +++ tmp/t/13276.md | 158 ++++ tmp/t/13400.md | 67 ++ tmp/t/13403.md | 338 +++++++ tmp/t/13541.md | 50 ++ tmp/t/13557.md | 72 ++ tmp/t/13778.md | 37 + tmp/t/13779.md | 28 + tmp/t/13780.md | 27 + tmp/t/13781.md | 37 + tmp/t/13782.md | 37 + tmp/t/13783.md | 39 + tmp/t/13784.md | 37 + tmp/t/13785.md | 40 + tmp/t/13788.md | 214 +++++ tmp/t/13789.md | 117 +++ tmp/t/13899.md | 547 ++++++++++++ tmp/t/13926.md | 258 ++++++ tmp/t/13927.md | 113 +++ tmp/t/13953.md | 128 +++ tmp/t/13990.md | 8 + tmp/t/14018.md | 23 + tmp/t/14043.md | 33 + tmp/t/14056.md | 78 ++ tmp/t/14367.md | 137 +++ tmp/t/14532.md | 72 ++ tmp/t/146.md | 116 +++ tmp/t/14612.md | 26 + tmp/t/14936.md | 159 ++++ tmp/t/14937.md | 264 ++++++ tmp/t/14983.md | 36 + tmp/t/15010.md | 10 + tmp/t/15011.md | 10 + tmp/t/15012.md | 233 +++++ tmp/t/15017.md | 54 ++ tmp/t/15018.md | 418 +++++++++ tmp/t/15211.md | 136 +++ tmp/t/15212.md | 97 ++ tmp/t/154.md | 227 +++++ tmp/t/15405.md | 97 ++ tmp/t/15406.md | 164 ++++ tmp/t/15621.md | 100 +++ tmp/t/15652.md | 23 + tmp/t/15653.md | 221 +++++ tmp/t/15654.md | 545 ++++++++++++ tmp/t/15684.md | 94 ++ tmp/t/15752.md | 54 ++ tmp/t/15753.md | 205 +++++ tmp/t/15754.md | 537 ++++++++++++ tmp/t/15808.md | 610 +++++++++++++ tmp/t/15809.md | 227 +++++ tmp/t/15888.md | 12 + tmp/t/15889.md | 41 + tmp/t/15892.md | 6 + tmp/t/1593.md | 36 + tmp/t/1729.md | 53 ++ tmp/t/2203.md | 174 ++++ tmp/t/2999.md | 44 + tmp/t/3301.md | 103 +++ tmp/t/3341.md | 34 + tmp/t/3352.md | 34 + tmp/t/3723.md | 44 + tmp/t/3784.md | 224 +++++ tmp/t/4058.md | 254 ++++++ tmp/t/4138.md | 218 +++++ tmp/t/4449.md | 551 ++++++++++++ tmp/t/4450.md | 634 ++++++++++++++ tmp/t/4451.md | 248 ++++++ tmp/t/4453.md | 80 ++ tmp/t/4454.md | 66 ++ tmp/t/4455.md | 168 ++++ tmp/t/4457.md | 264 ++++++ tmp/t/4458.md | 193 ++++ tmp/t/4459.md | 383 ++++++++ tmp/t/4460.md | 135 +++ tmp/t/4461.md | 272 ++++++ tmp/t/4462.md | 210 +++++ tmp/t/4463.md | 175 ++++ tmp/t/4466.md | 243 ++++++ tmp/t/4467.md | 627 +++++++++++++ tmp/t/4468.md | 176 ++++ tmp/t/4474.md | 90 ++ tmp/t/4554.md | 1198 +++++++++++++++++++++++++ tmp/t/4614.md | 447 ++++++++++ tmp/t/4652.md | 213 +++++ tmp/t/4776.md | 144 +++ tmp/t/4796.md | 112 +++ tmp/t/4837.md | 479 ++++++++++ tmp/t/4898.md | 205 +++++ tmp/t/4988.md | 86 ++ tmp/t/5064.md | 1270 +++++++++++++++++++++++++++ tmp/t/5132.md | 187 ++++ tmp/t/5188.md | 86 ++ tmp/t/5199.md | 355 ++++++++ tmp/t/5202.md | 118 +++ tmp/t/5212.md | 52 ++ tmp/t/5213.md | 1236 ++++++++++++++++++++++++++ tmp/t/5313.md | 145 +++ tmp/t/5329.md | 549 ++++++++++++ tmp/t/5334.md | 69 ++ tmp/t/5337.md | 22 + tmp/t/5348.md | 99 +++ tmp/t/5358.md | 23 + tmp/t/5364.md | 96 ++ tmp/t/5396.md | 725 +++++++++++++++ tmp/t/5451.md | 25 + tmp/t/5452.md | 142 +++ tmp/t/5454.md | 309 +++++++ tmp/t/5455.md | 103 +++ tmp/t/5456.md | 128 +++ tmp/t/5457.md | 132 +++ tmp/t/5459.md | 131 +++ tmp/t/5460.md | 57 ++ tmp/t/5461.md | 21 + tmp/t/5462.md | 51 ++ tmp/t/5464.md | 294 +++++++ tmp/t/5465.md | 131 +++ tmp/t/5466.md | 130 +++ tmp/t/5471.md | 129 +++ tmp/t/5476.md | 976 +++++++++++++++++++++ tmp/t/5484.md | 201 +++++ tmp/t/5520.md | 25 + tmp/t/5521.md | 99 +++ tmp/t/5522.md | 118 +++ tmp/t/5523.md | 24 + tmp/t/5524.md | 46 + tmp/t/5525.md | 46 + tmp/t/5527.md | 33 + tmp/t/5528.md | 46 + tmp/t/5547.md | 58 ++ tmp/t/5548.md | 130 +++ tmp/t/5593.md | 61 ++ tmp/t/5597.md | 96 ++ tmp/t/5645.md | 
24 + tmp/t/5657.md | 335 +++++++ tmp/t/5666.md | 30 + tmp/t/5667.md | 27 + tmp/t/5668.md | 31 + tmp/t/5679.md | 942 ++++++++++++++++++++ tmp/t/5683.md | 51 ++ tmp/t/5732.md | 175 ++++ tmp/t/5740.md | 234 +++++ tmp/t/5780.md | 149 ++++ tmp/t/5781.md | 146 ++++ tmp/t/5784.md | 128 +++ tmp/t/5886.md | 892 +++++++++++++++++++ tmp/t/5891.md | 402 +++++++++ tmp/t/5892.md | 358 ++++++++ tmp/t/5896.md | 159 ++++ tmp/t/5938.md | 293 +++++++ tmp/t/5967.md | 110 +++ tmp/t/6006.md | 183 ++++ tmp/t/6116.md | 43 + tmp/t/6119.md | 43 + tmp/t/6120.md | 74 ++ tmp/t/6121.md | 68 ++ tmp/t/6122.md | 60 ++ tmp/t/6123.md | 80 ++ tmp/t/6124.md | 85 ++ tmp/t/6125.md | 70 ++ tmp/t/6126.md | 100 +++ tmp/t/6127.md | 59 ++ tmp/t/6128.md | 69 ++ tmp/t/6129.md | 59 ++ tmp/t/6131.md | 77 ++ tmp/t/6132.md | 61 ++ tmp/t/6133.md | 67 ++ tmp/t/6134.md | 89 ++ tmp/t/6135.md | 57 ++ tmp/t/6136.md | 58 ++ tmp/t/6137.md | 66 ++ tmp/t/6138.md | 71 ++ tmp/t/6139.md | 56 ++ tmp/t/6140.md | 65 ++ tmp/t/6141.md | 53 ++ tmp/t/6142.md | 58 ++ tmp/t/6161.md | 275 ++++++ tmp/t/6174.md | 178 ++++ tmp/t/6184.md | 328 +++++++ tmp/t/6185.md | 35 + tmp/t/6186.md | 51 ++ tmp/t/6187.md | 158 ++++ tmp/t/6208.md | 102 +++ tmp/t/6209.md | 61 ++ tmp/t/6234.md | 22 + tmp/t/6361.md | 133 +++ tmp/t/6399.md | 75 ++ tmp/t/6450.md | 188 ++++ tmp/t/6463.md | 22 + tmp/t/6464.md | 205 +++++ tmp/t/6465.md | 96 ++ tmp/t/6466.md | 138 +++ tmp/t/6467.md | 23 + tmp/t/6468.md | 204 +++++ tmp/t/6469.md | 52 ++ tmp/t/6470.md | 101 +++ tmp/t/6471.md | 72 ++ tmp/t/6472.md | 64 ++ tmp/t/6473.md | 115 +++ tmp/t/6474.md | 135 +++ tmp/t/6475.md | 75 ++ tmp/t/6476.md | 62 ++ tmp/t/6477.md | 99 +++ tmp/t/6478.md | 78 ++ tmp/t/6479.md | 41 + tmp/t/6480.md | 46 + tmp/t/6481.md | 61 ++ tmp/t/6482.md | 99 +++ tmp/t/6483.md | 64 ++ tmp/t/6484.md | 101 +++ tmp/t/6485.md | 166 ++++ tmp/t/6488.md | 20 + tmp/t/6498.md | 75 ++ tmp/t/6499.md | 42 + tmp/t/6501.md | 38 + tmp/t/6559.md | 1723 ++++++++++++++++++++++++++++++++++++ tmp/t/6562.md | 88 ++ tmp/t/6577.md | 138 +++ tmp/t/6615.md | 22 + tmp/t/6636.md | 33 + tmp/t/6640.md | 65 ++ tmp/t/6641.md | 59 ++ tmp/t/6657.md | 90 ++ tmp/t/6659.md | 63 ++ tmp/t/6663.md | 87 ++ tmp/t/6664.md | 243 ++++++ tmp/t/6665.md | 67 ++ tmp/t/6680.md | 20 + tmp/t/6695.md | 37 + tmp/t/6702.md | 252 ++++++ tmp/t/6749.md | 166 ++++ tmp/t/6835.md | 178 ++++ tmp/t/6864.md | 226 +++++ tmp/t/6877.md | 81 ++ tmp/t/6894.md | 408 +++++++++ tmp/t/6930.md | 141 +++ tmp/t/6973.md | 33 + tmp/t/6974.md | 26 + tmp/t/7059.md | 677 ++++++++++++++ tmp/t/7064.md | 102 +++ tmp/t/7065.md | 47 + tmp/t/7066.md | 47 + tmp/t/7068.md | 1060 ++++++++++++++++++++++ tmp/t/7113.md | 85 ++ tmp/t/7126.md | 136 +++ tmp/t/7128.md | 20 + tmp/t/7129.md | 21 + tmp/t/7130.md | 22 + tmp/t/7131.md | 311 +++++++ tmp/t/7132.md | 1416 ++++++++++++++++++++++++++++++ tmp/t/7133.md | 110 +++ tmp/t/7134.md | 20 + tmp/t/7148.md | 22 + tmp/t/7150.md | 21 + tmp/t/7152.md | 51 ++ tmp/t/7153.md | 22 + tmp/t/7154.md | 24 + tmp/t/7157.md | 65 ++ tmp/t/7171.md | 42 + tmp/t/7180.md | 159 ++++ tmp/t/7181.md | 285 ++++++ tmp/t/7183.md | 44 + tmp/t/7184.md | 223 +++++ tmp/t/7186.md | 27 + tmp/t/7191.md | 78 ++ tmp/t/7192.md | 57 ++ tmp/t/7193.md | 55 ++ tmp/t/7194.md | 69 ++ tmp/t/7195.md | 50 ++ tmp/t/7218.md | 22 + tmp/t/7219.md | 185 ++++ tmp/t/7221.md | 482 ++++++++++ tmp/t/7286.md | 123 +++ tmp/t/7287.md | 266 ++++++ tmp/t/7310.md | 91 ++ tmp/t/7311.md | 62 ++ tmp/t/7319.md | 8 + tmp/t/7329.md | 45 + tmp/t/7330.md | 24 + tmp/t/7331.md | 30 + tmp/t/7332.md | 29 + tmp/t/7379.md | 244 ++++++ tmp/t/7388.md | 
846 ++++++++++++++++++ tmp/t/7401.md | 269 ++++++ tmp/t/7406.md | 166 ++++ tmp/t/7414.md | 946 ++++++++++++++++++++ tmp/t/7433.md | 209 +++++ tmp/t/7530.md | 315 +++++++ tmp/t/7592.md | 174 ++++ tmp/t/7814.md | 618 +++++++++++++ tmp/t/7933.md | 17 + tmp/t/7934.md | 11 + tmp/t/8047.md | 41 + tmp/t/8701.md | 314 +++++++ tmp/t/8702.md | 102 +++ tmp/t/8723.md | 53 ++ tmp/t/8793.md | 266 ++++++ tmp/t/8819.md | 12 + tmp/t/8873.md | 153 ++++ tmp/t/8874.md | 158 ++++ tmp/t/8890.md | 164 ++++ tmp/t/8896.md | 797 +++++++++++++++++ tmp/t/9151.md | 609 +++++++++++++ tmp/t/9836.md | 144 +++ tmp/t/9839.md | 47 + 602 files changed, 86594 insertions(+) create mode 100644 tmp/t/10045.md create mode 100644 tmp/t/10046.md create mode 100644 tmp/t/10047.md create mode 100644 tmp/t/10048.md create mode 100644 tmp/t/10049.md create mode 100644 tmp/t/10050.md create mode 100644 tmp/t/10051.md create mode 100644 tmp/t/10052.md create mode 100644 tmp/t/10053.md create mode 100644 tmp/t/10054.md create mode 100644 tmp/t/10055.md create mode 100644 tmp/t/10056.md create mode 100644 tmp/t/10057.md create mode 100644 tmp/t/10058.md create mode 100644 tmp/t/10059.md create mode 100644 tmp/t/10060.md create mode 100644 tmp/t/10061.md create mode 100644 tmp/t/10062.md create mode 100644 tmp/t/10063.md create mode 100644 tmp/t/10064.md create mode 100644 tmp/t/10065.md create mode 100644 tmp/t/10066.md create mode 100644 tmp/t/10067.md create mode 100644 tmp/t/10068.md create mode 100644 tmp/t/10069.md create mode 100644 tmp/t/10070.md create mode 100644 tmp/t/10071.md create mode 100644 tmp/t/10072.md create mode 100644 tmp/t/10073.md create mode 100644 tmp/t/10074.md create mode 100644 tmp/t/10075.md create mode 100644 tmp/t/10076.md create mode 100644 tmp/t/10077.md create mode 100644 tmp/t/10078.md create mode 100644 tmp/t/10079.md create mode 100644 tmp/t/10080.md create mode 100644 tmp/t/10081.md create mode 100644 tmp/t/10082.md create mode 100644 tmp/t/10083.md create mode 100644 tmp/t/10084.md create mode 100644 tmp/t/10085.md create mode 100644 tmp/t/10086.md create mode 100644 tmp/t/10087.md create mode 100644 tmp/t/10088.md create mode 100644 tmp/t/10089.md create mode 100644 tmp/t/10090.md create mode 100644 tmp/t/10091.md create mode 100644 tmp/t/10092.md create mode 100644 tmp/t/10093.md create mode 100644 tmp/t/10094.md create mode 100644 tmp/t/10095.md create mode 100644 tmp/t/10096.md create mode 100644 tmp/t/10097.md create mode 100644 tmp/t/10098.md create mode 100644 tmp/t/10099.md create mode 100644 tmp/t/10100.md create mode 100644 tmp/t/10101.md create mode 100644 tmp/t/10102.md create mode 100644 tmp/t/10103.md create mode 100644 tmp/t/10104.md create mode 100644 tmp/t/10105.md create mode 100644 tmp/t/10106.md create mode 100644 tmp/t/10107.md create mode 100644 tmp/t/10108.md create mode 100644 tmp/t/10109.md create mode 100644 tmp/t/10110.md create mode 100644 tmp/t/10111.md create mode 100644 tmp/t/10112.md create mode 100644 tmp/t/10113.md create mode 100644 tmp/t/10114.md create mode 100644 tmp/t/10115.md create mode 100644 tmp/t/10116.md create mode 100644 tmp/t/10117.md create mode 100644 tmp/t/10118.md create mode 100644 tmp/t/10119.md create mode 100644 tmp/t/10120.md create mode 100644 tmp/t/10121.md create mode 100644 tmp/t/10122.md create mode 100644 tmp/t/10123.md create mode 100644 tmp/t/10124.md create mode 100644 tmp/t/10125.md create mode 100644 tmp/t/10126.md create mode 100644 tmp/t/10128.md create mode 100644 tmp/t/10129.md create mode 100644 tmp/t/10130.md create mode 100644 
tmp/t/10131.md create mode 100644 tmp/t/10132.md create mode 100644 tmp/t/10133.md create mode 100644 tmp/t/10134.md create mode 100644 tmp/t/10135.md create mode 100644 tmp/t/10136.md create mode 100644 tmp/t/10137.md create mode 100644 tmp/t/10138.md create mode 100644 tmp/t/10139.md create mode 100644 tmp/t/10140.md create mode 100644 tmp/t/10141.md create mode 100644 tmp/t/10142.md create mode 100644 tmp/t/10143.md create mode 100644 tmp/t/10144.md create mode 100644 tmp/t/10145.md create mode 100644 tmp/t/10146.md create mode 100644 tmp/t/10147.md create mode 100644 tmp/t/10148.md create mode 100644 tmp/t/10149.md create mode 100644 tmp/t/10150.md create mode 100644 tmp/t/10151.md create mode 100644 tmp/t/10152.md create mode 100644 tmp/t/10153.md create mode 100644 tmp/t/10154.md create mode 100644 tmp/t/10155.md create mode 100644 tmp/t/10156.md create mode 100644 tmp/t/10157.md create mode 100644 tmp/t/10158.md create mode 100644 tmp/t/10159.md create mode 100644 tmp/t/10160.md create mode 100644 tmp/t/10161.md create mode 100644 tmp/t/10162.md create mode 100644 tmp/t/10163.md create mode 100644 tmp/t/10164.md create mode 100644 tmp/t/10165.md create mode 100644 tmp/t/10166.md create mode 100644 tmp/t/10167.md create mode 100644 tmp/t/10168.md create mode 100644 tmp/t/10169.md create mode 100644 tmp/t/10170.md create mode 100644 tmp/t/10171.md create mode 100644 tmp/t/10172.md create mode 100644 tmp/t/10173.md create mode 100644 tmp/t/10174.md create mode 100644 tmp/t/10175.md create mode 100644 tmp/t/10176.md create mode 100644 tmp/t/10177.md create mode 100644 tmp/t/10178.md create mode 100644 tmp/t/10179.md create mode 100644 tmp/t/10180.md create mode 100644 tmp/t/10181.md create mode 100644 tmp/t/10182.md create mode 100644 tmp/t/10183.md create mode 100644 tmp/t/10184.md create mode 100644 tmp/t/10186.md create mode 100644 tmp/t/10187.md create mode 100644 tmp/t/10188.md create mode 100644 tmp/t/10189.md create mode 100644 tmp/t/10190.md create mode 100644 tmp/t/10191.md create mode 100644 tmp/t/10192.md create mode 100644 tmp/t/10193.md create mode 100644 tmp/t/10194.md create mode 100644 tmp/t/10195.md create mode 100644 tmp/t/10196.md create mode 100644 tmp/t/10197.md create mode 100644 tmp/t/10198.md create mode 100644 tmp/t/10199.md create mode 100644 tmp/t/10200.md create mode 100644 tmp/t/10201.md create mode 100644 tmp/t/10202.md create mode 100644 tmp/t/10203.md create mode 100644 tmp/t/10204.md create mode 100644 tmp/t/10205.md create mode 100644 tmp/t/10206.md create mode 100644 tmp/t/10207.md create mode 100644 tmp/t/10208.md create mode 100644 tmp/t/10209.md create mode 100644 tmp/t/10210.md create mode 100644 tmp/t/10211.md create mode 100644 tmp/t/10212.md create mode 100644 tmp/t/10213.md create mode 100644 tmp/t/10214.md create mode 100644 tmp/t/10215.md create mode 100644 tmp/t/10216.md create mode 100644 tmp/t/10217.md create mode 100644 tmp/t/10218.md create mode 100644 tmp/t/10219.md create mode 100644 tmp/t/10220.md create mode 100644 tmp/t/10221.md create mode 100644 tmp/t/10228.md create mode 100644 tmp/t/10229.md create mode 100644 tmp/t/10230.md create mode 100644 tmp/t/10231.md create mode 100644 tmp/t/10232.md create mode 100644 tmp/t/10233.md create mode 100644 tmp/t/10234.md create mode 100644 tmp/t/10235.md create mode 100644 tmp/t/10236.md create mode 100644 tmp/t/10237.md create mode 100644 tmp/t/10238.md create mode 100644 tmp/t/10239.md create mode 100644 tmp/t/10240.md create mode 100644 tmp/t/10241.md create mode 100644 tmp/t/10242.md 
create mode 100644 tmp/t/10243.md create mode 100644 tmp/t/10244.md create mode 100644 tmp/t/1033.md create mode 100644 tmp/t/1041.md create mode 100644 tmp/t/1051.md create mode 100644 tmp/t/1058.md create mode 100644 tmp/t/10583.md create mode 100644 tmp/t/10585.md create mode 100644 tmp/t/10586.md create mode 100644 tmp/t/10621.md create mode 100644 tmp/t/10622.md create mode 100644 tmp/t/10623.md create mode 100644 tmp/t/1063.md create mode 100644 tmp/t/10632.md create mode 100644 tmp/t/1065.md create mode 100644 tmp/t/1066.md create mode 100644 tmp/t/1073.md create mode 100644 tmp/t/10811.md create mode 100644 tmp/t/10813.md create mode 100644 tmp/t/1083.md create mode 100644 tmp/t/10835.md create mode 100644 tmp/t/10837.md create mode 100644 tmp/t/1084.md create mode 100644 tmp/t/1086.md create mode 100644 tmp/t/1087.md create mode 100644 tmp/t/1088.md create mode 100644 tmp/t/1093.md create mode 100644 tmp/t/1094.md create mode 100644 tmp/t/1095.md create mode 100644 tmp/t/1096.md create mode 100644 tmp/t/1097.md create mode 100644 tmp/t/1099.md create mode 100644 tmp/t/1100.md create mode 100644 tmp/t/11012.md create mode 100644 tmp/t/1102.md create mode 100644 tmp/t/11102.md create mode 100644 tmp/t/1111.md create mode 100644 tmp/t/1112.md create mode 100644 tmp/t/11125.md create mode 100644 tmp/t/11144.md create mode 100644 tmp/t/11148.md create mode 100644 tmp/t/11181.md create mode 100644 tmp/t/11182.md create mode 100644 tmp/t/11183.md create mode 100644 tmp/t/11184.md create mode 100644 tmp/t/112.md create mode 100644 tmp/t/11285.md create mode 100644 tmp/t/11290.md create mode 100644 tmp/t/11291.md create mode 100644 tmp/t/11298.md create mode 100644 tmp/t/11312.md create mode 100644 tmp/t/11313.md create mode 100644 tmp/t/11351.md create mode 100644 tmp/t/11413.md create mode 100644 tmp/t/11414.md create mode 100644 tmp/t/1145.md create mode 100644 tmp/t/1150.md create mode 100644 tmp/t/1155.md create mode 100644 tmp/t/1156.md create mode 100644 tmp/t/1157.md create mode 100644 tmp/t/1158.md create mode 100644 tmp/t/116.md create mode 100644 tmp/t/1162.md create mode 100644 tmp/t/1163.md create mode 100644 tmp/t/11672.md create mode 100644 tmp/t/1168.md create mode 100644 tmp/t/117.md create mode 100644 tmp/t/118.md create mode 100644 tmp/t/1184.md create mode 100644 tmp/t/1187.md create mode 100644 tmp/t/1194.md create mode 100644 tmp/t/11961.md create mode 100644 tmp/t/1199.md create mode 100644 tmp/t/11991.md create mode 100644 tmp/t/11998.md create mode 100644 tmp/t/12005.md create mode 100644 tmp/t/12012.md create mode 100644 tmp/t/12042.md create mode 100644 tmp/t/12128.md create mode 100644 tmp/t/12244.md create mode 100644 tmp/t/12246.md create mode 100644 tmp/t/12248.md create mode 100644 tmp/t/12281.md create mode 100644 tmp/t/12372.md create mode 100644 tmp/t/12435.md create mode 100644 tmp/t/12562.md create mode 100644 tmp/t/12588.md create mode 100644 tmp/t/12659.md create mode 100644 tmp/t/12676.md create mode 100644 tmp/t/12689.md create mode 100644 tmp/t/12690.md create mode 100644 tmp/t/12691.md create mode 100644 tmp/t/12731.md create mode 100644 tmp/t/12732.md create mode 100644 tmp/t/12733.md create mode 100644 tmp/t/12734.md create mode 100644 tmp/t/12818.md create mode 100644 tmp/t/13005.md create mode 100644 tmp/t/13019.md create mode 100644 tmp/t/13046.md create mode 100644 tmp/t/13070.md create mode 100644 tmp/t/13086.md create mode 100644 tmp/t/13089.md create mode 100644 tmp/t/13130.md create mode 100644 tmp/t/13132.md create mode 100644 
tmp/t/13137.md create mode 100644 tmp/t/13146.md create mode 100644 tmp/t/13249.md create mode 100644 tmp/t/13276.md create mode 100644 tmp/t/13400.md create mode 100644 tmp/t/13403.md create mode 100644 tmp/t/13541.md create mode 100644 tmp/t/13557.md create mode 100644 tmp/t/13778.md create mode 100644 tmp/t/13779.md create mode 100644 tmp/t/13780.md create mode 100644 tmp/t/13781.md create mode 100644 tmp/t/13782.md create mode 100644 tmp/t/13783.md create mode 100644 tmp/t/13784.md create mode 100644 tmp/t/13785.md create mode 100644 tmp/t/13788.md create mode 100644 tmp/t/13789.md create mode 100644 tmp/t/13899.md create mode 100644 tmp/t/13926.md create mode 100644 tmp/t/13927.md create mode 100644 tmp/t/13953.md create mode 100644 tmp/t/13990.md create mode 100644 tmp/t/14018.md create mode 100644 tmp/t/14043.md create mode 100644 tmp/t/14056.md create mode 100644 tmp/t/14367.md create mode 100644 tmp/t/14532.md create mode 100644 tmp/t/146.md create mode 100644 tmp/t/14612.md create mode 100644 tmp/t/14936.md create mode 100644 tmp/t/14937.md create mode 100644 tmp/t/14983.md create mode 100644 tmp/t/15010.md create mode 100644 tmp/t/15011.md create mode 100644 tmp/t/15012.md create mode 100644 tmp/t/15017.md create mode 100644 tmp/t/15018.md create mode 100644 tmp/t/15211.md create mode 100644 tmp/t/15212.md create mode 100644 tmp/t/154.md create mode 100644 tmp/t/15405.md create mode 100644 tmp/t/15406.md create mode 100644 tmp/t/15621.md create mode 100644 tmp/t/15652.md create mode 100644 tmp/t/15653.md create mode 100644 tmp/t/15654.md create mode 100644 tmp/t/15684.md create mode 100644 tmp/t/15752.md create mode 100644 tmp/t/15753.md create mode 100644 tmp/t/15754.md create mode 100644 tmp/t/15808.md create mode 100644 tmp/t/15809.md create mode 100644 tmp/t/15888.md create mode 100644 tmp/t/15889.md create mode 100644 tmp/t/15892.md create mode 100644 tmp/t/1593.md create mode 100644 tmp/t/1729.md create mode 100644 tmp/t/2203.md create mode 100644 tmp/t/2999.md create mode 100644 tmp/t/3301.md create mode 100644 tmp/t/3341.md create mode 100644 tmp/t/3352.md create mode 100644 tmp/t/3723.md create mode 100644 tmp/t/3784.md create mode 100644 tmp/t/4058.md create mode 100644 tmp/t/4138.md create mode 100644 tmp/t/4449.md create mode 100644 tmp/t/4450.md create mode 100644 tmp/t/4451.md create mode 100644 tmp/t/4453.md create mode 100644 tmp/t/4454.md create mode 100644 tmp/t/4455.md create mode 100644 tmp/t/4457.md create mode 100644 tmp/t/4458.md create mode 100644 tmp/t/4459.md create mode 100644 tmp/t/4460.md create mode 100644 tmp/t/4461.md create mode 100644 tmp/t/4462.md create mode 100644 tmp/t/4463.md create mode 100644 tmp/t/4466.md create mode 100644 tmp/t/4467.md create mode 100644 tmp/t/4468.md create mode 100644 tmp/t/4474.md create mode 100644 tmp/t/4554.md create mode 100644 tmp/t/4614.md create mode 100644 tmp/t/4652.md create mode 100644 tmp/t/4776.md create mode 100644 tmp/t/4796.md create mode 100644 tmp/t/4837.md create mode 100644 tmp/t/4898.md create mode 100644 tmp/t/4988.md create mode 100644 tmp/t/5064.md create mode 100644 tmp/t/5132.md create mode 100644 tmp/t/5188.md create mode 100644 tmp/t/5199.md create mode 100644 tmp/t/5202.md create mode 100644 tmp/t/5212.md create mode 100644 tmp/t/5213.md create mode 100644 tmp/t/5313.md create mode 100644 tmp/t/5329.md create mode 100644 tmp/t/5334.md create mode 100644 tmp/t/5337.md create mode 100644 tmp/t/5348.md create mode 100644 tmp/t/5358.md create mode 100644 tmp/t/5364.md create mode 100644 
tmp/t/5396.md create mode 100644 tmp/t/5451.md create mode 100644 tmp/t/5452.md create mode 100644 tmp/t/5454.md create mode 100644 tmp/t/5455.md create mode 100644 tmp/t/5456.md create mode 100644 tmp/t/5457.md create mode 100644 tmp/t/5459.md create mode 100644 tmp/t/5460.md create mode 100644 tmp/t/5461.md create mode 100644 tmp/t/5462.md create mode 100644 tmp/t/5464.md create mode 100644 tmp/t/5465.md create mode 100644 tmp/t/5466.md create mode 100644 tmp/t/5471.md create mode 100644 tmp/t/5476.md create mode 100644 tmp/t/5484.md create mode 100644 tmp/t/5520.md create mode 100644 tmp/t/5521.md create mode 100644 tmp/t/5522.md create mode 100644 tmp/t/5523.md create mode 100644 tmp/t/5524.md create mode 100644 tmp/t/5525.md create mode 100644 tmp/t/5527.md create mode 100644 tmp/t/5528.md create mode 100644 tmp/t/5547.md create mode 100644 tmp/t/5548.md create mode 100644 tmp/t/5593.md create mode 100644 tmp/t/5597.md create mode 100644 tmp/t/5645.md create mode 100644 tmp/t/5657.md create mode 100644 tmp/t/5666.md create mode 100644 tmp/t/5667.md create mode 100644 tmp/t/5668.md create mode 100644 tmp/t/5679.md create mode 100644 tmp/t/5683.md create mode 100644 tmp/t/5732.md create mode 100644 tmp/t/5740.md create mode 100644 tmp/t/5780.md create mode 100644 tmp/t/5781.md create mode 100644 tmp/t/5784.md create mode 100644 tmp/t/5886.md create mode 100644 tmp/t/5891.md create mode 100644 tmp/t/5892.md create mode 100644 tmp/t/5896.md create mode 100644 tmp/t/5938.md create mode 100644 tmp/t/5967.md create mode 100644 tmp/t/6006.md create mode 100644 tmp/t/6116.md create mode 100644 tmp/t/6119.md create mode 100644 tmp/t/6120.md create mode 100644 tmp/t/6121.md create mode 100644 tmp/t/6122.md create mode 100644 tmp/t/6123.md create mode 100644 tmp/t/6124.md create mode 100644 tmp/t/6125.md create mode 100644 tmp/t/6126.md create mode 100644 tmp/t/6127.md create mode 100644 tmp/t/6128.md create mode 100644 tmp/t/6129.md create mode 100644 tmp/t/6131.md create mode 100644 tmp/t/6132.md create mode 100644 tmp/t/6133.md create mode 100644 tmp/t/6134.md create mode 100644 tmp/t/6135.md create mode 100644 tmp/t/6136.md create mode 100644 tmp/t/6137.md create mode 100644 tmp/t/6138.md create mode 100644 tmp/t/6139.md create mode 100644 tmp/t/6140.md create mode 100644 tmp/t/6141.md create mode 100644 tmp/t/6142.md create mode 100644 tmp/t/6161.md create mode 100644 tmp/t/6174.md create mode 100644 tmp/t/6184.md create mode 100644 tmp/t/6185.md create mode 100644 tmp/t/6186.md create mode 100644 tmp/t/6187.md create mode 100644 tmp/t/6208.md create mode 100644 tmp/t/6209.md create mode 100644 tmp/t/6234.md create mode 100644 tmp/t/6361.md create mode 100644 tmp/t/6399.md create mode 100644 tmp/t/6450.md create mode 100644 tmp/t/6463.md create mode 100644 tmp/t/6464.md create mode 100644 tmp/t/6465.md create mode 100644 tmp/t/6466.md create mode 100644 tmp/t/6467.md create mode 100644 tmp/t/6468.md create mode 100644 tmp/t/6469.md create mode 100644 tmp/t/6470.md create mode 100644 tmp/t/6471.md create mode 100644 tmp/t/6472.md create mode 100644 tmp/t/6473.md create mode 100644 tmp/t/6474.md create mode 100644 tmp/t/6475.md create mode 100644 tmp/t/6476.md create mode 100644 tmp/t/6477.md create mode 100644 tmp/t/6478.md create mode 100644 tmp/t/6479.md create mode 100644 tmp/t/6480.md create mode 100644 tmp/t/6481.md create mode 100644 tmp/t/6482.md create mode 100644 tmp/t/6483.md create mode 100644 tmp/t/6484.md create mode 100644 tmp/t/6485.md create mode 100644 tmp/t/6488.md create 
mode 100644 tmp/t/6498.md create mode 100644 tmp/t/6499.md create mode 100644 tmp/t/6501.md create mode 100644 tmp/t/6559.md create mode 100644 tmp/t/6562.md create mode 100644 tmp/t/6577.md create mode 100644 tmp/t/6615.md create mode 100644 tmp/t/6636.md create mode 100644 tmp/t/6640.md create mode 100644 tmp/t/6641.md create mode 100644 tmp/t/6657.md create mode 100644 tmp/t/6659.md create mode 100644 tmp/t/6663.md create mode 100644 tmp/t/6664.md create mode 100644 tmp/t/6665.md create mode 100644 tmp/t/6680.md create mode 100644 tmp/t/6695.md create mode 100644 tmp/t/6702.md create mode 100644 tmp/t/6749.md create mode 100644 tmp/t/6835.md create mode 100644 tmp/t/6864.md create mode 100644 tmp/t/6877.md create mode 100644 tmp/t/6894.md create mode 100644 tmp/t/6930.md create mode 100644 tmp/t/6973.md create mode 100644 tmp/t/6974.md create mode 100644 tmp/t/7059.md create mode 100644 tmp/t/7064.md create mode 100644 tmp/t/7065.md create mode 100644 tmp/t/7066.md create mode 100644 tmp/t/7068.md create mode 100644 tmp/t/7113.md create mode 100644 tmp/t/7126.md create mode 100644 tmp/t/7128.md create mode 100644 tmp/t/7129.md create mode 100644 tmp/t/7130.md create mode 100644 tmp/t/7131.md create mode 100644 tmp/t/7132.md create mode 100644 tmp/t/7133.md create mode 100644 tmp/t/7134.md create mode 100644 tmp/t/7148.md create mode 100644 tmp/t/7150.md create mode 100644 tmp/t/7152.md create mode 100644 tmp/t/7153.md create mode 100644 tmp/t/7154.md create mode 100644 tmp/t/7157.md create mode 100644 tmp/t/7171.md create mode 100644 tmp/t/7180.md create mode 100644 tmp/t/7181.md create mode 100644 tmp/t/7183.md create mode 100644 tmp/t/7184.md create mode 100644 tmp/t/7186.md create mode 100644 tmp/t/7191.md create mode 100644 tmp/t/7192.md create mode 100644 tmp/t/7193.md create mode 100644 tmp/t/7194.md create mode 100644 tmp/t/7195.md create mode 100644 tmp/t/7218.md create mode 100644 tmp/t/7219.md create mode 100644 tmp/t/7221.md create mode 100644 tmp/t/7286.md create mode 100644 tmp/t/7287.md create mode 100644 tmp/t/7310.md create mode 100644 tmp/t/7311.md create mode 100644 tmp/t/7319.md create mode 100644 tmp/t/7329.md create mode 100644 tmp/t/7330.md create mode 100644 tmp/t/7331.md create mode 100644 tmp/t/7332.md create mode 100644 tmp/t/7379.md create mode 100644 tmp/t/7388.md create mode 100644 tmp/t/7401.md create mode 100644 tmp/t/7406.md create mode 100644 tmp/t/7414.md create mode 100644 tmp/t/7433.md create mode 100644 tmp/t/7530.md create mode 100644 tmp/t/7592.md create mode 100644 tmp/t/7814.md create mode 100644 tmp/t/7933.md create mode 100644 tmp/t/7934.md create mode 100644 tmp/t/8047.md create mode 100644 tmp/t/8701.md create mode 100644 tmp/t/8702.md create mode 100644 tmp/t/8723.md create mode 100644 tmp/t/8793.md create mode 100644 tmp/t/8819.md create mode 100644 tmp/t/8873.md create mode 100644 tmp/t/8874.md create mode 100644 tmp/t/8890.md create mode 100644 tmp/t/8896.md create mode 100644 tmp/t/9151.md create mode 100644 tmp/t/9836.md create mode 100644 tmp/t/9839.md diff --git a/tmp/t/10045.md b/tmp/t/10045.md new file mode 100644 index 000000000..5728621b0 --- /dev/null +++ b/tmp/t/10045.md @@ -0,0 +1,207 @@ +system | 2024-09-16 15:52:45 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +# Index +0. [actions](/t/10069) +1. [add-cloud](/t/10162) +2. [add-credential](/t/10136) +3. [add-k8s](/t/10049) +4. [add-machine](/t/10071) +5. 
[add-model](/t/10145) +6. [add-secret](/t/11144) +7. [add-secret-backend](/t/10062) +8. [add-space](/t/10117) +9. [add-ssh-key](/t/10238) +10. [add-storage](/t/10159) +11. [add-unit](/t/10141) +12. [add-user](/t/10193) +13. [agree](/t/10161) +14. [agreements](/t/10064) +15. [attach-resource](/t/10124) +16. [attach-storage](/t/10126) +17. [autoload-credentials](/t/10230) +18. [bind](/t/10244) +19. [bootstrap](/t/10132) +20. [cancel-task](/t/10053) +21. [change-user-password](/t/10118) +22. [charm-resources](/t/10099) +23. [clouds](/t/10182) +24. [collect-metrics](/t/10085) +25. [config](/t/10139) +26. [constraints](/t/10060) +27. [consume](/t/10213) +28. [controller-config](/t/10237) +29. [controllers](/t/10152) +30. [create-backup](/t/10197) +31. [create-storage-pool](/t/10093) +32. [credentials](/t/10054) +33. [dashboard](/t/10091) +34. [debug-code](/t/10048) +35. [debug-hook](/t/10229) +36. [debug-hooks](/t/10242) +37. [debug-log](/t/10116) +38. [default-credential](/t/10055) +39. [default-region](/t/10082) +40. [deploy](/t/10074) +41. [destroy-controller](/t/10113) +42. [destroy-model](/t/10190) +43. [detach-storage](/t/10089) +44. [diff-bundle](/t/10142) +45. [disable-command](/t/10205) +46. [disable-user](/t/10198) +47. [disabled-commands](/t/10220) +49. [download](/t/10134) +50. [download-backup](/t/10240) +51. [enable-command](/t/10111) +52. [enable-destroy-controller](/t/10086) +53. [enable-ha](/t/10206) +54. [enable-user](/t/10241) +55. [exec](/t/10195) +56. [export-bundle](/t/10046) +57. [expose](/t/10109) +58. [find](/t/10187) +59. [find-offers](/t/10097) +60. [firewall-rules](/t/10061) +61. [grant](/t/10196) +62. [grant-cloud](/t/10164) +63. [grant-secret](/t/11290) +65. [help-tool](/t/10050) +66. [import-filesystem](/t/10047) +67. [import-ssh-key](/t/10167) +68. [info](/t/10103) +69. [integrate](/t/10207) +70. [kill-controller](/t/10233) +71. [list-actions](/t/10232) +72. [list-agreements](/t/10200) +73. [list-charm-resources](/t/10234) +74. [list-clouds](/t/10199) +75. [list-controllers](/t/10079) +76. [list-credentials](/t/10150) +77. [list-disabled-commands](/t/10094) +78. [list-firewall-rules](/t/10114) +79. [list-machines](/t/10181) +80. [list-models](/t/10107) +81. [list-offers](/t/10170) +82. [list-operations](/t/10158) +83. [list-payloads](/t/10070) +84. [list-regions](/t/10131) +85. [list-resources](/t/10056) +86. [list-secret-backends](/t/10072) +87. [list-secrets](/t/10108) +88. [list-spaces](/t/10100) +89. [list-ssh-keys](/t/10133) +90. [list-storage](/t/10138) +91. [list-storage-pools](/t/10211) +92. [list-subnets](/t/10076) +93. [list-users](/t/10154) +94. [login](/t/10157) +95. [logout](/t/10183) +96. [machines](/t/10078) +97. [metrics](/t/10143) +98. [migrate](/t/10121) +99. [model-config](/t/10096) +100. [model-constraints](/t/10137) +101. [model-default](/t/10178) +102. [model-defaults](/t/10057) +103. [models](/t/10090) +104. [move-to-space](/t/10192) +105. [offer](/t/10080) +106. [offers](/t/10051) +107. [operations](/t/10203) +108. [payloads](/t/10120) +109. [refresh](/t/10189) +110. [regions](/t/10112) +111. [register](/t/10160) +112. [relate](/t/10140) +113. [reload-spaces](/t/10063) +114. [remove-application](/t/10067) +115. [remove-cloud](/t/10216) +116. [remove-credential](/t/10201) +117. [remove-k8s](/t/10098) +118. [remove-machine](/t/10163) +119. [remove-offer](/t/10235) +120. [remove-relation](/t/10110) +121. [remove-saas](/t/10087) +122. [remove-secret](/t/11414) +123. [remove-secret-backend](/t/10194) +124. [remove-space](/t/10084) +125. 
[remove-ssh-key](/t/10119) +126. [remove-storage](/t/10066) +127. [remove-storage-pool](/t/10068) +128. [remove-unit](/t/10125) +129. [remove-user](/t/10130) +130. [rename-space](/t/10135) +131. [resolve](/t/10146) +132. [resolved](/t/10144) +133. [resources](/t/10218) +134. [resume-relation](/t/10123) +135. [retry-provisioning](/t/10209) +136. [revoke](/t/10077) +137. [revoke-cloud](/t/10104) +138. [revoke-secret](/t/11291) +139. [run](/t/10052) +140. [scale-application](/t/10171) +141. [scp](/t/10128) +142. [secret-backends](/t/10149) +143. [secrets](/t/10214) +144. [set-application-base](/t/10174) +145. [set-constraints](/t/10210) +146. [set-credential](/t/10169) +147. [set-default-credentials](/t/10180) +148. [set-default-region](/t/10092) +149. [set-firewall-rule](/t/10151) +150. [set-meter-status](/t/10166) +151. [set-model-constraints](/t/10208) +152. [show-action](/t/10219) +153. [show-application](/t/10177) +154. [show-cloud](/t/10215) +155. [show-controller](/t/10156) +156. [show-credential](/t/10105) +157. [show-credentials](/t/10147) +158. [show-machine](/t/10243) +159. [show-model](/t/10191) +160. [show-offer](/t/10168) +161. [show-operation](/t/10083) +162. [show-secret](/t/10172) +163. [show-secret-backend](/t/10059) +164. [show-space](/t/10095) +165. [show-status-log](/t/10204) +166. [show-storage](/t/10184) +167. [show-task](/t/10129) +168. [show-unit](/t/10239) +169. [show-user](/t/10212) +170. [spaces](/t/10236) +171. [ssh](/t/10153) +172. [ssh-keys](/t/10202) +173. [status](/t/10173) +174. [storage](/t/10075) +175. [storage-pools](/t/10228) +176. [subnets](/t/10186) +177. [suspend-relation](/t/10179) +178. [switch](/t/10102) +179. [sync-agent-binary](/t/10106) +180. [trust](/t/10088) +181. [unexpose](/t/10221) +182. [unregister](/t/10165) +183. [update-cloud](/t/10081) +184. [update-credential](/t/10065) +185. [update-credentials](/t/10231) +186. [update-k8s](/t/10155) +187. [update-public-clouds](/t/10115) +188. [update-secret](/t/11413) +189. [update-secret-backend](/t/10176) +190. [update-storage-pool](/t/10217) +191. [upgrade-controller](/t/10058) +192. [upgrade-machine](/t/10188) +193. [upgrade-model](/t/10073) +194. [users](/t/10175) +196. [wait-for](/t/10122) +197. [whoami](/t/10148) +--- + +------------------------- + diff --git a/tmp/t/10046.md b/tmp/t/10046.md new file mode 100644 index 000000000..88607e0c4 --- /dev/null +++ b/tmp/t/10046.md @@ -0,0 +1,44 @@ +system | 2024-09-16 15:57:20 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + + +## Summary +Exports the current model configuration as a reusable bundle. + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `--filename` | | Bundle file | +| `--include-charm-defaults` | false | Whether to include charm config default values in the exported bundle | +| `--include-series` | false | Comaptibility option. Set to include series in the bundle alongside bases | +| `-m`, `--model` | | Model to operate in. Accepts [<controller name>:]<model name>|<model UUID> | + +## Examples + + juju export-bundle + juju export-bundle --filename mymodel.yaml + juju export-bundle --include-charm-defaults + juju export-bundle --include-series + + +## Details + +Exports the current model configuration as a reusable bundle. + +If --filename is not used, the configuration is printed to stdout. 
+ --filename specifies an output file. + +If --include-series is used, the exported bundle will include the OS series + alongside bases. This should be used as a compatibility option for older + versions of Juju before bases were added. + + +--- + +------------------------- + diff --git a/tmp/t/10047.md b/tmp/t/10047.md new file mode 100644 index 000000000..bfdb73187 --- /dev/null +++ b/tmp/t/10047.md @@ -0,0 +1,57 @@ +system | 2024-09-16 15:55:53 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [storage](/t/10075) + +## Summary +Imports a filesystem into the model. + +## Usage +```juju import-filesystem [options] <storage-provider> <provider-id> <storage-name> + +``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `-m`, `--model` | | Model to operate in. Accepts [<controller name>:]<model name>|<model UUID> | + +## Examples + +Import an existing filesystem backed by an EBS volume, +and assign it the "pgdata" storage name. Juju will +associate a storage instance ID like "pgdata/0" with +the volume and filesystem contained within. + + juju import-filesystem ebs vol-123456 pgdata + + + +## Details + +Import an existing filesystem into the model. This will lead to the model +taking ownership of the storage, so you must take care not to import storage +that is in use by another Juju model. + +To import a filesystem, you must specify three things: + + - the storage provider which manages the storage, and with + which the storage will be associated + - the storage provider ID for the filesystem, or + volume that backs the filesystem + - the storage name to assign to the filesystem, + corresponding to the storage name used by a charm + +Once a filesystem is imported, Juju will create an associated storage +instance using the given storage name. + + +--- + +------------------------- + diff --git a/tmp/t/10048.md b/tmp/t/10048.md new file mode 100644 index 000000000..2ba22eb1a --- /dev/null +++ b/tmp/t/10048.md @@ -0,0 +1,76 @@ +system | 2024-09-16 15:57:41 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [ssh](/t/10153), [debug-hooks](/t/10242) + +## Summary +Launch a tmux session to debug hooks and/or actions. + +## Usage +```juju debug-code [options] <unit name> [hook or action names]``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `--at` | all | will set the JUJU_DEBUG_AT environment variable to this value, which will then be interpreted by the charm for where you want to stop, defaults to 'all' | +| `--container` | | the container name of the target pod | +| `-m`, `--model` | | Model to operate in. 
Accepts [<controller name>:]<model name>|<model UUID> | +| `--no-host-key-checks` | false | Skip host key checking (INSECURE) | +| `--proxy` | false | Proxy through the API server | +| `--pty` | <auto> | Enable pseudo-tty allocation | +| `--remote` | false | Target on the workload or operator pod (k8s-only) | + +## Examples + +Debug all hooks and actions of unit '0': + + juju debug-code mysql/0 + +Debug all hooks and actions of the leader: + + juju debug-code mysql/leader + +Debug the 'config-changed' hook of unit '1': + + juju debug-code mysql/1 config-changed + +Debug the 'pull-site' action and 'update-status' hook: + + juju debug-code hello-kubecon/0 pull-site update-status + +Debug the 'leader-elected' hook and set 'JUJU_DEBUG_AT' variable to 'hook': + + juju debug-code --at=hook mysql/0 leader-elected + + +## Details + +The command launches a tmux session that will intercept matching hooks and/or +actions. + +Initially, the tmux session will take you to '/var/lib/juju' or '/home/ubuntu'. +As soon as a matching hook or action is fired, the hook or action is executed +and the JUJU_DEBUG_AT variable is set. Charms implementing support for this +should set debug breakpoints based on the environment variable. Charms written +with the Charmed Operator Framework Ops automatically provide support for this. + +For more details on debugging charm code, see the charm SDK documentation. + +Valid unit identifiers are: + a standard unit ID, such as mysql/0 or; + leader syntax of the form <application>/leader, such as mysql/leader. + +If no hook or action is specified, all hooks and actions will be intercepted. + +See the "juju help ssh" for information about SSH related options +accepted by the debug-code command. + + +--- + +------------------------- + diff --git a/tmp/t/10049.md b/tmp/t/10049.md new file mode 100644 index 000000000..5af69b79c --- /dev/null +++ b/tmp/t/10049.md @@ -0,0 +1,162 @@ +system | 2024-09-16 15:52:51 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [remove-k8s](/t/10098) + +## Summary +Adds a k8s endpoint and credential to Juju. 
+ +## Usage +```juju add-k8s [options] ``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `-c`, `--controller` | | Controller to operate in | +| `--client` | false | Client operation | +| `--cloud` | | k8s cluster cloud | +| `--cluster-name` | | Specify the k8s cluster to import | +| `--context-name` | | Specify the k8s context to import | +| `--credential` | | the credential to use when accessing the cluster | +| `--region` | | k8s cluster region or cloud/region | +| `--skip-storage` | false | used when adding a cluster that doesn't have storage | +| `--storage` | | k8s storage class for workload storage | + +## Examples + +When your kubeconfig file is in the default location: + + juju add-k8s myk8scloud + juju add-k8s myk8scloud --client + juju add-k8s myk8scloud --controller mycontroller + juju add-k8s --context-name mycontext myk8scloud + juju add-k8s myk8scloud --region cloudNameOrCloudType/someregion + juju add-k8s myk8scloud --cloud cloudNameOrCloudType + juju add-k8s myk8scloud --cloud cloudNameOrCloudType --region=someregion + juju add-k8s myk8scloud --cloud cloudNameOrCloudType --storage mystorageclass + +To add a Kubernetes cloud using data from your kubeconfig file, when this file is not in the default location: + + KUBECONFIG=path-to-kubeconfig-file juju add-k8s myk8scloud --cluster-name=my_cluster_name + +To add a Kubernetes cloud using data from kubectl, when your kubeconfig file is not in the default location: + + kubectl config view --raw | juju add-k8s myk8scloud --cluster-name=my_cluster_name + + + +## Details + +Creates a user-defined cloud based on a k8s cluster. + +The new k8s cloud can then be used to bootstrap into, or it +can be added to an existing controller. + +Use --controller option to add k8s cloud to a controller. +Use --client option to add k8s cloud to this client. + +Specify a non default kubeconfig file location using $KUBECONFIG +environment variable or pipe in file content from stdin. + +The config file can contain definitions for different k8s clusters, +use --cluster-name to pick which one to use. +It's also possible to select a context by name using --context-name. + +When running add-k8s the underlying cloud/region hosting the cluster needs to be +detected to enable storage to be correctly configured. If the cloud/region cannot +be detected automatically, use either + --cloud <cloudType|cloudName> to specify the host cloud +or + --region <cloudType|cloudName>/<someregion> to specify the host + cloud type and region. + +Region is strictly necessary only when adding a k8s cluster to a JAAS controller. +When using a standalone Juju controller, usually just --cloud is required. + +Once Juju is aware of the underlying cloud type, it looks for a suitably configured +storage class to provide operator and workload storage. If none is found, use +of the --storage option is required so that Juju will create a storage class +with the specified name. + +If the cluster does not have a storage provisioning capability, use the +--skip-storage option to add the cluster without any workload storage configured. + + + +--- + +------------------------- + +sed-i | 2024-04-11 21:02:21 UTC | #2 + +[quote="system, post:1, topic:10049"] +Use --controller option to add k8s cloud to a controller. Use --client option to add k8s cloud to this client. +[/quote] + +Could you please elaborate on what it means to "add k8s cloud to a controller"? 
+I thought it's impossible to bootstrap a controller without a cloud, and that a controller can have only one cloud associated with it. + +------------------------- + +tmihoc | 2024-04-12 05:11:36 UTC | #3 + +Adding a cloud (whether a machine cloud, via `add-cloud`, or a Kubernetes cloud, via `add-K8s`) to a controller (by using the `--controller` flag) results in what is called a 'multi-cloud' controller. It's a possibility that can be used to minimize the cloud resources used to maintain a controller plus, in the case of adding a Kubernetes cloud to another cloud, to take advantage, e.g., of the fact that a controller on a machine cloud can be made highly available whereas a controller on a Kubernetes cloud currently cannot. We mention this in our How to guide on how to add a cloud. + +------------------------- + +birru2 | 2024-10-07 10:06:43 UTC | #4 + +It seems it is not working when we have a config file for a broken microk8s cluster in the ".kube/config". + +It is still trying to communicate with the API server of the broken microk8s cluster: + +``` +$ KUBECONFIG=new-microk8s.yaml juju add-k8s microk8s-new --debug +08:29:07 INFO juju.cmd supercommand.go:56 running juju [3.5.4 31b4b0914740b84ad8166993635ac797a44276de gc go1.23.1] +08:29:07 DEBUG juju.cmd supercommand.go:57 args: []string{"/snap/juju/28520/bin/juju", "add-k8s", "microk8s-new", "--debug"} +08:29:07 DEBUG juju.environs.tools build.go:123 looking for: /snap/juju/28520/bin/juju +08:29:07 DEBUG juju.environs.tools versionfile.go:54 looking for sha256 86e52c09e852fb74e18905d90385b53e23af6533a868e301bd717cbbb8bd01da +08:29:07 DEBUG juju.kubernetes.provider detectcloud.go:33 failed to query local microk8s: "/var/snap/juju/28520/microk8s/credentials/client.config" does not exist: juju "3.5.4" can only work with strictly confined microk8s +ERROR making juju admin credentials in cluster: ensuring cluster role "juju-credential-1de4d752" in namespace "kube-system": Get "https://10.149.16.44:16443/apis/rbac.authorization.k8s.io/v1/clusterroles/juju-credential-1de4d752": dial tcp 10.149.16.44:16443: connect: no route to host +08:29:10 DEBUG cmd supercommand.go:549 error stack: +Get "https://10.149.16.44:16443/apis/rbac.authorization.k8s.io/v1/clusterroles/juju-credential-1de4d752": dial tcp 10.149.16.44:16443: connect: no route to host +github.com/juju/juju/caas/kubernetes/clientconfig.getOrCreateClusterRole:167: +github.com/juju/juju/caas/kubernetes/clientconfig.ensureJujuAdminServiceAccount:88: ensuring cluster role "juju-credential-1de4d752" in namespace "kube-system" +github.com/juju/juju/cmd/juju/caas.(*AddCAASCommand).Run:509: making juju admin credentials in cluster +``` + +In the new-microk8s.yaml file I have the server address as "https://10.149.16.72:16443": + +``` +$ cat new-microk8s.yaml | grep server + server: https://10.149.16.72:16443 +``` + +It might be related to this one: https://github.com/canonical/bundle-kubeflow/issues/830 + +But I could not add the new cloud after implementing the workaround mentioned [here](https://github.com/canonical/bundle-kubeflow/issues/830#issuecomment-2241472145) + +I am getting the following error: + +``` +$ KUBECONFIG=new-microk8s.yaml juju add-k8s new-test --debug +10:06:12 INFO juju.cmd supercommand.go:56 running juju [3.5.4 31b4b0914740b84ad8166993635ac797a44276de gc go1.23.1] +10:06:12 DEBUG juju.cmd supercommand.go:57 args: []string{"/snap/juju/28520/bin/juju", "add-k8s", "new-test", "--debug"} +10:06:12 DEBUG juju.environs.tools build.go:123 looking for: 
/snap/juju/28520/bin/juju +10:06:13 DEBUG juju.environs.tools versionfile.go:54 looking for sha256 86e52c09e852fb74e18905d90385b53e23af6533a868e301bd717cbbb8bd01da +10:06:13 DEBUG juju.kubernetes.provider detectcloud.go:33 failed to query local microk8s: microk8s is not installed: kubernetes cluster "microk8s-cluster" not found not found +ERROR making juju admin credentials in cluster: ensuring cluster role "juju-credential-bd8efe10" in namespace "kube-system": Unauthorized +10:06:13 DEBUG cmd supercommand.go:549 error stack: +Unauthorized +github.com/juju/juju/caas/kubernetes/clientconfig.getOrCreateClusterRole:167: +github.com/juju/juju/caas/kubernetes/clientconfig.ensureJujuAdminServiceAccount:88: ensuring cluster role "juju-credential-bd8efe10" in namespace "kube-system" +github.com/juju/juju/cmd/juju/caas.(*AddCAASCommand).Run:509: making juju admin credentials in cluster +``` + +------------------------- + diff --git a/tmp/t/10050.md b/tmp/t/10050.md new file mode 100644 index 000000000..4bee8692e --- /dev/null +++ b/tmp/t/10050.md @@ -0,0 +1,84 @@ +system | 2024-09-16 15:54:56 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [help](/t/10185) + +## Summary +Show help on a Juju charm hook tool. + +## Usage +```juju help-tool [options] [tool]``` + +## Examples + +For help on a specific tool, supply the name of that tool, for example: + + juju help-tool unit-get + + +## Details + +Juju charms can access a series of built-in helpers called 'hook-tools'. +These are useful for the charm to be able to inspect its running environment. +Currently available charm hook tools are: + + action-fail Set action fail status with message. + action-get Get action parameters. + action-log Record a progress message for the current action. + action-set Set action results. + add-metric Add metrics. + application-version-set Specify which version of the application is deployed. + close-port Register a request to close a port or port range. + config-get Print application configuration. + credential-get Access cloud credentials. + goal-state Print the status of the charm's peers and related units. + is-leader Print application leadership status. + juju-log Write a message to the juju log. + juju-reboot Reboot the host machine. + k8s-raw-get Get k8s raw spec information. + k8s-raw-set Set k8s raw spec information. + k8s-spec-get Get k8s spec information. + k8s-spec-set Set k8s spec information. + leader-get Print application leadership settings. + leader-set Write application leadership settings. + network-get Get network config. + open-port Register a request to open a port or port range. + opened-ports List all ports or port ranges opened by the unit. + payload-register Register a charm payload with Juju. + payload-status-set Update the status of a payload. + payload-unregister Stop tracking a payload. + pod-spec-get Get k8s spec information. (deprecated) + pod-spec-set Set k8s spec information. (deprecated) + relation-get Get relation settings. + relation-ids List all relation IDs for the given endpoint. + relation-list List relation units. + relation-set Set relation settings. + resource-get Get the path to the locally cached resource file. + secret-add Add a new secret. + secret-get Get the content of a secret. + secret-grant Grant access to a secret. + secret-ids Print secret IDs. + secret-info-get Get a secret's metadata info. + secret-remove Remove an existing secret. 
+ secret-revoke Revoke access to a secret. + secret-set Update an existing secret. + state-delete Delete server-side-state key value pairs. + state-get Print server-side-state value. + state-set Set server-side-state values. + status-get Print status information. + status-set Set status information. + storage-add Add storage instances. + storage-get Print information for the storage instance with the specified ID. + storage-list List storage attached to the unit. + unit-get Print public-address or private-address. + + + +--- + +------------------------- + diff --git a/tmp/t/10051.md b/tmp/t/10051.md new file mode 100644 index 000000000..d1efdb990 --- /dev/null +++ b/tmp/t/10051.md @@ -0,0 +1,65 @@ +system | 2024-09-16 15:56:57 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [find-offers](/t/10097), [show-offer](/t/10168) + +## Summary +Lists shared endpoints. + +## Usage +```juju offers [options] []``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `--active-only` | false | only return results where the offer is in use | +| `--allowed-consumer` | | return results where the user is allowed to consume the offer | +| `--application` | | return results matching the application | +| `--connected-user` | | return results where the user has a connection to the offer | +| `--format` | tabular | Specify output format (json|summary|tabular|yaml) | +| `--interface` | | return results matching the interface name | +| `-m`, `--model` | | Model to operate in. Accepts [<controller name>:]<model name>|<model UUID> | +| `-o`, `--output` | | Specify an output file | + +## Examples + + juju offers + juju offers -m model + juju offers --interface db2 + juju offers --application mysql + juju offers --connected-user fred + juju offers --allowed-consumer mary + juju offers hosted-mysql + juju offers hosted-mysql --active-only + + +## Details + +List information about applications' endpoints that have been shared and who is connected. + +The default tabular output shows each user connected (relating to) the offer, and the +relation id of the relation. + +The summary output shows one row per offer, with a count of active/total relations. + +The YAML output shows additional information about the source of connections, including +the source model UUID. + +The output can be filtered by: + - interface: the interface name of the endpoint + - application: the name of the offered application + - connected user: the name of a user who has a relation to the offer + - allowed consumer: the name of a user allowed to consume the offer + - active only: only show offers which are in use (are related to) + + + +--- + +------------------------- + diff --git a/tmp/t/10052.md b/tmp/t/10052.md new file mode 100644 index 000000000..b2277132c --- /dev/null +++ b/tmp/t/10052.md @@ -0,0 +1,91 @@ +system | 2024-09-16 15:55:24 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [operations](/t/10203), [show-operation](/t/10083), [show-task](/t/10129) + +## Summary +Run an action on a specified unit. + +## Usage +```juju run [options] [ ...] [= [[. 
...]=]]```
+
+### Options
+| Flag | Default | Usage |
+| --- | --- | --- |
+| `-B`, `--no-browser-login` | false | Do not use web browser for authentication |
+| `--background` | false | Run the task in the background |
+| `--color` | false | Use ANSI color codes in output |
+| `--format` | plain | Specify output format (json|plain|yaml) |
+| `-m`, `--model` | | Model to operate in. Accepts [<controller name>:]<model name>|<model UUID> |
+| `--no-color` | false | Disable ANSI color codes in output |
+| `-o`, `--output` | | Specify an output file |
+| `--params` | | Path to yaml-formatted params file |
+| `--string-args` | false | Use raw string values of CLI args |
+| `--utc` | false | Show times in UTC |
+| `--wait` | 0s | Maximum wait time for a task to complete |
+
+## Examples
+
+    juju run mysql/3 backup --background
+    juju run mysql/3 backup --wait=2m
+    juju run mysql/3 backup --format yaml
+    juju run mysql/3 backup --utc
+    juju run mysql/3 backup
+    juju run mysql/leader backup
+    juju show-operation <ID>
+    juju run mysql/3 backup --params parameters.yml
+    juju run mysql/3 backup out=out.tar.bz2 file.kind=xz file.quality=high
+    juju run mysql/3 backup --params p.yml file.kind=xz file.quality=high
+    juju run sleeper/0 pause time=1000
+    juju run sleeper/0 pause --string-args time=1000
+
+
+## Details
+
+Run a charm action for execution on the given unit(s), with a given set of params.
+An ID is returned for use with 'juju show-operation <ID>'.
+
+An action executed on a given unit becomes a task with an ID that can be
+used with 'juju show-task <ID>'.
+
+Running an action returns the overall operation ID as well as the individual
+task ID(s) for each unit.
+
+To queue an action to be run in the background without waiting for it to finish,
+use the --background option.
+
+To set the maximum time to wait for an action to complete, use the --wait option.
+
+By default, a single action will output its failure message if the action fails,
+followed by any results set by the action. For multiple actions, each action's
+results will be printed with the action id and action status. To see more detailed
+information about run timings etc., use --format yaml.
+
+Valid unit identifiers are:
+  a standard unit ID, such as mysql/0; or
+  leader syntax of the form <application>/leader, such as mysql/leader.
+
+If the leader syntax is used, the leader unit for the application will be
+resolved before the action is enqueued.
+
+Params are validated according to the charm for the unit's application. The
+valid params can be seen using "juju actions <application> --schema".
+Params may be in a yaml file which is passed with the --params option, or they
+may be specified by a key.key.key...=value format (see examples below).
+
+Params given in the CLI invocation will be parsed as YAML unless the
+--string-args option is set. This can be helpful for values such as 'y', which
+is a boolean true in YAML.
+
+If --params is passed, along with key.key...=value explicit arguments, the
+explicit arguments will override the parameter file.
+
+
+---
+
+-------------------------
+
diff --git a/tmp/t/10053.md b/tmp/t/10053.md
new file mode 100644
index 000000000..6f3a39e0f
--- /dev/null
+++ b/tmp/t/10053.md
@@ -0,0 +1,42 @@
+system | 2024-09-16 15:54:22 UTC | #1
+
+[note type=caution]
+The information in this doc is based on Juju version 3.5.5,
+and may not accurately reflect other versions of Juju.
+[/note]
+
+> See also: [show-task](/t/10129)
+
+## Summary
+Cancel pending or running tasks.
+ +## Usage +```juju cancel-task [options] (|) [...]``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `--format` | yaml | Specify output format (json|yaml) | +| `-m`, `--model` | | Model to operate in. Accepts [<controller name>:]<model name>|<model UUID> | +| `-o`, `--output` | | Specify an output file | + +## Examples + +To cancel a task by ID: + + juju cancel-task 1 + +To cancel multiple tasks by ID: + + juju cancel-task 1 2 3 + + +## Details + +Cancel pending or running tasks matching given IDs or partial ID prefixes. + +--- + +------------------------- + diff --git a/tmp/t/10054.md b/tmp/t/10054.md new file mode 100644 index 000000000..3bb093e06 --- /dev/null +++ b/tmp/t/10054.md @@ -0,0 +1,85 @@ +system | 2024-09-16 15:55:01 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [add-credential](/t/10136), [update-credential](/t/10065), [remove-credential](/t/10201), [default-credential](/t/10055), [autoload-credentials](/t/10230), [show-credential](/t/10105) + +## Summary +Lists Juju credentials for a cloud. + +## Usage +```juju credentials [options] []``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `-c`, `--controller` | | Controller to operate in | +| `--client` | false | Client operation | +| `--format` | tabular | Specify output format (json|tabular|yaml) | +| `-o`, `--output` | | Specify an output file | +| `--show-secrets` | false | Show secrets, applicable to yaml or json formats only | + +## Examples + + juju credentials + juju credentials aws + juju credentials aws --client + juju credentials --format yaml --show-secrets + juju credentials --controller mycontroller + juju credentials --controller mycontroller --client + + +## Details +This command list credentials from this client and credentials +from a controller. + +Locally stored credentials are client specific and +are used with `juju bootstrap` +and `juju add-model`. It's paramount to understand that +different client devices may have different locally stored credentials +for the same user. + +Remotely stored credentials or controller stored credentials are +stored on the controller. + +An arbitrary "credential name" is used to represent credentials, which are +added either via `juju add-credential` or `juju autoload-credentials`. +Note that there can be multiple sets of credentials and, thus, multiple +names. + +Actual authentication material is exposed with the '--show-secrets' +option in json or yaml formats. Secrets are not shown in tabular format. + +A controller, and subsequently created models, can be created with a +different set of credentials but any action taken within the model (e.g.: +`juju deploy`; `juju add-unit`) applies the credential used +to create that model. This model credential is stored on the controller. + +A credential for 'controller' model is determined at bootstrap time and +will be stored on the controller. It is considered to be controller default. + +Recall that when a controller is created a 'default' model is also +created. This model will use the controller default credential. +To see details of your credentials use "juju show-credential" command. + +When adding a new model, Juju will reuse the controller default credential. 
+To add a model that uses a different credential, specify a credential +from this client using --credential option. See `juju help add-model` +for more information. + +Credentials denoted with an asterisk '*' are currently set as the user default +for a given cloud. + +Use --controller option to list credentials from a controller. + +Use --client option to list credentials known locally on this client. + + +--- + +------------------------- + diff --git a/tmp/t/10055.md b/tmp/t/10055.md new file mode 100644 index 000000000..366ee4fb4 --- /dev/null +++ b/tmp/t/10055.md @@ -0,0 +1,48 @@ +system | 2024-09-16 15:51:43 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [credentials](/t/10054), [add-credential](/t/10136), [remove-credential](/t/10201), [autoload-credentials](/t/10230) + +## Summary +Sets local default credentials for a cloud on this client. + +## Usage +```juju default-credential [options] []``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `--reset` | false | Reset default credential for the cloud | + +## Examples + + juju default-credential google credential_name + juju default-credential google + juju default-credential google --reset + + +## Details +The default credentials are specified with a "credential name". + +A credential name is created during the process of adding credentials either +via `juju add-credential` or `juju autoload-credentials`. +Credential names can be listed with `juju credentials`. + +This command sets a locally stored credential to be used as a default. +Default credentials avoid the need to specify a particular set of +credentials when more than one are available for a given cloud. + +To unset previously set default credential for a cloud, use --reset option. + +To view currently set default credential for a cloud, use the command +without a credential name argument. + + +--- + +------------------------- + diff --git a/tmp/t/10056.md b/tmp/t/10056.md new file mode 100644 index 000000000..b14d2d2b4 --- /dev/null +++ b/tmp/t/10056.md @@ -0,0 +1,51 @@ +system | 2024-09-16 15:51:26 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [attach-resource](/t/10124), [charm-resources](/t/10099) +**Alias:** resources + +## Summary +Show the resources for an application or unit. + +## Usage +```juju list-resources [options] ``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `--details` | false | show detailed information about resources used by each unit. | +| `--format` | tabular | Specify output format (json|tabular|yaml) | +| `-m`, `--model` | | Model to operate in. Accepts [<controller name>:]<model name>|<model UUID> | +| `-o`, `--output` | | Specify an output file | + +## Examples + +To list resources for an application: + + juju resources mysql + +To list resources for a unit: + + juju resources mysql/0 + +To show detailed information about resources used by a unit: + + juju resources mysql/0 --details + + +## Details + +This command shows the resources required by and those in use by an existing +application or unit in your model. When run for an application, it will also show any +updates available for resources from a store. 
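+
+For example, to write the full resource listing for an application to a file (the
+application name and file name below are only illustrative):
+
+    juju resources mysql --format yaml -o mysql-resources.yaml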
+ + +--- + +------------------------- + diff --git a/tmp/t/10057.md b/tmp/t/10057.md new file mode 100644 index 000000000..faba3a482 --- /dev/null +++ b/tmp/t/10057.md @@ -0,0 +1,127 @@ +system | 2024-09-16 15:56:55 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [models](/t/10090), [model-config](/t/10096) + +## Summary +Displays or sets default configuration settings for new models. + +## Usage +```juju model-defaults [options] [[<=value>] ...]``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `-c`, `--controller` | | Controller to operate in | +| `--cloud` | | The cloud to target | +| `--color` | false | Use ANSI color codes in output | +| `--file` | | path to yaml-formatted configuration file | +| `--format` | tabular | Specify output format (json|tabular|yaml) | +| `--ignore-read-only-fields` | false | Ignore read only fields that might cause errors to be emitted while processing yaml documents | +| `--no-color` | false | Disable ANSI color codes in tabular output | +| `-o`, `--output` | | Specify an output file | +| `--region` | | The region or cloud/region to target | +| `--reset` | | Reset the provided comma delimited keys | + +## Examples + +Display all model config default values: + + juju model-defaults + +Display the value of http-proxy model config default: + + juju model-defaults http-proxy + +Display the value of http-proxy model config default for the aws cloud: + + juju model-defaults --cloud=aws http-proxy + +Display the value of http-proxy model config default for the aws cloud +and us-east-1 region: + + juju model-defaults --region=aws/us-east-1 http-proxy + +Display the value of http-proxy model config default for the us-east-1 region: + + juju model-defaults --region=us-east-1 http-proxy + +Set the value of ftp-proxy model config default to 10.0.0.1:8000: + + juju model-defaults ftp-proxy=10.0.0.1:8000 + +Set the value of ftp-proxy model config default to 10.0.0.1:8000 in the +us-east-1 region: + + juju model-defaults --region=us-east-1 ftp-proxy=10.0.0.1:8000 + +Set model default values for the aws cloud as defined in path/to/file.yaml: + + juju model-defaults --cloud=aws --file path/to/file.yaml + +Reset the value of default-base and test-mode to default: + + juju model-defaults --reset default-base,test-mode + +Reset the value of http-proxy for the us-east-1 region to default: + + juju model-defaults --region us-east-1 --reset http-proxy + + +## Details + +To view all model default values for the current controller, run + juju model-defaults +You can target a specific controller using the -c flag: + juju model-defaults -c +By default, the output will be printed in a tabular format. You can instead +print it in json or yaml format using the --format flag: + juju model-defaults --format json + juju model-defaults --format yaml + +To view the value of a single model default, run + juju model-defaults key +To set default model config values, run + juju model-defaults key1=val1 key2=val2 ... 
+You can also reset default keys to their original values: + juju model-defaults --reset key1 + juju model-defaults --reset key1,key2,key3 +You may simultaneously set some keys and reset others: + juju model-defaults key1=val1 key2=val2 --reset key3,key4 + +Default values can be imported from a yaml file using the --file flag: + juju model-defaults --file=path/to/cfg.yaml +This allows you to e.g. save a controller's model defaults to a file: + juju model-defaults --format=yaml > cfg.yaml +and then import these later. Note that the output of model-defaults may +include read-only values, which will cause an error when importing later. +To prevent the error, use the --ignore-read-only-fields flag: + juju model-defaults --file=cfg.yaml --ignore-read-only-fields + +You can also read from stdin using "-", which allows you to pipe default model +values from one controller to another: + juju model-defaults -c c1 --format=yaml \ + | juju model-defaults -c c2 --file=- --ignore-read-only-fields +You can simultaneously read config from a yaml file and set config keys +as above. The command-line args will override any values specified in the file. + +Model default configuration settings are specific to the cloud on which the +model is deployed. If the controller hosts more than one cloud, the cloud +(and optionally region) must be specified using the --cloud flag. This flag +accepts arguments in the following forms: + --cloud= (specified cloud, all regions) + --region= (default cloud, specified region) + --region=/ (specified cloud and region) + --cloud= --region= (specified cloud and region) + + + +--- + +------------------------- + diff --git a/tmp/t/10058.md b/tmp/t/10058.md new file mode 100644 index 000000000..684a2fab3 --- /dev/null +++ b/tmp/t/10058.md @@ -0,0 +1,54 @@ +system | 2024-09-16 15:52:23 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [upgrade-model](/t/10073) + +## Summary +Upgrades Juju on a controller. + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `--agent-stream` | | Check this agent stream for upgrades | +| `--agent-version` | | Upgrade to specific version | +| `--build-agent` | false | Build a local version of the agent binary; for development use only | +| `-c`, `--controller` | | Controller to operate in | +| `--dry-run` | false | Don't change anything, just report what would be changed | +| `--ignore-agent-versions` | false | Don't check if all agents have already reached the current version | +| `--reset-previous-upgrade` | false | Clear the previous (incomplete) upgrade status (use with care) | +| `--timeout` | 10m0s | Timeout before upgrade is aborted | +| `-y`, `--yes` | false | Answer 'yes' to confirmation prompts | + +## Examples + + juju upgrade-controller --dry-run + juju upgrade-controller --agent-version 2.0.1 + + +## Details +This command upgrades the Juju agent for a controller. + +A controller's agent version can be shown with `juju model-config -m controller agent-version`. +A version is denoted by: major.minor.patch + +You can upgrade the controller to a new patch version by specifying +the '--agent-version' flag. If not specified, the upgrade candidate +will default to the most recent patch version matching the current +major and minor version. Upgrading to a new major or minor version is +not supported. 
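+
+For example, to check the controller's current agent version and then preview the
+upgrade that would be performed (no changes are made with --dry-run):
+
+    juju model-config -m controller agent-version
+    juju upgrade-controller --dry-run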
+ +The command will abort if an upgrade is in progress. It will also abort if +a previous upgrade was not fully completed (e.g.: if one of the +controllers in a high availability model failed to upgrade). + + + +--- + +------------------------- + diff --git a/tmp/t/10059.md b/tmp/t/10059.md new file mode 100644 index 000000000..7903e6baa --- /dev/null +++ b/tmp/t/10059.md @@ -0,0 +1,38 @@ +system | 2024-09-16 15:53:14 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [add-secret-backend](/t/10062), [secret-backends](/t/10149), [remove-secret-backend](/t/10194), [update-secret-backend](/t/10176) + +## Summary +Displays the specified secret backend. + +## Usage +```juju show-secret-backend [options] ``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-c`, `--controller` | | Controller to operate in | +| `--format` | yaml | Specify output format (json|yaml) | +| `-o`, `--output` | | Specify an output file | +| `--reveal` | false | Include sensitive backend config content | + +## Examples + + juju show-secret-backend myvault + juju secret-backends myvault --reveal + + +## Details + +Displays the specified secret backend. + + +--- + +------------------------- + diff --git a/tmp/t/10060.md b/tmp/t/10060.md new file mode 100644 index 000000000..023c5b265 --- /dev/null +++ b/tmp/t/10060.md @@ -0,0 +1,45 @@ +system | 2024-09-16 15:55:27 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [set-constraints](/t/10210), [model-constraints](/t/10137), [set-model-constraints](/t/10208) + +## Summary +Displays machine constraints for an application. + +## Usage +```juju constraints [options] ``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `--format` | constraints | Specify output format (constraints|json|yaml) | +| `-m`, `--model` | | Model to operate in. Accepts [<controller name>:]<model name>|<model UUID> | +| `-o`, `--output` | | Specify an output file | + +## Examples + + juju constraints mysql + juju constraints -m mymodel apache2 + + +## Details + +Shows machine constraints that have been set for an application with `juju set- +constraints`. +By default, the model is the current model. +Application constraints are combined with model constraints, set with `juju +set-model-constraints`, for commands (such as 'deploy') that provision +machines for applications. Where model and application constraints overlap, the +application constraints take precedence. +Constraints for a specific model can be viewed with `juju model- +constraints`. + +--- + +------------------------- + diff --git a/tmp/t/10061.md b/tmp/t/10061.md new file mode 100644 index 000000000..22322a964 --- /dev/null +++ b/tmp/t/10061.md @@ -0,0 +1,41 @@ +system | 2024-09-16 15:56:49 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [set-firewall-rule](/t/10151) + +## Summary +Prints the firewall rules. + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `--format` | tabular | Specify output format (json|tabular|yaml) | +| `-m`, `--model` | | Model to operate in. 
Accepts [<controller name>:]<model name>|<model UUID> | +| `-o`, `--output` | | Specify an output file | + +## Examples + + juju firewall-rules + + + +## Details + +Lists the firewall rules which control ingress to well known services +within a Juju model. + +DEPRECATION WARNING: Firewall rules have been moved to model-config settings "ssh-allow" and +"saas-ingress-allow". This command is deprecated in favour of +reading/writing directly to these settings. + + + + +--- + +------------------------- + diff --git a/tmp/t/10062.md b/tmp/t/10062.md new file mode 100644 index 000000000..18d3643ad --- /dev/null +++ b/tmp/t/10062.md @@ -0,0 +1,47 @@ +system | 2024-09-16 15:53:31 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [secret-backends](/t/10149), [remove-secret-backend](/t/10194), [show-secret-backend](/t/10059), [update-secret-backend](/t/10176) + +## Summary +Add a new secret backend to the controller. + +## Usage +```juju add-secret-backend [options] ``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-c`, `--controller` | | Controller to operate in | +| `--config` | | path to yaml-formatted configuration file | +| `--import-id` | | add the backend with the specified id | + +## Examples + + juju add-secret-backend myvault vault --config /path/to/cfg.yaml + juju add-secret-backend myvault vault token-rotate=10m --config /path/to/cfg.yaml + juju add-secret-backend myvault vault endpoint=https://vault.io:8200 token=s.1wshwhw + + +## Details + +Adds a new secret backend for storing secret content. + +You must specify a name for the backend and its type, +followed by any necessary backend specific config values. +Config may be specified as key values ot read from a file. +Any key values override file content if both are specified. + +To rotate the backend access credential/token (if specified), use +the "token-rotate" config and supply a duration. + + + +--- + +------------------------- + diff --git a/tmp/t/10063.md b/tmp/t/10063.md new file mode 100644 index 000000000..19fa74862 --- /dev/null +++ b/tmp/t/10063.md @@ -0,0 +1,30 @@ +system | 2024-09-16 15:56:36 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [spaces](/t/10236), [add-space](/t/10117), [show-space](/t/10095), [move-to-space](/t/10192) + +## Summary +Reloads spaces and subnets from substrate. + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `-m`, `--model` | | Model to operate in. Accepts [<controller name>:]<model name>|<model UUID> | + +## Examples + + juju reload-spaces + + +## Details +Reloades spaces and subnets from substrate. + +--- + +------------------------- + diff --git a/tmp/t/10064.md b/tmp/t/10064.md new file mode 100644 index 000000000..5bf076bfb --- /dev/null +++ b/tmp/t/10064.md @@ -0,0 +1,38 @@ +system | 2024-09-16 15:55:15 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [agree](/t/10161) + +## Summary +List user's agreements. 
+
+### Options
+| Flag | Default | Usage |
+| --- | --- | --- |
+| `-B`, `--no-browser-login` | false | Do not use web browser for authentication |
+| `-c`, `--controller` | | Controller to operate in |
+| `--format` | tabular | Specify output format (json|tabular|yaml) |
+| `-o`, `--output` | | Specify an output file |
+
+## Examples
+
+    juju agreements
+
+
+## Details
+
+Charms may require a user to accept their terms in order for them to be deployed.
+In other words, some applications may only be installed if a user agrees to
+accept some terms defined by the charm.
+
+This command lists the terms that the user has agreed to.
+
+
+---
+
+-------------------------
+
diff --git a/tmp/t/10065.md b/tmp/t/10065.md
new file mode 100644
index 000000000..078e2bc94
--- /dev/null
+++ b/tmp/t/10065.md
@@ -0,0 +1,70 @@
+system | 2024-09-16 15:56:06 UTC | #1
+
+[note type=caution]
+The information in this doc is based on Juju version 3.5.5,
+and may not accurately reflect other versions of Juju.
+[/note]
+
+> See also: [add-credential](/t/10136), [credentials](/t/10054), [remove-credential](/t/10201), [set-credential](/t/10169)
+
+## Summary
+Updates a controller credential for a cloud.
+
+## Usage
+```juju update-credential [options] [ []]```
+
+### Options
+| Flag | Default | Usage |
+| --- | --- | --- |
+| `-B`, `--no-browser-login` | false | Do not use web browser for authentication |
+| `-c`, `--controller` | | Controller to operate in |
+| `--client` | false | Client operation |
+| `-f`, `--file` | | The YAML file containing credential details to update |
+| `--force` | false | Force update controller side credential, ignore validation errors |
+| `--region` | | Cloud region that credential is valid for |
+
+## Examples
+
+    juju update-credential aws mysecrets
+    juju update-credential -f mine.yaml
+    juju update-credential -f mine.yaml --client
+    juju update-credential aws -f mine.yaml
+    juju update-credential azure --region brazilsouth -f mine.yaml
+    juju update-credential -f mine.yaml --controller mycontroller --force
+
+
+## Details
+Cloud credentials are used for model operations and manipulations.
+Since it is common to have long-running models, it is also common to
+have these cloud credentials become invalid during models' lifetime.
+When this happens, a user must update the cloud credential that
+a model was created with to the new and valid details on the controller.
+
+This command allows you to update an existing, already-stored, named,
+cloud-specific credential on a controller as well as the one from this client.
+
+Use the --controller option to update a credential definition on a controller.
+
+When updating a cloud credential on a controller, Juju performs additional
+checks to ensure that the models that use this credential can still
+access cloud instances after the update. Occasionally, these checks may not be desired
+by the user and can be bypassed using the --force option.
+A forced update may leave some models with unreachable machines.
+Consequently, it is not recommended as a default update action.
+Models with unreachable machines are most commonly fixed by using another
+cloud credential; see 'juju set-credential' for more information.
+
+Use the --client option to update a credential definition on this client.
+If a user uses a different client, say a different laptop,
+the update will not affect that client's (laptop's) copy.
+
+Before the credential is updated, the new content is validated. For some providers,
+cloud credentials are region specific. 
To validate the credential for a non-default region, +use --region. + + + +--- + +------------------------- + diff --git a/tmp/t/10066.md b/tmp/t/10066.md new file mode 100644 index 000000000..ea89ced8b --- /dev/null +++ b/tmp/t/10066.md @@ -0,0 +1,55 @@ +system | 2024-09-16 15:56:47 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [add-storage](/t/10159), [attach-storage](/t/10126), [detach-storage](/t/10089), [list-storage](/t/10138), [show-storage](/t/10184), [storage](/t/10075) + +## Summary +Removes storage from the model. + +## Usage +```juju remove-storage [options] [ ...]``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `--force` | false | Remove storage even if it is currently attached | +| `-m`, `--model` | | Model to operate in. Accepts [<controller name>:]<model name>|<model UUID> | +| `--no-destroy` | false | Remove the storage without destroying it | + +## Examples + +Remove the detached storage pgdata/0: + + juju remove-storage pgdata/0 + +Remove the possibly attached storage pgdata/0: + + juju remove-storage --force pgdata/0 + +Remove the storage pgdata/0, without destroying +the corresponding cloud storage: + + juju remove-storage --no-destroy pgdata/0 + + + +## Details + +Removes storage from the model. Specify one or more +storage IDs, as output by "juju storage". + +By default, remove-storage will fail if the storage +is attached to any units. To override this behaviour, +you can use "juju remove-storage --force". +Note: forced detach is not available on container models. + + +--- + +------------------------- + diff --git a/tmp/t/10067.md b/tmp/t/10067.md new file mode 100644 index 000000000..426cbbbf8 --- /dev/null +++ b/tmp/t/10067.md @@ -0,0 +1,62 @@ +system | 2024-09-16 15:53:06 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [scale-application](/t/10171), [show-application](/t/10177) + +## Summary +Remove applications from the model. + +## Usage +```juju remove-application [options] [...]``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `--destroy-storage` | false | Destroy storage attached to application units | +| `--dry-run` | false | Print what this command would remove without removing | +| `--force` | false | Completely remove an application and all its dependencies | +| `-m`, `--model` | | Model to operate in. Accepts [<controller name>:]<model name>|<model UUID> | +| `--no-prompt` | false | Do not ask for confirmation. Overrides `mode` model config setting | +| `--no-wait` | false | Rush through application removal without waiting for each individual step to complete | + +## Examples + + juju remove-application hadoop + juju remove-application --force hadoop + juju remove-application --force --no-wait hadoop + juju remove-application -m test-model mariadb + + +## Details +Removing an application will terminate any relations that application has, remove +all units of the application, and in the case that this leaves machines with +no running applications, Juju will also remove the machine. For this reason, +you should retrieve any logs or data required from applications and units +before removing them. 
Removing units which are co-located with units of +other charms or a Juju controller will not result in the removal of the +machine. + +Sometimes, the removal of the application may fail as Juju encounters errors +and failures that need to be dealt with before an application can be removed. +For example, Juju will not remove an application if there are hook failures. +However, at times, there is a need to remove an application ignoring +all operational errors. In these rare cases, use --force option but note +that --force will also remove all units of the application, its subordinates +and, potentially, machines without given them the opportunity to shutdown cleanly. + +Application removal is a multi-step process. Under normal circumstances, Juju will not +proceed to the next step until the current step has finished. +However, when using --force, users can also specify --no-wait to progress through steps +without delay waiting for each step to complete. + + + +--- + +------------------------- + diff --git a/tmp/t/10068.md b/tmp/t/10068.md new file mode 100644 index 000000000..88d1d2613 --- /dev/null +++ b/tmp/t/10068.md @@ -0,0 +1,37 @@ +system | 2024-09-16 15:52:31 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [create-storage-pool](/t/10093), [update-storage-pool](/t/10217), [storage-pools](/t/10228) + +## Summary +Remove an existing storage pool. + +## Usage +```juju remove-storage-pool [options] ``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `-m`, `--model` | | Model to operate in. Accepts [<controller name>:]<model name>|<model UUID> | + +## Examples + +Remove the storage-pool named fast-storage: + + juju remove-storage-pool fast-storage + + +## Details + +Remove a single existing storage pool. + + +--- + +------------------------- + diff --git a/tmp/t/10069.md b/tmp/t/10069.md new file mode 100644 index 000000000..7a2e9cd2e --- /dev/null +++ b/tmp/t/10069.md @@ -0,0 +1,41 @@ +system | 2024-09-16 15:56:18 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [run](/t/10052), [show-action](/t/10219) + +## Summary +List actions defined for an application. + +## Usage +```juju actions [options] ``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `--format` | default | Specify output format (default|json|tabular|yaml) | +| `-m`, `--model` | | Model to operate in. Accepts [<controller name>:]<model name>|<model UUID> | +| `-o`, `--output` | | Specify an output file | +| `--schema` | false | Display the full action schema | + +## Examples + + juju actions postgresql + juju actions postgresql --format yaml + juju actions postgresql --schema + + +## Details + +List the actions available to run on the target application, with a short +description. To show the full schema for the actions, use --schema. 
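+
+For example, to save the full action schema for an application to a file (the
+application and file names below are only illustrative):
+
+    juju actions postgresql --schema --format yaml -o postgresql-actions.yaml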
+ + +--- + +------------------------- + diff --git a/tmp/t/10070.md b/tmp/t/10070.md new file mode 100644 index 000000000..b6000cd1f --- /dev/null +++ b/tmp/t/10070.md @@ -0,0 +1,44 @@ +system | 2024-09-16 15:57:22 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +**Alias:** payloads + +## Summary +Display status information about known payloads. + +## Usage +```juju list-payloads [options] [pattern ...]``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `--format` | tabular | Specify output format (json|tabular|yaml) | +| `-m`, `--model` | | Model to operate in. Accepts [<controller name>:]<model name>|<model UUID> | +| `-o`, `--output` | | Specify an output file | + +## Details + +This command will report on the runtime state of defined payloads. + +When one or more pattern is given, Juju will limit the results to only +those payloads which match *any* of the provided patterns. Each pattern +will be checked against the following info in Juju: + +- unit name +- machine id +- payload type +- payload class +- payload id +- payload tag +- payload status + + +--- + +------------------------- + diff --git a/tmp/t/10071.md b/tmp/t/10071.md new file mode 100644 index 000000000..686ee1911 --- /dev/null +++ b/tmp/t/10071.md @@ -0,0 +1,150 @@ +system | 2024-09-16 15:51:58 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [remove-machine](/t/10163), [model-constraints](/t/10137), [set-model-constraints](/t/10208) + +## Summary +Provision a new machine or assign one to the model. + +## Usage +```juju add-machine [options] [[:] | ssh:[@] | ] | | ``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `--base` | | The operating system base to install on the new machine(s) | +| `--constraints` | | Machine constraints that overwrite those available from 'juju model-constraints' and provider's defaults | +| `--disks` | | Storage constraints for disks to attach to the machine(s) | +| `-m`, `--model` | | Model to operate in. Accepts [<controller name>:]<model name>|<model UUID> | +| `-n` | 1 | The number of machines to add | +| `--private-key` | | Path to the private key to use during the connection | +| `--public-key` | | Path to the public key to add to the remote authorized keys | +| `--series` | | The operating system series to install on the new machine(s). DEPRECATED use --base | + +## Examples + +Start a new machine by requesting one from the cloud provider: + + juju add-machine + +Start 2 new machines: + + juju add-machine -n 2 + +Start a LXD container on a new machine instance and add both as machines: + + juju add-machine lxd + +Start two machine instances, each hosting a LXD container, then add all four as machines: + + juju add-machine lxd -n 2 + +Create a container on machine 4 and add it as a machine: + + juju add-machine lxd:4 + +Start a new machine and require that it has 8GB RAM: + + juju add-machine --constraints mem=8G + +Start a new machine within the "us-east-1a" availability zone: + + juju add-machine --constraints zones=us-east-1a + +Start a new machine with at least 4 CPU cores and 16GB RAM, and request three storage volumes to be attached to it. 
Two are large capacity (1TB) HDDs and one is a lower capacity (100GB) SSD. Note: 'ebs' and 'ebs-ssd' are storage pools specific to AWS.
+
+    juju add-machine --constraints="cores=4 mem=16G" --disks="ebs,1T,2 ebs-ssd,100G,1"
+
+Allocate a machine to the model via SSH:
+
+    juju add-machine ssh:user@10.10.0.3
+
+Allocate a machine specifying the private key to use during the connection:
+
+    juju add-machine ssh:user@10.10.0.3 --private-key /tmp/id_rsa
+
+Allocate a machine specifying a public key to set in the list of authorized keys in the machine:
+
+    juju add-machine ssh:user@10.10.0.3 --public-key /tmp/id_rsa.pub
+
+Allocate a machine specifying a public key to set in the list of authorized keys and the private key to use during the connection:
+
+    juju add-machine ssh:user@10.10.0.3 --public-key /tmp/id_rsa.pub --private-key /tmp/id_rsa
+
+Allocate a machine to the model. Note: specific to MAAS.
+
+    juju add-machine host.internal
+
+
+## Details
+
+Add a new machine to the model. The command operates in three modes,
+depending on the options provided:
+
+  - provision a new machine from the cloud (default, see "Provisioning
+    a new machine")
+  - create an operating system container (see "Container creation")
+  - connect to a live computer and allocate it as a machine (see "Manual
+    provisioning")
+
+The add-machine command is unavailable in k8s clouds. Provisioning
+a new machine is unavailable on the manual cloud provider.
+
+Once the add-machine command has finished, the machine's ID can be
+used as a placement directive for deploying applications. Machine IDs
+are also accessible via 'juju status' and 'juju machines'.
+
+
+Provisioning a new machine
+
+When add-machine is called without arguments, Juju provisions a new
+machine instance from the current cloud. The machine's specifications,
+including whether the machine is virtual or physical, depend on the cloud.
+
+To control which instance type is provisioned, use the --constraints and
+--base options. --base can be specified using the OS name and the version of
+the OS, separated by @. For example, --base ubuntu@22.04.
+
+To add storage volumes to the instance, provide a whitespace-delimited
+list of storage constraints to the --disks option.
+
+Adding "placement directives" as an argument gives Juju additional information
+about how to allocate the machine in the cloud. For example, one can direct
+the MAAS provider to acquire a particular node by specifying its hostname.
+
+
+Manual provisioning
+
+Call add-machine with the address of a network-accessible computer to
+allocate that machine to the model.
+
+Manual provisioning is the process of installing Juju on an existing machine
+and bringing it under Juju's management. The Juju controller must be able to
+access the new machine over the network.
+
+
+Container creation
+
+If an operating system container type is specified (e.g. "lxd" or "kvm"),
+then add-machine will allocate a container of that type on a new machine
+instance. Both the new instance and the new container will be available
+as machines in the model.
+
+It is also possible to add containers to existing machines using the format
+<container-type>:<machine-id>. Constraints cannot be combined with this mode.
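+
+For example, to add an LXD container to an existing machine and then target it when
+deploying an application (the machine and container IDs below are only illustrative;
+use the ID reported by add-machine or shown by 'juju machines'):
+
+    juju add-machine lxd:4
+    juju deploy postgresql --to 4/lxd/1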
+ + +Further reading: + https://juju.is/docs/reference/commands/add-machine + https://juju.is/docs/reference/constraints + + +--- + +------------------------- + diff --git a/tmp/t/10072.md b/tmp/t/10072.md new file mode 100644 index 000000000..57811a0fc --- /dev/null +++ b/tmp/t/10072.md @@ -0,0 +1,36 @@ +system | 2024-09-16 15:54:42 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [add-secret-backend](/t/10062), [remove-secret-backend](/t/10194), [show-secret-backend](/t/10059), [update-secret-backend](/t/10176) +**Alias:** secret-backends + +## Summary +Lists secret backends available in the controller. + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-c`, `--controller` | | Controller to operate in | +| `--format` | tabular | Specify output format (json|tabular|yaml) | +| `-o`, `--output` | | Specify an output file | +| `--reveal` | false | Include sensitive backend config content | + +## Examples + + juju secret-backends + juju secret-backends --format yaml + + +## Details + +Displays the secret backends available for storing secret content. + + +--- + +------------------------- + diff --git a/tmp/t/10073.md b/tmp/t/10073.md new file mode 100644 index 000000000..09014b78b --- /dev/null +++ b/tmp/t/10073.md @@ -0,0 +1,62 @@ +system | 2024-09-16 15:54:11 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [sync-agent-binary](/t/10106) + +## Summary +Upgrades Juju on all machines in a model. + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `--agent-stream` | | Check this agent stream for upgrades | +| `--agent-version` | | Upgrade to specific version | +| `--dry-run` | false | Don't change anything, just report what would be changed | +| `--ignore-agent-versions` | false | Don't check if all agents have already reached the current version | +| `-m`, `--model` | | Model to operate in. Accepts [<controller name>:]<model name>|<model UUID> | +| `--reset-previous-upgrade` | false | Clear the previous (incomplete) upgrade status (use with care) | +| `--timeout` | 10m0s | Timeout before upgrade is aborted | +| `-y`, `--yes` | false | Answer 'yes' to confirmation prompts | + +## Examples + + juju upgrade-model --dry-run + juju upgrade-model --agent-version 2.0.1 + juju upgrade-model --agent-stream proposed + + +## Details +Juju provides agent software to every machine it creates. This command +upgrades that software across an entire model, which is, by default, the +current model. +A model's agent version can be shown with `juju model-config agent-version`. +A version is denoted by: major.minor.patch + +If '--agent-version' is not specified, then the upgrade candidate is +selected to be the exact version the controller itself is running. + +If the controller is without internet access, the client must first supply +the software to the controller's cache via the `juju sync-agent-binary` command. +The command will abort if an upgrade is in progress. It will also abort if +a previous upgrade was not fully completed (e.g.: if one of the +controllers in a high availability model failed to upgrade). + +When looking for an agent to upgrade to, Juju will check the currently +configured agent stream for that model. 
It's possible to overwrite this for +the lifetime of this upgrade using --agent-stream + +If a failed upgrade has been resolved, '--reset-previous-upgrade' can be +used to allow the upgrade to proceed. +Backups are recommended prior to upgrading. + + + +--- + +------------------------- + diff --git a/tmp/t/10074.md b/tmp/t/10074.md new file mode 100644 index 000000000..51550b89e --- /dev/null +++ b/tmp/t/10074.md @@ -0,0 +1,308 @@ +system | 2024-09-16 15:55:20 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [integrate](/t/10207), [add-unit](/t/10141), [config](/t/10139), [expose](/t/10109), [constraints](/t/10060), [refresh](/t/10189), [set-constraints](/t/10210), [spaces](/t/10236), [charm-resources](/t/10099) + +## Summary +Deploys a new application or bundle. + +## Usage +```juju deploy [options] []``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `--attach-storage` | | Existing storage to attach to the deployed unit (not available on k8s models) | +| `--base` | | The base on which to deploy | +| `--bind` | | Configure application endpoint bindings to spaces | +| `--channel` | | Channel to use when deploying a charm or bundle from Charmhub | +| `--config` | | Either a path to yaml-formatted application config file or a key=value pair | +| `--constraints` | | Set application constraints | +| `--device` | | Charm device constraints | +| `--dry-run` | false | Just show what the deploy would do | +| `--force` | false | Allow a charm/bundle to be deployed which bypasses checks such as supported base or LXD profile allow list | +| `-m`, `--model` | | Model to operate in. Accepts [<controller name>:]<model name>|<model UUID> | +| `--map-machines` | | Specify the existing machines to use for bundle deployments | +| `-n`, `--num-units` | 1 | Number of application units to deploy for principal charms | +| `--overlay` | | Bundles to overlay on the primary bundle, applied in order | +| `--resource` | | Resource to be uploaded to the controller | +| `--revision` | -1 | The revision to deploy | +| `--series` | | The series on which to deploy. 
DEPRECATED: use --base | +| `--storage` | | Charm storage constraints | +| `--to` | | The machine and/or container to deploy the unit in (bypasses constraints) | +| `--trust` | false | Allows charm to run hooks that require access credentials | + +## Examples + +Deploy to a new machine: + + juju deploy apache2 + +Deploy to machine 23: + + juju deploy mysql --to 23 + +Deploy to a new LXD container on a new machine: + + juju deploy mysql --to lxd + +Deploy to a new LXD container on machine 25: + + juju deploy mysql --to lxd:25 + +Deploy to LXD container 3 on machine 24: + + juju deploy mysql --to 24/lxd/3 + +Deploy 2 units, one on machine 3 and one to a new LXD container on machine 5: + + juju deploy mysql -n 2 --to 3,lxd:5 + +Deploy 3 units, one on machine 3 and the remaining two on new machines: + + juju deploy mysql -n 3 --to 3 + +Deploy to a machine with at least 8 GiB of memory: + + juju deploy postgresql --constraints mem=8G + +Deploy to a specific availability zone (provider-dependent): + + juju deploy mysql --to zone=us-east-1a + +Deploy to a specific MAAS node: + + juju deploy mysql --to host.maas + +Deploy to a machine that is in the 'dmz' network space but not in either the +'cms' nor the 'database' spaces: + + juju deploy haproxy -n 2 --constraints spaces=dmz,^cms,^database + +Deploy a k8s charm that requires a single Nvidia GPU: + + juju deploy mycharm --device miner=1,nvidia.com/gpu + +Deploy a k8s charm that requires two Nvidia GPUs that have an +attribute of 'gpu=nvidia-tesla-p100': + + juju deploy mycharm --device \ + twingpu=2,nvidia.com/gpu,gpu=nvidia-tesla-p100 + +Deploy with specific resources: + + juju deploy foo --resource bar=/some/file.tgz --resource baz=./docs/cfg.xml + + +## Details + +A charm or bundle can be referred to by its simple name and a base, revision, +or channel can optionally be specified: + + juju deploy postgresql + juju deploy ch:postgresql --base ubuntu@22.04 + juju deploy ch:postgresql --channel edge + juju deploy ch:ubuntu --revision 17 --channel edge + +All the above deployments use remote charms found in Charmhub, denoted by the +'ch:' prefix. Remote charms with no prefix will be deployed from Charmhub. + +If a channel is specified, it will be used as the source for looking up the +charm or bundle from Charmhub. When used in a bundle deployment context, +the specified channel is only used for retrieving the bundle and is ignored when +looking up the charms referenced by the bundle. However, each charm within a +bundle is allowed to explicitly specify the channel used to look it up. + +If a revision is specified, a channel must also be specified for Charmhub charms +and bundles. The charm will be deployed with revision. The channel will be used +when refreshing the application in the future. 
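+
+For example (the revision number and channel below are placeholders; subsequent
+refreshes will track the given channel):
+
+    juju deploy ch:postgresql --revision 336 --channel 14/stable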
+ +A local charm may be deployed by giving the path to its directory: + + juju deploy /path/to/charm + juju deploy /path/to/charm --base ubuntu@22.04 + +You will need to be explicit if there is an ambiguity between a local and a +remote charm: + + juju deploy ./pig + juju deploy ch:pig + +A bundle can be expressed similarly to a charm: + + juju deploy mediawiki-single + juju deploy mediawiki-single --base ubuntu@22.04 + juju deploy ch:mediawiki-single + +A local bundle may be deployed by specifying the path to its YAML file: + + juju deploy /path/to/bundle.yaml + +The final charm/machine base is determined using an order of precedence (most +preferred to least): + + - the '--base' command option + - for a bundle, the series stated in each charm URL (in the bundle file) + - for a bundle, the series given at the top level (in the bundle file) + - the 'default-base' model key + - the first base specified in the charm's manifest file + +An 'application name' provides an alternate name for the application. It works +only for charms; it is silently ignored for bundles (although the same can be +done at the bundle file level). Such a name must consist only of lower-case +letters (a-z), numbers (0-9), and single hyphens (-). The name must begin with +a letter and not have a group of all numbers follow a hyphen: + + Valid: myappname, custom-app, app2-scat-23skidoo + Invalid: myAppName, custom--app, app2-scat-23, areacode-555-info + +Use the '--constraints' option to specify hardware requirements for new machines. +These become the application's default constraints (i.e. they are used if the +application is later scaled out with the `add-unit` command). To overcome this +behaviour use the `set-constraints` command to change the application's default +constraints or add a machine (`add-machine`) with a certain constraint and then +target that machine with `add-unit` by using the '--to' option. + +Use the '--device' option to specify GPU device requirements (with Kubernetes). +The below format is used for this option's value, where the 'label' is named in +the charm metadata file: + + <label>=[<count>,]<device-class>|<vendor/type>[,<attributes>] + +Use the '--config' option to specify application configuration values. This +option accepts either a path to a YAML-formatted file or a key=value pair. A +file should be of this format: + + <charm name>: + <option name>: <option value> + ... + +For example, to deploy 'mediawiki' with file 'mycfg.yaml' that contains: + + mediawiki: + name: my media wiki + admins: me:pwdOne + debug: true + +use + + juju deploy mediawiki --config mycfg.yaml + +Key=value pairs can also be passed directly in the command. For example, to +declare the 'name' key: + + juju deploy mediawiki --config name='my media wiki' + +To define multiple keys: + + juju deploy mediawiki --config name='my media wiki' --config debug=true + +If a key gets defined multiple times the last value will override any earlier +values. For example, + + juju deploy mediawiki --config name='my media wiki' --config mycfg.yaml + +Similar to the 'juju config' command, if the value begins with an '@' character, +it will be treated as a path to a config file and its contents will be assigned +to the specified key. For example, + + juju deploy mediawiki --config name='@wiki-name.txt" + +will set the 'name' key to the contents of file 'wiki-name.txt'. + +If mycfg.yaml contains a value for 'name', it will override the earlier 'my +media wiki' value. The same applies to single value options. 
For example, + + juju deploy mediawiki --config name='a media wiki' --config name='my wiki' + +the value of 'my wiki' will be used. + +Use the '--resource' option to specify the resources you want to use for your charm. +The format is + + --resource = + +where the resource name is the name from the metadata.yaml file of the charm +and where, depending on the type of the resource, the resource can be specified +as follows: + +(1) If the resource is type 'file', you can specify it by providing +(a) the resource revision number or +(b) a path to a local file. + +(2) If the resource is type 'oci-image', you can specify it by providing +(a) the resource revision number, +(b) a path to a local file = private OCI image, +(c) a link to a public OCI image. + + +Note: If you choose (1b) or (2b-c), i.e., a resource that is not from Charmhub: +You will not be able to go back to using a resource from Charmhub. + +Note: If you choose (1b) or (2b): This uploads a file from your loal disk to the juju +controller to be streamed to the charm when "resource-get" is called by a hook. + +Note: If you choose (2b): You will need to specify: +(i) the local path to the private OCI image as well as +(ii) the username/password required to access the private OCI image. + +Note: If multiple resources are needed, repeat the option. + + +Use the '--to' option to deploy to an existing machine or container by +specifying a "placement directive". The `status` command should be used for +guidance on how to refer to machines. A few placement directives are +provider-dependent (e.g.: 'zone'). + +In more complex scenarios, "network spaces" are used to partition the cloud +networking layer into sets of subnets. Instances hosting units inside the same +space can communicate with each other without any firewalls. Traffic crossing +space boundaries could be subject to firewall and access restrictions. Using +spaces as deployment targets, rather than their individual subnets, allows Juju +to perform automatic distribution of units across availability zones to support +high availability for applications. Spaces help isolate applications and their +units, both for security purposes and to manage both traffic segregation and +congestion. + +When deploying an application or adding machines, the 'spaces' constraint can +be used to define a comma-delimited list of required and forbidden spaces (the +latter prefixed with '^', similar to the 'tags' constraint). + +When deploying bundles, machines specified in the bundle are added to the model +as new machines. Use the '--map-machines=existing' option to make use of any +existing machines. To map particular existing machines to machines defined in +the bundle, multiple comma separated values of the form 'bundle-id=existing-id' +can be passed. For example, for a bundle that specifies machines 1, 2, and 3; +and a model that has existing machines 1, 2, 3, and 4, the below deployment +would have existing machines 1 and 2 assigned to machines 1 and 2 defined in +the bundle and have existing machine 4 assigned to machine 3 defined in the +bundle. + + juju deploy mybundle --map-machines=existing,3=4 + +Only top level machines can be mapped in this way, just as only top level +machines can be defined in the machines section of the bundle. + +When charms that include LXD profiles are deployed the profiles are validated +for security purposes by allowing only certain configurations and devices. Use +the '--force' option to bypass this check. 
Doing so is not recommended as it +can lead to unexpected behaviour. + +Further reading: https://juju.is/docs/olm/manage-applications + + +--- + +------------------------- + +avgomes | 2024-09-25 14:52:28 UTC | #2 + +Small readability issue: Most commands in the ["Details" section](https://discourse.charmhub.io/t/command-juju-deploy/10074?#details-5) are in plain text instead of a code block. + +------------------------- + diff --git a/tmp/t/10075.md b/tmp/t/10075.md new file mode 100644 index 000000000..3c2a325c0 --- /dev/null +++ b/tmp/t/10075.md @@ -0,0 +1,49 @@ +system | 2024-09-16 15:51:48 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [show-storage](/t/10184), [add-storage](/t/10159), [remove-storage](/t/10066) + +## Summary +Lists storage details. + +## Usage +```juju storage [options] ...``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `--filesystem` | false | List filesystem storage(deprecated) | +| `--format` | tabular | Specify output format (json|tabular|yaml) | +| `-m`, `--model` | | Model to operate in. Accepts [<controller name>:]<model name>|<model UUID> | +| `-o`, `--output` | | Specify an output file | +| `--volume` | false | List volume storage(deprecated) | + +## Examples + +List all storage: + + juju storage + +List only filesystem storage: + + juju storage --filesystem + +List only volume storage: + + juju storage --volume + + +## Details + +List information about storage. + + +--- + +------------------------- + diff --git a/tmp/t/10076.md b/tmp/t/10076.md new file mode 100644 index 000000000..25292f919 --- /dev/null +++ b/tmp/t/10076.md @@ -0,0 +1,54 @@ +system | 2024-09-16 15:54:33 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +**Alias:** subnets + +## Summary +List subnets known to Juju. + +## Usage +```juju list-subnets [options] [--space ] [--zone ] [--format yaml|json] [--output ]``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `--format` | yaml | Specify output format (json|yaml) | +| `-m`, `--model` | | Model to operate in. Accepts [<controller name>:]<model name>|<model UUID> | +| `-o`, `--output` | | Specify an output file | +| `--space` | | Filter results by space name | +| `--zone` | | Filter results by zone name | + +## Examples + +To list all subnets known to Juju: + + juju subnets + +To list subnets associated with a specific network space: + + juju subnets --space my-space + +To list subnets associated with a specific availability zone: + + juju subnets --zone my-zone + + +## Details +Displays a list of all subnets known to Juju. Results can be filtered +using the optional --space and/or --zone arguments to only display +subnets associated with a given network space and/or availability zone. + +Like with other Juju commands, the output and its format can be changed +using the --format and --output (or -o) optional arguments. Supported +output formats include "yaml" (default) and "json". To redirect the +output to a file, use --output. 
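+
+For instance, to capture the JSON-formatted subnet list for a given space in a
+file, the filter and output flags listed above can be combined (a minimal
+sketch; the space and file names are illustrative):
+
+    juju subnets --space my-space --format json --output subnets.json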
+ +--- + +------------------------- + diff --git a/tmp/t/10077.md b/tmp/t/10077.md new file mode 100644 index 000000000..db4b12794 --- /dev/null +++ b/tmp/t/10077.md @@ -0,0 +1,65 @@ +system | 2024-09-16 15:58:08 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [grant](/t/10196) + +## Summary +Revokes access from a Juju user for a model, controller, or application offer. + +## Usage +```juju revoke [options] [ ... | ...]``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `-c`, `--controller` | | Controller to operate in | + +## Examples + +Revoke 'read' (and 'write') access from user 'joe' for model 'mymodel': + + juju revoke joe read mymodel + +Revoke 'write' access from user 'sam' for models 'model1' and 'model2': + + juju revoke sam write model1 model2 + +Revoke 'read' (and 'write') access from user 'joe' for application offer 'fred/prod.hosted-mysql': + + juju revoke joe read fred/prod.hosted-mysql + +Revoke 'consume' access from user 'sam' for models 'fred/prod.hosted-mysql' and 'mary/test.hosted-mysql': + + juju revoke sam consume fred/prod.hosted-mysql mary/test.hosted-mysql + + +## Details +By default, the controller is the current controller. + +Revoking write access, from a user who has that permission, will leave +that user with read access. Revoking read access, however, also revokes +write access. + +Valid access levels for models are: + read + write + admin + +Valid access levels for controllers are: + login + superuser + +Valid access levels for application offers are: + read + consume + admin + +--- + +------------------------- + diff --git a/tmp/t/10078.md b/tmp/t/10078.md new file mode 100644 index 000000000..30ca6f58c --- /dev/null +++ b/tmp/t/10078.md @@ -0,0 +1,39 @@ +system | 2024-09-16 15:54:31 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [status](/t/10173) + +## Summary +Lists machines in a model. + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `--color` | false | Force use of ANSI color codes | +| `--format` | tabular | Specify output format (json|tabular|yaml) | +| `-m`, `--model` | | Model to operate in. Accepts [<controller name>:]<model name>|<model UUID> | +| `-o`, `--output` | | Specify an output file | +| `--utc` | false | Display time as UTC in RFC3339 format | + +## Examples + + juju machines + + +## Details + +By default, the tabular format is used. +The following sections are included: ID, STATE, DNS, INS-ID, SERIES, AZ +Note: AZ above is the cloud region's availability zone. + + + +--- + +------------------------- + diff --git a/tmp/t/10079.md b/tmp/t/10079.md new file mode 100644 index 000000000..b7c5ebe1a --- /dev/null +++ b/tmp/t/10079.md @@ -0,0 +1,38 @@ +system | 2024-09-16 15:58:04 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [models](/t/10090), [show-controller](/t/10156) +**Alias:** controllers + +## Summary +Lists all controllers. 
+ +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `--format` | tabular | Specify output format (json|tabular|yaml) | +| `-o`, `--output` | | Specify an output file | +| `--refresh` | false | Connect to each controller to download the latest details | + +## Examples + + juju controllers + juju controllers --format json --output ~/tmp/controllers.json + + + +## Details +The output format may be selected with the '--format' option. In the +default tabular output, the current controller is marked with an asterisk. + + + +--- + +------------------------- + diff --git a/tmp/t/10080.md b/tmp/t/10080.md new file mode 100644 index 000000000..4740a6a37 --- /dev/null +++ b/tmp/t/10080.md @@ -0,0 +1,40 @@ +system | 2024-09-16 15:58:10 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [consume](/t/10213), [integrate](/t/10207), [remove-saas](/t/10087) + +## Summary +Offer application endpoints for use in other models. + +## Usage +```juju offer [options] [model-name.]:[,...] [offer-name]``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `-c`, `--controller` | | Controller to operate in | + +## Examples + + juju offer mysql:db + juju offer mymodel.mysql:db + juju offer db2:db hosted-db2 + juju offer db2:db,log hosted-db2 + + +## Details + +Deployed application endpoints are offered for use by consumers. +By default, the offer is named after the application, unless +an offer name is explicitly specified. + + +--- + +------------------------- + diff --git a/tmp/t/10081.md b/tmp/t/10081.md new file mode 100644 index 000000000..2571778f6 --- /dev/null +++ b/tmp/t/10081.md @@ -0,0 +1,52 @@ +system | 2024-09-16 15:52:14 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [add-cloud](/t/10162), [remove-cloud](/t/10216), [clouds](/t/10182) + +## Summary +Updates cloud information available to Juju. + +## Usage +```juju update-cloud [options] ``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `-c`, `--controller` | | Controller to operate in | +| `--client` | false | Client operation | +| `-f` | | The path to a cloud definition file | + +## Examples + + juju update-cloud mymaas -f path/to/maas.yaml + juju update-cloud mymaas -f path/to/maas.yaml --controller mycontroller + juju update-cloud mymaas --controller mycontroller + juju update-cloud mymaas --client --controller mycontroller + juju update-cloud mymaas --client -f path/to/maas.yaml + + +## Details + +Update cloud information on this client and/or on a controller. + +A cloud can be updated from a file. This requires a <cloud name> and a yaml file +containing the cloud details. +This method can be used for cloud updates on the client side and on a controller. + +A cloud on the controller can also be updated just by using a name of a cloud +from this client. + +Use --controller option to update a cloud on a controller. + +Use --client to update cloud definition on this client. 
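+
+As an illustration, a cloud definition file passed with '-f' might look like
+the following (a minimal sketch; the cloud name, auth-type, and endpoint are
+hypothetical and should match your actual cloud):
+
+    clouds:
+      mymaas:
+        type: maas
+        auth-types: [oauth1]
+        endpoint: http://maas.example.com:5240/MAAS   # hypothetical endpoint
+
+It could then be applied with, for example:
+
+    juju update-cloud mymaas --client -f maas.yaml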
+ + +--- + +------------------------- + diff --git a/tmp/t/10082.md b/tmp/t/10082.md new file mode 100644 index 000000000..d988a5ee7 --- /dev/null +++ b/tmp/t/10082.md @@ -0,0 +1,41 @@ +system | 2024-09-16 15:56:17 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [add-credential](/t/10136) + +## Summary +Sets the default region for a cloud. + +## Usage +```juju default-region [options] []``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `--reset` | false | Reset default region for the cloud | + +## Examples + + juju default-region azure-china chinaeast + juju default-region azure-china + juju default-region azure-china --reset + + +## Details +The default region is specified directly as an argument. + +To unset previously set default region for a cloud, use --reset option. + +To confirm what region is currently set to be default for a cloud, +use the command without region argument. + + + +--- + +------------------------- + diff --git a/tmp/t/10083.md b/tmp/t/10083.md new file mode 100644 index 000000000..276c6444b --- /dev/null +++ b/tmp/t/10083.md @@ -0,0 +1,49 @@ +system | 2024-09-16 15:52:07 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [run](/t/10052), [operations](/t/10203), [show-task](/t/10129) + +## Summary +Show results of an operation. + +## Usage +```juju show-operation [options] ``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `--format` | yaml | Specify output format (json|yaml) | +| `-m`, `--model` | | Model to operate in. Accepts [<controller name>:]<model name>|<model UUID> | +| `-o`, `--output` | | Specify an output file | +| `--utc` | false | Show times in UTC | +| `--wait` | -1s | Wait for results | +| `--watch` | false | Wait indefinitely for results | + +## Examples + + juju show-operation 1 + juju show-operation 1 --wait=2m + juju show-operation 1 --watch + + +## Details + +Show the results returned by an operation with the given ID. +To block until the result is known completed or failed, use +the --wait option with a duration, as in --wait 5s or --wait 1h. +Use --watch to wait indefinitely. + +The default behavior without --wait or --watch is to immediately check and return; +if the results are "pending" then only the available information will be +displayed. This is also the behavior when any negative time is given. + + +--- + +------------------------- + diff --git a/tmp/t/10084.md b/tmp/t/10084.md new file mode 100644 index 000000000..0dd0fa43c --- /dev/null +++ b/tmp/t/10084.md @@ -0,0 +1,45 @@ +system | 2024-09-16 15:55:43 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [add-space](/t/10117), [spaces](/t/10236), [reload-spaces](/t/10063), [rename-space](/t/10135), [show-space](/t/10095) + +## Summary +Remove a network space. + +## Usage +```juju remove-space [options] ``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `--force` | false | remove the offer as well as any relations to the offer | +| `-m`, `--model` | | Model to operate in. 
Accepts [<controller name>:]<model name>|<model UUID> | +| `-y`, `--yes` | false | Do not prompt for confirmation | + +## Examples + +Remove a space by name: + + juju remove-space db-space + +Remove a space by name with force, without need for confirmation: + + juju remove-space db-space --force -y + + +## Details +Removes an existing Juju network space with the given name. Any subnets +associated with the space will be transferred to the default space. +The command will fail if existing constraints, bindings or controller settings are bound to the given space. + +If the --force option is specified, the space will be deleted even if there are existing bindings, constraints or settings. + +--- + +------------------------- + diff --git a/tmp/t/10085.md b/tmp/t/10085.md new file mode 100644 index 000000000..e4a38d6d7 --- /dev/null +++ b/tmp/t/10085.md @@ -0,0 +1,41 @@ +system | 2024-09-16 15:57:04 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [metrics](/t/10143) + +## Summary +Collect metrics on the given unit/application. + +## Usage +```juju collect-metrics [options] [application or unit]``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `-m`, `--model` | | Model to operate in. Accepts [<controller name>:]<model name>|<model UUID> | + +## Examples + + juju collect-metrics myapp + + juju collect-metrics myapp/0 + + +## Details + +Trigger metrics collection + +This command waits for the metric collection to finish before returning. +You may abort this command and it will continue to run asynchronously. +Results may be checked by 'juju show-task'. + + +--- + +------------------------- + diff --git a/tmp/t/10086.md b/tmp/t/10086.md new file mode 100644 index 000000000..27ba1978d --- /dev/null +++ b/tmp/t/10086.md @@ -0,0 +1,31 @@ +system | 2024-09-16 15:54:17 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [disable-command](/t/10205), [disabled-commands](/t/10220), [enable-command](/t/10111) + +## Summary +Enable destroy-controller by removing disabled commands in the controller. + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `-c`, `--controller` | | Controller to operate in | + +## Details + +Any model in the controller that has disabled commands will block a controller +from being destroyed. + +A controller administrator is able to enable all the commands across all the models +in a Juju controller so that the controller can be destoyed if desired. + + +--- + +------------------------- + diff --git a/tmp/t/10087.md b/tmp/t/10087.md new file mode 100644 index 000000000..b44a1e25c --- /dev/null +++ b/tmp/t/10087.md @@ -0,0 +1,41 @@ +system | 2024-09-16 15:55:48 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [consume](/t/10213), [offer](/t/10080) + +## Summary +Remove consumed applications (SAAS) from the model. 
+ +## Usage +```juju remove-saas [options] [...]``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `--force` | false | Completely remove a SAAS and all its dependencies | +| `-m`, `--model` | | Model to operate in. Accepts [<controller name>:]<model name>|<model UUID> | +| `--no-wait` | false | Rush through SAAS removal without waiting for each individual step to complete | + +## Examples + + juju remove-saas hosted-mysql + juju remove-saas -m test-model hosted-mariadb + + + +## Details +Removing a consumed (SAAS) application will terminate any relations that +application has, potentially leaving any related local applications +in a non-functional state. + + + +--- + +------------------------- + diff --git a/tmp/t/10088.md b/tmp/t/10088.md new file mode 100644 index 000000000..4c4e2c117 --- /dev/null +++ b/tmp/t/10088.md @@ -0,0 +1,41 @@ +system | 2024-09-16 15:54:36 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [config](/t/10139) + +## Summary +Sets the trust status of a deployed application to true. + +## Usage +```juju trust [options] ``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `-m`, `--model` | | Model to operate in. Accepts [<controller name>:]<model name>|<model UUID> | +| `--remove` | false | Remove trusted access from a trusted application | +| `--scope` | | k8s models only - needs to be set to 'cluster' | + +## Examples + + juju trust media-wiki + juju trust metallb --scope=cluster + + +## Details +Sets the trust configuration value to true. + +On k8s models, the trust operation currently grants the charm full access to the cluster. +Until the permissions model is refined to grant more granular role based access, the use of +'--scope=cluster' is required to confirm this choice. + + +--- + +------------------------- + diff --git a/tmp/t/10089.md b/tmp/t/10089.md new file mode 100644 index 000000000..6c2613006 --- /dev/null +++ b/tmp/t/10089.md @@ -0,0 +1,43 @@ +system | 2024-09-16 15:56:20 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [storage](/t/10075), [attach-storage](/t/10126) + +## Summary +Detaches storage from units. + +## Usage +```juju detach-storage [options] [ ...]``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `--force` | false | Forcefully detach storage | +| `-m`, `--model` | | Model to operate in. Accepts [<controller name>:]<model name>|<model UUID> | + +## Examples + + juju detach-storage pgdata/0 + juju detach-storage --force pgdata/0 + + + +## Details + +Detaches storage from units. Specify one or more unit/application storage IDs, +as output by "juju storage". The storage will remain in the model until it is +removed by an operator. + +Detaching storage may fail but under some circumstances, Juju user may need +to force storage detachment despite operational errors. 
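+
+For example, to force detachment of a unit's storage in a specific model,
+combining the flags listed above (a minimal sketch; the model name and storage
+ID are illustrative):
+
+    juju detach-storage -m mymodel --force pgdata/0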
+ + +--- + +------------------------- + diff --git a/tmp/t/10090.md b/tmp/t/10090.md new file mode 100644 index 000000000..9fad4405f --- /dev/null +++ b/tmp/t/10090.md @@ -0,0 +1,42 @@ +system | 2024-09-16 15:54:07 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [add-model](/t/10145) + +## Summary +Lists models a user can access on a controller. + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `--all` | false | Lists all models, regardless of user accessibility (administrative users only) | +| `-c`, `--controller` | | Controller to operate in | +| `--exact-time` | false | Use full timestamps | +| `--format` | tabular | Specify output format (json|tabular|yaml) | +| `-o`, `--output` | | Specify an output file | +| `--user` | | The user to list models for (administrative users only) | +| `--uuid` | false | Display UUID for models | + +## Examples + + juju models + juju models --user bob + + +## Details + +The models listed here are either models you have created yourself, or +models which have been shared with you. Default values for user and +controller are, respectively, the current user and the current controller. +The active model is denoted by an asterisk. + + +--- + +------------------------- + diff --git a/tmp/t/10091.md b/tmp/t/10091.md new file mode 100644 index 000000000..dbb7d88d0 --- /dev/null +++ b/tmp/t/10091.md @@ -0,0 +1,45 @@ +system | 2024-09-16 15:54:34 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + + +## Summary +Print the Juju Dashboard URL, or open the Juju Dashboard in the default browser. + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `--browser` | false | Open the web browser, instead of just printing the Juju Dashboard URL | +| `--hide-credential` | false | Do not show admin credential to use for logging into the Juju Dashboard | +| `-m`, `--model` | | Model to operate in. Accepts [<controller name>:]<model name>|<model UUID> | +| `--port` | 31666 | Local port used to serve the dashboard | + +## Examples + +Print the Juju Dashboard URL and show admin credential to use to log into it: + + juju dashboard + +Print the Juju Dashboard URL only: + + juju dashboard --hide-credential + +Open the Juju Dashboard in the default browser and show admin credential to use to log into it: + + juju dashboard --browser + +Open the Juju Dashboard in the default browser without printing the login credential: + + juju dashboard --hide-credential --browser + +An error is returned if the Juju Dashboard is not running. + + +--- + +------------------------- + diff --git a/tmp/t/10092.md b/tmp/t/10092.md new file mode 100644 index 000000000..61c6d8c23 --- /dev/null +++ b/tmp/t/10092.md @@ -0,0 +1,42 @@ +system | 2024-09-16 15:57:30 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [add-credential](/t/10136) +**Alias:** default-region + +## Summary +Sets the default region for a cloud. 
+ +## Usage +```juju set-default-region [options] []``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `--reset` | false | Reset default region for the cloud | + +## Examples + + juju default-region azure-china chinaeast + juju default-region azure-china + juju default-region azure-china --reset + + +## Details +The default region is specified directly as an argument. + +To unset previously set default region for a cloud, use --reset option. + +To confirm what region is currently set to be default for a cloud, +use the command without region argument. + + + +--- + +------------------------- + diff --git a/tmp/t/10093.md b/tmp/t/10093.md new file mode 100644 index 000000000..56e13c95c --- /dev/null +++ b/tmp/t/10093.md @@ -0,0 +1,57 @@ +system | 2024-09-16 15:55:36 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [remove-storage-pool](/t/10068), [update-storage-pool](/t/10217), [storage-pools](/t/10228) + +## Summary +Create or define a storage pool. + +## Usage +```juju create-storage-pool [options] [= [=...]]``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `-m`, `--model` | | Model to operate in. Accepts [<controller name>:]<model name>|<model UUID> | + +## Examples + + juju create-storage-pool ebsrotary ebs volume-type=standard + juju create-storage-pool gcepd storage-provisioner=kubernetes.io/gce-pd [storage-mode=RWX|RWO|ROX] parameters.type=pd-standard + + + +## Details + +Pools are a mechanism for administrators to define sources of storage that +they will use to satisfy application storage requirements. + +A single pool might be used for storage from units of many different applications - +it is a resource from which different stores may be drawn. + +A pool describes provider-specific parameters for creating storage, +such as performance (e.g. IOPS), media type (e.g. magnetic vs. SSD), +or durability. + +For many providers, there will be a shared resource +where storage can be requested (e.g. EBS in amazon). +Creating pools there maps provider specific settings +into named resources that can be used during deployment. + +Pools defined at the model level are easily reused across applications. +Pool creation requires a pool name, the provider type and attributes for +configuration as space-separated pairs, e.g. tags, size, path, etc. + +For Kubernetes models, the provider type defaults to "kubernetes" +unless otherwise specified. + + +--- + +------------------------- + diff --git a/tmp/t/10094.md b/tmp/t/10094.md new file mode 100644 index 000000000..45aa5a78c --- /dev/null +++ b/tmp/t/10094.md @@ -0,0 +1,89 @@ +system | 2024-09-16 15:57:06 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [disable-command](/t/10205), [enable-command](/t/10111) +**Alias:** disabled-commands + +## Summary +List disabled commands. + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `--all` | false | Lists for all models (administrative users only) | +| `--format` | tabular | Specify output format (json|tabular|yaml) | +| `-m`, `--model` | | Model to operate in. 
Accepts [<controller name>:]<model name>|<model UUID> | +| `-o`, `--output` | | Specify an output file | + +## Details + +List disabled commands for the model. + +Commands that can be disabled are grouped based on logical operations as follows: + +"destroy-model" prevents: + destroy-controller + destroy-model + +"remove-object" prevents: + destroy-controller + destroy-model + detach-storage + remove-application + remove-machine + remove-relation + remove-saas + remove-storage + remove-unit + +"all" prevents: + add-machine + integrate + add-unit + add-ssh-key + add-user + attach-resource + attach-storage + change-user-password + config + consume + deploy + destroy-controller + destroy-model + disable-user + enable-ha + enable-user + expose + import-filesystem + import-ssh-key + model-defaults + model-config + reload-spaces + remove-application + remove-machine + remove-relation + remove-ssh-key + remove-unit + remove-user + resolved + retry-provisioning + run + scale-application + set-application-base + set-credential + set-constraints + sync-agents + unexpose + refresh + upgrade-model + + +--- + +------------------------- + diff --git a/tmp/t/10095.md b/tmp/t/10095.md new file mode 100644 index 000000000..4480a82d7 --- /dev/null +++ b/tmp/t/10095.md @@ -0,0 +1,39 @@ +system | 2024-09-16 15:53:48 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [add-space](/t/10117), [spaces](/t/10236), [reload-spaces](/t/10063), [rename-space](/t/10135), [remove-space](/t/10084) + +## Summary +Shows information about the network space. + +## Usage +```juju show-space [options] ``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `--format` | yaml | Specify output format (json|yaml) | +| `-m`, `--model` | | Model to operate in. Accepts [<controller name>:]<model name>|<model UUID> | +| `-o`, `--output` | | Specify an output file | + +## Examples + +Show a space by name: + + juju show-space alpha + + +## Details +Displays extended information about a given space. +Output includes the space subnets, applications with bindings to the space, +and a count of machines connected to the space. + +--- + +------------------------- + diff --git a/tmp/t/10096.md b/tmp/t/10096.md new file mode 100644 index 000000000..979cc90dd --- /dev/null +++ b/tmp/t/10096.md @@ -0,0 +1,376 @@ +system | 2024-09-16 15:57:59 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [models](/t/10090), [model-defaults](/t/10057), [show-cloud](/t/10215), [controller-config](/t/10237) + +## Summary +Displays or sets configuration values on a model. + +## Usage +```juju model-config [options] [[=] ...]``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `--color` | false | Use ANSI color codes in output | +| `--file` | | path to yaml-formatted configuration file | +| `--format` | tabular | Specify output format (json|tabular|yaml) | +| `--ignore-read-only-fields` | false | Ignore read only fields that might cause errors to be emitted while processing yaml documents | +| `-m`, `--model` | | Model to operate in. 
Accepts [<controller name>:]<model name>|<model UUID> | +| `--no-color` | false | Disable ANSI color codes in tabular output | +| `-o`, `--output` | | Specify an output file | +| `--reset` | | Reset the provided comma delimited keys | + +## Examples + +Print the value of default-base: + + juju model-config default-base + +Print the model config of model mycontroller:mymodel: + + juju model-config -m mycontroller:mymodel + +Set the value of ftp-proxy to 10.0.0.1:8000: + + juju model-config ftp-proxy=10.0.0.1:8000 + +Set the model config to key=value pairs defined in a file: + + juju model-config --file path/to/file.yaml + +Set model config values of a specific model: + + juju model-config -m othercontroller:mymodel default-base=ubuntu@22.04 test-mode=false + +Reset the values of the provided keys to model defaults: + + juju model-config --reset default-base,test-mode + + +## Details + +To view all configuration values for the current model, run + juju model-config +You can target a specific model using the -m flag: + juju model-config -m + juju model-config -m : +By default, the config will be printed in a tabular format. You can instead +print it in json or yaml format using the --format flag: + juju model-config --format json + juju model-config --format yaml + +To view the value of a single config key, run + juju model-config key +To set config values, run + juju model-config key1=val1 key2=val2 ... +You can also reset config keys to their default values: + juju model-config --reset key1 + juju model-config --reset key1,key2,key3 +You may simultaneously set some keys and reset others: + juju model-config key1=val1 key2=val2 --reset key3,key4 + +Config values can be imported from a yaml file using the --file flag: + juju model-config --file=path/to/cfg.yaml +This allows you to e.g. save a model's config to a file: + juju model-config --format=yaml > cfg.yaml +and then import the config later. Note that the output of model-config +may include read-only values, which will cause an error when importing later. +To prevent the error, use the --ignore-read-only-fields flag: + juju model-config --file=cfg.yaml --ignore-read-only-fields + +You can also read from stdin using "-", which allows you to pipe config values +from one model to another: + juju model-config -c c1 --format=yaml \ + | juju model-config -c c2 --file=- --ignore-read-only-fields +You can simultaneously read config from a yaml file and set config keys +as above. The command-line args will override any values specified in the file. + +The default-series key is deprecated in favour of default-base +e.g. default-base=ubuntu@22.04. + +The following keys are available: + + agent-metadata-url: + type: string + description: URL of private stream + agent-stream: + type: string + description: Version of Juju to use for deploy/upgrades. 
+ apt-ftp-proxy: + type: string + description: The APT FTP proxy for the model + apt-http-proxy: + type: string + description: The APT HTTP proxy for the model + apt-https-proxy: + type: string + description: The APT HTTPS proxy for the model + apt-mirror: + type: string + description: The APT mirror for the model + apt-no-proxy: + type: string + description: List of domain addresses not to be proxied for APT (comma-separated) + authorized-keys: + type: string + description: Any authorized SSH public keys for the model, as found in a ~/.ssh/authorized_keys + file + automatically-retry-hooks: + type: bool + description: Determines whether the uniter should automatically retry failed hooks + backup-dir: + type: string + description: Directory used to store the backup working directory + charmhub-url: + type: string + description: The url for CharmHub API calls + cloudinit-userdata: + type: string + description: Cloud-init user-data (in yaml format) to be added to userdata for new + machines created in this model + container-image-metadata-defaults-disabled: + type: bool + description: Whether default simplestreams sources are used for image metadata with + containers. + container-image-metadata-url: + type: string + description: The URL at which the metadata used to locate container OS image ids + is located + container-image-stream: + type: string + description: The simplestreams stream used to identify which image ids to search + when starting a container. + container-inherit-properties: + type: string + description: List of properties to be copied from the host machine to new containers + created in this model (comma-separated) + container-networking-method: + type: string + description: Method of container networking setup - one of fan, provider, local + default-base: + type: string + description: The default base image to use for deploying charms, will act like --base + when deploying charms + default-space: + type: string + description: The default network space used for application endpoints in this model + development: + type: bool + description: Whether the model is in development mode + disable-network-management: + type: bool + description: Whether the provider should control networks (on MAAS models, set to + true for MAAS to control networks + disable-telemetry: + type: bool + description: Disable telemetry reporting of model information + egress-subnets: + type: string + description: Source address(es) for traffic originating from this model + enable-os-refresh-update: + type: bool + description: Whether newly provisioned instances should run their respective OS's + update capability. + enable-os-upgrade: + type: bool + description: Whether newly provisioned instances should run their respective OS's + upgrade capability. + extra-info: + type: string + description: Arbitrary user specified string data that is stored against the model. + fan-config: + type: string + description: Configuration for fan networking for this model + firewall-mode: + type: string + description: |- + The mode to use for network firewalling. + + 'instance' requests the use of an individual firewall per instance. + + 'global' uses a single firewall for all instances (access + for a network port is enabled to one instance if any instance requires + that port). + + 'none' requests that no firewalling should be performed + inside the model. It's useful for clouds without support for either + global or per instance security groups. 
+ ftp-proxy: + type: string + description: The FTP proxy value to configure on instances, in the FTP_PROXY environment + variable + http-proxy: + type: string + description: The HTTP proxy value to configure on instances, in the HTTP_PROXY environment + variable + https-proxy: + type: string + description: The HTTPS proxy value to configure on instances, in the HTTPS_PROXY + environment variable + ignore-machine-addresses: + type: bool + description: Whether the machine worker should discover machine addresses on startup + image-metadata-defaults-disabled: + type: bool + description: Whether default simplestreams sources are used for image metadata. + image-metadata-url: + type: string + description: The URL at which the metadata used to locate OS image ids is located + image-stream: + type: string + description: The simplestreams stream used to identify which image ids to search + when starting an instance. + juju-ftp-proxy: + type: string + description: The FTP proxy value to pass to charms in the JUJU_CHARM_FTP_PROXY environment + variable + juju-http-proxy: + type: string + description: The HTTP proxy value to pass to charms in the JUJU_CHARM_HTTP_PROXY + environment variable + juju-https-proxy: + type: string + description: The HTTPS proxy value to pass to charms in the JUJU_CHARM_HTTPS_PROXY + environment variable + juju-no-proxy: + type: string + description: List of domain addresses not to be proxied (comma-separated), may contain + CIDRs. Passed to charms in the JUJU_CHARM_NO_PROXY environment variable + logforward-enabled: + type: bool + description: Whether syslog forwarding is enabled. + logging-config: + type: string + description: The configuration string to use when configuring Juju agent logging + (see http://godoc.org/github.com/juju/loggo#ParseConfigurationString for details) + logging-output: + type: string + description: 'The logging output destination: database and/or syslog. (default "")' + lxd-snap-channel: + type: string + description: The channel to use when installing LXD from a snap (cosmic and later) + max-action-results-age: + type: string + description: The maximum age for action entries before they are pruned, in human-readable + time format + max-action-results-size: + type: string + description: The maximum size for the action collection, in human-readable memory + format + max-status-history-age: + type: string + description: The maximum age for status history entries before they are pruned, + in human-readable time format + max-status-history-size: + type: string + description: The maximum size for the status history collection, in human-readable + memory format + mode: + type: string + description: |- + Mode is a comma-separated list which sets the + mode the model should run in. So far only one is implemented + - If 'requires-prompts' is present, clients will ask for confirmation before removing + potentially valuable resources. 
+ (default "") + net-bond-reconfigure-delay: + type: int + description: The amount of time in seconds to sleep between ifdown and ifup when + bridging + no-proxy: + type: string + description: List of domain addresses not to be proxied (comma-separated) + num-container-provision-workers: + type: int + description: The number of container provisioning workers to use per machine + num-provision-workers: + type: int + description: The number of provisioning workers to use per model + provisioner-harvest-mode: + type: string + description: What to do with unknown machines (default destroyed) + proxy-ssh: + type: bool + description: Whether SSH commands should be proxied through the API server + resource-tags: + type: attrs + description: resource tags + saas-ingress-allow: + type: string + description: |- + Application-offer ingress allowlist is a comma-separated list of + CIDRs specifying what ingress can be applied to offers in this model. + secret-backend: + type: string + description: The name of the secret store backend. (default "auto") + snap-http-proxy: + type: string + description: The HTTP proxy value for installing snaps + snap-https-proxy: + type: string + description: The HTTPS proxy value for installing snaps + snap-store-assertions: + type: string + description: The assertions for the defined snap store proxy + snap-store-proxy: + type: string + description: The snap store proxy for installing snaps + snap-store-proxy-url: + type: string + description: The URL for the defined snap store proxy + ssh-allow: + type: string + description: |- + SSH allowlist is a comma-separated list of CIDRs from + which machines in this model will accept connections to the SSH service. + Currently only the aws & openstack providers support ssh-allow + ssl-hostname-verification: + type: bool + description: Whether SSL hostname verification is enabled (default true) + storage-default-block-source: + type: string + description: The default block storage source for the model + storage-default-filesystem-source: + type: string + description: The default filesystem storage source for the model + syslog-ca-cert: + type: string + description: The certificate of the CA that signed the syslog server certificate, + in PEM format. + syslog-client-cert: + type: string + description: The syslog client certificate in PEM format. + syslog-client-key: + type: string + description: The syslog client key in PEM format. + syslog-host: + type: string + description: The hostname:port of the syslog server. + test-mode: + type: bool + description: |- + Whether the model is intended for testing. + If true, accessing the charm store does not affect statistical + data of the store. (default false) + transmit-vendor-metrics: + type: bool + description: Determines whether metrics declared by charms deployed into this model + are sent for anonymized aggregate analytics + update-status-hook-interval: + type: string + description: How often to run the charm update-status hook, in human-readable time + format (default 5m, range 1-60m) + + + + +--- + +------------------------- + diff --git a/tmp/t/10097.md b/tmp/t/10097.md new file mode 100644 index 000000000..1f1fab63a --- /dev/null +++ b/tmp/t/10097.md @@ -0,0 +1,45 @@ +system | 2024-09-16 15:52:06 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [show-offer](/t/10168) + +## Summary +Find offered application endpoints. 
+ +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `--format` | tabular | Specify output format (json|tabular|yaml) | +| `--interface` | | return results matching the interface name | +| `-m`, `--model` | | Model to operate in. Accepts [<controller name>:]<model name>|<model UUID> | +| `-o`, `--output` | | Specify an output file | +| `--offer` | | return results matching the offer name | +| `--url` | | return results matching the offer URL | + +## Examples + + juju find-offers + juju find-offers mycontroller: + juju find-offers fred/prod + juju find-offers --interface mysql + juju find-offers --url fred/prod.db2 + juju find-offers --offer db2 + + + +## Details + +Find which offered application endpoints are available to the current user. + +This command is aimed for a user who wants to discover what endpoints are available to them. + + +--- + +------------------------- + diff --git a/tmp/t/10098.md b/tmp/t/10098.md new file mode 100644 index 000000000..4efa3b0e6 --- /dev/null +++ b/tmp/t/10098.md @@ -0,0 +1,44 @@ +system | 2024-09-16 15:51:41 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [add-k8s](/t/10049) + +## Summary +Removes a k8s cloud from Juju. + +## Usage +```juju remove-k8s [options] ``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `-c`, `--controller` | | Controller to operate in | +| `--client` | false | Client operation | + +## Examples + + juju remove-k8s myk8scloud + juju remove-k8s myk8scloud --client + juju remove-k8s --controller mycontroller myk8scloud + + +## Details + +Removes the specified k8s cloud from this client. + +If --controller is used, also removes the cloud +from the specified controller (if it is not in use). + +Use --client option to update your current client. + + + +--- + +------------------------- + diff --git a/tmp/t/10099.md b/tmp/t/10099.md new file mode 100644 index 000000000..c543f42ae --- /dev/null +++ b/tmp/t/10099.md @@ -0,0 +1,50 @@ +system | 2024-09-16 15:53:10 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [resources](/t/10218), [attach-resource](/t/10124) + +## Summary +Display the resources for a charm in a repository. + +## Usage +```juju charm-resources [options] ``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `--channel` | stable | the channel of the charm | +| `--format` | tabular | Specify output format (json|tabular|yaml) | +| `-m`, `--model` | | Model to operate in. Accepts [<controller name>:]<model name>|<model UUID> | +| `-o`, `--output` | | Specify an output file | + +## Examples + +Display charm resources for the postgresql charm: + + juju charm-resources postgresql + +Display charm resources for mycharm in the 2.0/edge channel: + + juju charm-resources mycharm --channel 2.0/edge + + + +## Details + +This command will report the resources and the current revision of each +resource for a charm in a repository. + +Channel can be specified with --channel. If not provided, stable is used. + +Where a channel is not supplied, stable is used. 
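+
+For example, to report the resources published on a non-default channel and
+print them as YAML (a minimal sketch; the charm and channel names are
+illustrative):
+
+    juju charm-resources mycharm --channel 2.0/edge --format yaml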
+ + +--- + +------------------------- + diff --git a/tmp/t/10100.md b/tmp/t/10100.md new file mode 100644 index 000000000..ba4872166 --- /dev/null +++ b/tmp/t/10100.md @@ -0,0 +1,45 @@ +system | 2024-09-16 15:57:02 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [add-space](/t/10117), [reload-spaces](/t/10063) +**Alias:** spaces + +## Summary +List known spaces, including associated subnets. + +## Usage +```juju list-spaces [options] [--short] [--format yaml|json] [--output ]``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `--format` | tabular | Specify output format (json|tabular|yaml) | +| `-m`, `--model` | | Model to operate in. Accepts [<controller name>:]<model name>|<model UUID> | +| `-o`, `--output` | | Specify an output file | +| `--short` | false | only display spaces. | + +## Examples + +List spaces and their subnets: + + juju spaces + +List spaces: + + juju spaces --short + + +## Details +Displays all defined spaces. By default both spaces and their subnets are displayed. +Supplying the --short option will list just the space names. +The --output argument allows the command's output to be redirected to a file. + +--- + +------------------------- + diff --git a/tmp/t/10101.md b/tmp/t/10101.md new file mode 100644 index 000000000..254867c6f --- /dev/null +++ b/tmp/t/10101.md @@ -0,0 +1,57 @@ +system | 2024-09-16 15:53:26 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + + +## Summary +Generate the documentation for all commands + +## Usage +```juju documentation [options] --out --no-index --split --url --discourse-ids ``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `--discourse-ids` | | File containing a mapping of commands and their discourse ids | +| `--no-index` | false | Do not generate the commands index | +| `--out` | | Documentation output folder if not set the result is displayed using the standard output | +| `--split` | false | Generate a separate Markdown file for each command | +| `--url` | | Documentation host URL | + +## Examples + + juju documentation + juju documentation --split + juju documentation --split --no-index --out /tmp/docs + +To render markdown documentation using a list of existing +commands, you can use a file with the following syntax + + command1: id1 + command2: id2 + commandN: idN + +For example: + + add-cloud: 1183 + add-secret: 1284 + remove-cloud: 4344 + +Then, the urls will be populated using the ids indicated +in the file above. + + juju documentation --split --no-index --out /tmp/docs --discourse-ids /tmp/docs/myids + + +## Details + +This command generates a markdown formatted document with all the commands, their descriptions, arguments, and examples. + + +--- + +------------------------- + diff --git a/tmp/t/10102.md b/tmp/t/10102.md new file mode 100644 index 000000000..989571d22 --- /dev/null +++ b/tmp/t/10102.md @@ -0,0 +1,47 @@ +system | 2024-09-16 15:56:34 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [controllers](/t/10152), [models](/t/10090), [show-controller](/t/10156) + +## Summary +Selects or identifies the current controller and model. 
+ +## Usage +```juju switch [options] [||:|:|:]``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | + +## Examples + + juju switch + juju switch mymodel + juju switch mycontroller + juju switch mycontroller:mymodel + juju switch mycontroller: + juju switch :mymodel + + +## Details +When used without an argument, the command shows the current controller +and its active model. +When a single argument without a colon is provided juju first looks for a +controller by that name and switches to it, and if it's not found it tries +to switch to a model within current controller. mycontroller: switches to +default model in mycontroller, :mymodel switches to mymodel in current +controller and mycontroller:mymodel switches to mymodel on mycontroller. +The `juju models` command can be used to determine the active model +(of any controller). An asterisk denotes it. + + + +--- + +------------------------- + diff --git a/tmp/t/10103.md b/tmp/t/10103.md new file mode 100644 index 000000000..d4d8a300e --- /dev/null +++ b/tmp/t/10103.md @@ -0,0 +1,48 @@ +system | 2024-09-16 15:52:36 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [find](/t/10187), [download](/t/10134) + +## Summary +Displays detailed information about CharmHub charms. + +## Usage +```juju info [options] [options] ``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `--arch` | all | specify an arch <all|amd64|arm64|ppc64el|riscv64|s390x> | +| `--base` | | specify a base | +| `--channel` | | specify a channel to use instead of the default release | +| `--charmhub-url` | https://api.charmhub.io | specify the Charmhub URL for querying the store | +| `--config` | false | display config for this charm | +| `--format` | tabular | Specify output format (json|tabular|yaml) | +| `-o`, `--output` | | Specify an output file | +| `--series` | all | specify a series. DEPRECATED use --base | +| `--unicode` | auto | display output using unicode <auto|never|always> | + +## Examples + + juju info postgresql + + +## Details + +The charm can be specified by name or by path. + +Channels displayed are supported by any base. +To see channels supported for only a specific base, use the --base flag. +--base can be specified using the OS name and the version of the OS, +separated by @. For example, --base ubuntu@22.04. + + + +--- + +------------------------- + diff --git a/tmp/t/10104.md b/tmp/t/10104.md new file mode 100644 index 000000000..fd8321748 --- /dev/null +++ b/tmp/t/10104.md @@ -0,0 +1,46 @@ +system | 2024-09-16 15:53:46 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [grant-cloud](/t/10164) + +## Summary +Revokes access from a Juju user for a cloud. 
+ +## Usage +```juju revoke-cloud [options] ...``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `-c`, `--controller` | | Controller to operate in | + +## Examples + +Revoke 'add-model' (and 'admin') access from user 'joe' for cloud 'fluffy': + + juju revoke-cloud joe add-model fluffy + +Revoke 'admin' access from user 'sam' for clouds 'fluffy' and 'rainy': + + juju revoke-cloud sam admin fluffy rainy + + + +## Details +Revoking admin access, from a user who has that permission, will leave +that user with add-model access. Revoking add-model access, however, also revokes +admin access. + +Valid access levels are: + admin + add-model + +--- + +------------------------- + diff --git a/tmp/t/10105.md b/tmp/t/10105.md new file mode 100644 index 000000000..f9257092a --- /dev/null +++ b/tmp/t/10105.md @@ -0,0 +1,54 @@ +system | 2024-09-16 15:58:02 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [credentials](/t/10054), [add-credential](/t/10136), [update-credential](/t/10065), [remove-credential](/t/10201), [autoload-credentials](/t/10230) + +## Summary +Shows credential information stored either on this client or on a controller. + +## Usage +```juju show-credential [options] [ ]``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `-c`, `--controller` | | Controller to operate in | +| `--client` | false | Client operation | +| `--format` | yaml | Specify output format (yaml) | +| `-o`, `--output` | | Specify an output file | +| `--show-secrets` | false | Display credential secret attributes | + +## Examples + + juju show-credential google my-admin-credential + juju show-credentials + juju show-credentials --controller mycontroller --client + juju show-credentials --controller mycontroller + juju show-credentials --client + juju show-credentials --show-secrets + + +## Details + +This command displays information about cloud credential(s) stored +either on this client or on a controller for this user. + +To see the contents of a specific credential, supply its cloud and name. +To see all credentials stored for you, supply no arguments. + +To see secrets, content attributes marked as hidden, use --show-secrets option. + +To see credentials from this client, use "--client" option. + +To see credentials from a controller, use "--controller" option. + + +--- + +------------------------- + diff --git a/tmp/t/10106.md b/tmp/t/10106.md new file mode 100644 index 000000000..3f1cd9ec1 --- /dev/null +++ b/tmp/t/10106.md @@ -0,0 +1,45 @@ +system | 2024-09-16 15:52:52 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [upgrade-controller](/t/10058) + +## Summary +Copy agent binaries from the official agent store into a local controller. + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `--agent-version` | | Copy a specific major[.minor] version | +| `--dry-run` | false | Don't copy, just print what would be copied | +| `--local-dir` | | Local destination directory | +| `-m`, `--model` | | Model to operate in. 
Accepts [<controller name>:]<model name>|<model UUID> | +| `--public` | false | Tools are for a public cloud, so generate mirrors information | +| `--source` | | Local source directory | +| `--stream` | | Simplestreams stream for which to sync metadata | + +## Examples + + juju sync-agent-binary --debug --agent-version 2.0 + juju sync-agent-binary --debug --agent-version 2.0 --local-dir=/home/ubuntu/sync-agent-binary + + +## Details + +This copies the Juju agent software from the official agent binaries store +(located at https://streams.canonical.com/juju) into the controller. +It is generally done when the controller is without Internet access. + +Instead of the above site, a local directory can be specified as source. +The online store will, of course, need to be contacted at some point to get +the software. + + +--- + +------------------------- + diff --git a/tmp/t/10107.md b/tmp/t/10107.md new file mode 100644 index 000000000..d17959204 --- /dev/null +++ b/tmp/t/10107.md @@ -0,0 +1,43 @@ +system | 2024-09-16 15:53:44 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [add-model](/t/10145) +**Alias:** models + +## Summary +Lists models a user can access on a controller. + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `--all` | false | Lists all models, regardless of user accessibility (administrative users only) | +| `-c`, `--controller` | | Controller to operate in | +| `--exact-time` | false | Use full timestamps | +| `--format` | tabular | Specify output format (json|tabular|yaml) | +| `-o`, `--output` | | Specify an output file | +| `--user` | | The user to list models for (administrative users only) | +| `--uuid` | false | Display UUID for models | + +## Examples + + juju models + juju models --user bob + + +## Details + +The models listed here are either models you have created yourself, or +models which have been shared with you. Default values for user and +controller are, respectively, the current user and the current controller. +The active model is denoted by an asterisk. + + +--- + +------------------------- + diff --git a/tmp/t/10108.md b/tmp/t/10108.md new file mode 100644 index 000000000..3845b4948 --- /dev/null +++ b/tmp/t/10108.md @@ -0,0 +1,36 @@ +system | 2024-09-16 15:56:38 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [add-secret](/t/11144), [remove-secret](/t/11414), [show-secret](/t/10172), [update-secret](/t/11413) +**Alias:** secrets + +## Summary +Lists secrets available in the model. + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `--format` | tabular | Specify output format (json|tabular|yaml) | +| `-m`, `--model` | | Model to operate in. Accepts [<controller name>:]<model name>|<model UUID> | +| `-o`, `--output` | | Specify an output file | +| `--owner` | | Include secrets for the specified owner | + +## Examples + + juju secrets + juju secrets --format yaml + + +## Details + +Displays the secrets available for charms to use if granted access. 
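+
+As a further illustration (the application name here is hypothetical), the
+--owner and --format options listed above can be combined to show only the
+secrets owned by a single application:
+
+    juju secrets --owner mysql --format yaml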
+ + +--- + +------------------------- + diff --git a/tmp/t/10109.md b/tmp/t/10109.md new file mode 100644 index 000000000..bfb1b6ef1 --- /dev/null +++ b/tmp/t/10109.md @@ -0,0 +1,92 @@ +system | 2024-09-16 15:52:27 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [unexpose](/t/10221) + +## Summary +Makes an application publicly available over the network. + +## Usage +```juju expose [options] ``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `--endpoints` | | Expose only the ports that charms have opened for this comma-delimited list of endpoints | +| `-m`, `--model` | | Model to operate in. Accepts [<controller name>:]<model name>|<model UUID> | +| `--to-cidrs` | | A comma-delimited list of CIDRs that should be able to access the application ports once exposed | +| `--to-spaces` | | A comma-delimited list of spaces that should be able to access the application ports once exposed | + +## Examples + +To expose an application: + + juju expose apache2 + +To expose an application to one or multiple spaces: + + juju expose apache2 --to-spaces public + +To expose an application to one or multiple endpoints: + + juju expose apache2 --endpoints logs + +To expose an application to one or multiple CIDRs: + + juju expose apache2 --to-cidrs 10.0.0.0/24 + + +## Details +Adjusts the firewall rules and any relevant security mechanisms of the +cloud to allow public access to the application. + +If no additional options are specified, the command will, by default, allow +access from 0.0.0.0/0 to all ports opened by the application. For example, to +expose all ports opened by apache2, you can run: + +juju expose apache2 + +The --endpoints option may be used to restrict the effect of this command to +the list of ports opened for a comma-delimited list of endpoints. For instance, +to only expose the ports opened by apache2 for the "www" endpoint, you can run: + +juju expose apache2 --endpoints www + +To make the selected set of ports accessible by specific CIDRs, the --to-cidrs +option may be used with a comma-delimited list of CIDR values. For example: + +juju expose apache2 --to-cidrs 10.0.0.0/24,192.168.1.0/24 + +To make the selected set of ports accessible by specific spaces, the --to-spaces +option may be used with a comma-delimited list of space names. For example: + +juju expose apache2 --to-spaces public + +All of the above options can be combined together. In addition, multiple "juju +expose" invocations can be used to specify granular expose rules for different +endpoints. For example, to allow access to all opened apache ports from +0.0.0.0/0 but restrict access to any port opened for the "logs" endpoint to +CIDR 10.0.0.0/24 you can run: + +juju expose apache2 +juju expose apache2 --endpoints logs --to-cidrs 10.0.0.0/24 + +Each "juju expose" invocation always overwrites any previous expose rule for +the same endpoint name. For example, running the following commands instruct +juju to only allow access to ports opened for the "logs" endpoint from CIDR +192.168.0.0/24. 
+ +juju expose apache2 --endpoints logs --to-cidrs 10.0.0.0/24 +juju expose apache2 --endpoints logs --to-cidrs 192.168.0.0/24 + + + +--- + +------------------------- + diff --git a/tmp/t/10110.md b/tmp/t/10110.md new file mode 100644 index 000000000..faa9b1e3a --- /dev/null +++ b/tmp/t/10110.md @@ -0,0 +1,64 @@ +system | 2024-09-16 15:54:44 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [integrate](/t/10207), [remove-application](/t/10067) + +## Summary +Removes an existing relation between two applications. + +## Usage +```juju remove-relation [options] [:] [:] | ``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `--force` | false | Force remove a relation | +| `-m`, `--model` | | Model to operate in. Accepts [<controller name>:]<model name>|<model UUID> | + +## Examples + + juju remove-relation mysql wordpress + juju remove-relation 4 + juju remove-relation 4 --force + +In the case of multiple relations, the relation name should be specified +at least once - the following examples will all have the same effect: + + juju remove-relation mediawiki:db mariadb:db + juju remove-relation mediawiki mariadb:db + juju remove-relation mediawiki:db mariadb + + +## Details +An existing relation between the two specified applications will be removed. +This should not result in either of the applications entering an error state, +but may result in either or both of the applications being unable to continue +normal operation. In the case that there is more than one relation between +two applications it is necessary to specify which is to be removed (see +examples). Relations will automatically be removed when using the`juju +remove-application` command. + +The relation is specified using the relation endpoint names, eg + mysql wordpress, or + mediawiki:db mariadb:db + +It is also possible to specify the relation ID, if known. This is useful to +terminate a relation originating from a different model, where only the ID is known. + +Sometimes, the removal of the relation may fail as Juju encounters errors +and failures that need to be dealt with before a relation can be removed. +However, at times, there is a need to remove a relation ignoring +all operational errors. In these rare cases, use --force option but note +that --force will remove a relation without giving it the opportunity to be removed cleanly. + + + +--- + +------------------------- + diff --git a/tmp/t/10111.md b/tmp/t/10111.md new file mode 100644 index 000000000..df222e203 --- /dev/null +++ b/tmp/t/10111.md @@ -0,0 +1,110 @@ +system | 2024-09-16 15:56:51 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [disable-command](/t/10205), [disabled-commands](/t/10220) + +## Summary +Enable commands that had been previously disabled. + +## Usage +```juju enable-command [options] ``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `-m`, `--model` | | Model to operate in. 
Accepts [<controller name>:]<model name>|<model UUID> | + +## Examples + +To allow the model to be destroyed: + + juju enable-command destroy-model + +To allow the machines, applications, units and relations to be removed: + + juju enable-command remove-object + +To allow changes to the model: + + juju enable-command all + + +## Details + +Juju allows to safeguard deployed models from unintentional damage by preventing +execution of operations that could alter model. + +This is done by disabling certain sets of commands from successful execution. +Disabled commands must be manually enabled to proceed. + +Some commands offer a --force option that can be used to bypass a block. + +Commands that can be disabled are grouped based on logical operations as follows: + +"destroy-model" prevents: + destroy-controller + destroy-model + +"remove-object" prevents: + destroy-controller + destroy-model + detach-storage + remove-application + remove-machine + remove-relation + remove-saas + remove-storage + remove-unit + +"all" prevents: + add-machine + integrate + add-unit + add-ssh-key + add-user + attach-resource + attach-storage + change-user-password + config + consume + deploy + destroy-controller + destroy-model + disable-user + enable-ha + enable-user + expose + import-filesystem + import-ssh-key + model-defaults + model-config + reload-spaces + remove-application + remove-machine + remove-relation + remove-ssh-key + remove-unit + remove-user + resolved + retry-provisioning + run + scale-application + set-application-base + set-credential + set-constraints + sync-agents + unexpose + refresh + upgrade-model + + + +--- + +------------------------- + diff --git a/tmp/t/10112.md b/tmp/t/10112.md new file mode 100644 index 000000000..dbc8e14b2 --- /dev/null +++ b/tmp/t/10112.md @@ -0,0 +1,45 @@ +system | 2024-09-16 15:56:32 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [add-cloud](/t/10162), [clouds](/t/10182), [show-cloud](/t/10215), [update-cloud](/t/10081), [update-public-clouds](/t/10115) + +## Summary +Lists regions for a given cloud. + +## Usage +```juju regions [options] ``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `-c`, `--controller` | | Controller to operate in | +| `--client` | false | Client operation | +| `--format` | tabular | Specify output format (json|tabular|yaml) | +| `-o`, `--output` | | Specify an output file | + +## Examples + + juju regions aws + juju regions aws --controller mycontroller + juju regions aws --client + juju regions aws --client --controller mycontroller + + +## Details + +List regions for a given cloud. + +Use --controller option to list regions from the cloud from a controller. + +Use --client option to list regions known locally on this client. + + +--- + +------------------------- + diff --git a/tmp/t/10113.md b/tmp/t/10113.md new file mode 100644 index 000000000..90d3c9ce3 --- /dev/null +++ b/tmp/t/10113.md @@ -0,0 +1,87 @@ +system | 2024-09-16 15:54:53 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [kill-controller](/t/10233), [unregister](/t/10165) + +## Summary +Destroys a controller. 
+ +## Usage +```juju destroy-controller [options] ``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `--destroy-all-models` | false | Destroy all models in the controller | +| `--destroy-storage` | false | Destroy all storage instances managed by the controller | +| `--force` | false | Force destroy models ignoring any errors | +| `--model-timeout` | -1s | Timeout for each step of force model destruction | +| `--no-prompt` | false | Do not ask for confirmation | +| `--no-wait` | false | Rush through model destruction without waiting for each individual step to complete | +| `--release-storage` | false | Release all storage instances from management of the controller, without destroying them | + +## Examples + +Destroy the controller and all models. If there is +persistent storage remaining in any of the models, then +this will prompt you to choose to either destroy or release +the storage. + + juju destroy-controller --destroy-all-models mycontroller + +Destroy the controller and all models, destroying +any remaining persistent storage. + + juju destroy-controller --destroy-all-models --destroy-storage + +Destroy the controller and all models, releasing +any remaining persistent storage from Juju's control. + + juju destroy-controller --destroy-all-models --release-storage + +Destroy the controller and all models, continuing +even if there are operational errors. + + juju destroy-controller --destroy-all-models --force + juju destroy-controller --destroy-all-models --force --no-wait + + +## Details +All workload models running on the controller will first +need to be destroyed, either in advance, or by +specifying `--destroy-all-models`. + +If there is persistent storage in any of the models managed by the +controller, then you must choose to either destroy or release the +storage, using `--destroy-storage` or `--release-storage` respectively. + +Sometimes, the destruction of a model may fail as Juju encounters errors +that need to be dealt with before that model can be destroyed. +However, at times, there is a need to destroy a controller ignoring +such model errors. In these rare cases, use --force option but note +that --force will also remove all units of any hosted applications, their subordinates +and, potentially, machines without given them the opportunity to shutdown cleanly. + +Model destruction is a multi-step process. Under normal circumstances, Juju will not +proceed to the next step until the current step has finished. +However, when using --force, users can also specify --no-wait to progress through steps +without delay waiting for each step to complete. + +WARNING: Passing --force with --model-timeout will continue the final destruction without +consideration or respect for clean shutdown or resource cleanup. If model-timeout +elapses with --force, you may have resources left behind that will require +manual cleanup. If --force --model-timeout 0 is passed, the models are brutally +removed with haste. It is recommended to use graceful destroy (without --force, --no-wait or +--model-timeout). + + + +--- + +------------------------- + diff --git a/tmp/t/10114.md b/tmp/t/10114.md new file mode 100644 index 000000000..b03aa7d6c --- /dev/null +++ b/tmp/t/10114.md @@ -0,0 +1,42 @@ +system | 2024-09-16 15:51:29 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. 
+[/note] + +> See also: [set-firewall-rule](/t/10151) +**Alias:** firewall-rules + +## Summary +Prints the firewall rules. + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `--format` | tabular | Specify output format (json|tabular|yaml) | +| `-m`, `--model` | | Model to operate in. Accepts [<controller name>:]<model name>|<model UUID> | +| `-o`, `--output` | | Specify an output file | + +## Examples + + juju firewall-rules + + + +## Details + +Lists the firewall rules which control ingress to well known services +within a Juju model. + +DEPRECATION WARNING: Firewall rules have been moved to model-config settings "ssh-allow" and +"saas-ingress-allow". This command is deprecated in favour of +reading/writing directly to these settings. + + + + +--- + +------------------------- + diff --git a/tmp/t/10115.md b/tmp/t/10115.md new file mode 100644 index 000000000..978debdbd --- /dev/null +++ b/tmp/t/10115.md @@ -0,0 +1,42 @@ +system | 2024-09-16 15:52:11 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [clouds](/t/10182) + +## Summary +Updates public cloud information available to Juju. + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `-c`, `--controller` | | Controller to operate in | +| `--client` | false | Client operation | + +## Examples + + juju update-public-clouds + juju update-public-clouds --client + juju update-public-clouds --controller mycontroller + + +## Details + +If any new information for public clouds (such as regions and connection +endpoints) are available this command will update Juju accordingly. It is +suggested to run this command periodically. + +Use --controller option to update public cloud(s) on a controller. The command +will only update the clouds that a controller knows about. + +Use --client to update a definition of public cloud(s) on this client. + + +--- + +------------------------- + diff --git a/tmp/t/10116.md b/tmp/t/10116.md new file mode 100644 index 000000000..98cef5f43 --- /dev/null +++ b/tmp/t/10116.md @@ -0,0 +1,115 @@ +system | 2024-09-16 15:53:01 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [status](/t/10173), [ssh](/t/10153) + +## Summary +Displays log messages for a model. + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `--color` | false | Force use of ANSI color codes | +| `--date` | false | Show dates as well as times | +| `--exclude-label` | | Do not show log messages for these logging labels | +| `--exclude-module` | | Do not show log messages for these logging modules | +| `-i`, `--include` | | Only show log messages for these entities | +| `--include-label` | | Only show log messages for these logging labels | +| `--include-module` | | Only show log messages for these logging modules | +| `-l`, `--level` | | Log level to show, one of [TRACE, DEBUG, INFO, WARNING, ERROR] | +| `--limit` | 0 | Exit once this many of the most recent (possibly filtered) lines are shown | +| `--location` | false | Show filename and line numbers | +| `-m`, `--model` | | Model to operate in. 
Accepts [<controller name>:]<model name>|<model UUID> |
+| `--ms` | false | Show times to millisecond precision |
+| `-n`, `--lines` | 10 | Show this many of the most recent (possibly filtered) lines, and continue to append |
+| `--no-tail` | false | Stop after returning existing log messages |
+| `--replay` | false | Show the entire (possibly filtered) log and continue to append |
+| `--retry` | false | Retry connection on failure |
+| `--retry-delay` | 1s | Retry delay between connection failure retries |
+| `--tail` | false | Wait for new logs |
+| `--utc` | false | Show times in UTC |
+| `-x`, `--exclude` | | Do not show log messages for these entities |
+
+## Examples
+
+Exclude all machine 0 messages; show a maximum of 100 lines; and continue to
+append filtered messages:
+
+    juju debug-log --exclude machine-0 --lines 100
+
+Include only messages from the mysql/0 unit; show a maximum of 50 lines; and then
+exit:
+
+    juju debug-log --include mysql/0 --limit 50
+
+Show all messages from the apache/2 unit or machine 1 and then exit:
+
+    juju debug-log --replay --include apache/2 --include machine-1 --no-tail
+
+Show all juju.worker.uniter logging module messages that are also unit
+wordpress/0 messages, and then show any new log messages which match the
+filter and append:
+
+    juju debug-log --replay \
+        --include-module juju.worker.uniter \
+        --include wordpress/0
+
+Show all messages from the juju.worker.uniter module, except those sent from
+machine-3 or machine-4, and then stop:
+
+    juju debug-log --replay --no-tail \
+        --include-module juju.worker.uniter \
+        --exclude machine-3 \
+        --exclude machine-4
+
+To see all WARNING and ERROR messages and then continue showing any
+new WARNING and ERROR messages as they are logged:
+
+    juju debug-log --replay --level WARNING
+
+
+## Details
+
+This command provides access to all logged Juju activity on a per-model
+basis. By default, the logs for the currently selected model are shown.
+
+Each log line is emitted in this format:
+
+    <entity> <timestamp> <log-level> <module>:<line-no> <message>
+
+The "entity" is the source of the message: a machine or unit. The names for
+machines and units can be seen in the output of `juju status`.
+
+The '--include' and '--exclude' options filter by entity. The entity can be
+a machine, unit, or application for vm models, but can be application only
+for k8s models. These filters support wildcards `*` if filtering on the
+entity's full name (prefixed by `-`).
+
+The '--include-module' and '--exclude-module' options filter by (dotted)
+logging module name. The module name can be truncated such that all loggers
+with the prefix will match.
+
+The '--include-label' and '--exclude-label' options filter by logging label.
+
+The filtering options combine as follows:
+* All --include options are logically ORed together.
+* All --exclude options are logically ORed together.
+* All --include-module options are logically ORed together.
+* All --exclude-module options are logically ORed together.
+* All --include-label options are logically ORed together.
+* All --exclude-label options are logically ORed together.
+* The combined --include, --exclude, --include-module, --exclude-module,
+  --include-label and --exclude-label selections are logically ANDed to form
+  the complete filter.
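+
+For instance, a sketch of how these rules combine (the unit names are
+hypothetical): the two --include options below are ORed together, and the
+result is ANDed with the --include-module selection, so only
+juju.worker.uniter messages from either unit are shown:
+
+    juju debug-log --replay --include mysql/0 --include wordpress/0 --include-module juju.worker.uniter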
+ + + +--- + +------------------------- + diff --git a/tmp/t/10117.md b/tmp/t/10117.md new file mode 100644 index 000000000..e8e2abc04 --- /dev/null +++ b/tmp/t/10117.md @@ -0,0 +1,37 @@ +system | 2024-09-16 15:58:06 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [spaces](/t/10236), [remove-space](/t/10084) + +## Summary +Add a new network space. + +## Usage +```juju add-space [options] [ ...]``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `-m`, `--model` | | Model to operate in. Accepts [<controller name>:]<model name>|<model UUID> | + +## Examples + + +Add space "beta" with subnet 172.31.0.0/20: + + juju add-space beta 172.31.0.0/20 + + +## Details +Adds a new space with the given name and associates the given +(optional) list of existing subnet CIDRs with it. + +--- + +------------------------- + diff --git a/tmp/t/10118.md b/tmp/t/10118.md new file mode 100644 index 000000000..ef14b4640 --- /dev/null +++ b/tmp/t/10118.md @@ -0,0 +1,51 @@ +system | 2024-09-16 15:55:18 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [add-user](/t/10193), [register](/t/10160) + +## Summary +Changes the password for the current or specified Juju user. + +## Usage +```juju change-user-password [options] [username]``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-c`, `--controller` | | Controller to operate in | +| `--no-prompt` | false | don't prompt for password and just read a line from stdin | +| `--reset` | false | Reset user password | + +## Examples + + juju change-user-password + juju change-user-password bob + juju change-user-password bob --reset + juju change-user-password -c another-known-controller + juju change-user-password bob --controller another-known-controller + + +## Details + +The user is, by default, the current user. The latter can be confirmed with +the `juju show-user` command. + +If no controller is specified, the current controller will be used. + +A controller administrator can change the password for another user +by providing desired username as an argument. + +A controller administrator can also reset the password with a --reset option. +This will invalidate any passwords that were previously set +and registration strings that were previously issued for a user. +This option will issue a new registration string to be used with +`juju register`. + +--- + +------------------------- + diff --git a/tmp/t/10119.md b/tmp/t/10119.md new file mode 100644 index 000000000..41da07921 --- /dev/null +++ b/tmp/t/10119.md @@ -0,0 +1,42 @@ +system | 2024-09-16 15:54:01 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [ssh-keys](/t/10202), [add-ssh-key](/t/10238), [import-ssh-key](/t/10167) + +## Summary +Removes a public SSH key (or keys) from a model. + +## Usage +```juju remove-ssh-key [options] ...``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `-m`, `--model` | | Model to operate in. 
Accepts [<controller name>:]<model name>|<model UUID> | + +## Examples + + juju remove-ssh-key ubuntu@ubuntu + juju remove-ssh-key 45:7f:33:2c:10:4e:6c:14:e3:a1:a4:c8:b2:e1:34:b4 + juju remove-ssh-key bob@ubuntu carol@ubuntu + + +## Details +Juju maintains a per-model cache of public SSH keys which it copies to +each unit. This command will remove a specified key (or space separated +list of keys) from the model cache and all current units deployed in that +model. The keys to be removed may be specified by the key's fingerprint, +or by the text label associated with them. Invalid keys in the model cache +can be removed by specifying the key verbatim. + + + +--- + +------------------------- + diff --git a/tmp/t/10120.md b/tmp/t/10120.md new file mode 100644 index 000000000..f23036ca8 --- /dev/null +++ b/tmp/t/10120.md @@ -0,0 +1,43 @@ +system | 2024-09-16 15:53:52 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + + +## Summary +Display status information about known payloads. + +## Usage +```juju payloads [options] [pattern ...]``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `--format` | tabular | Specify output format (json|tabular|yaml) | +| `-m`, `--model` | | Model to operate in. Accepts [<controller name>:]<model name>|<model UUID> | +| `-o`, `--output` | | Specify an output file | + +## Details + +This command will report on the runtime state of defined payloads. + +When one or more pattern is given, Juju will limit the results to only +those payloads which match *any* of the provided patterns. Each pattern +will be checked against the following info in Juju: + +- unit name +- machine id +- payload type +- payload class +- payload id +- payload tag +- payload status + + +--- + +------------------------- + diff --git a/tmp/t/10121.md b/tmp/t/10121.md new file mode 100644 index 000000000..08a5a5684 --- /dev/null +++ b/tmp/t/10121.md @@ -0,0 +1,68 @@ +system | 2024-09-16 15:55:09 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [login](/t/10157), [controllers](/t/10152), [status](/t/10173) + +## Summary +Migrate a workload model to another controller. + +## Usage +```juju migrate [options] ``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | + +## Details + +The 'migrate' command begins the migration of a workload model from +its current controller to a new controller. This is useful for load +balancing when a controller is too busy, or as a way to upgrade a +model's controller to a newer Juju version. + +In order to start a migration, the target controller must be in the +juju client's local configuration cache. See the 'login' command +for details of how to do this. + +The 'migrate' command only starts a model migration - it does not wait +for its completion. The progress of a migration can be tracked using +the 'status' command and by consulting the logs. + +Once the migration is complete, the model's machine and unit agents +will be connected to the new controller. The model will no longer be +available at the source controller. 
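+
+A minimal sketch of the workflow described above, assuming a hypothetical
+model 'mymodel' and a target controller 'newcontroller' that is already
+known to this client:
+
+    juju migrate mymodel newcontroller
+    juju status -m newcontroller:mymodel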
+ +If the migration fails for some reason, the model is returned to its +original state where it is managed by the original +controller. + + + +--- + +------------------------- + +jose | 2024-06-25 02:31:00 UTC | #2 + +Incorrect conjugation of the verb to be: + +I would change this: + +> If the migration fails for some reason, the model be returned to its original state with the model being managed by the original controller. + +for this: +> If the migration fails for some reason, the model is returned to its original state with the model being managed by the original controller. + +------------------------- + +tmihoc | 2024-06-25 12:26:35 UTC | #3 + +Fix [in progress](https://github.com/juju/juju/pull/17584) -- thanks! + +------------------------- + diff --git a/tmp/t/10122.md b/tmp/t/10122.md new file mode 100644 index 000000000..4a1ec5d70 --- /dev/null +++ b/tmp/t/10122.md @@ -0,0 +1,70 @@ +system | 2024-09-16 15:52:16 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + + +## Summary +Wait for an entity to reach a specified state. + +## Usage +```juju wait-for [options] ...``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `--description` | false | Show short description of plugin, if any | +| `-h`, `--help` | false | Show help on a command or other topic. | + +## Examples + +Waits for the mysql/0 unit to be created and active. + + juju wait-for unit mysql/0 + +Waits for the mysql application to be active or idle. + + juju wait-for application mysql --query='name=="mysql" && (status=="active" || status=="idle")' + +Waits for the model units to all start with ubuntu. + + juju wait-for model default --query='forEach(units, unit => startsWith(unit.name, "ubuntu"))' + + +## Details +The wait-for set of commands (model, application, machine and unit) defines +a way to wait for a goal state to be reached. The goal state can be defined +programmatically using the query DSL (domain specific language). + +The wait-for command is an optimized alternative to the status command for +determining programmatically if a goal state has been reached. The wait-for +command streams delta changes from the underlying database, unlike the status +command which performs a full query of the database. + +The query DSL is a simple language that can be comprised of expressions to +produce a boolean result. The result of the query is used to determine if the +goal state has been reached. The query DSL is evaluated against the scope of +the command. + +Built-in functions are provided to help define the goal state. The built-in +functions are defined in the query package. Examples of built-in functions +include len, print, forEach (lambda), startsWith and endsWith. + +See also: + wait-for model + wait-for application + wait-for machine + wait-for unit + +## Subcommands +- [application](/t/11181) +- [machine](/t/11183) +- [model](/t/11182) +- [unit](/t/11184) + +--- + +------------------------- + diff --git a/tmp/t/10123.md b/tmp/t/10123.md new file mode 100644 index 000000000..5360aca50 --- /dev/null +++ b/tmp/t/10123.md @@ -0,0 +1,38 @@ +system | 2024-09-16 15:57:12 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. 
+[/note]
+
+> See also: [integrate](/t/10207), [offers](/t/10051), [remove-relation](/t/10110), [suspend-relation](/t/10179)
+
+## Summary
+Resumes a suspended relation to an application offer.
+
+## Usage
+```juju resume-relation [options] <relation-id>[,<relation-id>]```
+
+### Options
+| Flag | Default | Usage |
+| --- | --- | --- |
+| `-B`, `--no-browser-login` | false | Do not use web browser for authentication |
+| `-m`, `--model` | | Model to operate in. Accepts [<controller name>:]<model name>|<model UUID> |
+
+## Examples
+
+    juju resume-relation 123
+    juju resume-relation 123 456
+
+
+## Details
+
+A relation between an application in another model and an offer in this model will be resumed.
+The relation-joined and relation-changed hooks will be run for the relation, and the relation
+status will be set to joined. The relation is specified using its id.
+
+
+---
+
+-------------------------
+
diff --git a/tmp/t/10124.md b/tmp/t/10124.md
new file mode 100644
index 000000000..39d48db8f
--- /dev/null
+++ b/tmp/t/10124.md
@@ -0,0 +1,59 @@
+system | 2024-09-16 15:52:09 UTC | #1
+
+[note type=caution]
+The information in this doc is based on Juju version 3.5.5,
+and may not accurately reflect other versions of Juju.
+[/note]
+
+> See also: [resources](/t/10218), [charm-resources](/t/10099)
+
+## Summary
+Update a resource for an application.
+
+## Usage
+```juju attach-resource [options] application <resource name>=<resource>```
+
+### Options
+| Flag | Default | Usage |
+| --- | --- | --- |
+| `-B`, `--no-browser-login` | false | Do not use web browser for authentication |
+| `-m`, `--model` | | Model to operate in. Accepts [<controller name>:]<model name>|<model UUID> |
+
+## Details
+
+This command updates a resource for an application.
+
+The format is
+
+    <resource name>=<resource>
+
+where the resource name is the name from the metadata.yaml file of the charm
+and where, depending on the type of the resource, the resource can be specified
+as follows:
+
+(1) If the resource is type 'file', you can specify it by providing
+(a) the resource revision number or
+(b) a path to a local file.
+
+(2) If the resource is type 'oci-image', you can specify it by providing
+(a) the resource revision number,
+(b) a path to a local file = private OCI image,
+(c) a link to a public OCI image.
+
+
+Note: If you choose (1b) or (2b-c), i.e., a resource that is not from Charmhub:
+You will not be able to go back to using a resource from Charmhub.
+
+Note: If you choose (1b) or (2b): This uploads a file from your local disk to the juju
+controller to be streamed to the charm when "resource-get" is called by a hook.
+
+Note: If you choose (2b): You will need to specify:
+(i) the local path to the private OCI image as well as
+(ii) the username/password required to access the private OCI image.
+
+
+
+---
+
+-------------------------
+
diff --git a/tmp/t/10125.md b/tmp/t/10125.md
new file mode 100644
index 000000000..ec03e71a5
--- /dev/null
+++ b/tmp/t/10125.md
@@ -0,0 +1,84 @@
+system | 2024-09-16 15:52:33 UTC | #1
+
+[note type=caution]
+The information in this doc is based on Juju version 3.5.5,
+and may not accurately reflect other versions of Juju.
+[/note]
+
+> See also: [remove-application](/t/10067), [scale-application](/t/10171)
+
+## Summary
+Remove application units from the model.
+
+## Usage
+```juju remove-unit [options] <unit> [...] | <application>```
+
+### Options
+| Flag | Default | Usage |
+| --- | --- | --- |
+| `-B`, `--no-browser-login` | false | Do not use web browser for authentication |
+| `--destroy-storage` | false | Destroy storage attached to the unit |
+| `--dry-run` | false | Print what this command would remove without removing |
+| `--force` | false | Completely remove a unit and all its dependencies |
+| `-m`, `--model` | | Model to operate in. Accepts [<controller name>:]<model name>|<model UUID> |
+| `--no-prompt` | false | Do not ask for confirmation. Overrides `mode` model config setting |
+| `--no-wait` | false | Rush through unit removal without waiting for each individual step to complete |
+| `--num-units` | 0 | Number of units to remove (k8s models only) |
+
+## Examples
+
+    juju remove-unit wordpress/2 wordpress/3 wordpress/4
+
+    juju remove-unit wordpress/2 --destroy-storage
+
+    juju remove-unit wordpress/2 --force
+
+    juju remove-unit wordpress/2 --force --no-wait
+
+
+## Details
+
+Remove application units from the model.
+
+The usage of this command differs depending on whether it is being used on a
+k8s or cloud model.
+
+Removing all units of an application is not equivalent to removing the
+application itself; for that, the `juju remove-application` command
+is used.
+
+For k8s models, only a single application can be supplied and only the
+--num-units argument is supported.
+Specific units cannot be targeted for removal as that is handled by k8s;
+instead, the total number of units to be removed is specified.
+
+Examples:
+    juju remove-unit wordpress --num-units 2
+
+For cloud models, specific units can be targeted for removal.
+Units of an application are numbered in sequence upon creation. For example, the
+fourth unit of wordpress will be designated "wordpress/3". These identifiers
+can be supplied in a space-delimited list to remove unwanted units from the
+model.
+
+Juju will also remove the machine if the removed unit was the only unit left
+on that machine (including units in containers).
+
+Sometimes, the removal of the unit may fail as Juju encounters errors
+and failures that need to be dealt with before a unit can be removed.
+For example, Juju will not remove a unit if there are hook failures.
+However, at times, there is a need to remove a unit ignoring
+all operational errors. In these rare cases, use the --force option but note
+that --force will remove a unit and, potentially, its machine without
+giving them the opportunity to shut down cleanly.
+
+Unit removal is a multi-step process. Under normal circumstances, Juju will not
+proceed to the next step until the current step has finished.
+However, when using --force, users can also specify --no-wait to progress through steps
+without delay waiting for each step to complete.
+
+
+---
+
+-------------------------
+
diff --git a/tmp/t/10126.md b/tmp/t/10126.md
new file mode 100644
index 000000000..09ecc04d1
--- /dev/null
+++ b/tmp/t/10126.md
@@ -0,0 +1,36 @@
+system | 2024-09-16 15:52:38 UTC | #1
+
+[note type=caution]
+The information in this doc is based on Juju version 3.5.5,
+and may not accurately reflect other versions of Juju.
+[/note]
+
+
+## Summary
+Attaches existing storage to a unit.
+
+## Usage
+```juju attach-storage [options] <unit> <storage> [<storage> ...]```
+
+### Options
+| Flag | Default | Usage |
+| --- | --- | --- |
+| `-B`, `--no-browser-login` | false | Do not use web browser for authentication |
+| `-m`, `--model` | | Model to operate in. Accepts [<controller name>:]<model name>|<model UUID> |
+
+## Examples
+
+    juju attach-storage postgresql/1 pgdata/0
+
+
+
+## Details
+
+Attach existing storage to a unit. Specify a unit
+and one or more storage IDs to attach to it.
+
+
+---
+
+-------------------------
+
diff --git a/tmp/t/10128.md b/tmp/t/10128.md
new file mode 100644
index 000000000..5720151df
--- /dev/null
+++ b/tmp/t/10128.md
@@ -0,0 +1,130 @@
+system | 2024-09-16 15:57:33 UTC | #1
+
+[note type=caution]
+The information in this doc is based on Juju version 3.5.5,
+and may not accurately reflect other versions of Juju.
+[/note]
+
+> See also: [ssh](/t/10153)
+
+## Summary
+Securely transfer files within a model.
+
+## Usage
+```juju scp [options] <source> <destination>```
+
+### Options
+| Flag | Default | Usage |
+| --- | --- | --- |
+| `--container` | | the container name of the target pod |
+| `-m`, `--model` | | Model to operate in. Accepts [<controller name>:]<model name>|<model UUID> |
+| `--no-host-key-checks` | false | Skip host key checking (INSECURE) |
+| `--proxy` | false | Proxy through the API server |
+| `--remote` | false | Target on the workload or operator pod (k8s-only) |
+
+## Examples
+
+Copy the config of a Charmed Kubernetes cluster to ~/.kube/config:
+
+    juju scp kubernetes-master/0:config ~/.kube/config
+
+Copy file /var/log/syslog from machine 2 to the client's
+current working directory:
+
+    juju scp 2:/var/log/syslog .
+
+Recursively copy the /var/log/mongodb directory from the
+mongodb/0 unit to the client's local remote-logs directory:
+
+    juju scp -- -r mongodb/0:/var/log/mongodb/ remote-logs
+
+Copy foo.txt from the client's current working directory to the
+apache2/1 unit of the model "prod" (-m prod). Proxy the SSH connection
+through the controller (--proxy) and enable compression (-- -C):
+
+    juju scp -m prod --proxy -- -C foo.txt apache2/1:
+
+Copy multiple files from the client's current working directory to
+the /home/ubuntu directory of machine 2:
+
+    juju scp file1 file2 2:
+
+Copy multiple files from machine 3 as user "bob" to the client's
+current working directory:
+
+    juju scp bob@3:'file1 file2' .
+
+Copy file.dat from machine 0 to the machine hosting unit foo/0
+(-- -3):
+
+    juju scp -- -3 0:file.dat foo/0:
+
+Copy a file ('chunks-inspect') from localhost to the /loki directory
+in a specific container in a juju unit running in Kubernetes:
+
+    juju scp --container loki chunks-inspect loki-k8s/0:/loki
+
+
+## Details
+
+Transfer files to, from and between Juju machine(s), unit(s) and the
+Juju client.
+
+The basic syntax for the command requires the location of 1 or more source
+files or directories and their intended destination:
+
+    <source> <destination>
+
+The <source> and <destination> arguments may either be a path to a local file
+or a remote location. Here is a fuller syntax diagram:
+
+    # <source>                 <destination>
+    [[<user>@]<target>:]<path> [<user>@]<target>:[<path>]
+
+<user> is a user account that exists on the remote host. Juju defaults to the
+"ubuntu" user when this is omitted.
+
+<target> may be either a unit or machine. Units are specified in form
+'<application-name>/<n>', where '<n>' is either the unit number or the value
+"leader" when targeting the leader unit for an application e.g. postgresql/0 or
+haproxy/leader. Machines are specified in form '<n>', e.g. 0 or 12. The units
+and machines in your model can be obtained from the output of "juju status".
+
+<path> is a file path. Local relative paths are resolved relative to the
+current working directory. Remote relative paths are resolved relative to the
+home directory of the remote user account.
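+
+Putting those pieces together (the unit and path are hypothetical), a remote
+location of the form <user>@<target>:<path> looks like:
+
+    juju scp bob@mysql/0:/var/log/mysql/error.log .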
+ + +Providing arguments directly to scp + +Send arguments directly to the underlying scp utility for full control by +adding two hyphens to the argument list and adding arguments to the right +(-- <arg> [...]). Common arguments to scp include + + - "-r" recursively copy files from a directory + - "-3" use the client as a proxy for transfers between machines + - "-C" enable SSH compression + + +Transfers between machines + +Machines do not have SSH connectivity to each other by default. Within a Juju +model, all communication is facilitated by the Juju controller. To transfer +files between machines, you can use the -3 option to scp, e.g. add "-- -3" +to the command-line arguments. + + +Security considerations + +To enable transfers to/from machines that do not have internet access, you can use +the Juju controller as a proxy with the --proxy option. + +The SSH host keys of the target are verified by default. To disable this, add + --no-host-key-checks option. Using this option is strongly discouraged. + + + +--- + +------------------------- + diff --git a/tmp/t/10129.md b/tmp/t/10129.md new file mode 100644 index 000000000..4e96c2cdc --- /dev/null +++ b/tmp/t/10129.md @@ -0,0 +1,53 @@ +system | 2024-09-16 15:52:19 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [cancel-task](/t/10053), [run](/t/10052), [operations](/t/10203), [show-operation](/t/10083) + +## Summary +Show results of a task by ID. + +## Usage +```juju show-task [options] ``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `--format` | plain | Specify output format (json|plain|yaml) | +| `-m`, `--model` | | Model to operate in. Accepts [<controller name>:]<model name>|<model UUID> | +| `-o`, `--output` | | Specify an output file | +| `--utc` | false | Show times in UTC | +| `--wait` | -1s | Maximum wait time for a task to complete | +| `--watch` | false | Wait indefinitely for results | + +## Examples + + juju show-task 1 + juju show-task 1 --wait=2m + juju show-task 1 --watch + + +## Details + +Show the results returned by a task with the given ID. +To block until the result is known completed or failed, use +the --wait option with a duration, as in --wait 5s or --wait 1h. +Use --watch to wait indefinitely. + +The default behavior without --wait or --watch is to immediately check and return; +if the results are "pending" then only the available information will be +displayed. This is also the behavior when any negative time is given. + +Note: if Juju has been upgraded from 2.6 and there are old action UUIDs still in use, +and you want to specify just the UUID prefix to match on, you will need to include up +to at least the first "-" to disambiguate from a newer numeric id. + + +--- + +------------------------- + diff --git a/tmp/t/10130.md b/tmp/t/10130.md new file mode 100644 index 000000000..0ad2c20b6 --- /dev/null +++ b/tmp/t/10130.md @@ -0,0 +1,39 @@ +system | 2024-09-16 15:57:55 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [unregister](/t/10165), [revoke](/t/10077), [show-user](/t/10212), [users](/t/10175), [disable-user](/t/10198), [enable-user](/t/10241), [change-user-password](/t/10118) + +## Summary +Deletes a Juju user from a controller. 
+ +## Usage +```juju remove-user [options] ``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `-c`, `--controller` | | Controller to operate in | +| `-y`, `--yes` | false | Confirm deletion of the user | + +## Examples + + juju remove-user bob + juju remove-user bob --yes + + +## Details +This removes a user permanently. + +By default, the controller is the current controller. + + + +--- + +------------------------- + diff --git a/tmp/t/10131.md b/tmp/t/10131.md new file mode 100644 index 000000000..c4bb46f54 --- /dev/null +++ b/tmp/t/10131.md @@ -0,0 +1,46 @@ +system | 2024-09-16 15:51:51 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [add-cloud](/t/10162), [clouds](/t/10182), [show-cloud](/t/10215), [update-cloud](/t/10081), [update-public-clouds](/t/10115) +**Alias:** regions + +## Summary +Lists regions for a given cloud. + +## Usage +```juju list-regions [options] ``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `-c`, `--controller` | | Controller to operate in | +| `--client` | false | Client operation | +| `--format` | tabular | Specify output format (json|tabular|yaml) | +| `-o`, `--output` | | Specify an output file | + +## Examples + + juju regions aws + juju regions aws --controller mycontroller + juju regions aws --client + juju regions aws --client --controller mycontroller + + +## Details + +List regions for a given cloud. + +Use --controller option to list regions from the cloud from a controller. + +Use --client option to list regions known locally on this client. + + +--- + +------------------------- + diff --git a/tmp/t/10132.md b/tmp/t/10132.md new file mode 100644 index 000000000..1ee56fe9f --- /dev/null +++ b/tmp/t/10132.md @@ -0,0 +1,682 @@ +system | 2024-09-16 15:52:40 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [add-credential](/t/10136), [autoload-credentials](/t/10230), [add-model](/t/10145), [controller-config](/t/10237), [model-config](/t/10096), [set-constraints](/t/10210), [show-cloud](/t/10215) + +## Summary +Initializes a cloud environment. 
+
+## Usage
+```juju bootstrap [options] [<cloud name>[/region] [<controller name>]]```
+
+### Options
+| Flag | Default | Usage |
+| --- | --- | --- |
+| `-B`, `--no-browser-login` | false | Do not use web browser for authentication |
+| `--add-model` | | Name of an initial model to create on the new controller |
+| `--agent-version` | | Version of agent binaries to use for Juju agents |
+| `--auto-upgrade` | false | After bootstrap, upgrade to the latest patch release |
+| `--bootstrap-base` | | Specify the base of the bootstrap machine |
+| `--bootstrap-constraints` | | Specify bootstrap machine constraints |
+| `--bootstrap-image` | | Specify the image of the bootstrap machine (requires --bootstrap-constraints specifying architecture) |
+| `--bootstrap-series` | | Specify the series of the bootstrap machine (deprecated, use bootstrap-base) |
+| `--build-agent` | false | Build local version of agent binary before bootstrapping |
+| `--clouds` | false | Print the available clouds which can be used to bootstrap a Juju environment |
+| `--config` | | Specify a controller configuration file, or one or more configuration options. Model config keys only affect the controller model. (--config config.yaml [--config key=value ...]) |
+| `--constraints` | | Set model constraints |
+| `--controller-charm-channel` | 3.5/stable | The Charmhub channel to download the controller charm from (if not using a local charm) |
+| `--controller-charm-path` | | Path to a locally built controller charm |
+| `--credential` | | Credentials to use when bootstrapping |
+| `--db-snap` | | Path to a locally built .snap to use as the internal juju-db service. |
+| `--db-snap-asserts` | | Path to a local .assert file. Requires --db-snap |
+| `--force` | false | Allow the bypassing of checks such as supported series |
+| `--keep-broken` | false | Do not destroy the provisioned controller instance if bootstrap fails |
+| `--metadata-source` | | Local path to use as agent and/or image metadata source |
+| `--model-default` | | Specify a configuration file, or one or more configuration options to be set for all models, unless otherwise specified (--model-default config.yaml [--model-default key=value ...]) |
+| `--no-switch` | false | Do not switch to the newly created controller |
+| `--regions` | | Print the available regions for the specified cloud |
+| `--storage-pool` | | Specify options for an initial storage pool: 'name' and 'type' are required, plus any additional attributes (--storage-pool pool-config.yaml [--storage-pool key=value ...]) |
+| `--to` | | Placement directive indicating an instance to bootstrap |
+
+## Examples
+
+    juju bootstrap
+    juju bootstrap --clouds
+    juju bootstrap --regions aws
+    juju bootstrap aws
+    juju bootstrap aws/us-east-1
+    juju bootstrap google joe-us-east1
+    juju bootstrap --config=~/config-rs.yaml google joe-syd
+    juju bootstrap --agent-version=2.2.4 aws joe-us-east-1
+    juju bootstrap --config bootstrap-timeout=1200 azure joe-eastus
+    juju bootstrap aws --storage-pool name=secret --storage-pool type=ebs --storage-pool encrypted=true
+    juju bootstrap lxd --bootstrap-base=ubuntu@22.04
+
+    # For a bootstrap on k8s, setting the service type of the Juju controller service to LoadBalancer
+    juju bootstrap --config controller-service-type=loadbalancer
+
+    # For a bootstrap on k8s, setting the service type of the Juju controller service to External
+    juju bootstrap --config controller-service-type=external --config controller-external-name=controller.juju.is
+
+
+## Details
+Used without arguments, bootstrap will step you through the process of
+initializing a Juju cloud environment. Initialization consists of creating
+a 'controller' model and provisioning a machine to act as controller.
+
+Controller names may only contain lowercase letters, digits and hyphens, and
+may not start with a hyphen.
+We recommend you call your controller ‘username-region’ e.g. ‘fred-us-east-1’.
+See --clouds for a list of clouds and credentials.
+See --regions <cloud> for a list of available regions for a given cloud.
+
+Credentials are set beforehand and are distinct from any other
+configuration (see `juju add-credential`).
+The 'controller' model typically does not run workloads. It should remain
+pristine to run and manage Juju's own infrastructure for the corresponding
+cloud. Additional models should be created with `juju add-model` for workload purposes.
+Note that a 'default' model is also created and becomes the current model
+of the environment once the command completes. It can be discarded if
+other models are created.
+
+If '--bootstrap-constraints' is used, its values will also apply to any
+future controllers provisioned for high availability (HA).
+
+If '--constraints' is used, its values will be set as the default
+constraints for all future workload machines in the model, exactly as if
+the constraints were set with `juju set-model-constraints`.
+
+It is possible to override constraints and the automatic machine selection
+algorithm by assigning a "placement directive" via the '--to' option. This
+dictates what machine to use for the controller. This would typically be
+used with the MAAS provider ('--to <host>.maas').
+
+You can change the default timeout and retry delays used during the
+bootstrap by changing the following settings in your configuration
+(all values represent number of seconds):
+
+    # How long to wait for a connection to the controller
+    bootstrap-timeout: 1200 # default: 20 minutes
+    # How long to wait between connection attempts to a controller address.
+    bootstrap-retry-delay: 5 # default: 5 seconds
+    # How often to refresh controller addresses from the API server.
+    bootstrap-addresses-delay: 10 # default: 10 seconds
+
+It is possible to override the base (e.g. ubuntu@22.04) that Juju attempts
+to bootstrap onto by supplying a base argument to '--bootstrap-base'.
+
+An error is emitted if the determined base is not supported. Use the
+'--force' option to override this check:
+
+    juju bootstrap --bootstrap-base=ubuntu@22.04 --force
+
+The '--bootstrap-series' flag can still be used, but is deprecated in favour
+of '--bootstrap-base'.
+
+Private clouds may need to specify their own custom image metadata and
+tools/agent. Use '--metadata-source' whose value is a local directory.
+
+By default, the Juju version of the agent binary that is downloaded and
+installed on all models for the new controller will be the same as that
+of the Juju client used to perform the bootstrap.
+However, a user can specify a different agent version via the '--agent-version'
+option to the bootstrap command. Juju will use this version for models' agents
+as long as the client's version is from the same Juju release base.
+In other words, a 2.2.1 client can bootstrap any 2.2.x agents but cannot
+bootstrap any 2.0.x or 2.1.x agents.
+The agent version can be specified as a simple numeric version, e.g. 2.2.4.
+
+For example, at the time when 2.3.0, 2.3.1 and 2.3.2 are released and your
+agent stream is 'released' (default), a 2.3.1 client can bootstrap:
+   * 2.3.0 controller by running '... 
bootstrap --agent-version=2.3.0 ...'; + * 2.3.1 controller by running '... bootstrap ...'; + * 2.3.2 controller by running 'bootstrap --auto-upgrade'. +However, if this client has a copy of codebase, then a local copy of Juju +will be built and bootstrapped - 2.3.1.1. + +Bootstrapping to a k8s cluster requires that the service set up to handle +requests to the controller be accessible outside the cluster. Typically this +means a service type of LoadBalancer is needed, and Juju does create such a +service if it knows it is supported by the cluster. This is performed by +interrogating the cluster for a well known managed deployment such as microk8s, +GKE or EKS. + +When bootstrapping to a k8s cluster Juju does not recognise, there's no +guarantee a load balancer is available, so Juju defaults to a controller +service type of ClusterIP. This may not be suitable, so there are three bootstrap +options available to tell Juju how to set up the controller service. Part of +the solution may require a load balancer for the cluster to be set up manually +first, or perhaps an external k8s service via a FQDN will be used +(this is a cluster specific implementation decision which Juju needs to be +informed about so it can set things up correctly). The three relevant bootstrap +options are (see list of bootstrap config items below for a full explanation): + +- controller-service-type +- controller-external-name +- controller-external-ips + +Juju advertises those addresses to other controllers, so they must be resolveable from +other controllers for cross-model (cross-controller, actually) relations to work. + +If a storage pool is specified using --storage-pool, this will be created +in the controller model. + + +Available keys for use with --config are: + +Bootstrap configuration keys: + + admin-secret: + type: string + description: Sets the Juju administrator password + bootstrap-addresses-delay: + type: int + description: Controls the amount of time in seconds in between refreshing the bootstrap + machine addresses + bootstrap-retry-delay: + type: int + description: Controls the amount of time in seconds between attempts to connect + to a bootstrap machine address + bootstrap-timeout: + type: int + description: Controls how long Juju will wait for a bootstrap to complete before + considering it failed in seconds + ca-cert: + type: string + description: Sets the bootstrapped controllers CA cert to use and issue certificates + from, used in conjunction with ca-private-key + ca-private-key: + type: string + description: Sets the bootstrapped controllers CA cert private key to sign certificates + with, used in conjunction with ca-cert + controller-external-ips: + type: list + description: Specifies a comma separated list of external IPs for a k8s controller + of type external + controller-external-name: + type: string + description: Sets the external name for a k8s controller of type external + controller-service-type: + type: string + description: |- + Controls the kubernetes service type for Juju controllers, see + https://kubernetes.io/docs/reference/kubernetes-api/service-resources/service-v1/#ServiceSpec + valid values are one of cluster, loadbalancer, external + + +Controller configuration keys: + + agent-logfile-max-backups: + type: int + description: The number of old agent log files to keep (compressed) + agent-logfile-max-size: + type: string + description: The maximum size of the agent log file + agent-ratelimit-max: + type: int + description: The maximum size of the token bucket used to 
ratelimit agent connections + agent-ratelimit-rate: + type: string + description: The time taken to add a new token to the ratelimit bucket + allow-model-access: + type: bool + description: "Determines if the controller allows users to \nconnect to models they + have been authorized for even when \nthey don't have any access rights to the + controller itself" + api-port: + type: int + description: The port used for api connections + api-port-open-delay: + type: string + description: "The duration that the controller will wait \nbetween when the controller + has been deemed to be ready to open \nthe api-port and when the api-port is actually + opened \n(only used when a controller-api-port value is set)." + application-resource-download-limit: + type: int + description: The maximum number of concurrent resources downloads per application + audit-log-capture-args: + type: bool + description: Determines if the audit log contains the arguments passed to API methods + audit-log-exclude-methods: + type: list + description: The list of Facade.Method names that aren't interesting for audit logging + purposes. + audit-log-max-backups: + type: int + description: The number of old audit log files to keep (compressed) + audit-log-max-size: + type: string + description: The maximum size for the current controller audit log file + auditing-enabled: + type: bool + description: Determines if the controller records auditing information + autocert-dns-name: + type: string + description: The DNS name of the controller + autocert-url: + type: string + description: The URL used to obtain official TLS certificates when a client connects + to the API + caas-image-repo: + type: string + description: The docker repo to use for the jujud operator and mongo images + caas-operator-image-path: + type: string + description: |- + (deprecated) The url of the docker image used for the application operator. + Use "caas-image-repo" instead. + controller-api-port: + type: int + description: |- + An optional port that may be set for controllers + that have a very heavy load. If this port is set, this port is used by + the controllers to talk to each other - used for the local API connection + as well as the pubsub forwarders, and the raft workers. If this value is + set, the api-port isn't opened until the controllers have started properly. + controller-name: + type: string + description: The canonical name of the controller + controller-resource-download-limit: + type: int + description: The maximum number of concurrent resources downloads across all the + applications on the controller + features: + type: list + description: A list of runtime changeable features to be updated + identity-public-key: + type: string + description: The public key of the identity manager + identity-url: + type: string + description: The url of the identity manager + juju-db-snap-channel: + type: string + description: Sets channel for installing mongo snaps when bootstrapping on focal + or later + juju-ha-space: + type: string + description: The network space within which the MongoDB replica-set should communicate + juju-mgmt-space: + type: string + description: The network space that agents should use to communicate with controllers + jujud-controller-snap-source: + type: string + description: The source for the jujud-controller snap. 
+ login-token-refresh-url: + type: string + description: The url of the jwt well known endpoint + max-agent-state-size: + type: int + description: The maximum size (in bytes) of internal state data that agents can + store to the controller + max-charm-state-size: + type: int + description: The maximum size (in bytes) of charm-specific state that units can + store to the controller + max-debug-log-duration: + type: string + description: The maximum duration that a debug-log session is allowed to run + max-prune-txn-batch-size: + type: int + description: (deprecated) The maximum number of transactions evaluated in one go + when pruning + max-prune-txn-passes: + type: int + description: (deprecated) The maximum number of batches processed when pruning + max-txn-log-size: + type: string + description: The maximum size the of capped txn log collection + metering-url: + type: string + description: The url for metrics + migration-agent-wait-time: + type: string + description: The maximum during model migrations that the migration worker will + wait for agents to report on phases of the migration + model-logfile-max-backups: + type: int + description: The number of old model log files to keep (compressed) + model-logfile-max-size: + type: string + description: The maximum size of the log file written out by the controller on behalf + of workers running for a model + model-logs-size: + type: string + description: The size of the capped collections used to hold the logs for the models + mongo-memory-profile: + type: string + description: Sets mongo memory profile + prune-txn-query-count: + type: int + description: The number of transactions to read in a single query + prune-txn-sleep-time: + type: string + description: The amount of time to sleep between processing each batch query + public-dns-address: + type: string + description: Public DNS address (with port) of the controller. + query-tracing-enabled: + type: bool + description: Enable query tracing for the dqlite driver + query-tracing-threshold: + type: string + description: "The minimum duration of a query for it to be traced. The lower the + \nthreshold, the more queries will be output. A value of 0 means all queries \nwill + be output if tracing is enabled." + set-numa-control-policy: + type: bool + description: Determines if the NUMA control policy is set + state-port: + type: int + description: The port used for mongo connections + +Model configuration keys (affecting the controller model): + + agent-metadata-url: + type: string + description: URL of private stream + agent-stream: + type: string + description: Version of Juju to use for deploy/upgrades. 
+ apt-ftp-proxy: + type: string + description: The APT FTP proxy for the model + apt-http-proxy: + type: string + description: The APT HTTP proxy for the model + apt-https-proxy: + type: string + description: The APT HTTPS proxy for the model + apt-mirror: + type: string + description: The APT mirror for the model + apt-no-proxy: + type: string + description: List of domain addresses not to be proxied for APT (comma-separated) + authorized-keys: + type: string + description: Any authorized SSH public keys for the model, as found in a ~/.ssh/authorized_keys + file + automatically-retry-hooks: + type: bool + description: Determines whether the uniter should automatically retry failed hooks + backup-dir: + type: string + description: Directory used to store the backup working directory + charmhub-url: + type: string + description: The url for CharmHub API calls + cloudinit-userdata: + type: string + description: Cloud-init user-data (in yaml format) to be added to userdata for new + machines created in this model + container-image-metadata-defaults-disabled: + type: bool + description: Whether default simplestreams sources are used for image metadata with + containers. + container-image-metadata-url: + type: string + description: The URL at which the metadata used to locate container OS image ids + is located + container-image-stream: + type: string + description: The simplestreams stream used to identify which image ids to search + when starting a container. + container-inherit-properties: + type: string + description: List of properties to be copied from the host machine to new containers + created in this model (comma-separated) + container-networking-method: + type: string + description: Method of container networking setup - one of fan, provider, local + default-base: + type: string + description: The default base image to use for deploying charms, will act like --base + when deploying charms + default-space: + type: string + description: The default network space used for application endpoints in this model + development: + type: bool + description: Whether the model is in development mode + disable-network-management: + type: bool + description: Whether the provider should control networks (on MAAS models, set to + true for MAAS to control networks + disable-telemetry: + type: bool + description: Disable telemetry reporting of model information + egress-subnets: + type: string + description: Source address(es) for traffic originating from this model + enable-os-refresh-update: + type: bool + description: Whether newly provisioned instances should run their respective OS's + update capability. + enable-os-upgrade: + type: bool + description: Whether newly provisioned instances should run their respective OS's + upgrade capability. + extra-info: + type: string + description: Arbitrary user specified string data that is stored against the model. + fan-config: + type: string + description: Configuration for fan networking for this model + firewall-mode: + type: string + description: |- + The mode to use for network firewalling. + + 'instance' requests the use of an individual firewall per instance. + + 'global' uses a single firewall for all instances (access + for a network port is enabled to one instance if any instance requires + that port). + + 'none' requests that no firewalling should be performed + inside the model. It's useful for clouds without support for either + global or per instance security groups. 
+ ftp-proxy: + type: string + description: The FTP proxy value to configure on instances, in the FTP_PROXY environment + variable + http-proxy: + type: string + description: The HTTP proxy value to configure on instances, in the HTTP_PROXY environment + variable + https-proxy: + type: string + description: The HTTPS proxy value to configure on instances, in the HTTPS_PROXY + environment variable + ignore-machine-addresses: + type: bool + description: Whether the machine worker should discover machine addresses on startup + image-metadata-defaults-disabled: + type: bool + description: Whether default simplestreams sources are used for image metadata. + image-metadata-url: + type: string + description: The URL at which the metadata used to locate OS image ids is located + image-stream: + type: string + description: The simplestreams stream used to identify which image ids to search + when starting an instance. + juju-ftp-proxy: + type: string + description: The FTP proxy value to pass to charms in the JUJU_CHARM_FTP_PROXY environment + variable + juju-http-proxy: + type: string + description: The HTTP proxy value to pass to charms in the JUJU_CHARM_HTTP_PROXY + environment variable + juju-https-proxy: + type: string + description: The HTTPS proxy value to pass to charms in the JUJU_CHARM_HTTPS_PROXY + environment variable + juju-no-proxy: + type: string + description: List of domain addresses not to be proxied (comma-separated), may contain + CIDRs. Passed to charms in the JUJU_CHARM_NO_PROXY environment variable + logforward-enabled: + type: bool + description: Whether syslog forwarding is enabled. + logging-config: + type: string + description: The configuration string to use when configuring Juju agent logging + (see http://godoc.org/github.com/juju/loggo#ParseConfigurationString for details) + logging-output: + type: string + description: 'The logging output destination: database and/or syslog. (default "")' + lxd-snap-channel: + type: string + description: The channel to use when installing LXD from a snap (cosmic and later) + max-action-results-age: + type: string + description: The maximum age for action entries before they are pruned, in human-readable + time format + max-action-results-size: + type: string + description: The maximum size for the action collection, in human-readable memory + format + max-status-history-age: + type: string + description: The maximum age for status history entries before they are pruned, + in human-readable time format + max-status-history-size: + type: string + description: The maximum size for the status history collection, in human-readable + memory format + mode: + type: string + description: |- + Mode is a comma-separated list which sets the + mode the model should run in. So far only one is implemented + - If 'requires-prompts' is present, clients will ask for confirmation before removing + potentially valuable resources. 
+ (default "") + net-bond-reconfigure-delay: + type: int + description: The amount of time in seconds to sleep between ifdown and ifup when + bridging + no-proxy: + type: string + description: List of domain addresses not to be proxied (comma-separated) + num-container-provision-workers: + type: int + description: The number of container provisioning workers to use per machine + num-provision-workers: + type: int + description: The number of provisioning workers to use per model + provisioner-harvest-mode: + type: string + description: What to do with unknown machines (default destroyed) + proxy-ssh: + type: bool + description: Whether SSH commands should be proxied through the API server + resource-tags: + type: attrs + description: resource tags + saas-ingress-allow: + type: string + description: |- + Application-offer ingress allowlist is a comma-separated list of + CIDRs specifying what ingress can be applied to offers in this model. + secret-backend: + type: string + description: The name of the secret store backend. (default "auto") + snap-http-proxy: + type: string + description: The HTTP proxy value for installing snaps + snap-https-proxy: + type: string + description: The HTTPS proxy value for installing snaps + snap-store-assertions: + type: string + description: The assertions for the defined snap store proxy + snap-store-proxy: + type: string + description: The snap store proxy for installing snaps + snap-store-proxy-url: + type: string + description: The URL for the defined snap store proxy + ssh-allow: + type: string + description: |- + SSH allowlist is a comma-separated list of CIDRs from + which machines in this model will accept connections to the SSH service. + Currently only the aws & openstack providers support ssh-allow + ssl-hostname-verification: + type: bool + description: Whether SSL hostname verification is enabled (default true) + storage-default-block-source: + type: string + description: The default block storage source for the model + storage-default-filesystem-source: + type: string + description: The default filesystem storage source for the model + syslog-ca-cert: + type: string + description: The certificate of the CA that signed the syslog server certificate, + in PEM format. + syslog-client-cert: + type: string + description: The syslog client certificate in PEM format. + syslog-client-key: + type: string + description: The syslog client key in PEM format. + syslog-host: + type: string + description: The hostname:port of the syslog server. + test-mode: + type: bool + description: |- + Whether the model is intended for testing. + If true, accessing the charm store does not affect statistical + data of the store. (default false) + transmit-vendor-metrics: + type: bool + description: Determines whether metrics declared by charms deployed into this model + are sent for anonymized aggregate analytics + update-status-hook-interval: + type: string + description: How often to run the charm update-status hook, in human-readable time + format (default 5m, range 1-60m) + + + + +--- + +------------------------- + +sed-i | 2024-05-27 16:03:39 UTC | #2 + +@tmihoc, I propose a modification to: + +[quote="system, post:1, topic:10132"] +When bootstrapping to a k8s cluster Juju does not recognise, there’s no guarantee a load balancer is available, so Juju defaults to a controller service type of ClusterIP. This may not be suitable, so there’s 3 bootstrap options available to tell Juju how to set up the controller service. 
Part of the solution may require a load balancer for the cluster to be set up manually first, or perhaps an external k8s service via a FQDN will be used (this is a cluster specific implementation decision which Juju needs to be informed about so it can set things up correctly). + +* controller-service-type +* controller-external-name +* controller-external-ips +[/quote] + +to read as follows: + +[quote] +When bootstrapping to a k8s cluster Juju does not recognise, there’s no guarantee a load balancer is available, so Juju defaults to a controller service type of ClusterIP. This may not be suitable, so there are three bootstrap options available to tell Juju how to set up the controller service. Part of the solution may require a load balancer for the cluster to be set up manually first, or perhaps an external k8s service via a FQDN will be used (this is a cluster specific implementation decision which Juju needs to be informed about so it can set things up correctly). + +* controller-service-type +* controller-external-name +* controller-external-ips + +Juju advertises those addresses to other controllers, so they must be resolveable from other controllers for cross-model (cross-controller, actually) relations to work. +[/quote] + +------------------------- + diff --git a/tmp/t/10133.md b/tmp/t/10133.md new file mode 100644 index 000000000..47451452c --- /dev/null +++ b/tmp/t/10133.md @@ -0,0 +1,44 @@ +system | 2024-09-16 15:53:04 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [add-ssh-key](/t/10238), [remove-ssh-key](/t/10119) +**Alias:** ssh-keys + +## Summary +Lists the currently known SSH keys for the current (or specified) model. + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `--full` | false | Show full key instead of just the fingerprint | +| `-m`, `--model` | | Model to operate in. Accepts [<controller name>:]<model name>|<model UUID> | + +## Examples + + juju ssh-keys + +To examine the full key, use the '--full' option: + + juju ssh-keys -m jujutest --full + + +## Details +Juju maintains a per-model cache of SSH keys which it copies to each newly +created unit. +This command will display a list of all the keys currently used by Juju in +the current model (or the model specified, if the '-m' option is used). +By default a minimal list is returned, showing only the fingerprint of +each key and its text identifier. By using the '--full' option, the entire +key may be displayed. + + + +--- + +------------------------- + diff --git a/tmp/t/10134.md b/tmp/t/10134.md new file mode 100644 index 000000000..3bf0d24ec --- /dev/null +++ b/tmp/t/10134.md @@ -0,0 +1,84 @@ +system | 2024-09-16 15:56:02 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [info](/t/10103), [find](/t/10187) + +## Summary +Locates and then downloads a CharmHub charm. 
+ +## Usage +```juju download [options] [options] ``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `--arch` | all | specify an arch <all|amd64|arm64|ppc64el|riscv64|s390x> | +| `--base` | | specify a base | +| `--channel` | | specify a channel to use instead of the default release | +| `--charmhub-url` | https://api.charmhub.io | specify the Charmhub URL for querying the store | +| `--filepath` | | filepath location of the charm to download to | +| `--no-progress` | false | disable the progress bar | +| `--revision` | -1 | specify a revision of the charm to download | +| `--series` | all | specify a series. DEPRECATED use --base | + +## Examples + + juju download postgresql + juju download postgresql --no-progress - > postgresql.charm + + +## Details + +Download a charm to the current directory from the CharmHub store +by a specified name. Downloading for a specific base can be done via +--base. --base can be specified using the OS name and the version of +the OS, separated by @. For example, --base ubuntu@22.04. + +By default, the latest revision in the default channel will be +downloaded. To download the latest revision from another channel, +use --channel. To download a specific revision, use --revision, +which cannot be used together with --arch, --base, --channel or +--series. + +Adding a hyphen as the second argument allows the download to be piped +to stdout. + + +--- + +------------------------- + +charlie4284 | 2024-06-11 09:27:44 UTC | #2 + +@tmihoc Hi Teodora! +I've noticed that the `--revision` parameter is not valid for juju 2.9 ([run log](https://github.com/canonical/github-runner-operator/actions/runs/9462135485/job/26064664566?pr=295#step:22:107)) `AssertionError: failed to download charm, ERROR option provided but not defined: --revision`. + +The 2.9 supported flags are: +```diff + --arch (= "all") +- --base + --channel (= "") + --charmhub-url (= "https://api.charmhub.io") + --filepath (= "") + --no-progress (= false) +- --revision + --series (= "all") +``` +May I add juju supported version column to the table? + +Thank you! + +------------------------- + +tmihoc | 2024-06-11 09:55:11 UTC | #3 + +@charlie4284 Unfortunately, this doc is currently autogenerated from the source and reflects the latest stable release of Juju only. However, we are aware of the need to document at least all the LTS releases and working on a solution. + +PS The fact that our documentation setup doesn't support versioning means we need to get creative. In the past we handled such issues through manual in-line notes. The benefits of autogeneration are too great to go back to manual, but maybe we can handle multiple LTSes via tabbed content. + +------------------------- + diff --git a/tmp/t/10135.md b/tmp/t/10135.md new file mode 100644 index 000000000..df0fdf820 --- /dev/null +++ b/tmp/t/10135.md @@ -0,0 +1,37 @@ +system | 2024-09-16 15:56:10 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [add-space](/t/10117), [spaces](/t/10236), [reload-spaces](/t/10063), [remove-space](/t/10084), [show-space](/t/10095) + +## Summary +Rename a network space. + +## Usage +```juju rename-space [options] ``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `-m`, `--model` | | Model to operate in. 
Accepts [<controller name>:]<model name>|<model UUID> | +| `--rename` | | the new name for the network space | + +## Examples + +Rename a space from db to fe: + + juju rename-space db fe + + +## Details +Renames an existing space from "old-name" to "new-name". Does not change the +associated subnets and "new-name" must not match another existing space. + +--- + +------------------------- + diff --git a/tmp/t/10136.md b/tmp/t/10136.md new file mode 100644 index 000000000..ffb42bcb6 --- /dev/null +++ b/tmp/t/10136.md @@ -0,0 +1,104 @@ +system | 2024-09-16 15:55:22 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [credentials](/t/10054), [remove-credential](/t/10201), [update-credential](/t/10065), [default-credential](/t/10055), [default-region](/t/10082), [autoload-credentials](/t/10230) + +## Summary +Adds a credential for a cloud to a local client and uploads it to a controller. + +## Usage +```juju add-credential [options] ``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `-c`, `--controller` | | Controller to operate in | +| `--client` | false | Client operation | +| `-f`, `--file` | | The YAML file containing credentials to add | +| `--region` | | Cloud region that credential is valid for | + +## Examples + + juju add-credential google + juju add-credential google --client + juju add-credential google -c mycontroller + juju add-credential aws -f ~/credentials.yaml -c mycontroller + juju add-credential aws -f ~/credentials.yaml + juju add-credential aws -f ~/credentials.yaml --client + + +## Details + +The juju add-credential command operates in two modes. + +When called with only the <cloud name> argument, `juju add-credential` will +take you through an interactive prompt to add a credential specific to +the cloud provider. + +Providing the `-f ` option switches to the +non-interactive mode. <credentials.yaml> must be a path to a correctly +formatted YAML-formatted file. + +Sample yaml file shows five credentials being stored against four clouds: + + credentials: + aws: + : + auth-type: access-key + access-key: + secret-key: + azure: + : + auth-type: service-principal-secret + application-id: + application-password: + subscription-id: + lxd: + : + auth-type: interactive + trust-password: + : + auth-type: interactive + trust-password: + google: + : + auth-type: oauth2 + project-id: + private-key: + client-email: + client-id: + +The <credential-name> parameter of each credential is arbitrary, but must +be unique within each <cloud-name>. This allows each cloud to store +multiple credentials. + +The format for a credential is cloud-specific. Thus, it's best to use +'add-credential' command in an interactive mode. This will result in +adding this new credential locally and / or uploading it to a controller +in a correct format for the desired cloud. + + +Notes: +If you are setting up Juju for the first time, consider running +`juju autoload-credentials`. This may allow you to skip adding +credentials manually. + +This command does not set default regions nor default credentials for the +cloud. The commands `juju default-region` and `juju default-credential` +provide that functionality. + +Use --controller option to upload a credential to a controller. + +Use --client option to add a credential to the current client. 
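+
+As an illustration of the non-interactive mode described above, a minimal
+credentials file for AWS might look like the following (the file name,
+credential name and key values here are placeholders, not real credentials):
+
+    credentials:
+      aws:
+        my-aws-creds:
+          auth-type: access-key
+          access-key: AKIAIOSFODNN7EXAMPLE
+          secret-key: wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY
+
+Saved as ~/aws-creds.yaml, the credential could then be added to this client
+and uploaded to a controller in one step:
+
+    juju add-credential aws -f ~/aws-creds.yaml -c mycontroller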
+ + + +--- + +------------------------- + diff --git a/tmp/t/10137.md b/tmp/t/10137.md new file mode 100644 index 000000000..a47bd52ae --- /dev/null +++ b/tmp/t/10137.md @@ -0,0 +1,41 @@ +system | 2024-09-16 15:53:36 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [models](/t/10090), [constraints](/t/10060), [set-constraints](/t/10210), [set-model-constraints](/t/10208) + +## Summary +Displays machine constraints for a model. + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `--format` | constraints | Specify output format (constraints|json|yaml) | +| `-m`, `--model` | | Model to operate in. Accepts [<controller name>:]<model name>|<model UUID> | +| `-o`, `--output` | | Specify an output file | + +## Examples + + juju model-constraints + juju model-constraints -m mymodel + + +## Details +Shows constraints that have been set on the model with +`juju set-model-constraints.` +By default, the model is the current model. +Model constraints are combined with constraints set on an application +with `juju set-constraints` for commands (such as 'deploy') that provision +machines/containers for applications. Where model and application constraints overlap, the +application constraints take precedence. +Constraints for a specific application can be viewed with `juju constraints`. + + +--- + +------------------------- + diff --git a/tmp/t/10138.md b/tmp/t/10138.md new file mode 100644 index 000000000..e22e0dfeb --- /dev/null +++ b/tmp/t/10138.md @@ -0,0 +1,50 @@ +system | 2024-09-16 15:51:19 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [show-storage](/t/10184), [add-storage](/t/10159), [remove-storage](/t/10066) +**Alias:** storage + +## Summary +Lists storage details. + +## Usage +```juju list-storage [options] ...``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `--filesystem` | false | List filesystem storage(deprecated) | +| `--format` | tabular | Specify output format (json|tabular|yaml) | +| `-m`, `--model` | | Model to operate in. Accepts [<controller name>:]<model name>|<model UUID> | +| `-o`, `--output` | | Specify an output file | +| `--volume` | false | List volume storage(deprecated) | + +## Examples + +List all storage: + + juju storage + +List only filesystem storage: + + juju storage --filesystem + +List only volume storage: + + juju storage --volume + + +## Details + +List information about storage. + + +--- + +------------------------- + diff --git a/tmp/t/10139.md b/tmp/t/10139.md new file mode 100644 index 000000000..6a17f49c6 --- /dev/null +++ b/tmp/t/10139.md @@ -0,0 +1,151 @@ +system | 2024-09-16 15:53:40 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [deploy](/t/10074), [status](/t/10173), [model-config](/t/10096), [controller-config](/t/10237) + +## Summary +Gets, sets, or resets configuration for a deployed application. 
+ +## Usage +```juju config [options] [--branch ] [--reset ] [][=] ...]``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `--color` | false | Use ANSI color codes in output | +| `--file` | | path to yaml-formatted configuration file | +| `--format` | yaml | Specify output format (json|yaml) | +| `-m`, `--model` | | Model to operate in. Accepts [<controller name>:]<model name>|<model UUID> | +| `--no-color` | false | Disable ANSI color codes in tabular output | +| `-o`, `--output` | | Specify an output file | +| `--reset` | | Reset the provided comma delimited keys | + +## Examples + +To view all configuration values for an application, run + + juju config mysql --format json + +To set a configuration value for an application, run + + juju config mysql foo=bar + +To set some keys and reset others: + + juju config mysql key1=val1 key2=val2 --reset key3,key4 + +To set a configuration value for an application from a file: + + juju config mysql --file=path/to/cfg.yaml + + +## Details + +To view all configuration values for an application: + + juju config + +By default, the config will be printed in yaml format. You can instead print it +in json format using the --format flag: + + juju config --format json + +To view the value of a single config key, run + + juju config key + +To set config values, run + + juju config key1=val1 key2=val2 ... + +This sets "key1" to "val1", etc. Using the @ directive, you can set a config +key's value to the contents of a file: + + juju config key=@/tmp/configvalue + +You can also reset config keys to their default values: + + juju config --reset key1 + juju config --reset key1,key2,key3 + +You may simultaneously set some keys and reset others: + + juju config key1=val1 key2=val2 --reset key3,key4 + +Config values can be imported from a yaml file using the --file flag: + + juju config --file=path/to/cfg.yaml + +The yaml file should be in the following format: + + apache2: # application name + servername: "example.com" # key1: val1 + lb_balancer_timeout: 60 # key2: val2 + ... + +This allows you to e.g. save an app's config to a file: + + juju config app1 > cfg.yaml + +and then import the config later. You can also read from stdin using "-", +which allows you to pipe config values from one app to another: + + juju config app1 | juju config app2 --file - + +You can simultaneously read config from a yaml file and set/reset config keys +as above. The command-line args will override any values specified in the file. + +By default, any configuration changes will be applied to the currently active +branch. A specific branch can be targeted using the --branch option. Changes +can be immediately be applied to the model by specifying --branch=master. For +example: + + juju config apache2 --branch=master servername=example.com + juju config apache2 --branch test-branch servername=staging.example.com + +Rather than specifying each setting name/value inline, the --file flag option +may be used to provide a list of settings to be updated as a yaml file. The +yaml file contents must include a single top-level key with the application's +name followed by a dictionary of key/value pairs that correspond to the names +and values of the settings to be set. 
For instance, to configure apache2, +the following yaml file can be used: + + apache2: + servername: "example.com" + lb_balancer_timeout: 60 + +If the above yaml document is stored in a file called config.yaml, the +following command can be used to apply the config changes: + + juju config apache2 --file config.yaml + +Finally, the --reset flag can be used to revert one or more configuration +settings back to their default value as defined in the charm metadata: + + juju config apache2 --reset servername + juju config apache2 --reset servername,lb_balancer_timeout + + +--- + +------------------------- + +ghibourg | 2024-07-09 19:28:10 UTC | #2 + +Hello, the expected format of the configuration YAML file is not rendering properly, making it hard to read. + +Thanks! + +------------------------- + +tmihoc | 2024-07-10 10:38:32 UTC | #3 + +Fixed it here (temporary fix) and [in the source](https://github.com/juju/juju/pull/17701) (permanent fix). Thanks for bringing it up! + +------------------------- + diff --git a/tmp/t/10140.md b/tmp/t/10140.md new file mode 100644 index 000000000..2f6d61a05 --- /dev/null +++ b/tmp/t/10140.md @@ -0,0 +1,178 @@ +system | 2024-09-16 15:55:34 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [consume](/t/10213), [find-offers](/t/10097), [set-firewall-rule](/t/10151), [suspend-relation](/t/10179) +**Alias:** integrate + +## Summary +Integrate two applications. + +## Usage +```juju relate [options] [:] [:]``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-m`, `--model` | | Model to operate in. Accepts [<controller name>:]<model name>|<model UUID> | +| `--via` | | for cross model integrations, specify the egress subnets for outbound traffic | + +## Examples + +Integrate wordpress and percona-cluster, asking Juju to resolve + the endpoint names. Expands to "wordpress:db" (with the requires role) and + "percona-cluster:server" (with the provides role). + + juju integrate wordpress percona-cluster + +Integrate wordpress and postgresql, using an explicit +endpoint name. + + juju integrate wordpress postgresql:db + +Integrate an etcd instance within the current model to centrally managed +EasyRSA Certificate Authority hosted in the "secrets" model. + + juju integrate etcd secrets.easyrsa + +Integrate a wordpress application with a mysql application hosted within the +"prod" model, using the "automation" user. Facilitate firewall management +by specifying the routes used for integration data. + + juju integrate wordpress automation/prod.mysql --via 192.168.0.0/16,10.0.0.0/8 + + +## Details + +Integrate two applications. Integrated applications communicate over a common +interface provided by the Juju controller that enables units to share information. +This topology allows units to share data, without needing direct connectivity +between units is restricted by firewall rules. Charms define the logic for +transferring and interpreting integration data. + +The most common use of 'juju integrate' specifies two applications that co-exist +within the same model: + + juju integrate + +Occasionally, more explicit syntax is required. Juju is able to integrate +units that span models, controllers and clouds, as described below. + + +Integrating applications in the same model + +The most common case specifies two applications, adding specific endpoint +name(s) when required. 
+ + juju integrate [:] [:] + +The role and endpoint names are described by charms' metadata.yaml file. + +The order does not matter, however each side must implement complementary roles. +One side implements the "provides" role and the other implements the "requires" +role. Juju can always infer the role that each side is implementing, so specifying +them is not necessary as command-line arguments. + +<application> is the name of an application that has already been added to the +model. The Applications section of 'juju status' provides a list of current +applications. + +<endpoint> is the name of an endpoint defined within the metadata.yaml +of the charm for <application>. Valid endpoint names are defined within the +"provides:" and "requires:" section of that file. Juju will request that you +specify the <endpoint> if there is more than one possible integration between +the two applications. + + +Subordinate applications + +Subordinate applications are designed to be deployed alongside a primary +application. They must define a container scoped endpoint. When that endpoint +is related to a primary application, wherever a unit of the primary application +is deployed, a corresponding unit of the subordinate application will also be +deployed. Integration with the primary application has the same syntax as +integration any two applications within the same model. + + +Peer integrations + +Integrations within an application between units (known as "peer integrations") do +not need to be added manually. They are created when the 'juju add-unit' and +'juju scale-application' commands are executed. + + +Cross-model integrations + +Applications can be integrated, even when they are deployed to different models. +Those models may be managed by different controllers and/or be hosted on +different clouds. This functionality is known as "cross-model integration" or CMI. + + +Cross-model integrations: different model on the same controller + +Integrating applications in models managed by the same controller +is very similar to adding an integration between applications in the same model: + + juju integrate [:] .[:] + +<model> is the name of the model outside of the current context. This enables the +Juju controller to bridge two models. You can list the currently available +models with 'juju models'. + +To integrate models outside of the current context, add the '-m <model>' option: + + juju integrate -m [:] \ + .[:] + + +Cross-model integrations: different controllers + +Applications can be integrated with a remote application via an "offer URL" that has +been generated by the 'juju offer' command. The syntax for adding a cross-model +integration is similar to adding a local integration: + + juju integrate [:] + +<offer-endpoint> describes the remote application, from the point of view of the +local one. An <offer-endpoint> takes one of two forms: + + + [:] + +<offer-alias> is an alias that has been defined by the 'juju consume' command. +Use the 'juju find-offers' command to list aliases. + +<offer-url> is a path to enable Juju to resolve communication between +controllers and the models they control. + + [[:]/]. + +<controller> is the name of a controller. The 'juju controllers' command +provides a list of controllers. + +<user> is the user account of the model's owner. 
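+
+For example, an integration that spells out the full offer URL, including the
+offering controller and the owning user (the controller, user, model and offer
+names below are purely illustrative), might look like:
+
+    juju integrate wordpress prod-controller:automation/prod.mysql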
+ + +Cross-model integration: network management + +When the consuming side (the local application) is behind a firewall and/or +NAT is used for outbound traffic, it is possible to use the '--via' option to +inform the offering side (the remote application) the source of traffic to +enable network ports to be opened. + + ... --via [,[, ...]] + + +Further reading: + + https://juju.is/docs/juju/integration + https://juju.is/docs/juju/cross-model-integration + + +--- + +------------------------- + diff --git a/tmp/t/10141.md b/tmp/t/10141.md new file mode 100644 index 000000000..572ae77d0 --- /dev/null +++ b/tmp/t/10141.md @@ -0,0 +1,106 @@ +system | 2024-09-16 15:52:56 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [remove-unit](/t/10125) + +## Summary +Adds one or more units to a deployed application. + +## Usage +```juju add-unit [options] ``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `--attach-storage` | | Existing storage to attach to the deployed unit (not available on k8s models) | +| `-m`, `--model` | | Model to operate in. Accepts [<controller name>:]<model name>|<model UUID> | +| `-n`, `--num-units` | 1 | Number of units to add | +| `--to` | | The machine and/or container to deploy the unit in (bypasses constraints) | + +## Examples + +Add five units of mysql on five new machines: + + juju add-unit mysql -n 5 + +Add a unit of mysql to machine 23 (which already exists): + + juju add-unit mysql --to 23 + +Add two units of mysql to existing machines 3 and 4: + + juju add-unit mysql -n 2 --to 3,4 + +Add three units of mysql, one to machine 3 and the others to new +machines: + + juju add-unit mysql -n 3 --to 3 + +Add a unit of mysql into a new LXD container on machine 7: + + juju add-unit mysql --to lxd:7 + +Add two units of mysql into two new LXD containers on machine 7: + + juju add-unit mysql -n 2 --to lxd:7,lxd:7 + +Add three units of mysql, one to a new LXD container on machine 7, +and the others to new machines: + + juju add-unit mysql -n 3 --to lxd:7 + +Add a unit of mysql to LXD container number 3 on machine 24: + + juju add-unit mysql --to 24/lxd/3 + +Add a unit of mysql to LXD container on a new machine: + + juju add-unit mysql --to lxd + + +## Details +The add-unit is used to scale out an application for improved performance or +availability. + +The usage of this command differs depending on whether it is being used on a +k8s or cloud model. + +Many charms will seamlessly support horizontal scaling while others may need +an additional application support (e.g. a separate load balancer). See the +documentation for specific charms to check how scale-out is supported. + +For k8s models the only valid argument is -n, --num-units. +Anything additional will result in an error. + +Example: + +Add five units of mysql: + juju add-unit mysql --num-units 5 + + +For cloud models, by default, units are deployed to newly provisioned machines +in accordance with any application or model constraints. + +This command also supports the placement directive ("--to") for targeting +specific machines or containers, which will bypass application and model +constraints. --to accepts a comma-separated list of placement specifications +(see examples below). If the length of this list is less than the number of +units being added, the remaining units will be added in the default way (i.e. +to new machines). 
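+
+As a concrete illustration of the rule above (assuming machines 24 and 25
+already exist in the model), the following requests three units but supplies
+only two placement directives, so the third unit is placed on a new machine:
+
+    juju add-unit mysql -n 3 --to 24,lxd:25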
+ + + +--- + +------------------------- + +jadonn | 2024-03-19 18:45:21 UTC | #2 + +I think this post needs to have the table of command line flags updated. It has `--m` and `--n` as flags, but they should really be `-m` and `-n. + +------------------------- + diff --git a/tmp/t/10142.md b/tmp/t/10142.md new file mode 100644 index 000000000..01d506a54 --- /dev/null +++ b/tmp/t/10142.md @@ -0,0 +1,58 @@ +system | 2024-09-16 15:55:41 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [deploy](/t/10074) + +## Summary +Compare a bundle with a model and report any differences. + +## Usage +```juju diff-bundle [options] ``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `--annotations` | false | Include differences in annotations | +| `--arch` | | specify an arch <all|amd64|arm64|ppc64el|riscv64|s390x> | +| `--base` | | specify a base | +| `--channel` | | Channel to use when getting the bundle from Charmhub | +| `-m`, `--model` | | Model to operate in. Accepts [<controller name>:]<model name>|<model UUID> | +| `--map-machines` | | Indicates how existing machines correspond to bundle machines | +| `--overlay` | | Bundles to overlay on the primary bundle, applied in order | +| `--series` | | specify a series. DEPRECATED: use --base | + +## Examples + + juju diff-bundle localbundle.yaml + juju diff-bundle charmed-kubernetes + juju diff-bundle charmed-kubernetes --overlay local-config.yaml --overlay extra.yaml + juju diff-bundle charmed-kubernetes --base ubuntu@22.04 + juju diff-bundle -m othermodel hadoop-spark + juju diff-bundle localbundle.yaml --map-machines 3=4 + + +## Details + +Bundle can be a local bundle file or the name of a bundle in +Charmhub. The bundle can also be combined with overlays (in the +same way as the deploy command) before comparing with the model. + +The map-machines option works similarly as for the deploy command, but +existing is always assumed, so it doesn't need to be specified. + +Config values for comparison are always source from the "current" model +generation. + +Specifying a base will retrieve the bundle for the relevant store for +the give base. + + +--- + +------------------------- + diff --git a/tmp/t/10143.md b/tmp/t/10143.md new file mode 100644 index 000000000..a6db365ee --- /dev/null +++ b/tmp/t/10143.md @@ -0,0 +1,33 @@ +system | 2024-09-16 15:54:59 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [collect-metrics](/t/10085) + +## Summary +Retrieve metrics collected by specified entities. + +## Usage +```juju metrics [options] [tag1[...tagN]]``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `--all` | false | retrieve metrics collected by all units in the model | +| `--format` | tabular | Specify output format (json|tabular|yaml) | +| `-m`, `--model` | | Model to operate in. Accepts [<controller name>:]<model name>|<model UUID> | +| `-o`, `--output` | | Specify an output file | + +## Details + +Display recently collected metrics. 
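+
+For example, metrics gathered by all units in the model can be listed with:
+
+    juju metrics --all --format yaml
+
+or, assuming a unit named mysql/0 whose charm collects metrics, restricted to
+just that unit:
+
+    juju metrics mysql/0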
+ + +--- + +------------------------- + diff --git a/tmp/t/10144.md b/tmp/t/10144.md new file mode 100644 index 000000000..ad0fed54a --- /dev/null +++ b/tmp/t/10144.md @@ -0,0 +1,36 @@ +system | 2024-09-16 15:55:29 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + + +## Summary +Marks unit errors resolved and re-executes failed hooks. + +## Usage +```juju resolved [options] [ ...]``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `--all` | false | Marks all units in error as resolved | +| `-m`, `--model` | | Model to operate in. Accepts [<controller name>:]<model name>|<model UUID> | +| `--no-retry` | false | Do not re-execute failed hooks on the unit | + +## Examples + + + juju resolved mysql/0 + + juju resolved mysql/0 mysql/1 + + juju resolved --all + + +--- + +------------------------- + diff --git a/tmp/t/10145.md b/tmp/t/10145.md new file mode 100644 index 000000000..4da7a00a3 --- /dev/null +++ b/tmp/t/10145.md @@ -0,0 +1,73 @@ +system | 2024-09-16 15:54:19 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [model-config](/t/10096), [model-defaults](/t/10057), [add-credential](/t/10136), [autoload-credentials](/t/10230) + +## Summary +Adds a workload model. + +## Usage +```juju add-model [options] [cloud|region|(cloud/region)]``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `-c`, `--controller` | | Controller to operate in | +| `--config` | | Path to YAML model configuration file or individual options (--config config.yaml [--config key=value ...]) | +| `--credential` | | Credential used to add the model | +| `--no-switch` | false | Do not switch to the newly created model | +| `--owner` | | The owner of the new model if not the current user | + +## Examples + + juju add-model mymodel + juju add-model mymodel us-east-1 + juju add-model mymodel aws/us-east-1 + juju add-model mymodel --config my-config.yaml --config image-stream=daily + juju add-model mymodel --credential credential_name --config authorized-keys="ssh-rsa ..." + + +## Details +Adding a model is typically done in order to run a specific workload. + +To add a model, you must specify a model name. Model names can be duplicated +across controllers but must be unique per user for any given controller. +In other words, Alice and Bob can each have their own model called "secret" but +Alice can have only one model called "secret" in a controller. +Model names may only contain lowercase letters, digits and hyphens, and +may not start with a hyphen. + +To add a model, Juju requires a credential: +* if you have a default (or just one) credential defined at client + (i.e. in credentials.yaml), then juju will use that; +* if you have no default (and multiple) credentials defined at the client, + then you must specify one using --credential; +* as the admin user you can omit the credential, + and the credential used to bootstrap will be used. + +To add a credential for add-model, use one of the "juju add-credential" or +"juju autoload-credentials" commands. These will add credentials +to the Juju client, which "juju add-model" will upload to the controller +as necessary. 
+ +You may also supply model-specific configuration as well as a +cloud/region to which this model will be deployed. The cloud/region and credentials +are the ones used to create any future resources within the model. + +If no cloud/region is specified, then the model will be deployed to +the same cloud/region as the controller model. If a region is specified +without a cloud qualifier, then it is assumed to be in the same cloud +as the controller model. + +When adding --config, the default-series key is deprecated in favour of +default-base, e.g. ubuntu@22.04. + +--- + +------------------------- + diff --git a/tmp/t/10146.md b/tmp/t/10146.md new file mode 100644 index 000000000..ab3e66651 --- /dev/null +++ b/tmp/t/10146.md @@ -0,0 +1,37 @@ +system | 2024-09-16 15:51:22 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +**Alias:** resolved + +## Summary +Marks unit errors resolved and re-executes failed hooks. + +## Usage +```juju resolve [options] [ ...]``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `--all` | false | Marks all units in error as resolved | +| `-m`, `--model` | | Model to operate in. Accepts [<controller name>:]<model name>|<model UUID> | +| `--no-retry` | false | Do not re-execute failed hooks on the unit | + +## Examples + + + juju resolved mysql/0 + + juju resolved mysql/0 mysql/1 + + juju resolved --all + + +--- + +------------------------- + diff --git a/tmp/t/10147.md b/tmp/t/10147.md new file mode 100644 index 000000000..fbc3ef486 --- /dev/null +++ b/tmp/t/10147.md @@ -0,0 +1,55 @@ +system | 2024-09-16 15:53:16 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [credentials](/t/10054), [add-credential](/t/10136), [update-credential](/t/10065), [remove-credential](/t/10201), [autoload-credentials](/t/10230) +**Alias:** show-credential + +## Summary +Shows credential information stored either on this client or on a controller. + +## Usage +```juju show-credentials [options] [ ]``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `-c`, `--controller` | | Controller to operate in | +| `--client` | false | Client operation | +| `--format` | yaml | Specify output format (yaml) | +| `-o`, `--output` | | Specify an output file | +| `--show-secrets` | false | Display credential secret attributes | + +## Examples + + juju show-credential google my-admin-credential + juju show-credentials + juju show-credentials --controller mycontroller --client + juju show-credentials --controller mycontroller + juju show-credentials --client + juju show-credentials --show-secrets + + +## Details + +This command displays information about cloud credential(s) stored +either on this client or on a controller for this user. + +To see the contents of a specific credential, supply its cloud and name. +To see all credentials stored for you, supply no arguments. + +To see secrets, content attributes marked as hidden, use --show-secrets option. + +To see credentials from this client, use "--client" option. + +To see credentials from a controller, use "--controller" option. 
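+
+Putting these options together, the secret attributes of a single credential
+stored on this client (the cloud and credential names below are illustrative)
+could be inspected with:
+
+    juju show-credentials google my-admin-credential --client --show-secrets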
+ + +--- + +------------------------- + diff --git a/tmp/t/10148.md b/tmp/t/10148.md new file mode 100644 index 000000000..038fda8d0 --- /dev/null +++ b/tmp/t/10148.md @@ -0,0 +1,32 @@ +system | 2024-09-16 15:53:50 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [controllers](/t/10152), [login](/t/10157), [logout](/t/10183), [models](/t/10090), [users](/t/10175) + +## Summary +Print current login details. + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `--format` | tabular | Specify output format (json|tabular|yaml) | +| `-o`, `--output` | | Specify an output file | + +## Examples + + juju whoami + + +## Details +Display the current controller, model and logged in user name. + + +--- + +------------------------- + diff --git a/tmp/t/10149.md b/tmp/t/10149.md new file mode 100644 index 000000000..2ec432230 --- /dev/null +++ b/tmp/t/10149.md @@ -0,0 +1,35 @@ +system | 2024-09-16 15:52:34 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [add-secret-backend](/t/10062), [remove-secret-backend](/t/10194), [show-secret-backend](/t/10059), [update-secret-backend](/t/10176) + +## Summary +Lists secret backends available in the controller. + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-c`, `--controller` | | Controller to operate in | +| `--format` | tabular | Specify output format (json|tabular|yaml) | +| `-o`, `--output` | | Specify an output file | +| `--reveal` | false | Include sensitive backend config content | + +## Examples + + juju secret-backends + juju secret-backends --format yaml + + +## Details + +Displays the secret backends available for storing secret content. + + +--- + +------------------------- + diff --git a/tmp/t/10150.md b/tmp/t/10150.md new file mode 100644 index 000000000..a81af591b --- /dev/null +++ b/tmp/t/10150.md @@ -0,0 +1,86 @@ +system | 2024-09-16 15:53:22 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [add-credential](/t/10136), [update-credential](/t/10065), [remove-credential](/t/10201), [default-credential](/t/10055), [autoload-credentials](/t/10230), [show-credential](/t/10105) +**Alias:** credentials + +## Summary +Lists Juju credentials for a cloud. + +## Usage +```juju list-credentials [options] []``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `-c`, `--controller` | | Controller to operate in | +| `--client` | false | Client operation | +| `--format` | tabular | Specify output format (json|tabular|yaml) | +| `-o`, `--output` | | Specify an output file | +| `--show-secrets` | false | Show secrets, applicable to yaml or json formats only | + +## Examples + + juju credentials + juju credentials aws + juju credentials aws --client + juju credentials --format yaml --show-secrets + juju credentials --controller mycontroller + juju credentials --controller mycontroller --client + + +## Details +This command list credentials from this client and credentials +from a controller. 
+ +Locally stored credentials are client specific and +are used with `juju bootstrap` +and `juju add-model`. It's paramount to understand that +different client devices may have different locally stored credentials +for the same user. + +Remotely stored credentials or controller stored credentials are +stored on the controller. + +An arbitrary "credential name" is used to represent credentials, which are +added either via `juju add-credential` or `juju autoload-credentials`. +Note that there can be multiple sets of credentials and, thus, multiple +names. + +Actual authentication material is exposed with the '--show-secrets' +option in json or yaml formats. Secrets are not shown in tabular format. + +A controller, and subsequently created models, can be created with a +different set of credentials but any action taken within the model (e.g.: +`juju deploy`; `juju add-unit`) applies the credential used +to create that model. This model credential is stored on the controller. + +A credential for 'controller' model is determined at bootstrap time and +will be stored on the controller. It is considered to be controller default. + +Recall that when a controller is created a 'default' model is also +created. This model will use the controller default credential. +To see details of your credentials use "juju show-credential" command. + +When adding a new model, Juju will reuse the controller default credential. +To add a model that uses a different credential, specify a credential +from this client using --credential option. See `juju help add-model` +for more information. + +Credentials denoted with an asterisk '*' are currently set as the user default +for a given cloud. + +Use --controller option to list credentials from a controller. + +Use --client option to list credentials known locally on this client. + + +--- + +------------------------- + diff --git a/tmp/t/10151.md b/tmp/t/10151.md new file mode 100644 index 000000000..331d57075 --- /dev/null +++ b/tmp/t/10151.md @@ -0,0 +1,46 @@ +system | 2024-09-16 15:53:38 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [firewall-rules](/t/10061) + +## Summary +Sets a firewall rule. + +## Usage +```juju set-firewall-rule [options] , --allowlist [,...]``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `--allowlist` | | list of subnets to allowlist | +| `-m`, `--model` | | Model to operate in. Accepts [<controller name>:]<model name>|<model UUID> | +| `--whitelist` | | | + +## Examples + + juju set-firewall-rule ssh --allowlist 192.168.1.0/16 + + +## Details + +Firewall rules control ingress to a well known services +within a Juju model. A rule consists of the service name +and a allowlist of allowed ingress subnets. +The currently supported services are: +- ssh +- juju-application-offer + +DEPRECATION WARNING: Firewall rules have been moved to model-config settings "ssh-allow" and +"saas-ingress-allow". This command is deprecated in favour of +reading/writing directly to these settings. + + + +--- + +------------------------- + diff --git a/tmp/t/10152.md b/tmp/t/10152.md new file mode 100644 index 000000000..5e7560597 --- /dev/null +++ b/tmp/t/10152.md @@ -0,0 +1,37 @@ +system | 2024-09-16 15:57:43 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. 
+[/note] + +> See also: [models](/t/10090), [show-controller](/t/10156) + +## Summary +Lists all controllers. + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `--format` | tabular | Specify output format (json|tabular|yaml) | +| `-o`, `--output` | | Specify an output file | +| `--refresh` | false | Connect to each controller to download the latest details | + +## Examples + + juju controllers + juju controllers --format json --output ~/tmp/controllers.json + + + +## Details +The output format may be selected with the '--format' option. In the +default tabular output, the current controller is marked with an asterisk. + + + +--- + +------------------------- + diff --git a/tmp/t/10153.md b/tmp/t/10153.md new file mode 100644 index 000000000..bc11fa959 --- /dev/null +++ b/tmp/t/10153.md @@ -0,0 +1,128 @@ +system | 2024-09-16 15:55:39 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [scp](/t/10128) + +## Summary +Initiates an SSH session or executes a command on a Juju machine or container. + +## Usage +```juju ssh [options] <[user@]target> [openssh options] [command]``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `--container` | | the container name of the target pod | +| `-m`, `--model` | | Model to operate in. Accepts [<controller name>:]<model name>|<model UUID> | +| `--no-host-key-checks` | false | Skip host key checking (INSECURE) | +| `--proxy` | false | Proxy through the API server | +| `--pty` | <auto> | Enable pseudo-tty allocation | +| `--remote` | false | Target on the workload or operator pod (k8s-only) | + +## Examples + +Connect to machine 0: + + juju ssh 0 + +Connect to machine 1 and run command 'uname -a': + + juju ssh 1 uname -a + +Connect to the leader mysql unit: + + juju ssh mysql/leader + +Connect to a specific mysql unit: + + juju ssh mysql/0 + +Connect to a jenkins unit as user jenkins: + + juju ssh jenkins@jenkins/0 + +Connect to a mysql unit with an identity not known to juju (ssh option -i): + + juju ssh mysql/0 -i ~/.ssh/my_private_key echo hello + +**For k8s charms running the workload in a separate pod:** + +Connect to a k8s unit targeting the operator pod by default: + + juju ssh mysql/0 + juju ssh mysql/0 bash + +Connect to a k8s unit targeting the workload pod by specifying --remote: + + juju ssh --remote mysql/0 + +**For k8s charms using the sidecar pattern:** + +Connect to a k8s unit targeting the charm container (the default): + + juju ssh --container charm snappass/0 + +Connect to a k8s unit targeting the redis container: + + juju ssh --container redis snappass/0 + +**For k8s controller:** + +Connect to the api server pod: + + juju ssh --container api-server 0 + +Connect to the mongo db pod: + + juju ssh --container mongodb 0 + + +## Details + +The ssh target is identified by the <target> argument which is either a 'unit +name' or a 'machine id'. Both can be obtained by examining the output to "juju +status". + +Valid unit identifiers are: + a standard unit ID, such as mysql/0 or; + leader syntax of the form <application>/leader, such as mysql/leader. + +If 'user' is specified then the connection is made to that user +account; otherwise, the default 'ubuntu' account, created by Juju, is used. + +The optional command is executed on the remote machine, and any output is sent +back to the user. 
If no command is specified, then an interactive shell session +will be initiated. + +When "juju ssh" is executed without a terminal attached, e.g. when piping the +output of another command into it, then the default behavior is to not allocate +a pseudo-terminal (pty) for the ssh session; otherwise a pty is allocated. This +behavior can be overridden by explicitly specifying the behavior with +"--pty=true" or "--pty=false". + +The SSH host keys of the target are verified. The --no-host-key-checks option +can be used to disable these checks. Use of this option is not recommended as +it opens up the possibility of a man-in-the-middle attack. + +The default identity known to Juju and used by this command is ~/.ssh/id_rsa + +Options can be passed to the local OpenSSH client (ssh) on platforms +where it is available. This is done by inserting them between the target and +a possible remote command. Refer to the ssh man page for an explanation +of those options. + +For k8s charms, the --container argument is used to identity a specific +container in the pod. For charms which run the workload in a separate pod +to that of the charm, the default ssh target is the charm operator pod. +The workload pod may be specified using the --remote argument. + + + +--- + +------------------------- + diff --git a/tmp/t/10154.md b/tmp/t/10154.md new file mode 100644 index 000000000..ef0dd39e4 --- /dev/null +++ b/tmp/t/10154.md @@ -0,0 +1,50 @@ +system | 2024-09-16 15:53:27 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [add-user](/t/10193), [register](/t/10160), [show-user](/t/10212), [disable-user](/t/10198), [enable-user](/t/10241) +**Alias:** users + +## Summary +Lists Juju users allowed to connect to a controller or model. + +## Usage +```juju list-users [options] [model-name]``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `--all` | false | Include disabled users | +| `-c`, `--controller` | | Controller to operate in | +| `--exact-time` | false | Use full timestamp for connection times | +| `--format` | tabular | Specify output format (json|tabular|yaml) | +| `-o`, `--output` | | Specify an output file | + +## Examples + +Print the users relevant to the current controller: + + juju users + +Print the users relevant to the controller "another": + + juju users -c another + +Print the users relevant to the model "mymodel": + + juju users mymodel + + +## Details +When used without a model name argument, users relevant to a controller are printed. +When used with a model name, users relevant to the specified model are printed. + + + +--- + +------------------------- + diff --git a/tmp/t/10155.md b/tmp/t/10155.md new file mode 100644 index 000000000..1d177d054 --- /dev/null +++ b/tmp/t/10155.md @@ -0,0 +1,55 @@ +system | 2024-09-16 15:57:08 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [add-k8s](/t/10049), [remove-k8s](/t/10098) + +## Summary +Updates an existing k8s endpoint used by Juju. 
+ +## Usage +```juju update-k8s [options] ``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `-c`, `--controller` | | Controller to operate in | +| `--client` | false | Client operation | +| `-f` | | The path to a cloud definition file | + +## Examples + + juju update-k8s microk8s + juju update-k8s myk8s -f path/to/k8s.yaml + juju update-k8s myk8s -f path/to/k8s.yaml --controller mycontroller + juju update-k8s myk8s --controller mycontroller + juju update-k8s myk8s --client --controller mycontroller + juju update-k8s myk8s --client -f path/to/k8s.yaml + + +## Details + +Update k8s cloud information on this client and/or on a controller. + +The k8s cloud can be a built-in cloud like microk8s. + +A k8s cloud can also be updated from a file. This requires a <cloud name> and +a yaml file containing the cloud details. + +A k8s cloud on the controller can also be updated just by using a name of a k8s cloud +from this client. + +Use --controller option to update a k8s cloud on a controller. + +Use --client to update a k8s cloud definition on this client. + + + +--- + +------------------------- + diff --git a/tmp/t/10156.md b/tmp/t/10156.md new file mode 100644 index 000000000..745c59f70 --- /dev/null +++ b/tmp/t/10156.md @@ -0,0 +1,39 @@ +system | 2024-09-16 15:52:49 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [controllers](/t/10152) + +## Summary +Shows detailed information of a controller. + +## Usage +```juju show-controller [options] [ ...]``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `--format` | yaml | Specify output format (json|yaml) | +| `-o`, `--output` | | Specify an output file | +| `--show-password` | false | Show password for logged in user | + +## Examples + + juju show-controller + juju show-controller aws google + + +## Details +Shows extended information about a controller(s) as well as related models +and user login details. + + + +--- + +------------------------- + diff --git a/tmp/t/10157.md b/tmp/t/10157.md new file mode 100644 index 000000000..47f5933a9 --- /dev/null +++ b/tmp/t/10157.md @@ -0,0 +1,67 @@ +system | 2024-09-16 15:51:39 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [disable-user](/t/10198), [enable-user](/t/10241), [logout](/t/10183), [register](/t/10160), [unregister](/t/10165) + +## Summary +Logs a user in to a controller. + +## Usage +```juju login [options] [controller host name or alias]``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `-c`, `--controller` | | Controller to operate in | +| `--no-prompt` | false | don't prompt for password just read a line from stdin | +| `--trust` | false | automatically trust controller CA certificate | +| `-u`, `--user` | | log in as this local user | + +## Examples + + juju login somepubliccontroller + juju login jimm.jujucharms.com + juju login -u bob + + +## Details + +By default, the juju login command logs the user into a controller. +The argument to the command can be a public controller +host name or alias (see Aliases below). 
+ +If no argument is provided, the controller specified with +the -c argument will be used, or the current controller +if that's not provided. + +On success, the current controller is switched to the logged-in +controller. + +If the user is already logged in, the juju login command does nothing +except verify that fact. + +If the -u option is provided, the juju login command will attempt to log +into the controller as that user. + +After login, a token ("macaroon") will become active. It has an expiration +time of 24 hours. Upon expiration, no further Juju commands can be issued +and the user will be prompted to log in again. + +Aliases +------- + +Public controller aliases are provided by a directory service +that is queried to find the host name for a given alias. +The URL for the directory service may be configured +by setting the environment variable JUJU_DIRECTORY. + + +--- + +------------------------- + diff --git a/tmp/t/10158.md b/tmp/t/10158.md new file mode 100644 index 000000000..04a293e72 --- /dev/null +++ b/tmp/t/10158.md @@ -0,0 +1,58 @@ +system | 2024-09-16 15:54:40 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [run](/t/10052), [show-operation](/t/10083), [show-task](/t/10129) +**Alias:** operations + +## Summary +Lists pending, running, or completed operations for specified application, units, machines, or all. + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `--actions` | | Comma separated list of actions names to filter on | +| `--apps`, `--applications` | | Comma separated list of applications to filter on | +| `--format` | plain | Specify output format (json|plain|yaml) | +| `--limit` | 0 | The maximum number of operations to return | +| `-m`, `--model` | | Model to operate in. Accepts [<controller name>:]<model name>|<model UUID> | +| `--machines` | | Comma separated list of machines to filter on | +| `-o`, `--output` | | Specify an output file | +| `--offset` | 0 | Return operations from offset onwards | +| `--status` | | Comma separated list of operation status values to filter on | +| `--units` | | Comma separated list of units to filter on | +| `--utc` | false | Show times in UTC | + +## Examples + + juju operations + juju operations --format yaml + juju operations --actions juju-exec + juju operations --actions backup,restore + juju operations --apps mysql,mediawiki + juju operations --units mysql/0,mediawiki/1 + juju operations --machines 0,1 + juju operations --status pending,completed + juju operations --apps mysql --units mediawiki/0 --status running --actions backup + + + +## Details + +List the operations with the specified query criteria. +When an application is specified, all units from that application are relevant. + +When run without any arguments, operations corresponding to actions for all +application units are returned. +To see operations corresponding to juju run tasks, specify an action name +"juju-exec" and/or one or more machines. + + +--- + +------------------------- + diff --git a/tmp/t/10159.md b/tmp/t/10159.md new file mode 100644 index 000000000..c28d51b83 --- /dev/null +++ b/tmp/t/10159.md @@ -0,0 +1,91 @@ +system | 2024-09-16 15:54:29 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. 
+[/note] + +> See also: [import-filesystem](/t/10047), [storage](/t/10075), [storage-pools](/t/10228) + +## Summary +Adds storage to a unit after it has been deployed. + +## Usage +```juju add-storage [options] <unit> <storage-directive>``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `-m`, `--model` | | Model to operate in. Accepts [<controller name>:]<model name>|<model UUID> | + +## Examples + +Add a 100MiB tmpfs storage instance for "pgdata" storage to unit postgresql/0: + +    juju add-storage postgresql/0 pgdata=tmpfs,100M + +Add 10 1TiB storage instances to "osd-devices" storage to unit ceph-osd/0 from the model's default storage pool: + +    juju add-storage ceph-osd/0 osd-devices=1T,10 + +Add a storage instance from the (AWS-specific) ebs-ssd storage pool for "brick" storage to unit gluster/0: + +    juju add-storage gluster/0 brick=ebs-ssd + + +Further reading: + +https://juju.is/docs/storage + + +## Details + +Add storage to a pre-existing unit within a model. Storage is allocated from +a storage pool, using parameters provided within a "storage directive". (Use +'juju deploy --storage=<storage-directive>' to provision storage during the +deployment process). + +    juju add-storage <unit> <storage-directive> + +<unit> is the ID of a unit that is already in the model. + +<storage-directive> describes to the charm how to refer to the storage, +and where to provision it from. <storage-directive> takes the following form: + +    <storage-name>[=<storage-constraint>] + +<storage-name> is defined in the charm's metadata.yaml file. + +<storage-constraint> is a description of how Juju should provision storage +instances for the unit. They are made up of up to three parts: <storage-pool>, +<count>, and <size>. They can be provided in any order, but we recommend the +following: + +    <storage-pool>,<count>,<size> + +Each parameter is optional, so long as at least one is present. So the following +storage constraints are also valid: + +    <storage-pool>,<size> +    <count>,<size> +    <size> + +<storage-pool> is the storage pool to provision storage instances from. Must +be a name from 'juju storage-pools'. The default pool is available via +executing 'juju model-config storage-default-block-source'. + +<count> is the number of storage instances to provision from <storage-pool> of +<size>. Must be a positive integer. The default count is "1". May be restricted +by the charm, which can specify a maximum number of storage instances per unit. + +<size> is the number of bytes to provision per storage instance. Must be a +positive number, followed by a size suffix. Valid suffixes include M, G, T, +and P. Defaults to "1024M", or to the minimum size required by the charm, if +the charm specifies one. + + +--- + +------------------------- + diff --git a/tmp/t/10160.md b/tmp/t/10160.md new file mode 100644 index 000000000..d2c4ed1c3 --- /dev/null +++ b/tmp/t/10160.md @@ -0,0 +1,61 @@ +system | 2024-09-16 15:53:56 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [add-user](/t/10193), [change-user-password](/t/10118), [unregister](/t/10165) + +## Summary +Registers a controller.
+ +## Usage +```juju register [options] |``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `--replace` | false | replace any existing controller | + +## Examples + + juju register MFATA3JvZDAnExMxMDQuMTU0LjQyLjQ0OjE3MDcwExAxMC4xMjguMC4yOjE3MDcwBCBEFCaXerhNImkKKabuX5ULWf2Bp4AzPNJEbXVWgraLrAA= + + juju register --replace MFATA3JvZDAnExMxMDQuMTU0LjQyLjQ0OjE3MDcwExAxMC4xMjguMC4yOjE3MDcwBCBEFCaXerhNImkKKabuX5ULWf2Bp4AzPNJEbXVWgraLrAA= + + juju register public-controller.example.com + + +## Details + +The register command adds details of a controller to the local system. +This is done either by completing the user registration process that +began with the 'juju add-user' command, or by providing the DNS host +name of a public controller. + +To complete the user registration process, you should have been provided +with a base64-encoded blob of data (the output of 'juju add-user') +which can be copied and pasted as the <string> argument to 'register'. +You will be prompted for a password, which, once set, causes the +registration string to be voided. In order to start using Juju the user +can now either add a model or wait for a model to be shared with them. +Some machine providers will require the user to be in possession of +certain credentials in order to add a model. + +If a new controller has been spun up to replace an existing one, and you want +to start using that replacement controller instead of the original one, +use the --replace option to overwrite any existing controller details based +on either a name or UUID match. + +When adding a controller at a public address, authentication via some +external third party (for example Ubuntu SSO) will be required, usually +by using a web browser. + + + +--- + +------------------------- + diff --git a/tmp/t/10161.md b/tmp/t/10161.md new file mode 100644 index 000000000..92dc6e206 --- /dev/null +++ b/tmp/t/10161.md @@ -0,0 +1,52 @@ +system | 2024-09-16 15:55:04 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [agreements](/t/10064) + +## Summary +Agree to terms. + +## Usage +```juju agree [options] ``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `-c`, `--controller` | | Controller to operate in | +| `--yes` | false | Agree to terms non interactively | + +## Examples + +Displays terms for somePlan revision 1 and prompts for agreement: + + juju agree somePlan/1 + +Displays the terms for revision 1 of somePlan, revision 2 of otherPlan, and prompts for agreement: + + juju agree somePlan/1 otherPlan/2 + +Agree to the terms without prompting: + + juju agree somePlan/1 otherPlan/2 --yes + + +## Details + +Agree to the terms required by a charm. + +When deploying a charm that requires agreement to terms, use 'juju agree' to +view the terms and agree to them. Then the charm may be deployed. + +Once you have agreed to terms, you will not be prompted to view them again. + + + +--- + +------------------------- + diff --git a/tmp/t/10162.md b/tmp/t/10162.md new file mode 100644 index 000000000..a87920a78 --- /dev/null +++ b/tmp/t/10162.md @@ -0,0 +1,115 @@ +system | 2024-09-16 15:57:10 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. 
+[/note] + +> See also: [clouds](/t/10182), [update-cloud](/t/10081), [remove-cloud](/t/10216), [update-credential](/t/10065) + +## Summary +Add a cloud definition to Juju. + +## Usage +```juju add-cloud [options] []``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `-c`, `--controller` | | Controller to operate in | +| `--client` | false | Client operation | +| `--credential` | | Credential to use for new cloud | +| `-f`, `--file` | | The path to a cloud definition file | +| `--force` | false | Force add cloud to the controller | + +## Examples + + juju add-cloud + juju add-cloud --force + juju add-cloud mycloud ~/mycloud.yaml + juju add-cloud --controller mycontroller mycloud + juju add-cloud --controller mycontroller mycloud --credential mycred + juju add-cloud --client mycloud ~/mycloud.yaml + + +## Details + +Juju needs to know how to connect to clouds. A cloud definition +describes a cloud's endpoints and authentication requirements. Each +definition is stored and accessed later as <cloud name>. + +If you are accessing a public cloud, running add-cloud is unlikely to be +necessary. Juju already contains definitions for the public cloud +providers it supports. + +add-cloud operates in two modes: + + juju add-cloud + juju add-cloud + +When invoked without arguments, add-cloud begins an interactive session +designed for working with private clouds. The session will enable you +to instruct Juju how to connect to your private cloud. + +A cloud definition can be provided in a file either as an option -f or as a +positional argument: + + juju add-cloud mycloud ~/mycloud.yaml + juju add-cloud mycloud -f ~/mycloud.yaml + +When <cloud definition file> is provided with <cloud name>, +Juju will validate the content of the file and add this cloud +to this client as well as upload it to a controller. + +Use --controller option to upload a cloud to a controller. + +Use --client option to add cloud to the current client. + +A cloud definition file has the following YAML format: + + clouds: # mandatory + mycloud: # argument + type: openstack # , see below + auth-types: [ userpass ] + regions: + london: + endpoint: https://london.mycloud.com:35574/v3.0/ + +Cloud types for private clouds: + - lxd + - maas + - manual + - openstack + - vsphere + +Cloud types for public clouds: + - azure + - ec2 + - gce + - oci + +When a running controller is updated, the credential for the cloud +is also uploaded. As with the cloud, the credential needs +to have been added to the current client, use add-credential to +do that. If there's only one credential for the cloud it will be +uploaded to the controller automatically by add-cloud command. +However, if the cloud has multiple credentials on this client +you can specify which to upload with the --credential option. + +When adding clouds to a controller, some clouds are whitelisted and can be easily added: + - controller cloud type "kubernetes" supports [lxd maas openstack] + - controller cloud type "lxd" supports [lxd maas openstack] + - controller cloud type "maas" supports [maas openstack] + - controller cloud type "openstack" supports [openstack] + +Other cloud combinations can only be force added as the user must consider +network routability, etc - concerns that are outside of scope of Juju. +When forced addition is desired, use --force. 
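+ +As a sketch, force-adding a cloud type that is not in the controller's whitelist might look like the following (the controller name, cloud name and file path are illustrative only): + +    juju add-cloud --controller my-lxd-controller myvsphere -f ~/vsphere.yaml --force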
+ + + +--- + +------------------------- + diff --git a/tmp/t/10163.md b/tmp/t/10163.md new file mode 100644 index 000000000..70fa40165 --- /dev/null +++ b/tmp/t/10163.md @@ -0,0 +1,58 @@ +system | 2024-09-16 15:57:57 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [add-machine](/t/10071) + +## Summary +Removes one or more machines from a model. + +## Usage +```juju remove-machine [options] ...``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `--dry-run` | false | Print what this command would be removed without removing | +| `--force` | false | Completely remove a machine and all its dependencies | +| `--keep-instance` | false | Do not stop the running cloud instance | +| `-m`, `--model` | | Model to operate in. Accepts [<controller name>:]<model name>|<model UUID> | +| `--no-prompt` | false | Do not ask for confirmation. Overrides `mode` model config setting | +| `--no-wait` | false | Rush through machine removal without waiting for each individual step to complete | + +## Examples + + juju remove-machine 5 + juju remove-machine 6 --force + juju remove-machine 6 --force --no-wait + juju remove-machine 7 --keep-instance + + +## Details + +Machines are specified by their numbers, which may be retrieved from the +output of `juju status`. + +It is possible to remove machine from Juju model without affecting +the corresponding cloud instance by using --keep-instance option. + +Machines responsible for the model cannot be removed. + +Machines running units or containers can be removed using the '--force' +option; this will also remove those units and containers without giving +them an opportunity to shut down cleanly. + +Machine removal is a multi-step process. Under normal circumstances, Juju will not +proceed to the next step until the current step has finished. +However, when using --force, users can also specify --no-wait to progress through steps +without delay waiting for each step to complete. + + +--- + +------------------------- + diff --git a/tmp/t/10164.md b/tmp/t/10164.md new file mode 100644 index 000000000..6109ea5c5 --- /dev/null +++ b/tmp/t/10164.md @@ -0,0 +1,37 @@ +system | 2024-09-16 15:57:49 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [grant](/t/10196), [revoke-cloud](/t/10104), [add-user](/t/10193) + +## Summary +Grants access level to a Juju user for a cloud. + +## Usage +```juju grant-cloud [options] ...``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `-c`, `--controller` | | Controller to operate in | + +## Examples + +Grant user 'joe' 'add-model' access to cloud 'fluffy': + + juju grant-cloud joe add-model fluffy + + +## Details +Valid access levels are: + admin + add-model + +--- + +------------------------- + diff --git a/tmp/t/10165.md b/tmp/t/10165.md new file mode 100644 index 000000000..74e1578dd --- /dev/null +++ b/tmp/t/10165.md @@ -0,0 +1,38 @@ +system | 2024-09-16 15:54:24 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. 
+[/note] + +> See also: [destroy-controller](/t/10113), [kill-controller](/t/10233), [register](/t/10160) + +## Summary +Unregisters a Juju controller. + +## Usage +```juju unregister [options] ``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `--no-prompt` | false | Do not ask for confirmation | + +## Examples + + juju unregister my-controller + + +## Details + +Removes local connection information for the specified controller. This +command does not destroy the controller. In order to regain access to an +unregistered controller, it will need to be added again using the juju register +command. + + + +--- + +------------------------- + diff --git a/tmp/t/10166.md b/tmp/t/10166.md new file mode 100644 index 000000000..d94034be5 --- /dev/null +++ b/tmp/t/10166.md @@ -0,0 +1,38 @@ +system | 2024-09-16 15:56:41 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + + +## Summary +Sets the meter status on an application or unit. + +## Usage +```juju set-meter-status [options] [application or unit] status``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `--info` | | Set the meter status info to this string | +| `-m`, `--model` | | Model to operate in. Accepts [<controller name>:]<model name>|<model UUID> | + +## Examples + + juju set-meter-status myapp RED + juju set-meter-status myapp/0 AMBER --info "my message" + + + +## Details + +Set meter status on the given application or unit. This command is used +to test the meter-status-changed hook for charms in development. + + +--- + +------------------------- + diff --git a/tmp/t/10167.md b/tmp/t/10167.md new file mode 100644 index 000000000..8f30c9501 --- /dev/null +++ b/tmp/t/10167.md @@ -0,0 +1,54 @@ +system | 2024-09-16 15:56:45 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [add-ssh-key](/t/10238), [ssh-keys](/t/10202) + +## Summary +Adds a public SSH key from a trusted identity source to a model. + +## Usage +```juju import-ssh-key [options] : ...``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `-m`, `--model` | | Model to operate in. Accepts [<controller name>:]<model name>|<model UUID> | + +## Examples + +Import all public keys associated with user account 'phamilton' on the +GitHub service: + + juju import-ssh-key gh:phamilton + +Multiple identities may be specified in a space delimited list: + + juju import-ssh-key gh:rheinlein lp:iasmiov gh:hharrison + + +## Details +Juju can add SSH keys to its cache from reliable public sources (currently +Launchpad and GitHub), allowing those users SSH access to Juju machines. + +The user identity supplied is the username on the respective service given by +'lp:' or 'gh:'. + +If the user has multiple keys on the service, all the keys will be added. + +Once the keys are imported, they can be viewed with the `juju ssh-keys` +command, where comments will indicate which ones were imported in +this way. + +An alternative to this command is the more manual `juju add-ssh-key`. 
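+ +As a sketch, the manual alternative takes the full public key material as an argument (the key and comment below are placeholders), after which the keys known to the model can be listed: + +    juju add-ssh-key "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5... alice@laptop" +    juju ssh-keys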
+ + + +--- + +------------------------- + diff --git a/tmp/t/10168.md b/tmp/t/10168.md new file mode 100644 index 000000000..8ea9d52a7 --- /dev/null +++ b/tmp/t/10168.md @@ -0,0 +1,56 @@ +system | 2024-09-16 15:51:18 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [find-offers](/t/10097) + +## Summary +Shows extended information about the offered application. + +## Usage +```juju show-offer [options] [:]``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `--format` | tabular | Specify output format (json|tabular|yaml) | +| `-m`, `--model` | | Model to operate in. Accepts [<controller name>:]<model name>|<model UUID> | +| `-o`, `--output` | | Specify an output file | + +## Examples + +To show the extended information for the application 'prod' offered +from the model 'default' on the same Juju controller: + + juju show-offer default.prod + +The supplied URL can also include a username where offers require them. +This will be given as part of the URL retrieved from the +'juju find-offers' command. To show information for the application +'prod' from the model 'default' from the user 'admin': + + juju show-offer admin/default.prod + +To show the information regarding the application 'prod' offered from +the model 'default' on an accessible controller named 'controller': + + juju show-offer controller:default.prod + + + +## Details + +This command is intended to enable users to learn more about the +application offered from a particular URL. In addition to the URL of +the offer, extra information is provided from the readme file of the +charm being offered. + + +--- + +------------------------- + diff --git a/tmp/t/10169.md b/tmp/t/10169.md new file mode 100644 index 000000000..91c22559e --- /dev/null +++ b/tmp/t/10169.md @@ -0,0 +1,55 @@ +system | 2024-09-16 15:53:17 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [credentials](/t/10054), [show-credential](/t/10105), [update-credential](/t/10065) + +## Summary +Relates a remote credential to a model. + +## Usage +```juju set-credential [options] ``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `-m`, `--model` | | Model to operate in. Accepts [<controller name>:]<model name>|<model UUID> | + +## Examples + +For cloud 'aws', relate remote credential 'bob' to model 'trinity': + + juju set-credential -m trinity aws bob + + +## Details + +This command relates a credential cached on a controller to a specific model. +It does not change/update the contents of an existing active credential. See +command `update-credential` for that. + +The credential specified may exist locally (on the client), remotely (on the +controller), or both. The command will error out if the credential is stored +neither remotely nor locally. + +When remote, the credential will be related to the specified model. + +When local and not remote, the credential will first be uploaded to the +controller and then related. + +This command does not affect an existing relation between the specified +credential and another model. 
If the credential is already related to a model +this operation will result in that credential being related to two models. + +Use the `show-credential` command to see how remote credentials are related +to models. + + +--- + +------------------------- + diff --git a/tmp/t/10170.md b/tmp/t/10170.md new file mode 100644 index 000000000..514b6b8e3 --- /dev/null +++ b/tmp/t/10170.md @@ -0,0 +1,66 @@ +system | 2024-09-16 15:56:59 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [find-offers](/t/10097), [show-offer](/t/10168) +**Alias:** offers + +## Summary +Lists shared endpoints. + +## Usage +```juju list-offers [options] []``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `--active-only` | false | only return results where the offer is in use | +| `--allowed-consumer` | | return results where the user is allowed to consume the offer | +| `--application` | | return results matching the application | +| `--connected-user` | | return results where the user has a connection to the offer | +| `--format` | tabular | Specify output format (json|summary|tabular|yaml) | +| `--interface` | | return results matching the interface name | +| `-m`, `--model` | | Model to operate in. Accepts [<controller name>:]<model name>|<model UUID> | +| `-o`, `--output` | | Specify an output file | + +## Examples + + juju offers + juju offers -m model + juju offers --interface db2 + juju offers --application mysql + juju offers --connected-user fred + juju offers --allowed-consumer mary + juju offers hosted-mysql + juju offers hosted-mysql --active-only + + +## Details + +List information about applications' endpoints that have been shared and who is connected. + +The default tabular output shows each user connected (relating to) the offer, and the +relation id of the relation. + +The summary output shows one row per offer, with a count of active/total relations. + +The YAML output shows additional information about the source of connections, including +the source model UUID. + +The output can be filtered by: + - interface: the interface name of the endpoint + - application: the name of the offered application + - connected user: the name of a user who has a relation to the offer + - allowed consumer: the name of a user allowed to consume the offer + - active only: only show offers which are in use (are related to) + + + +--- + +------------------------- + diff --git a/tmp/t/10171.md b/tmp/t/10171.md new file mode 100644 index 000000000..8b33c1d6a --- /dev/null +++ b/tmp/t/10171.md @@ -0,0 +1,37 @@ +system | 2024-09-16 15:57:53 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [remove-application](/t/10067), [add-unit](/t/10141), [remove-unit](/t/10125) + +## Summary +Set the desired number of k8s application units. + +## Usage +```juju scale-application [options] ``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `-m`, `--model` | | Model to operate in. Accepts [<controller name>:]<model name>|<model UUID> | + +## Examples + + juju scale-application mariadb 2 + + +## Details + +Scale a k8s application by specifying how many units there should be. 
+The new number of units can be greater or less than the current number, thus +allowing both scale up and scale down. + + +--- + +------------------------- + diff --git a/tmp/t/10172.md b/tmp/t/10172.md new file mode 100644 index 000000000..1549ce34e --- /dev/null +++ b/tmp/t/10172.md @@ -0,0 +1,50 @@ +system | 2024-09-16 15:52:00 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [add-secret](/t/11144), [update-secret](/t/11413), [remove-secret](/t/11414) + +## Summary +Shows details for a specific secret. + +## Usage +```juju show-secret [options] |``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `--format` | yaml | Specify output format (json|yaml) | +| `-m`, `--model` | | Model to operate in. Accepts [<controller name>:]<model name>|<model UUID> | +| `-o`, `--output` | | Specify an output file | +| `-r`, `--revision` | 0 | | +| `--reveal` | false | Reveal secret values, applicable to yaml or json formats only | +| `--revisions` | false | Show the secret revisions metadata | + +## Examples + + juju show-secret my-secret + juju show-secret 9m4e2mr0ui3e8a215n4g + juju show-secret secret:9m4e2mr0ui3e8a215n4g --revision 2 + juju show-secret 9m4e2mr0ui3e8a215n4g --revision 2 --reveal + juju show-secret 9m4e2mr0ui3e8a215n4g --revisions + juju show-secret 9m4e2mr0ui3e8a215n4g --reveal + + +## Details + +Displays the details of a specified secret. + +For controller/model admins, the actual secret content is exposed +with the '--reveal' option in json or yaml formats. + +Use --revision to inspect a particular revision, else latest is used. +Use --revisions to see the metadata for each revision. + + +--- + +------------------------- + diff --git a/tmp/t/10173.md b/tmp/t/10173.md new file mode 100644 index 000000000..321199334 --- /dev/null +++ b/tmp/t/10173.md @@ -0,0 +1,117 @@ +system | 2024-09-16 15:55:12 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [machines](/t/10078), [show-model](/t/10191), [show-status-log](/t/10204), [storage](/t/10075) + +## Summary +Report the status of the model, its machines, applications and units. + +## Usage +```juju status [options] [ [...]]``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `--color` | false | Use ANSI color codes in tabular output | +| `--format` | tabular | Specify output format (json|line|oneline|short|summary|tabular|yaml) | +| `--integrations` | false | Show 'integrations' section in tabular output | +| `-m`, `--model` | | Model to operate in. 
Accepts [<controller name>:]<model name>|<model UUID> | +| `--no-color` | false | Disable ANSI color codes in tabular output | +| `-o`, `--output` | | Specify an output file | +| `--relations` | false | The same as '--integrations' | +| `--retry-count` | 3 | Number of times to retry API failures | +| `--retry-delay` | 100ms | Time to wait between retry attempts | +| `--storage` | false | Show 'storage' section in tabular output | +| `--utc` | false | Display timestamps in the UTC timezone | +| `--watch` | 0s | Watch the status every period of time | + +## Examples + +Report the status of units hosted on machine 0: + +    juju status 0 + +Report the status of the mysql application: + +    juju status mysql + +Report the status for applications that start with nova-: + +    juju status nova-* + +Include information about storage and integrations in output: + +    juju status --storage --integrations + +Provide output as valid JSON: + +    juju status --format=json + +Watch the status every five seconds: + +    juju status --watch 5s + +Show only applications/units in active status: + +    juju status active + +Show only applications/units in error status: + +    juju status error + + +## Details + +Report the model's status, optionally filtered by names of applications or +units. When selectors are present, filter the report to exclude entities that +do not match. + +    juju status [<selector> [...]] + +<selector> selects machines, units or applications from the model to display. +Wildcard characters (*) enable multiple entities to be matched at the same +time. + +    (<machine>|<unit>|<application>)[*] + +When an entity that matches <selector> is integrated with other applications, the +status of those applications will also be presented. By default (without a +<selector>) the status of all applications and their units will be displayed. + + +Altering the output format + +The '--format' option allows you to specify how the status report is formatted. + +    --format=tabular (default) + Display information about all aspects of the model in a + human-centric manner. Omits some information by default. + Use the '--integrations' and '--storage' options to include + all available information. + +    --format=line +    --format=short +    --format=oneline + Reports information from units. Includes their IP address, + open ports and the status of the workload and agent. + +    --format=summary + Reports aggregated information about the model. Includes + a description of subnets and ports that are in use, the + counts of applications, units, and machines by status code. + +    --format=json +    --format=yaml + Provide information in JSON or YAML formats for + programmatic use. + + +--- + +------------------------- + diff --git a/tmp/t/10174.md b/tmp/t/10174.md new file mode 100644 index 000000000..368ffe6ce --- /dev/null +++ b/tmp/t/10174.md @@ -0,0 +1,47 @@ +system | 2024-09-16 15:57:51 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [status](/t/10173), [refresh](/t/10189), [upgrade-machine](/t/10188) + +## Summary +Set an application's base. + +## Usage +```juju set-application-base [options] <application name> <base>``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `-m`, `--model` | | Model to operate in.
Accepts [<controller name>:]<model name>|<model UUID> | + +## Examples + +Set the base for the ubuntu application to ubuntu@20.04: + + juju set-application-base ubuntu ubuntu@20.04 + + +## Details + +The specified application's base value will be set within juju. Any subordinates +of the application will also have their base set to the provided value. A base +can be specified using the OS name and the version of the OS, separated by @. + +This will not change the base of any existing units, rather new units will use +the new base when deployed. + +It is recommended to only do this after upgrade-machine has been run for +machine containing all existing units of the application. + +To ensure correct binaries, run 'juju refresh' before running 'juju add-unit'. + + +--- + +------------------------- + diff --git a/tmp/t/10175.md b/tmp/t/10175.md new file mode 100644 index 000000000..b5775287b --- /dev/null +++ b/tmp/t/10175.md @@ -0,0 +1,49 @@ +system | 2024-09-16 15:56:28 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [add-user](/t/10193), [register](/t/10160), [show-user](/t/10212), [disable-user](/t/10198), [enable-user](/t/10241) + +## Summary +Lists Juju users allowed to connect to a controller or model. + +## Usage +```juju users [options] [model-name]``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `--all` | false | Include disabled users | +| `-c`, `--controller` | | Controller to operate in | +| `--exact-time` | false | Use full timestamp for connection times | +| `--format` | tabular | Specify output format (json|tabular|yaml) | +| `-o`, `--output` | | Specify an output file | + +## Examples + +Print the users relevant to the current controller: + + juju users + +Print the users relevant to the controller "another": + + juju users -c another + +Print the users relevant to the model "mymodel": + + juju users mymodel + + +## Details +When used without a model name argument, users relevant to a controller are printed. +When used with a model name, users relevant to the specified model are printed. + + + +--- + +------------------------- + diff --git a/tmp/t/10176.md b/tmp/t/10176.md new file mode 100644 index 000000000..515d7b59e --- /dev/null +++ b/tmp/t/10176.md @@ -0,0 +1,54 @@ +system | 2024-09-16 15:51:54 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [add-secret-backend](/t/10062), [secret-backends](/t/10149), [remove-secret-backend](/t/10194), [show-secret-backend](/t/10059) + +## Summary +Update an existing secret backend on the controller. 
+ +## Usage +```juju update-secret-backend [options] <backend-name>``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-c`, `--controller` | | Controller to operate in | +| `--config` | | path to yaml-formatted configuration file | +| `--force` | false | force update even if the backend is unreachable | +| `--reset` | | Reset the provided comma delimited config keys | + +## Examples + +    juju update-secret-backend myvault --config /path/to/cfg.yaml +    juju update-secret-backend myvault name=myvault2 +    juju update-secret-backend myvault token-rotate=10m --config /path/to/cfg.yaml +    juju update-secret-backend myvault endpoint=https://vault.io:8200 token=s.1wshwhw +    juju update-secret-backend myvault token-rotate=0 +    juju update-secret-backend myvault --reset namespace,ca-cert + + +## Details + +Updates an existing secret backend for storing secret content. + +You must specify a name for the backend to update, +followed by any necessary backend specific config values. +Config may be specified as key values or read from a file. +Any key values override file content if both are specified. + +Config attributes may be reset back to the default value using --reset. + +To rotate the backend access credential/token (if specified), use +the "token-rotate" config and supply a duration. To reset any existing +token rotation period, supply a value of 0. + + + +--- + +------------------------- + diff --git a/tmp/t/10177.md b/tmp/t/10177.md new file mode 100644 index 000000000..13b24b5ff --- /dev/null +++ b/tmp/t/10177.md @@ -0,0 +1,43 @@ +system | 2024-09-16 15:53:12 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + + +## Summary +Displays information about an application. + +## Usage +```juju show-application [options] <application name or alias>``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `--format` | yaml | Specify output format (json|smart|yaml) | +| `-m`, `--model` | | Model to operate in. Accepts [<controller name>:]<model name>|<model UUID> | +| `-o`, `--output` | | Specify an output file | + +## Examples + +    juju show-application mysql +    juju show-application mysql wordpress + +    juju show-application myapplication + +where "myapplication" is the application name alias; see "juju help deploy" for more information. + + +## Details + +The command takes deployed application names or aliases as an argument. + +The command does an exact search. It does not support wildcards. + + +--- + +------------------------- + diff --git a/tmp/t/10178.md b/tmp/t/10178.md new file mode 100644 index 000000000..9f5b95b29 --- /dev/null +++ b/tmp/t/10178.md @@ -0,0 +1,128 @@ +system | 2024-09-16 15:57:26 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [models](/t/10090), [model-config](/t/10096) +**Alias:** model-defaults + +## Summary +Displays or sets default configuration settings for new models.
+ +## Usage +```juju model-default [options] [[<=value>] ...]``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `-c`, `--controller` | | Controller to operate in | +| `--cloud` | | The cloud to target | +| `--color` | false | Use ANSI color codes in output | +| `--file` | | path to yaml-formatted configuration file | +| `--format` | tabular | Specify output format (json|tabular|yaml) | +| `--ignore-read-only-fields` | false | Ignore read only fields that might cause errors to be emitted while processing yaml documents | +| `--no-color` | false | Disable ANSI color codes in tabular output | +| `-o`, `--output` | | Specify an output file | +| `--region` | | The region or cloud/region to target | +| `--reset` | | Reset the provided comma delimited keys | + +## Examples + +Display all model config default values: + + juju model-defaults + +Display the value of http-proxy model config default: + + juju model-defaults http-proxy + +Display the value of http-proxy model config default for the aws cloud: + + juju model-defaults --cloud=aws http-proxy + +Display the value of http-proxy model config default for the aws cloud +and us-east-1 region: + + juju model-defaults --region=aws/us-east-1 http-proxy + +Display the value of http-proxy model config default for the us-east-1 region: + + juju model-defaults --region=us-east-1 http-proxy + +Set the value of ftp-proxy model config default to 10.0.0.1:8000: + + juju model-defaults ftp-proxy=10.0.0.1:8000 + +Set the value of ftp-proxy model config default to 10.0.0.1:8000 in the +us-east-1 region: + + juju model-defaults --region=us-east-1 ftp-proxy=10.0.0.1:8000 + +Set model default values for the aws cloud as defined in path/to/file.yaml: + + juju model-defaults --cloud=aws --file path/to/file.yaml + +Reset the value of default-base and test-mode to default: + + juju model-defaults --reset default-base,test-mode + +Reset the value of http-proxy for the us-east-1 region to default: + + juju model-defaults --region us-east-1 --reset http-proxy + + +## Details + +To view all model default values for the current controller, run + juju model-defaults +You can target a specific controller using the -c flag: + juju model-defaults -c +By default, the output will be printed in a tabular format. You can instead +print it in json or yaml format using the --format flag: + juju model-defaults --format json + juju model-defaults --format yaml + +To view the value of a single model default, run + juju model-defaults key +To set default model config values, run + juju model-defaults key1=val1 key2=val2 ... +You can also reset default keys to their original values: + juju model-defaults --reset key1 + juju model-defaults --reset key1,key2,key3 +You may simultaneously set some keys and reset others: + juju model-defaults key1=val1 key2=val2 --reset key3,key4 + +Default values can be imported from a yaml file using the --file flag: + juju model-defaults --file=path/to/cfg.yaml +This allows you to e.g. save a controller's model defaults to a file: + juju model-defaults --format=yaml > cfg.yaml +and then import these later. Note that the output of model-defaults may +include read-only values, which will cause an error when importing later. 
+To prevent the error, use the --ignore-read-only-fields flag: + juju model-defaults --file=cfg.yaml --ignore-read-only-fields + +You can also read from stdin using "-", which allows you to pipe default model +values from one controller to another: + juju model-defaults -c c1 --format=yaml \ + | juju model-defaults -c c2 --file=- --ignore-read-only-fields +You can simultaneously read config from a yaml file and set config keys +as above. The command-line args will override any values specified in the file. + +Model default configuration settings are specific to the cloud on which the +model is deployed. If the controller hosts more than one cloud, the cloud +(and optionally region) must be specified using the --cloud flag. This flag +accepts arguments in the following forms: + --cloud= (specified cloud, all regions) + --region= (default cloud, specified region) + --region=/ (specified cloud and region) + --cloud= --region= (specified cloud and region) + + + +--- + +------------------------- + diff --git a/tmp/t/10179.md b/tmp/t/10179.md new file mode 100644 index 000000000..f85bf7db6 --- /dev/null +++ b/tmp/t/10179.md @@ -0,0 +1,40 @@ +system | 2024-09-16 15:56:29 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [integrate](/t/10207), [offers](/t/10051), [remove-relation](/t/10110), [resume-relation](/t/10123) + +## Summary +Suspends a relation to an application offer. + +## Usage +```juju suspend-relation [options] [ ...]``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `-m`, `--model` | | Model to operate in. Accepts [<controller name>:]<model name>|<model UUID> | +| `--message` | | reason for suspension | + +## Examples + + juju suspend-relation 123 + juju suspend-relation 123 --message "reason for suspending" + juju suspend-relation 123 456 --message "reason for suspending" + + +## Details + +A relation between an application in another model and an offer in this model will be suspended. +The relation-departed and relation-broken hooks will be run for the relation, and the relation +status will be set to suspended. The relation is specified using its id. + + +--- + +------------------------- + diff --git a/tmp/t/10180.md b/tmp/t/10180.md new file mode 100644 index 000000000..f775d12e7 --- /dev/null +++ b/tmp/t/10180.md @@ -0,0 +1,49 @@ +system | 2024-09-16 15:56:43 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [credentials](/t/10054), [add-credential](/t/10136), [remove-credential](/t/10201), [autoload-credentials](/t/10230) +**Alias:** default-credential + +## Summary +Sets local default credentials for a cloud on this client. + +## Usage +```juju set-default-credentials [options] []``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `--reset` | false | Reset default credential for the cloud | + +## Examples + + juju default-credential google credential_name + juju default-credential google + juju default-credential google --reset + + +## Details +The default credentials are specified with a "credential name". + +A credential name is created during the process of adding credentials either +via `juju add-credential` or `juju autoload-credentials`. +Credential names can be listed with `juju credentials`. 
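+ +For example, a typical workflow (assuming a Google cloud credential named "my-gce-creds" has already been added) is to list the available credential names and then choose one as the default: + + juju credentials google + juju default-credential google my-gce-creds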
+ +This command sets a locally stored credential to be used as a default. +Default credentials avoid the need to specify a particular set of +credentials when more than one are available for a given cloud. + +To unset previously set default credential for a cloud, use --reset option. + +To view currently set default credential for a cloud, use the command +without a credential name argument. + + +--- + +------------------------- + diff --git a/tmp/t/10181.md b/tmp/t/10181.md new file mode 100644 index 000000000..9fba3e198 --- /dev/null +++ b/tmp/t/10181.md @@ -0,0 +1,40 @@ +system | 2024-09-16 15:57:24 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [status](/t/10173) +**Alias:** machines + +## Summary +Lists machines in a model. + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `--color` | false | Force use of ANSI color codes | +| `--format` | tabular | Specify output format (json|tabular|yaml) | +| `-m`, `--model` | | Model to operate in. Accepts [<controller name>:]<model name>|<model UUID> | +| `-o`, `--output` | | Specify an output file | +| `--utc` | false | Display time as UTC in RFC3339 format | + +## Examples + + juju machines + + +## Details + +By default, the tabular format is used. +The following sections are included: ID, STATE, DNS, INS-ID, SERIES, AZ +Note: AZ above is the cloud region's availability zone. + + + +--- + +------------------------- + diff --git a/tmp/t/10182.md b/tmp/t/10182.md new file mode 100644 index 000000000..2b71bac16 --- /dev/null +++ b/tmp/t/10182.md @@ -0,0 +1,66 @@ +system | 2024-09-16 15:53:20 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [add-cloud](/t/10162), [credentials](/t/10054), [controllers](/t/10152), [regions](/t/10112), [default-credential](/t/10055), [default-region](/t/10082), [show-cloud](/t/10215), [update-cloud](/t/10081), [update-public-clouds](/t/10115) + +## Summary +Lists all clouds available to Juju. + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `--all` | false | Show all available clouds | +| `-c`, `--controller` | | Controller to operate in | +| `--client` | false | Client operation | +| `--format` | tabular | Specify output format (json|tabular|yaml) | +| `-o`, `--output` | | Specify an output file | + +## Examples + + juju clouds + juju clouds --format yaml + juju clouds --controller mycontroller + juju clouds --controller mycontroller --client + juju clouds --client + + +## Details +Display the fundamental properties for each cloud known to Juju: +name, number of regions, number of registered credentials, default region, type, etc... + +Clouds known to this client are the clouds known to Juju out of the box +along with any which have been added with `add-cloud --client`. These clouds can be +used to create a controller and can be displayed using --client option. + +Clouds may be listed that are co-hosted with the Juju client. When the LXD hypervisor +is detected, the 'localhost' cloud is made available. When a microk8s installation is +detected, the 'microk8s' cloud is displayed. + +Use --controller option to list clouds from a controller. 
+Use --client option to list clouds from this client. +This command's default output format is 'tabular'. Use 'json' and 'yaml' for +machine-readable output. + +Cloud metadata sometimes changes, e.g. providers add regions. Use the `update-public-clouds` +command to update public clouds or `update-cloud` to update other clouds. + +Use the `regions` command to list a cloud's regions. + +Use the `show-cloud` command to get more detail, such as regions and endpoints. + +Further reading: + + Documentation: https://juju.is/docs/olm/manage-clouds + microk8s: https://microk8s.io/docs + LXD hypervisor: https://documentation.ubuntu.com/lxd + + +--- + +------------------------- + diff --git a/tmp/t/10183.md b/tmp/t/10183.md new file mode 100644 index 000000000..c5cc14ac2 --- /dev/null +++ b/tmp/t/10183.md @@ -0,0 +1,45 @@ +system | 2024-09-16 15:54:26 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [change-user-password](/t/10118), [login](/t/10157) + +## Summary +Logs a Juju user out of a controller. + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `-c`, `--controller` | | Controller to operate in | +| `--force` | false | Force logout when a locally recorded password is detected | + +## Examples + + juju logout + + +## Details + +If another client has logged in as the same user, they will remain logged +in. This command only affects the local client. + +The command will fail if the user has not yet set a password +(`juju change-user-password`). This scenario is only possible after +`juju bootstrap` since `juju register` sets a password. The +failing behaviour can be overridden with the '--force' option. + +If the same user is logged in with another client system, that user session +will not be affected by this command; it only affects the local client. + +By default, the controller is the current controller. + + + +--- + +------------------------- + diff --git a/tmp/t/10184.md b/tmp/t/10184.md new file mode 100644 index 000000000..d154a4ebd --- /dev/null +++ b/tmp/t/10184.md @@ -0,0 +1,41 @@ +system | 2024-09-16 15:57:39 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [storage](/t/10075), [attach-storage](/t/10126), [detach-storage](/t/10089), [remove-storage](/t/10066) + +## Summary +Shows storage instance information. + +## Usage +```juju show-storage [options] [...]``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `--format` | yaml | Specify output format (json|yaml) | +| `-m`, `--model` | | Model to operate in. Accepts [<controller name>:]<model name>|<model UUID> | +| `-o`, `--output` | | Specify an output file | + +## Examples + + juju show-storage storage-id + + +## Details + +Show extended information about storage instances. +Storage instances to display are specified by storage IDs. +Storage IDs are positional arguments to the command and do not need to be comma +separated when more than one ID is desired.
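+ +For example (assuming the model contains storage instances "osd-devices/0" and "osd-devices/1"), several IDs can be passed in a single invocation: + + juju show-storage osd-devices/0 osd-devices/1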
+ + + +--- + +------------------------- + diff --git a/tmp/t/10186.md b/tmp/t/10186.md new file mode 100644 index 000000000..abe7cd8dc --- /dev/null +++ b/tmp/t/10186.md @@ -0,0 +1,53 @@ +system | 2024-09-16 15:51:37 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + + +## Summary +List subnets known to Juju. + +## Usage +```juju subnets [options] [--space ] [--zone ] [--format yaml|json] [--output ]``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `--format` | yaml | Specify output format (json|yaml) | +| `-m`, `--model` | | Model to operate in. Accepts [<controller name>:]<model name>|<model UUID> | +| `-o`, `--output` | | Specify an output file | +| `--space` | | Filter results by space name | +| `--zone` | | Filter results by zone name | + +## Examples + +To list all subnets known to Juju: + + juju subnets + +To list subnets associated with a specific network space: + + juju subnets --space my-space + +To list subnets associated with a specific availability zone: + + juju subnets --zone my-zone + + +## Details +Displays a list of all subnets known to Juju. Results can be filtered +using the optional --space and/or --zone arguments to only display +subnets associated with a given network space and/or availability zone. + +Like with other Juju commands, the output and its format can be changed +using the --format and --output (or -o) optional arguments. Supported +output formats include "yaml" (default) and "json". To redirect the +output to a file, use --output. + +--- + +------------------------- + diff --git a/tmp/t/10187.md b/tmp/t/10187.md new file mode 100644 index 000000000..67d8a2ace --- /dev/null +++ b/tmp/t/10187.md @@ -0,0 +1,41 @@ +system | 2024-09-16 15:53:29 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [info](/t/10103), [download](/t/10134) + +## Summary +Queries the CharmHub store for available charms or bundles. + +## Usage +```juju find [options] [options] ``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `--category` | | filter by a category name | +| `--channel` | | filter by channel" | +| `--charmhub-url` | https://api.charmhub.io | specify the Charmhub URL for querying the store | +| `--columns` | nbvps | display the columns associated with a find search. The following columns are supported: - n: Name - b: Bundle - v: Version - p: Publisher - s: Summary - a: Architecture - o: OS - S: Supports | +| `--format` | tabular | Specify output format (json|tabular|yaml) | +| `-o`, `--output` | | Specify an output file | +| `--publisher` | | search by a given publisher | +| `--type` | | search by a given type <charm|bundle> | + +## Examples + + juju find wordpress + + +## Details + +The find command queries the CharmHub store for available charms or bundles. + + +--- + +------------------------- + diff --git a/tmp/t/10188.md b/tmp/t/10188.md new file mode 100644 index 000000000..4bb66db0d --- /dev/null +++ b/tmp/t/10188.md @@ -0,0 +1,74 @@ +system | 2024-09-16 15:55:46 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. 
+[/note] + +> See also: [machines](/t/10078), [status](/t/10173), [refresh](/t/10189), [set-application-base](/t/10174) + +## Summary +Upgrade the Ubuntu base of a machine. + +## Usage +```juju upgrade-machine [options] [args]``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `--force` | false | Upgrade even if the base is not supported by the charm and/or related subordinate charms. | +| `-m`, `--model` | | Model to operate in. Accepts [<controller name>:]<model name>|<model UUID> | +| `-y`, `--yes` | false | Agree that the operation cannot be reverted or canceled once started without being prompted. | + +## Examples + +Prepare machine 3 for upgrade to base "ubuntu@18.04"": + + juju upgrade-machine 3 prepare ubuntu@18.04 + +Prepare machine 4 for upgrade to base "ubuntu@20.04" even if there are +applications running units that do not support the target base: + + juju upgrade-machine 4 prepare ubuntu@20.04 --force + +Complete upgrade of machine 5, indicating that all automatic and any +necessary manual upgrade steps have completed successfully: + + juju upgrade-machine 5 complete + + +## Details + +Upgrade a machine's operating system release. + +upgrade-machine allows users to perform a managed upgrade of the operating system +release of a machine using a base. This command is performed in two steps; +prepare and complete. + +The "prepare" step notifies Juju that a base upgrade is taking place for a given +machine and as such Juju guards that machine against operations that would +interfere with the upgrade process. A base can be specified using the OS name +and the version of the OS, separated by @. + +The "complete" step notifies juju that the managed upgrade has been successfully +completed. + +It should be noted that once the prepare command is issued there is no way to +cancel or abort the process. Once you commit to prepare you must complete the +process or you will end up with an unusable machine! + +The requested base must be explicitly supported by all charms deployed to +the specified machine. To override this constraint the --force option may be used. + +The --force option should be used with caution since using a charm on a machine +running an unsupported base may cause unexpected behavior. Alternately, if the +requested base is supported in later revisions of the charm, upgrade-charm can +run beforehand. + + + +--- + +------------------------- + diff --git a/tmp/t/10189.md b/tmp/t/10189.md new file mode 100644 index 000000000..01bf7a976 --- /dev/null +++ b/tmp/t/10189.md @@ -0,0 +1,132 @@ +system | 2024-09-16 15:54:04 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [deploy](/t/10074) + +## Summary +Refresh an application's charm. 
+ +## Usage +```juju refresh [options] ``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `--bind` | | Configure application endpoint bindings to spaces | +| `--channel` | | Channel to use when getting the charm from Charmhub | +| `--config` | | Either a path to yaml-formatted application config file or a key=value pair | +| `--force` | false | Allow a charm to be refreshed which bypasses LXD profile allow list | +| `--force-series` | false | Refresh even if series of deployed applications are not supported by the new charm | +| `--force-units` | false | Refresh all units immediately, even if in error state | +| `-m`, `--model` | | Model to operate in. Accepts [<controller name>:]<model name>|<model UUID> | +| `--path` | | Refresh to a charm located at path | +| `--resource` | | Resource to be uploaded to the controller | +| `--revision` | -1 | Explicit revision of current charm | +| `--storage` | | Charm storage constraints | +| `--switch` | | Crossgrade to a different charm | +| `--trust` | unset | Allows charm to run hooks that require access credentials | + +## Examples + +To refresh the storage constraints for the application foo: + + juju refresh foo --storage cache=ssd,10G + +To refresh the application config from a file for application foo: + + juju refresh foo --config config.yaml + +To refresh the resources for application foo: + + juju refresh foo --resource bar=/some/file.tgz --resource baz=./docs/cfg.xml + + +## Details + +When no options are set, the application's charm will be refreshed to the latest +revision available in the repository from which it was originally deployed. An +explicit revision can be chosen with the --revision option. + +A path will need to be supplied to allow an updated copy of the charm +to be located. + +Deploying from a path is intended to suit the workflow of a charm author working +on a single client machine; use of this deployment method from multiple clients +is not supported and may lead to confusing behaviour. Each local charm gets +uploaded with the revision specified in the charm, if possible, otherwise it +gets a unique revision (highest in state + 1). + +When deploying from a path, the --path option is used to specify the location from +which to load the updated charm. Note that the directory containing the charm must +match what was originally used to deploy the charm as a superficial check that the +updated charm is compatible. + +Resources may be uploaded at upgrade time by specifying the --resource option. +Following the resource option should be name=filepath pair. This option may be +repeated more than once to upload more than one resource. + + juju refresh foo --resource bar=/some/file.tgz --resource baz=./docs/cfg.xml + +Where bar and baz are resources named in the metadata for the foo charm. + +Storage constraints may be added or updated at upgrade time by specifying +the --storage option, with the same format as specified in "juju deploy". +If new required storage is added by the new charm revision, then you must +specify constraints or the defaults will be applied. + + juju refresh foo --storage cache=ssd,10G + +Charm settings may be added or updated at upgrade time by specifying the +--config option, pointing to a YAML-encoded application config file. 
+ + juju refresh foo --config config.yaml + +If the new version of a charm does not explicitly support the application's series, the +upgrade is disallowed unless the --force-series option is used. This option should be +used with caution since using a charm on a machine running an unsupported series may +cause unexpected behavior. + +The --switch option allows you to replace the charm with an entirely different one. +The new charm's URL and revision are inferred as they would be when running a +deploy command. + +Please note that --switch is dangerous, because juju only has limited +information with which to determine compatibility; the operation will succeed, +regardless of potential havoc, so long as the following conditions hold: + +- The new charm must declare all relations that the application is currently + participating in. +- All config settings shared by the old and new charms must + have the same types. + +The new charm may add new relations and configuration settings. + +The new charm may also need to be granted access to trusted credentials. +Use --trust to grant such access. +Or use --trust=false to revoke such access. + +--switch and --path are mutually exclusive. + +--path and --revision are mutually exclusive. The revision of the updated charm +is determined by the contents of the charm at the specified path. + +--switch and --revision are mutually exclusive. + +Use of the --force-units option is not generally recommended; units upgraded +while in an error state will not have upgrade-charm hooks executed, and may +cause unexpected behavior. + +--force option for LXD Profiles is not generally recommended when upgrading an +application; overriding profiles on the container may cause unexpected +behavior. + + +--- + +------------------------- + diff --git a/tmp/t/10190.md b/tmp/t/10190.md new file mode 100644 index 000000000..8d3f0770c --- /dev/null +++ b/tmp/t/10190.md @@ -0,0 +1,70 @@ +system | 2024-09-16 15:52:29 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [destroy-controller](/t/10113) + +## Summary +Terminate all machines/containers and resources for a non-controller model. + +## Usage +```juju destroy-model [options] [:]``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `--destroy-storage` | false | Destroy all storage instances in the model | +| `--force` | false | Force destroy model ignoring any errors | +| `--no-prompt` | false | Do not ask for confirmation | +| `--no-wait` | false | Rush through model destruction without waiting for each individual step to complete | +| `--release-storage` | false | Release all storage instances from the model, and management of the controller, without destroying them | +| `-t`, `--timeout` | -1s | Timeout for each step of force model destruction | + +## Examples + + juju destroy-model --no-prompt mymodel + juju destroy-model --no-prompt mymodel --destroy-storage + juju destroy-model --no-prompt mymodel --release-storage + juju destroy-model --no-prompt mymodel --force + juju destroy-model --no-prompt mymodel --force --no-wait + + +## Details + +Destroys the specified model. This will result in the non-recoverable +removal of all the units operating in the model and any resources stored +there. 
Due to the irreversible nature of the command, it will prompt for +confirmation (unless overridden with the '--no-prompt' option) before taking any +action. + +If there is persistent storage in any of the models managed by the +controller, then you must choose to either destroy or release the +storage, using --destroy-storage or --release-storage respectively. + +Sometimes, the destruction of the model may fail as Juju encounters errors +and failures that need to be dealt with before a model can be destroyed. +However, at times, there is a need to destroy a model ignoring +all operational errors. In these rare cases, use the --force option but note +that --force will also remove all units of the application, its subordinates +and, potentially, machines without giving them the opportunity to shut down cleanly. + +Model destruction is a multi-step process. Under normal circumstances, Juju will not +proceed to the next step until the current step has finished. +However, when using --force, users can also specify --no-wait to progress through steps +without delay waiting for each step to complete. + +WARNING: Passing --force with --timeout will continue the final destruction without +consideration or respect for clean shutdown or resource cleanup. If timeout +elapses with --force, you may have resources left behind that will require +manual cleanup. If --force --timeout 0 is passed, the model is brutally +removed with haste. It is recommended to use graceful destroy (without --force or --no-wait). + + +--- + +------------------------- + diff --git a/tmp/t/10191.md b/tmp/t/10191.md new file mode 100644 index 000000000..97243f120 --- /dev/null +++ b/tmp/t/10191.md @@ -0,0 +1,29 @@ +system | 2024-09-16 15:54:02 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [add-model](/t/10145) + +## Summary +Shows information about the current or specified model. + +## Usage +```juju show-model [options] ``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `--format` | yaml | Specify output format (json|yaml) | +| `-o`, `--output` | | Specify an output file | + +## Details +Show information about the current or specified model. + +--- + +------------------------- + diff --git a/tmp/t/10192.md b/tmp/t/10192.md new file mode 100644 index 000000000..218c0403f --- /dev/null +++ b/tmp/t/10192.md @@ -0,0 +1,39 @@ +system | 2024-09-16 15:54:13 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [add-space](/t/10117), [spaces](/t/10236), [reload-spaces](/t/10063), [rename-space](/t/10135), [show-space](/t/10095), [remove-space](/t/10084) + +## Summary +Update a network space's CIDR. + +## Usage +```juju move-to-space [options] [--format yaml|json] [ ...]``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `--force` | false | Allow forcing a move of subnets to a space even if they are in use on another machine. | +| `--format` | human | Specify output format (human|json|tabular|yaml) | +| `-m`, `--model` | | Model to operate in.
Accepts [<controller name>:]<model name>|<model UUID> | +| `-o`, `--output` | | Specify an output file | + +## Examples + +Move a list of CIDRs from their space to a new space: + + juju move-to-space db-space 172.31.1.0/28 172.31.16.0/20 + + +## Details +Replaces the list of associated subnets of the space. Since subnets +can only be part of a single space, all specified subnets (using their +CIDRs) "leave" their current space and "enter" the one we're updating. + +--- + +------------------------- + diff --git a/tmp/t/10193.md b/tmp/t/10193.md new file mode 100644 index 000000000..3735a52f6 --- /dev/null +++ b/tmp/t/10193.md @@ -0,0 +1,45 @@ +system | 2024-09-16 15:52:02 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [register](/t/10160), [grant](/t/10196), [users](/t/10175), [show-user](/t/10212), [disable-user](/t/10198), [enable-user](/t/10241), [change-user-password](/t/10118), [remove-user](/t/10130) + +## Summary +Adds a Juju user to a controller. + +## Usage +```juju add-user [options] []``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `-c`, `--controller` | | Controller to operate in | + +## Examples + + juju add-user bob + juju add-user --controller mycontroller bob + + +## Details + +The user's details are stored within the controller and will be removed when +the controller is destroyed. + +A user unique registration string will be printed. This registration string +must be used by the newly added user as supplied to complete the registration +process. + +Some machine providers will require the user to be in possession of certain +credentials in order to create a model. + + + +--- + +------------------------- + diff --git a/tmp/t/10194.md b/tmp/t/10194.md new file mode 100644 index 000000000..db7b5f1fd --- /dev/null +++ b/tmp/t/10194.md @@ -0,0 +1,39 @@ +system | 2024-09-16 15:51:31 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [add-secret-backend](/t/10062), [secret-backends](/t/10149), [show-secret-backend](/t/10059), [update-secret-backend](/t/10176) + +## Summary +Removes a secret backend from the controller. + +## Usage +```juju remove-secret-backend [options] ``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-c`, `--controller` | | Controller to operate in | +| `--force` | false | force removal even if the backend stores in-use secrets | + +## Examples + + juju remove-secret-backend myvault + juju remove-secret-backend myvault --force + + +## Details + +Removes a secret backend, used for storing secret content. +If the backend is being used to store secrets currently in use, +the --force option can be supplied to force the removal, but be +warned, this will affect charms which use those secrets. + + +--- + +------------------------- + diff --git a/tmp/t/10195.md b/tmp/t/10195.md new file mode 100644 index 000000000..0f432f988 --- /dev/null +++ b/tmp/t/10195.md @@ -0,0 +1,114 @@ +system | 2024-09-16 15:51:27 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [run](/t/10052), [ssh](/t/10153) + +## Summary +Run the commands on the remote targets specified. 
+ +## Usage +```juju exec [options] ``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `-a`, `--app`, `--application` | | One or more application names | +| `--all` | false | Run the commands on all the machines | +| `--background` | false | Run the task in the background | +| `--color` | false | Use ANSI color codes in output | +| `--execution-group` | | Commands in the same execution group are run sequentially | +| `--format` | plain | Specify output format (json|plain|yaml) | +| `-m`, `--model` | | Model to operate in. Accepts [<controller name>:]<model name>|<model UUID> | +| `--machine` | | One or more machine ids | +| `--no-color` | false | Disable ANSI color codes in output | +| `-o`, `--output` | | Specify an output file | +| `--operator` | false | Run the commands on the operator (k8s-only) | +| `--parallel` | true | Run the commands in parallel without first acquiring a lock | +| `-u`, `--unit` | | One or more unit ids | +| `--utc` | false | Show times in UTC | +| `--wait` | 0s | Maximum wait time for a task to complete | + +## Examples + + + juju exec --all -- hostname -f + + juju exec --unit hello/0 env + + juju exec --unit controller/0 juju-engine-report + + +## Details + +Run a shell command on the specified targets. Only admin users of a model +are able to use this command. + +Targets are specified using either machine ids, application names or unit +names. At least one target specifier is needed. + +Multiple values can be set for --machine, --application, and --unit by using +comma separated values. + +Depending on the type of target, the user which the command runs as will be: + unit -> "root" + machine -> "ubuntu" +The target and user are independent of whether --all or --application are used. +For example, --all will run as "ubuntu" on machines and "root" on units. +And --application will run as "root" on all units of that application. + +Some options are shortened for usabilty purpose in CLI +--application can also be specified as --app and -a +--unit can also be specified as -u + +Valid unit identifiers are: + a standard unit ID, such as mysql/0 or; + leader syntax of the form <application>/leader, such as mysql/leader. + +If the target is an application, the command is run on all units for that +application. For example, if there was an application "mysql" and that application +had two units, "mysql/0" and "mysql/1", then + --application mysql +is equivalent to + --unit mysql/0,mysql/1 + +If --operator is provided on k8s models, commands are executed on the operator +instead of the workload. On IAAS models, --operator has no effect. + +Commands run for applications or units are executed in a 'hook context' for +the unit. + +Commands run on machines via the --machine argument are run in parallel +by default. +If you want commands to be run sequentially in order of submission, +use --parallel=false. +Such commands will first acquire a global execution lock on the host machine +before running, and release the lock when done. +It's also possible to group commands so that those in the same group run +sequentially, but in parallel with other groups. This is done using +--execution-group=somegroup. + +--all is provided as a simple way to run the command on all the machines +in the model. If you specify --all you cannot provide additional +targets. 
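+ +As a sketch of the sequential and execution-group behaviour described above (the machine ids and shell commands here are purely illustrative): + + juju exec --machine 0,1 --parallel=false -- hostname -f + juju exec --machine 0 --execution-group=maintenance -- uptime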
+ +Since juju exec creates tasks, you can query for the status of commands +started with juju run by calling +"juju operations --machines <id>,... --actions juju-exec". + +If you need to pass options to the command being run, you must precede the +command and its arguments with "--", to tell "juju exec" to stop processing +those arguments. For example: + + juju exec --all -- hostname -f + + + +--- + +------------------------- + diff --git a/tmp/t/10196.md b/tmp/t/10196.md new file mode 100644 index 000000000..c746fa929 --- /dev/null +++ b/tmp/t/10196.md @@ -0,0 +1,74 @@ +system | 2024-09-16 15:52:21 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [revoke](/t/10077), [add-user](/t/10193), [grant-cloud](/t/10164) + +## Summary +Grants access level to a Juju user for a model, controller, or application offer. + +## Usage +```juju grant [options] [ ... | ...]``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `-c`, `--controller` | | Controller to operate in | + +## Examples + +Grant user 'joe' 'read' access to model 'mymodel': + + juju grant joe read mymodel + +Grant user 'jim' 'write' access to model 'mymodel': + + juju grant jim write mymodel + +Grant user 'sam' 'read' access to models 'model1' and 'model2': + + juju grant sam read model1 model2 + +Grant user 'joe' 'read' access to application offer 'fred/prod.hosted-mysql': + + juju grant joe read fred/prod.hosted-mysql + +Grant user 'jim' 'consume' access to application offer 'fred/prod.hosted-mysql': + + juju grant jim consume fred/prod.hosted-mysql + +Grant user 'sam' 'read' access to application offers 'fred/prod.hosted-mysql' and 'mary/test.hosted-mysql': + + juju grant sam read fred/prod.hosted-mysql mary/test.hosted-mysql + + + +## Details + +By default, the controller is the current controller. + +Users with read access are limited in what they can do with models: +`juju models`, `juju machines`, and `juju status` + +Valid access levels for models are: + read + write + admin + +Valid access levels for controllers are: + login + superuser + +Valid access levels for application offers are: + read + consume + admin + +--- + +------------------------- + diff --git a/tmp/t/10197.md b/tmp/t/10197.md new file mode 100644 index 000000000..86a7d79a0 --- /dev/null +++ b/tmp/t/10197.md @@ -0,0 +1,51 @@ +system | 2024-09-16 15:57:45 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [download-backup](/t/10240) + +## Summary +Create a backup. + +## Usage +```juju create-backup [options] []``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `--filename` | juju-backup-<date>-<time>.tar.gz | Download to this file | +| `-m`, `--model` | | Model to operate in. Accepts [<controller name>:]<model name>|<model UUID> | +| `--no-download` | false | Do not download the archive. DEPRECATED. | + +## Examples + + juju create-backup + juju create-backup --no-download + + +## Details + +This command requests that Juju creates a backup of its state. +You may provide a note to associate with the backup. + +By default, the backup archive and associated metadata are downloaded. 
+ +Use --no-download to avoid getting a local copy of the backup downloaded +at the end of the backup process. In this case it is recommended that the +model config attribute "backup-dir" be set to point to a path where the +backup archives should be stored long term. This could be a remotely mounted +filesystem; the same path must exist on each controller if using HA. + +Use --verbose to see extra information about the backup. + +To access remote backups stored on the controller, see 'juju download-backup'. + + +--- + +------------------------- + diff --git a/tmp/t/10198.md b/tmp/t/10198.md new file mode 100644 index 000000000..347942e43 --- /dev/null +++ b/tmp/t/10198.md @@ -0,0 +1,37 @@ +system | 2024-09-16 15:56:00 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [users](/t/10175), [enable-user](/t/10241), [login](/t/10157) + +## Summary +Disables a Juju user. + +## Usage +```juju disable-user [options] ``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `-c`, `--controller` | | Controller to operate in | + +## Examples + + juju disable-user bob + + +## Details +A disabled Juju user is one that cannot log in to any controller. +This command has no effect on models that the disabled user may have +created and/or shared, nor on any applications associated with that user. + + + +--- + +------------------------- + diff --git a/tmp/t/10199.md b/tmp/t/10199.md new file mode 100644 index 000000000..413808c3c --- /dev/null +++ b/tmp/t/10199.md @@ -0,0 +1,67 @@ +system | 2024-09-16 15:52:43 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [add-cloud](/t/10162), [credentials](/t/10054), [controllers](/t/10152), [regions](/t/10112), [default-credential](/t/10055), [default-region](/t/10082), [show-cloud](/t/10215), [update-cloud](/t/10081), [update-public-clouds](/t/10115) +**Alias:** clouds + +## Summary +Lists all clouds available to Juju. + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `--all` | false | Show all available clouds | +| `-c`, `--controller` | | Controller to operate in | +| `--client` | false | Client operation | +| `--format` | tabular | Specify output format (json|tabular|yaml) | +| `-o`, `--output` | | Specify an output file | + +## Examples + + juju clouds + juju clouds --format yaml + juju clouds --controller mycontroller + juju clouds --controller mycontroller --client + juju clouds --client + + +## Details +Display the fundamental properties for each cloud known to Juju: +name, number of regions, number of registered credentials, default region, type, etc... + +Clouds known to this client are the clouds known to Juju out of the box +along with any which have been added with `add-cloud --client`. These clouds can be +used to create a controller and can be displayed using --client option. + +Clouds may be listed that are co-hosted with the Juju client. When the LXD hypervisor +is detected, the 'localhost' cloud is made available. When a microk8s installation is +detected, the 'microk8s' cloud is displayed. + +Use --controller option to list clouds from a controller. +Use --client option to list clouds from this client.
+This command's default output format is 'tabular'. Use 'json' and 'yaml' for +machine-readable output. + +Cloud metadata sometimes changes, e.g. providers add regions. Use the `update-public-clouds` +command to update public clouds or `update-cloud` to update other clouds. + +Use the `regions` command to list a cloud's regions. + +Use the `show-cloud` command to get more detail, such as regions and endpoints. + +Further reading: + + Documentation: https://juju.is/docs/olm/manage-clouds + microk8s: https://microk8s.io/docs + LXD hypervisor: https://documentation.ubuntu.com/lxd + + +--- + +------------------------- + diff --git a/tmp/t/10200.md b/tmp/t/10200.md new file mode 100644 index 000000000..e394e554a --- /dev/null +++ b/tmp/t/10200.md @@ -0,0 +1,39 @@ +system | 2024-09-16 15:51:50 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [agree](/t/10161) +**Alias:** agreements + +## Summary +List user's agreements. + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `-c`, `--controller` | | Controller to operate in | +| `--format` | tabular | Specify output format (json|tabular|yaml) | +| `-o`, `--output` | | Specify an output file | + +## Examples + + juju agreements + + +## Details + +Charms may require a user to accept its terms in order for it to be deployed. +In other words, some applications may only be installed if a user agrees to +accept some terms defined by the charm. + +This command lists the terms that the user has agreed to. + + +--- + +------------------------- + diff --git a/tmp/t/10201.md b/tmp/t/10201.md new file mode 100644 index 000000000..9ee523194 --- /dev/null +++ b/tmp/t/10201.md @@ -0,0 +1,57 @@ +system | 2024-09-16 15:58:16 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [add-credential](/t/10136), [autoload-credentials](/t/10230), [credentials](/t/10054), [default-credential](/t/10055), [set-credential](/t/10169), [update-credential](/t/10065) + +## Summary +Removes Juju credentials for a cloud. + +## Usage +```juju remove-credential [options] ``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `-c`, `--controller` | | Controller to operate in | +| `--client` | false | Client operation | +| `--force` | false | Force remove controller side credential, ignore validation errors | + +## Examples + + juju remove-credential google credential_name + juju remove-credential google credential_name --client + juju remove-credential google credential_name -c mycontroller + juju remove-credential google credential_name -c mycontroller --force + + + +## Details +The credential to be removed is specified by a "credential name". +Credential names, and optionally the corresponding authentication +material, can be listed with `juju credentials`. + +Use --controller option to remove credentials from a controller. + +When removing cloud credential from a controller, Juju performs additional +checks to ensure that there are no models using this credential. +Occasionally, these check may not be desired by the user and can be by-passed using --force. 
+If force remove was performed and some models were still using the credential, these models +will be left with unreachable machines. +Consequently, it is not recommended as a default remove action. +Models with unreachable machines are most commonly fixed by using another cloud credential, +see 'juju set-credential' for more information. + + +Use --client option to remove credentials from the current client. + + + +--- + +------------------------- + diff --git a/tmp/t/10202.md b/tmp/t/10202.md new file mode 100644 index 000000000..ffcf20d3b --- /dev/null +++ b/tmp/t/10202.md @@ -0,0 +1,43 @@ +system | 2024-09-16 15:56:14 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [add-ssh-key](/t/10238), [remove-ssh-key](/t/10119) + +## Summary +Lists the currently known SSH keys for the current (or specified) model. + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `--full` | false | Show full key instead of just the fingerprint | +| `-m`, `--model` | | Model to operate in. Accepts [<controller name>:]<model name>|<model UUID> | + +## Examples + + juju ssh-keys + +To examine the full key, use the '--full' option: + + juju ssh-keys -m jujutest --full + + +## Details +Juju maintains a per-model cache of SSH keys which it copies to each newly +created unit. +This command will display a list of all the keys currently used by Juju in +the current model (or the model specified, if the '-m' option is used). +By default a minimal list is returned, showing only the fingerprint of +each key and its text identifier. By using the '--full' option, the entire +key may be displayed. + + + +--- + +------------------------- + diff --git a/tmp/t/10203.md b/tmp/t/10203.md new file mode 100644 index 000000000..bc61c6da4 --- /dev/null +++ b/tmp/t/10203.md @@ -0,0 +1,57 @@ +system | 2024-09-16 15:56:25 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [run](/t/10052), [show-operation](/t/10083), [show-task](/t/10129) + +## Summary +Lists pending, running, or completed operations for specified application, units, machines, or all. + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `--actions` | | Comma separated list of action names to filter on | +| `--apps`, `--applications` | | Comma separated list of applications to filter on | +| `--format` | plain | Specify output format (json|plain|yaml) | +| `--limit` | 0 | The maximum number of operations to return | +| `-m`, `--model` | | Model to operate in.
Accepts [<controller name>:]<model name>|<model UUID> | +| `--machines` | | Comma separated list of machines to filter on | +| `-o`, `--output` | | Specify an output file | +| `--offset` | 0 | Return operations from offset onwards | +| `--status` | | Comma separated list of operation status values to filter on | +| `--units` | | Comma separated list of units to filter on | +| `--utc` | false | Show times in UTC | + +## Examples + + juju operations + juju operations --format yaml + juju operations --actions juju-exec + juju operations --actions backup,restore + juju operations --apps mysql,mediawiki + juju operations --units mysql/0,mediawiki/1 + juju operations --machines 0,1 + juju operations --status pending,completed + juju operations --apps mysql --units mediawiki/0 --status running --actions backup + + + +## Details + +List the operations with the specified query criteria. +When an application is specified, all units from that application are relevant. + +When run without any arguments, operations corresponding to actions for all +application units are returned. +To see operations corresponding to juju run tasks, specify an action name +"juju-exec" and/or one or more machines. + + +--- + +------------------------- + diff --git a/tmp/t/10204.md b/tmp/t/10204.md new file mode 100644 index 000000000..23d88f0b7 --- /dev/null +++ b/tmp/t/10204.md @@ -0,0 +1,84 @@ +system | 2024-09-16 15:58:18 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [status](/t/10173) + +## Summary +Output past statuses for the specified entity. + +## Usage +```juju show-status-log [options] ``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `--days` | 0 | Returns the logs for the past <days> days (cannot be combined with -n or --date) | +| `--format` | tabular | Specify output format (json|tabular|yaml) | +| `--from-date` | | Returns logs for any date after the passed one, the expected date format is YYYY-MM-DD (cannot be combined with -n or --days) | +| `-m`, `--model` | | Model to operate in. Accepts [<controller name>:]<model name>|<model UUID> | +| `-n` | 0 | Returns the last N logs (cannot be combined with --days or --date) | +| `-o`, `--output` | | Specify an output file | +| `--type` | unit | Type of statuses to be displayed [application|container|juju-container|juju-machine|juju-unit|machine|model|saas|unit|workload] | +| `--utc` | false | Display time as UTC in RFC3339 format | + +## Examples + +Show the status history for the specified unit: + + juju show-status-log mysql/0 + +Show the status history for the specified unit with the last 30 logs: + + juju show-status-log mysql/0 -n 30 + +Show the status history for the specified unit with the logs for the past 2 days: + + juju show-status-log mysql/0 --days 2 + +Show the status history for the specified unit with the logs for any date after 2020-01-01: + + juju show-status-log mysql/0 --from-date 2020-01-01 + +Show the status history for the specified application: + + juju show-status-log --type application wordpress + +Show the status history for the specified machine: + + juju show-status-log 0 + +Show the status history for the model: + + juju show-status-log --type model + + +## Details + +This command will report the history of status changes for +a given entity. +The statuses are available for the following types.
+--type supports: + application: statuses for the specified application + container: statuses from the agent that is managing containers + juju-container: statuses from the containers only and not their host machines + juju-machine: status of the agent that is managing a machine + juju-unit: statuses from the agent that is managing a unit + machine: statuses that occur due to provisioning of a machine + model: statuses for the model itself + saas: statuses for the specified SAAS application + unit: statuses for specified unit and its workload + workload: statuses for unit's workload + + and sorted by time of occurrence. + The default is unit. + + +--- + +------------------------- + diff --git a/tmp/t/10205.md b/tmp/t/10205.md new file mode 100644 index 000000000..9dbcad81c --- /dev/null +++ b/tmp/t/10205.md @@ -0,0 +1,109 @@ +system | 2024-09-16 15:56:12 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [disabled-commands](/t/10220), [enable-command](/t/10111) + +## Summary +Disable commands for the model. + +## Usage +```juju disable-command [options] [message...]``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `-m`, `--model` | | Model to operate in. Accepts [<controller name>:]<model name>|<model UUID> | + +## Examples + +To prevent the model from being destroyed: + + juju disable-command destroy-model "Check with SA before destruction." + +To prevent the machines, applications, units and relations from being removed: + + juju disable-command remove-object + +To prevent changes to the model: + + juju disable-command all "Model locked down" + + +## Details + +Juju allows you to safeguard deployed models from unintentional damage by preventing +the execution of operations that could alter the model. + +This is done by disabling certain sets of commands from successful execution. +Disabled commands must be manually enabled to proceed. + +Some commands offer a --force option that can be used to bypass the disabling. + +Commands that can be disabled are grouped based on logical operations as follows: + +"destroy-model" prevents: + destroy-controller + destroy-model + +"remove-object" prevents: + destroy-controller + destroy-model + detach-storage + remove-application + remove-machine + remove-relation + remove-saas + remove-storage + remove-unit + +"all" prevents: + add-machine + integrate + add-unit + add-ssh-key + add-user + attach-resource + attach-storage + change-user-password + config + consume + deploy + destroy-controller + destroy-model + disable-user + enable-ha + enable-user + expose + import-filesystem + import-ssh-key + model-defaults + model-config + reload-spaces + remove-application + remove-machine + remove-relation + remove-ssh-key + remove-unit + remove-user + resolved + retry-provisioning + run + scale-application + set-application-base + set-credential + set-constraints + sync-agents + unexpose + refresh + upgrade-model + + +--- + +------------------------- + diff --git a/tmp/t/10206.md b/tmp/t/10206.md new file mode 100644 index 000000000..95dc5a78f --- /dev/null +++ b/tmp/t/10206.md @@ -0,0 +1,60 @@ +system | 2024-09-16 15:51:33 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju.
+[/note] + + +## Summary +Ensure that sufficient controllers exist to provide redundancy. + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `-c`, `--controller` | | Controller to operate in | +| `--constraints` | | Additional machine constraints | +| `--format` | simple | Specify output format (json|simple|yaml) | +| `-n` | 0 | Number of controllers to make available | +| `-o`, `--output` | | Specify an output file | +| `--to` | | The machine(s) to become controllers, bypasses constraints | + +## Examples + +Ensure that the controller is still in highly available mode. If there is only 1 controller running, this will ensure there +are 3 running. If you have previously requested more than 3, +then that number will be ensured. + + juju enable-ha + +Ensure that 5 controllers are available: + + juju enable-ha -n 5 + +Ensure that 7 controllers are available, with newly created +controller machines having at least 8GB RAM. + + juju enable-ha -n 7 --constraints mem=8G + +Ensure that 7 controllers are available, with machines server1 and +server2 used first, and if necessary, newly created controller +machines having at least 8GB RAM. + + juju enable-ha -n 7 --to server1,server2 --constraints mem=8G + + +## Details + +To ensure availability of deployed applications, the Juju infrastructure +must itself be highly available. The enable-ha command will ensure +that the specified number of controller machines are used to make up the +controller. + +An odd number of controllers is required. + + +--- + +------------------------- + diff --git a/tmp/t/10207.md b/tmp/t/10207.md new file mode 100644 index 000000000..13c58cc8d --- /dev/null +++ b/tmp/t/10207.md @@ -0,0 +1,190 @@ +system | 2024-09-16 15:55:56 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [consume](/t/10213), [find-offers](/t/10097), [set-firewall-rule](/t/10151), [suspend-relation](/t/10179) + +## Summary +Integrate two applications. + +## Usage +```juju integrate [options] [:] [:]``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-m`, `--model` | | Model to operate in. Accepts [<controller name>:]<model name>|<model UUID> | +| `--via` | | for cross model integrations, specify the egress subnets for outbound traffic | + +## Examples + +Integrate wordpress and percona-cluster, asking Juju to resolve + the endpoint names. Expands to "wordpress:db" (with the requires role) and + "percona-cluster:server" (with the provides role). + + juju integrate wordpress percona-cluster + +Integrate wordpress and postgresql, using an explicit +endpoint name. + + juju integrate wordpress postgresql:db + +Integrate an etcd instance within the current model to centrally managed +EasyRSA Certificate Authority hosted in the "secrets" model. + + juju integrate etcd secrets.easyrsa + +Integrate a wordpress application with a mysql application hosted within the +"prod" model, using the "automation" user. Facilitate firewall management +by specifying the routes used for integration data. + + juju integrate wordpress automation/prod.mysql --via 192.168.0.0/16,10.0.0.0/8 + + +## Details + +Integrate two applications. Integrated applications communicate over a common +interface provided by the Juju controller that enables units to share information. 
+This topology allows units to share data, without needing direct connectivity +between units is restricted by firewall rules. Charms define the logic for +transferring and interpreting integration data. + +The most common use of 'juju integrate' specifies two applications that co-exist +within the same model: + + juju integrate + +Occasionally, more explicit syntax is required. Juju is able to integrate +units that span models, controllers and clouds, as described below. + + +Integrating applications in the same model + +The most common case specifies two applications, adding specific endpoint +name(s) when required. + + juju integrate [:] [:] + +The role and endpoint names are described by charms' metadata.yaml file. + +The order does not matter, however each side must implement complementary roles. +One side implements the "provides" role and the other implements the "requires" +role. Juju can always infer the role that each side is implementing, so specifying +them is not necessary as command-line arguments. + +<application> is the name of an application that has already been added to the +model. The Applications section of 'juju status' provides a list of current +applications. + +<endpoint> is the name of an endpoint defined within the metadata.yaml +of the charm for <application>. Valid endpoint names are defined within the +"provides:" and "requires:" section of that file. Juju will request that you +specify the <endpoint> if there is more than one possible integration between +the two applications. + + +Subordinate applications + +Subordinate applications are designed to be deployed alongside a primary +application. They must define a container scoped endpoint. When that endpoint +is related to a primary application, wherever a unit of the primary application +is deployed, a corresponding unit of the subordinate application will also be +deployed. Integration with the primary application has the same syntax as +integration any two applications within the same model. + + +Peer integrations + +Integrations within an application between units (known as "peer integrations") do +not need to be added manually. They are created when the 'juju add-unit' and +'juju scale-application' commands are executed. + + +Cross-model integrations + +Applications can be integrated, even when they are deployed to different models. +Those models may be managed by different controllers and/or be hosted on +different clouds. This functionality is known as "cross-model integration" or CMI. + + +Cross-model integrations: different model on the same controller + +Integrating applications in models managed by the same controller +is very similar to adding an integration between applications in the same model: + + juju integrate [:] .[:] + +<model> is the name of the model outside of the current context. This enables the +Juju controller to bridge two models. You can list the currently available +models with 'juju models'. + +To integrate models outside of the current context, add the '-m <model>' option: + + juju integrate -m [:] \ + .[:] + + +Cross-model integrations: different controllers + +Applications can be integrated with a remote application via an "offer URL" that has +been generated by the 'juju offer' command. The syntax for adding a cross-model +integration is similar to adding a local integration: + + juju integrate [:] + +<offer-endpoint> describes the remote application, from the point of view of the +local one. 
An <offer-endpoint> takes one of two forms: + + + [:] + +<offer-alias> is an alias that has been defined by the 'juju consume' command. +Use the 'juju find-offers' command to list aliases. + +<offer-url> is a path to enable Juju to resolve communication between +controllers and the models they control. + + [[:]/]. + +<controller> is the name of a controller. The 'juju controllers' command +provides a list of controllers. + +<user> is the user account of the model's owner. + + +Cross-model integration: network management + +When the consuming side (the local application) is behind a firewall and/or +NAT is used for outbound traffic, it is possible to use the '--via' option to +inform the offering side (the remote application) the source of traffic to +enable network ports to be opened. + + ... --via [,[, ...]] + + +Further reading: + + https://juju.is/docs/juju/integration + https://juju.is/docs/juju/cross-model-integration + + +--- + +------------------------- + +javierdelapuente | 2024-06-28 10:25:52 UTC | #2 + +[quote="system, post:1, topic:10207"] +`juju integrate [options] [:] [:]` +[/quote] + +Later in the document, instead or `relation`, it is used `endpoint` like: ` +juju integrate [:] [:] +` +I think only one name should be used. + +------------------------- + diff --git a/tmp/t/10208.md b/tmp/t/10208.md new file mode 100644 index 000000000..6181739e0 --- /dev/null +++ b/tmp/t/10208.md @@ -0,0 +1,41 @@ +system | 2024-09-16 15:57:16 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [models](/t/10090), [model-constraints](/t/10137), [constraints](/t/10060), [set-constraints](/t/10210) + +## Summary +Sets machine constraints on a model. + +## Usage +```juju set-model-constraints [options] = ...``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `-m`, `--model` | | Model to operate in. Accepts [<controller name>:]<model name>|<model UUID> | + +## Examples + + juju set-model-constraints cores=8 mem=16G + juju set-model-constraints -m mymodel root-disk=64G + + +## Details +Sets constraints on the model that can be viewed with +`juju model-constraints`. By default, the model is the current model. +Model constraints are combined with constraints set for an application with +`juju set-constraints` for commands (such as 'deploy') that provision +machines/containers for applications. Where model and application constraints overlap, the +application constraints take precedence. +Constraints for a specific application can be viewed with `juju constraints`. + + +--- + +------------------------- + diff --git a/tmp/t/10209.md b/tmp/t/10209.md new file mode 100644 index 000000000..fa94bf509 --- /dev/null +++ b/tmp/t/10209.md @@ -0,0 +1,34 @@ +system | 2024-09-16 15:56:40 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + + +## Summary +Retries provisioning for failed machines. + +## Usage +```juju retry-provisioning [options] [...]``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `--all` | false | retry provisioning all failed machines | +| `-m`, `--model` | | Model to operate in. 
Accepts [<controller name>:]<model name>|<model UUID> | + +## Examples + + + juju retry-provisioning 0 + + juju retry-provisioning 0 1 + + juju retry-provisioning --all + + +--- + +------------------------- + diff --git a/tmp/t/10210.md b/tmp/t/10210.md new file mode 100644 index 000000000..13625ac77 --- /dev/null +++ b/tmp/t/10210.md @@ -0,0 +1,46 @@ +system | 2024-09-16 15:57:18 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [constraints](/t/10060), [model-constraints](/t/10137), [set-model-constraints](/t/10208) + +## Summary +Sets machine constraints for an application. + +## Usage +```juju set-constraints [options] = ...``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `-m`, `--model` | | Model to operate in. Accepts [<controller name>:]<model name>|<model UUID> | + +## Examples + + juju set-constraints mysql mem=8G cores=4 + juju set-constraints -m mymodel apache2 mem=8G arch=amd64 + + +## Details +Sets constraints for an application, which are used for all new machines +provisioned for that application. They can be viewed with `juju constraints`. +By default, the model is the current model. +Application constraints are combined with model constraints, set with `juju +set-model-constraints`, for commands (such as 'juju deploy') that +provision machines for applications. Where model and application constraints +overlap, the application constraints take precedence. +Constraints for a specific model can be viewed with `juju model-constraints`. +This command requires that the application to have at least one unit. To apply +constraints to +the first unit set them at the model level or pass them as an argument +when deploying. + + +--- + +------------------------- + diff --git a/tmp/t/10211.md b/tmp/t/10211.md new file mode 100644 index 000000000..d54726575 --- /dev/null +++ b/tmp/t/10211.md @@ -0,0 +1,56 @@ +system | 2024-09-16 15:51:56 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [create-storage-pool](/t/10093), [remove-storage-pool](/t/10068) +**Alias:** storage-pools + +## Summary +List storage pools. + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `--format` | tabular | Specify output format (json|tabular|yaml) | +| `-m`, `--model` | | Model to operate in. Accepts [<controller name>:]<model name>|<model UUID> | +| `--name` | | Only show pools with these names | +| `-o`, `--output` | | Specify an output file | +| `--provider` | | Only show pools of these provider types | + +## Examples + +List all storage pools: + + juju storage-pools + +List only pools of type kubernetes, azure, ebs: + + juju storage-pools --provider kubernetes,azure,ebs + +List only pools named pool1 and pool2: + + juju storage-pools --name pool1,pool2 + + +## Details + +The user can filter on pool type, name. + +If no filter is specified, all current pools are listed. +If at least 1 name and type is specified, only pools that match both a name +AND a type from criteria are listed. +If only names are specified, only mentioned pools will be listed. +If only types are specified, all pools of the specified types will be listed. 
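+
+As a sketch of how the filters combine (the pool names and provider type below are purely
+illustrative), supplying both --name and --provider lists only the pools that match a given
+name AND a given provider type:
+
+    juju storage-pools --name pool1,pool2 --provider ebs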
+ +Both pool types and names must be valid. +Valid pool types are pool types that are registered for Juju model. + + +--- + +------------------------- + diff --git a/tmp/t/10212.md b/tmp/t/10212.md new file mode 100644 index 000000000..eb885d07d --- /dev/null +++ b/tmp/t/10212.md @@ -0,0 +1,40 @@ +system | 2024-09-16 15:54:46 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [add-user](/t/10193), [register](/t/10160), [users](/t/10175) + +## Summary +Show information about a user. + +## Usage +```juju show-user [options] []``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-c`, `--controller` | | Controller to operate in | +| `--exact-time` | false | Use full timestamp for connection times | +| `--format` | yaml | Specify output format (json|yaml) | +| `-o`, `--output` | | Specify an output file | + +## Examples + + juju show-user + juju show-user jsmith + juju show-user --format json + juju show-user --format yaml + + +## Details +By default, the YAML format is used and the user name is the current +user. + + +--- + +------------------------- + diff --git a/tmp/t/10213.md b/tmp/t/10213.md new file mode 100644 index 000000000..05c4d6943 --- /dev/null +++ b/tmp/t/10213.md @@ -0,0 +1,44 @@ +system | 2024-09-16 15:51:35 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [integrate](/t/10207), [offer](/t/10080), [remove-saas](/t/10087) + +## Summary +Add a remote offer to the model. + +## Usage +```juju consume [options] []``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `-m`, `--model` | | Model to operate in. Accepts [<controller name>:]<model name>|<model UUID> | + +## Examples + + juju consume othermodel.mysql + juju consume owner/othermodel.mysql + juju consume anothercontroller:owner/othermodel.mysql + + +## Details +Adds a remote offer to the model. Relations can be created later using "juju relate". + +The path to the remote offer is formatted as follows: + + [:][/]. + +If the controller name is omitted, Juju will use the currently active +controller. Similarly, if the model owner is omitted, Juju will use the user +that is currently logged in to the controller providing the offer. + + +--- + +------------------------- + diff --git a/tmp/t/10214.md b/tmp/t/10214.md new file mode 100644 index 000000000..e842340f7 --- /dev/null +++ b/tmp/t/10214.md @@ -0,0 +1,35 @@ +system | 2024-09-16 15:57:36 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [add-secret](/t/11144), [remove-secret](/t/11414), [show-secret](/t/10172), [update-secret](/t/11413) + +## Summary +Lists secrets available in the model. + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `--format` | tabular | Specify output format (json|tabular|yaml) | +| `-m`, `--model` | | Model to operate in. Accepts [<controller name>:]<model name>|<model UUID> | +| `-o`, `--output` | | Specify an output file | +| `--owner` | | Include secrets for the specified owner | + +## Examples + + juju secrets + juju secrets --format yaml + + +## Details + +Displays the secrets available for charms to use if granted access. 
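+
+For example, to narrow the listing to secrets owned by a particular application and emit the
+result in a machine-readable form (the application name "mysql" is only a placeholder), the
+documented --owner and --format flags can be combined:
+
+    juju secrets --owner mysql --format yaml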
+ + +--- + +------------------------- + diff --git a/tmp/t/10215.md b/tmp/t/10215.md new file mode 100644 index 000000000..1dad0f8ed --- /dev/null +++ b/tmp/t/10215.md @@ -0,0 +1,52 @@ +system | 2024-09-16 15:52:59 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [clouds](/t/10182), [add-cloud](/t/10162), [update-cloud](/t/10081) + +## Summary +Shows detailed information for a cloud. + +## Usage +```juju show-cloud [options] ``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `-c`, `--controller` | | Controller to operate in | +| `--client` | false | Client operation | +| `--format` | display | Specify output format (display|json|yaml) | +| `--include-config` | false | Print available config option details specific to the specified cloud | +| `-o`, `--output` | | Specify an output file | + +## Examples + + juju show-cloud google + juju show-cloud azure-china --output ~/azure_cloud_details.txt + juju show-cloud myopenstack --controller mycontroller + juju show-cloud myopenstack --client + juju show-cloud myopenstack --client --controller mycontroller + + +## Details + +Provided information includes 'defined' (public, built-in), 'type', +'auth-type', 'regions', 'endpoints', and cloud specific configuration +options. + +If ‘--include-config’ is used, additional configuration (key, type, and +description) specific to the cloud are displayed if available. + +Use --controller option to show a cloud from a controller. + +Use --client option to show a cloud known on this client. + + +--- + +------------------------- + diff --git a/tmp/t/10216.md b/tmp/t/10216.md new file mode 100644 index 000000000..319801eb8 --- /dev/null +++ b/tmp/t/10216.md @@ -0,0 +1,44 @@ +system | 2024-09-16 15:57:14 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [add-cloud](/t/10162), [update-cloud](/t/10081), [clouds](/t/10182) + +## Summary +Removes a cloud from Juju. + +## Usage +```juju remove-cloud [options] ``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `-c`, `--controller` | | Controller to operate in | +| `--client` | false | Client operation | + +## Examples + + juju remove-cloud mycloud + juju remove-cloud mycloud --client + juju remove-cloud mycloud --controller mycontroller + + +## Details + +Remove a cloud from Juju. + +If --controller is used, also remove the cloud from the specified controller, +if it is not in use. + +If --client is specified, Juju removes the cloud from this client. + + + +--- + +------------------------- + diff --git a/tmp/t/10217.md b/tmp/t/10217.md new file mode 100644 index 000000000..be990c5b3 --- /dev/null +++ b/tmp/t/10217.md @@ -0,0 +1,41 @@ +system | 2024-09-16 15:54:09 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [create-storage-pool](/t/10093), [remove-storage-pool](/t/10068), [storage-pools](/t/10228) + +## Summary +Update storage pool attributes. 
+ +## Usage +```juju update-storage-pool [options] [= [=...]]``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `-m`, `--model` | | Model to operate in. Accepts [<controller name>:]<model name>|<model UUID> | + +## Examples + +Update the storage-pool named iops with new configuration details: + + juju update-storage-pool operator-storage volume-type=provisioned-iops iops=40 + +Update which provider the pool is for: + + juju update-storage-pool lxd-storage type=lxd-zfs + + +## Details + +Update configuration attributes for a single existing storage pool. + + +--- + +------------------------- + diff --git a/tmp/t/10218.md b/tmp/t/10218.md new file mode 100644 index 000000000..5f1ddf53a --- /dev/null +++ b/tmp/t/10218.md @@ -0,0 +1,50 @@ +system | 2024-09-16 15:57:32 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [attach-resource](/t/10124), [charm-resources](/t/10099) + +## Summary +Show the resources for an application or unit. + +## Usage +```juju resources [options] ``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `--details` | false | show detailed information about resources used by each unit. | +| `--format` | tabular | Specify output format (json|tabular|yaml) | +| `-m`, `--model` | | Model to operate in. Accepts [<controller name>:]<model name>|<model UUID> | +| `-o`, `--output` | | Specify an output file | + +## Examples + +To list resources for an application: + + juju resources mysql + +To list resources for a unit: + + juju resources mysql/0 + +To show detailed information about resources used by a unit: + + juju resources mysql/0 --details + + +## Details + +This command shows the resources required by and those in use by an existing +application or unit in your model. When run for an application, it will also show any +updates available for resources from a store. + + +--- + +------------------------- + diff --git a/tmp/t/10219.md b/tmp/t/10219.md new file mode 100644 index 000000000..c9ce85686 --- /dev/null +++ b/tmp/t/10219.md @@ -0,0 +1,35 @@ +system | 2024-09-16 15:53:24 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [actions](/t/10069), [run](/t/10052) + +## Summary +Shows detailed information about an action. + +## Usage +```juju show-action [options] ``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `-m`, `--model` | | Model to operate in. Accepts [<controller name>:]<model name>|<model UUID> | + +## Examples + + juju show-action postgresql backup + + +## Details + +Show detailed information about an action on the target application. + + +--- + +------------------------- + diff --git a/tmp/t/10220.md b/tmp/t/10220.md new file mode 100644 index 000000000..8e1cf189f --- /dev/null +++ b/tmp/t/10220.md @@ -0,0 +1,88 @@ +system | 2024-09-16 15:51:15 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [disable-command](/t/10205), [enable-command](/t/10111) + +## Summary +List disabled commands. 
+ +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `--all` | false | Lists for all models (administrative users only) | +| `--format` | tabular | Specify output format (json|tabular|yaml) | +| `-m`, `--model` | | Model to operate in. Accepts [<controller name>:]<model name>|<model UUID> | +| `-o`, `--output` | | Specify an output file | + +## Details + +List disabled commands for the model. + +Commands that can be disabled are grouped based on logical operations as follows: + +"destroy-model" prevents: + destroy-controller + destroy-model + +"remove-object" prevents: + destroy-controller + destroy-model + detach-storage + remove-application + remove-machine + remove-relation + remove-saas + remove-storage + remove-unit + +"all" prevents: + add-machine + integrate + add-unit + add-ssh-key + add-user + attach-resource + attach-storage + change-user-password + config + consume + deploy + destroy-controller + destroy-model + disable-user + enable-ha + enable-user + expose + import-filesystem + import-ssh-key + model-defaults + model-config + reload-spaces + remove-application + remove-machine + remove-relation + remove-ssh-key + remove-unit + remove-user + resolved + retry-provisioning + run + scale-application + set-application-base + set-credential + set-constraints + sync-agents + unexpose + refresh + upgrade-model + + +--- + +------------------------- + diff --git a/tmp/t/10221.md b/tmp/t/10221.md new file mode 100644 index 000000000..7994883de --- /dev/null +++ b/tmp/t/10221.md @@ -0,0 +1,56 @@ +system | 2024-09-16 15:57:38 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [expose](/t/10109) + +## Summary +Removes public availability over the network for an application. + +## Usage +```juju unexpose [options] ``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `--endpoints` | | Unexpose only the ports that charms have opened for this comma-delimited list of endpoints | +| `-m`, `--model` | | Model to operate in. Accepts [<controller name>:]<model name>|<model UUID> | + +## Examples + + juju unexpose apache2 + +To unexpose only the ports that charms have opened for the "www", or "www" and "logs" endpoints: + + juju unexpose apache2 --endpoints www + + juju unexpose apache2 --endpoints www,logs + + +## Details +Adjusts the firewall rules and any relevant security mechanisms of the +cloud to deny public access to the application. + +Applications are unexposed by default when they get created. If exposed via +the "juju expose" command, they can be unexposed by running the "juju unexpose" +command. + +If no additional options are specified, the command will unexpose the +application (if exposed). + +The --endpoints option may be used to restrict the effect of this command to +the list of ports opened for a comma-delimited list of endpoints. + +Note that when the --endpoints option is provided, the application will still +remain exposed if any other of its endpoints are still exposed. However, if +none of its endpoints remain exposed, the application will be instead unexposed. 
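+
+A short sketch of that per-endpoint behaviour (the endpoint names are illustrative and assume
+the charm defines them): exposing two endpoints and then unexposing only one of them leaves
+the application exposed through the remaining endpoint:
+
+    juju expose apache2 --endpoints www,logs
+    juju unexpose apache2 --endpoints logs
+    juju status apache2     # the application still shows as exposed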
+ + +--- + +------------------------- + diff --git a/tmp/t/10228.md b/tmp/t/10228.md new file mode 100644 index 000000000..ea3e5c473 --- /dev/null +++ b/tmp/t/10228.md @@ -0,0 +1,55 @@ +system | 2024-09-16 15:53:42 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [create-storage-pool](/t/10093), [remove-storage-pool](/t/10068) + +## Summary +List storage pools. + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `--format` | tabular | Specify output format (json|tabular|yaml) | +| `-m`, `--model` | | Model to operate in. Accepts [<controller name>:]<model name>|<model UUID> | +| `--name` | | Only show pools with these names | +| `-o`, `--output` | | Specify an output file | +| `--provider` | | Only show pools of these provider types | + +## Examples + +List all storage pools: + + juju storage-pools + +List only pools of type kubernetes, azure, ebs: + + juju storage-pools --provider kubernetes,azure,ebs + +List only pools named pool1 and pool2: + + juju storage-pools --name pool1,pool2 + + +## Details + +The user can filter on pool type, name. + +If no filter is specified, all current pools are listed. +If at least 1 name and type is specified, only pools that match both a name +AND a type from criteria are listed. +If only names are specified, only mentioned pools will be listed. +If only types are specified, all pools of the specified types will be listed. + +Both pool types and names must be valid. +Valid pool types are pool types that are registered for Juju model. + + +--- + +------------------------- + diff --git a/tmp/t/10229.md b/tmp/t/10229.md new file mode 100644 index 000000000..c4b8ee5f1 --- /dev/null +++ b/tmp/t/10229.md @@ -0,0 +1,73 @@ +system | 2024-09-16 15:53:08 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [ssh](/t/10153), [debug-code](/t/10048) +**Alias:** debug-hooks + +## Summary +Launch a tmux session to debug hooks and/or actions. + +## Usage +```juju debug-hook [options] [hook or action names]``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `--container` | | the container name of the target pod | +| `-m`, `--model` | | Model to operate in. Accepts [<controller name>:]<model name>|<model UUID> | +| `--no-host-key-checks` | false | Skip host key checking (INSECURE) | +| `--proxy` | false | Proxy through the API server | +| `--pty` | <auto> | Enable pseudo-tty allocation | +| `--remote` | false | Target on the workload or operator pod (k8s-only) | + +## Examples + +Debug all hooks and actions of unit '0': + + juju debug-hooks mysql/0 + +Debug all hooks and actions of the leader: + + juju debug-hooks mysql/leader + +Debug the 'config-changed' hook of unit '1': + + juju debug-hooks mysql/1 config-changed + +Debug the 'pull-site' action and 'update-status' hook of unit '0': + + juju debug-hooks hello-kubecon/0 pull-site update-status + + +## Details + +The command launches a tmux session that will intercept matching hooks and/or +actions. + +Initially, the tmux session will take you to '/var/lib/juju' or '/home/ubuntu'. +As soon as a matching hook or action is fired, the tmux session will +automatically navigate you to '/var/lib/juju/agents/<unit-id>/charm' with a +properly configured environment. 
Unlike the 'juju debug-code' command, +the fired hooks and/or actions are not executed directly; instead, the user +needs to manually run the dispatch script inside the charm's directory. + +For more details on debugging charm code, see the charm SDK documentation. + +Valid unit identifiers are: + a standard unit ID, such as mysql/0 or; + leader syntax of the form <application>/leader, such as mysql/leader. + +If no hook or action is specified, all hooks and actions will be intercepted. + +See the "juju help ssh" for information about SSH related options +accepted by the debug-hooks command. + + +--- + +------------------------- + diff --git a/tmp/t/10230.md b/tmp/t/10230.md new file mode 100644 index 000000000..552bbc4f8 --- /dev/null +++ b/tmp/t/10230.md @@ -0,0 +1,76 @@ +system | 2024-09-16 15:57:28 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [add-credential](/t/10136), [credentials](/t/10054), [default-credential](/t/10055), [remove-credential](/t/10201) + +## Summary +Attempts to automatically detect and add credentials for a cloud. + +## Usage +```juju autoload-credentials [options] []``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `-c`, `--controller` | | Controller to operate in | +| `--client` | false | Client operation | + +## Examples + + juju autoload-credentials + juju autoload-credentials --client + juju autoload-credentials --controller mycontroller + juju autoload-credentials --client --controller mycontroller + juju autoload-credentials aws + + +## Details + +The command searches well known, cloud-specific locations on this client. +If credential information is found, it is presented to the user +in a series of prompts to facilitated interactive addition and upload. +An alternative to this command is `juju add-credential` + +After validating the contents, credentials are added to +this Juju client if --client is specified. + +To upload credentials to a controller, use --controller option. + +Below are the cloud types for which credentials may be autoloaded, +including the locations searched. + +EC2 + Credentials and regions: + 1. On Linux, $HOME/.aws/credentials and $HOME/.aws/config + 2. Environment variables AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY + +GCE + Credentials: + 1. A JSON file whose path is specified by the + GOOGLE_APPLICATION_CREDENTIALS environment variable + 2. On Linux, $HOME/.config/gcloud/application_default_credentials.json + Default region is specified by the CLOUDSDK_COMPUTE_REGION environment + variable. + 3. On Windows, %APPDATA%\gcloud\application_default_credentials.json + +OpenStack + Credentials: + 1. On Linux, $HOME/.novarc + 2. Environment variables OS_USERNAME, OS_PASSWORD, OS_TENANT_NAME, + OS_DOMAIN_NAME + +LXD + Credentials: + 1. On Linux, $HOME/.config/lxc/config.yml + + + +--- + +------------------------- + diff --git a/tmp/t/10231.md b/tmp/t/10231.md new file mode 100644 index 000000000..f177e14c3 --- /dev/null +++ b/tmp/t/10231.md @@ -0,0 +1,71 @@ +system | 2024-09-16 15:51:46 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. 
+[/note] + +> See also: [add-credential](/t/10136), [credentials](/t/10054), [remove-credential](/t/10201), [set-credential](/t/10169) +**Alias:** update-credential + +## Summary +Updates a controller credential for a cloud. + +## Usage +```juju update-credentials [options] [ []]``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `-c`, `--controller` | | Controller to operate in | +| `--client` | false | Client operation | +| `-f`, `--file` | | The YAML file containing credential details to update | +| `--force` | false | Force update controller side credential, ignore validation errors | +| `--region` | | Cloud region that credential is valid for | + +## Examples + + juju update-credential aws mysecrets + juju update-credential -f mine.yaml + juju update-credential -f mine.yaml --client + juju update-credential aws -f mine.yaml + juju update-credential azure --region brazilsouth -f mine.yaml + juju update-credential -f mine.yaml --controller mycontroller --force + + +## Details +Cloud credentials are used for model operations and manipulations. +Since it is common to have long-running models, it is also common for +these cloud credentials to become invalid during a model's lifetime. +When this happens, a user must update the cloud credential that +a model was created with to new, valid details on the controller. + +This command allows you to update an existing, already-stored, named, +cloud-specific credential on a controller as well as the one on this client. + +Use the --controller option to update a credential definition on a controller. + +When updating a cloud credential on a controller, Juju performs additional +checks to ensure that the models that use this credential can still +access cloud instances after the update. Occasionally, these checks may not be desired +by the user and can be bypassed using the --force option. +A forced update may leave some models with unreachable machines. +Consequently, it is not recommended as a default update action. +Models with unreachable machines are most commonly fixed by using another cloud credential; +see 'juju set-credential' for more information. + +Use --client to update a credential definition on this client. +If a user later uses a different client, say a different laptop, +the update will not affect that client's (laptop's) copy. + +Before a credential is updated, the new content is validated. For some providers, +cloud credentials are region specific. To validate the credential for a non-default region, +use --region. + + + +--- + +------------------------- + diff --git a/tmp/t/10232.md b/tmp/t/10232.md new file mode 100644 index 000000000..1de1a80f3 --- /dev/null +++ b/tmp/t/10232.md @@ -0,0 +1,42 @@ +system | 2024-09-16 15:55:50 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [run](/t/10052), [show-action](/t/10219) +**Alias:** actions + +## Summary +List actions defined for an application. + +## Usage +```juju list-actions [options] ``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `--format` | default | Specify output format (default|json|tabular|yaml) | +| `-m`, `--model` | | Model to operate in.
Accepts [<controller name>:]<model name>|<model UUID> | +| `-o`, `--output` | | Specify an output file | +| `--schema` | false | Display the full action schema | + +## Examples + + juju actions postgresql + juju actions postgresql --format yaml + juju actions postgresql --schema + + +## Details + +List the actions available to run on the target application, with a short +description. To show the full schema for the actions, use --schema. + + +--- + +------------------------- + diff --git a/tmp/t/10233.md b/tmp/t/10233.md new file mode 100644 index 000000000..0499522df --- /dev/null +++ b/tmp/t/10233.md @@ -0,0 +1,44 @@ +system | 2024-09-16 15:52:18 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [destroy-controller](/t/10113), [unregister](/t/10165) + +## Summary +Forcibly terminate all machines and other associated resources for a Juju controller. + +## Usage +```juju kill-controller [options] ``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `--no-prompt` | false | Do not ask for confirmation | +| `-t`, `--timeout` | 5m0s | Timeout before direct destruction | + +## Details + +Forcibly destroy the specified controller. If the API server is accessible, +this command will attempt to destroy the controller model and all models +and their resources. + +If the API server is unreachable, the machines of the controller model will be +destroyed through the cloud provisioner. If there are additional machines, +including machines within models, these machines will not be destroyed +and will never be reconnected to the Juju controller being destroyed. + +The normal process of killing the controller will involve watching the +models as they are brought down in a controlled manner. If for some reason the +models do not stop cleanly, there is a default five minute timeout. If no change +in the model state occurs for the duration of this timeout, the command will +stop watching and destroy the models directly through the cloud provider. + + +--- + +------------------------- + diff --git a/tmp/t/10234.md b/tmp/t/10234.md new file mode 100644 index 000000000..315fbefb8 --- /dev/null +++ b/tmp/t/10234.md @@ -0,0 +1,51 @@ +system | 2024-09-16 15:55:06 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [resources](/t/10218), [attach-resource](/t/10124) +**Alias:** charm-resources + +## Summary +Display the resources for a charm in a repository. + +## Usage +```juju list-charm-resources [options] ``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `--channel` | stable | the channel of the charm | +| `--format` | tabular | Specify output format (json|tabular|yaml) | +| `-m`, `--model` | | Model to operate in. Accepts [<controller name>:]<model name>|<model UUID> | +| `-o`, `--output` | | Specify an output file | + +## Examples + +Display charm resources for the postgresql charm: + + juju charm-resources postgresql + +Display charm resources for mycharm in the 2.0/edge channel: + + juju charm-resources mycharm --channel 2.0/edge + + + +## Details + +This command will report the resources and the current revision of each +resource for a charm in a repository. 
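+
+As an illustrative sketch (the charm name and channel are placeholders), the report for a
+specific channel can be requested in a machine-readable format by combining the documented
+--channel and --format flags:
+
+    juju charm-resources postgresql --channel 14/stable --format json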
+ +Channel can be specified with --channel. If not provided, stable is used. + +Where a channel is not supplied, stable is used. + + +--- + +------------------------- + diff --git a/tmp/t/10235.md b/tmp/t/10235.md new file mode 100644 index 000000000..c08b9fca8 --- /dev/null +++ b/tmp/t/10235.md @@ -0,0 +1,46 @@ +system | 2024-09-16 15:54:48 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [find-offers](/t/10097), [offer](/t/10080) + +## Summary +Removes one or more offers specified by their URL. + +## Usage +```juju remove-offer [options] ...``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `-c`, `--controller` | | Controller to operate in | +| `--force` | false | remove the offer as well as any relations to the offer | +| `-y`, `--yes` | false | Do not prompt for confirmation | + +## Examples + + juju remove-offer prod.model/hosted-mysql + juju remove-offer prod.model/hosted-mysql --force + juju remove-offer hosted-mysql + + +## Details + +Remove one or more application offers. + +If the --force option is specified, any existing relations to the +offer will also be removed. + +Offers to remove are normally specified by their URL. +It's also possible to specify just the offer name, in which case +the offer is considered to reside in the current model. + + +--- + +------------------------- + diff --git a/tmp/t/10236.md b/tmp/t/10236.md new file mode 100644 index 000000000..088c9b417 --- /dev/null +++ b/tmp/t/10236.md @@ -0,0 +1,44 @@ +system | 2024-09-16 15:53:33 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [add-space](/t/10117), [reload-spaces](/t/10063) + +## Summary +List known spaces, including associated subnets. + +## Usage +```juju spaces [options] [--short] [--format yaml|json] [--output ]``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `--format` | tabular | Specify output format (json|tabular|yaml) | +| `-m`, `--model` | | Model to operate in. Accepts [<controller name>:]<model name>|<model UUID> | +| `-o`, `--output` | | Specify an output file | +| `--short` | false | only display spaces. | + +## Examples + +List spaces and their subnets: + + juju spaces + +List spaces: + + juju spaces --short + + +## Details +Displays all defined spaces. By default both spaces and their subnets are displayed. +Supplying the --short option will list just the space names. +The --output argument allows the command's output to be redirected to a file. + +--- + +------------------------- + diff --git a/tmp/t/10237.md b/tmp/t/10237.md new file mode 100644 index 000000000..a945a3bd5 --- /dev/null +++ b/tmp/t/10237.md @@ -0,0 +1,207 @@ +system | 2024-09-16 15:53:54 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [controllers](/t/10152), [model-config](/t/10096), [show-cloud](/t/10215) + +## Summary +Displays or sets configuration settings for a controller. 
+ +## Usage +```juju controller-config [options] [[=] ...]``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `-c`, `--controller` | | Controller to operate in | +| `--color` | false | Use ANSI color codes in output | +| `--file` | | path to yaml-formatted configuration file | +| `--format` | tabular | Specify output format (json|tabular|yaml) | +| `--ignore-read-only-fields` | false | Ignore read only fields that might cause errors to be emitted while processing yaml documents | +| `--no-color` | false | Disable ANSI color codes in tabular output | +| `-o`, `--output` | | Specify an output file | + +## Examples + +Print all config values for the current controller: + + juju controller-config + +Print the value of "api-port" for the current controller: + + juju controller-config api-port + +Print all config values for the controller "mycontroller": + + juju controller-config -c mycontroller + +Set the "auditing-enabled" and "audit-log-max-backups" keys: + + juju controller-config auditing-enabled=true audit-log-max-backups=5 + +Set the current controller's config from a yaml file: + + juju controller-config --file path/to/file.yaml + + +## Details + +To view all configuration values for the current controller, run + juju controller-config +You can target a specific controller using the -c flag: + juju controller-config -c +By default, the config will be printed in a tabular format. You can instead +print it in json or yaml format using the --format flag: + juju controller-config --format json + juju controller-config --format yaml + +To view the value of a single config key, run + juju controller-config key +To set config values, run + juju controller-config key1=val1 key2=val2 ... + +Config values can be imported from a yaml file using the --file flag: + juju controller-config --file=path/to/cfg.yaml +This allows you to e.g. save a controller's config to a file: + juju controller-config --format=yaml > cfg.yaml +and then import the config later. Note that the output of controller-config +may include read-only values, which will cause an error when importing later. +To prevent the error, use the --ignore-read-only-fields flag: + juju controller-config --file=cfg.yaml --ignore-read-only-fields + +You can also read from stdin using "-", which allows you to pipe config values +from one controller to another: + juju controller-config -c c1 --format=yaml \ + | juju controller-config -c c2 --file=- --ignore-read-only-fields +You can simultaneously read config from a yaml file and set config keys +as above. The command-line args will override any values specified in the file. + +The following keys are available: + + agent-logfile-max-backups: + type: int + description: The number of old agent log files to keep (compressed) + agent-logfile-max-size: + type: string + description: The maximum size of the agent log file + agent-ratelimit-max: + type: int + description: The maximum size of the token bucket used to ratelimit agent connections + agent-ratelimit-rate: + type: string + description: The time taken to add a new token to the ratelimit bucket + api-port-open-delay: + type: string + description: "The duration that the controller will wait \nbetween when the controller + has been deemed to be ready to open \nthe api-port and when the api-port is actually + opened \n(only used when a controller-api-port value is set)." 
+ application-resource-download-limit: + type: int + description: The maximum number of concurrent resources downloads per application + audit-log-capture-args: + type: bool + description: Determines if the audit log contains the arguments passed to API methods + audit-log-exclude-methods: + type: list + description: The list of Facade.Method names that aren't interesting for audit logging + purposes. + audit-log-max-backups: + type: int + description: The number of old audit log files to keep (compressed) + audit-log-max-size: + type: string + description: The maximum size for the current controller audit log file + auditing-enabled: + type: bool + description: Determines if the controller records auditing information + caas-image-repo: + type: string + description: The docker repo to use for the jujud operator and mongo images + controller-api-port: + type: int + description: |- + An optional port that may be set for controllers + that have a very heavy load. If this port is set, this port is used by + the controllers to talk to each other - used for the local API connection + as well as the pubsub forwarders, and the raft workers. If this value is + set, the api-port isn't opened until the controllers have started properly. + controller-name: + type: string + description: The canonical name of the controller + controller-resource-download-limit: + type: int + description: The maximum number of concurrent resources downloads across all the + applications on the controller + features: + type: list + description: A list of runtime changeable features to be updated + juju-ha-space: + type: string + description: The network space within which the MongoDB replica-set should communicate + juju-mgmt-space: + type: string + description: The network space that agents should use to communicate with controllers + max-agent-state-size: + type: int + description: The maximum size (in bytes) of internal state data that agents can + store to the controller + max-charm-state-size: + type: int + description: The maximum size (in bytes) of charm-specific state that units can + store to the controller + max-debug-log-duration: + type: string + description: The maximum duration that a debug-log session is allowed to run + max-prune-txn-batch-size: + type: int + description: (deprecated) The maximum number of transactions evaluated in one go + when pruning + max-prune-txn-passes: + type: int + description: (deprecated) The maximum number of batches processed when pruning + migration-agent-wait-time: + type: string + description: The maximum during model migrations that the migration worker will + wait for agents to report on phases of the migration + model-logfile-max-backups: + type: int + description: The number of old model log files to keep (compressed) + model-logfile-max-size: + type: string + description: The maximum size of the log file written out by the controller on behalf + of workers running for a model + model-logs-size: + type: string + description: The size of the capped collections used to hold the logs for the models + mongo-memory-profile: + type: string + description: Sets mongo memory profile + prune-txn-query-count: + type: int + description: The number of transactions to read in a single query + prune-txn-sleep-time: + type: string + description: The amount of time to sleep between processing each batch query + public-dns-address: + type: string + description: Public DNS address (with port) of the controller. 
+ query-tracing-enabled: + type: bool + description: Enable query tracing for the dqlite driver + query-tracing-threshold: + type: string + description: "The minimum duration of a query for it to be traced. The lower the + \nthreshold, the more queries will be output. A value of 0 means all queries \nwill + be output if tracing is enabled." + + + + +--- + +------------------------- + diff --git a/tmp/t/10238.md b/tmp/t/10238.md new file mode 100644 index 000000000..915fe9a4d --- /dev/null +++ b/tmp/t/10238.md @@ -0,0 +1,53 @@ +system | 2024-09-16 15:56:53 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [ssh-keys](/t/10202), [remove-ssh-key](/t/10119), [import-ssh-key](/t/10167) + +## Summary +Adds a public SSH key to a model. + +## Usage +```juju add-ssh-key [options] ...``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `-m`, `--model` | | Model to operate in. Accepts [<controller name>:]<model name>|<model UUID> | + +## Examples + + juju add-ssh-key "ssh-rsa qYfS5LieM79HIOr535ret6xy + AAAAB3NzaC1yc2EAAAADAQA6fgBAAABAQCygc6Rc9XgHdhQqTJ + Wsoj+I3xGrOtk21xYtKijnhkGqItAHmrE5+VH6PY1rVIUXhpTg + pSkJsHLmhE29OhIpt6yr8vQSOChqYfS5LieM79HIOJEgJEzIqC + 52rCYXLvr/BVkd6yr4IoM1vpb/n6u9o8v1a0VUGfc/J6tQAcPR + ExzjZUVsfjj8HdLtcFq4JLYC41miiJtHw4b3qYu7qm3vh4eCiK + 1LqLncXnBCJfjj0pADXaL5OQ9dmD3aCbi8KFyOEs3UumPosgmh + VCAfjjHObWHwNQ/ZU2KrX1/lv/+lBChx2tJliqQpyYMiA3nrtS + jfqQgZfjVF5vz8LESQbGc6+vLcXZ9KQpuYDt joe@ubuntu" + +For ease of use it is possible to use shell substitution to pass the key +to the command: + + juju add-ssh-key "$(cat ~/mykey.pub)" + + + +## Details +Juju maintains a per-model cache of public SSH keys which it copies to +each unit (including units already deployed). By default this includes the +key of the user who created the model (assuming it is stored in the +default location ~/.ssh/). Additional keys may be added with this command, +quoting the entire public key as an argument. + + + +--- + +------------------------- + diff --git a/tmp/t/10239.md b/tmp/t/10239.md new file mode 100644 index 000000000..2564bb63b --- /dev/null +++ b/tmp/t/10239.md @@ -0,0 +1,61 @@ +system | 2024-09-16 15:55:52 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [add-unit](/t/10141), [remove-unit](/t/10125) + +## Summary +Displays information about a unit. + +## Usage +```juju show-unit [options] ``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `--app` | false | only show application relation data | +| `--endpoint` | | only show relation data for the specified endpoint | +| `--format` | yaml | Specify output format (json|smart|yaml) | +| `-m`, `--model` | | Model to operate in. 
Accepts [<controller name>:]<model name>|<model UUID> | +| `-o`, `--output` | | Specify an output file | +| `--related-unit` | | only show relation data for the specified unit | + +## Examples + +To show information about a unit: + + juju show-unit mysql/0 + +To show information about multiple units: + + juju show-unit mysql/0 wordpress/1 + +To show only the application relation data for a unit: + + juju show-unit mysql/0 --app + +To show only the relation data for a specific endpoint: + + juju show-unit mysql/0 --endpoint db + +To show only the relation data for a specific related unit: + + juju show-unit mysql/0 --related-unit wordpress/2 + + +## Details + +The command takes deployed unit names as an argument. + +Optionally, relation data for only a specified endpoint +or related unit may be shown, or just the application data. + + +--- + +------------------------- + diff --git a/tmp/t/10240.md b/tmp/t/10240.md new file mode 100644 index 000000000..d0aa002b3 --- /dev/null +++ b/tmp/t/10240.md @@ -0,0 +1,39 @@ +system | 2024-09-16 15:53:58 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [create-backup](/t/10197) + +## Summary +Download a backup archive file. + +## Usage +```juju download-backup [options] /full/path/to/backup/on/controller``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `--filename` | | Download target | +| `-m`, `--model` | | Model to operate in. Accepts [<controller name>:]<model name>|<model UUID> | + +## Examples + + juju download-backup /full/path/to/backup/on/controller + + +## Details + +download-backup retrieves a backup archive file. + +If --filename is not used, the archive is downloaded to a temporary +location and the filename is printed to stdout. + + +--- + +------------------------- + diff --git a/tmp/t/10241.md b/tmp/t/10241.md new file mode 100644 index 000000000..46423a18d --- /dev/null +++ b/tmp/t/10241.md @@ -0,0 +1,34 @@ +system | 2024-09-16 15:52:25 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [users](/t/10175), [disable-user](/t/10198), [login](/t/10157) + +## Summary +Re-enables a previously disabled Juju user. + +## Usage +```juju enable-user [options] ``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `-c`, `--controller` | | Controller to operate in | + +## Examples + + juju enable-user bob + + +## Details +An enabled Juju user is one that can log in to a controller. + + +--- + +------------------------- + diff --git a/tmp/t/10242.md b/tmp/t/10242.md new file mode 100644 index 000000000..eb41f66ec --- /dev/null +++ b/tmp/t/10242.md @@ -0,0 +1,72 @@ +system | 2024-09-16 15:52:54 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [ssh](/t/10153), [debug-code](/t/10048) + +## Summary +Launch a tmux session to debug hooks and/or actions. + +## Usage +```juju debug-hooks [options] [hook or action names]``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `--container` | | the container name of the target pod | +| `-m`, `--model` | | Model to operate in. 
Accepts [<controller name>:]<model name>|<model UUID> | +| `--no-host-key-checks` | false | Skip host key checking (INSECURE) | +| `--proxy` | false | Proxy through the API server | +| `--pty` | <auto> | Enable pseudo-tty allocation | +| `--remote` | false | Target on the workload or operator pod (k8s-only) | + +## Examples + +Debug all hooks and actions of unit '0': + + juju debug-hooks mysql/0 + +Debug all hooks and actions of the leader: + + juju debug-hooks mysql/leader + +Debug the 'config-changed' hook of unit '1': + + juju debug-hooks mysql/1 config-changed + +Debug the 'pull-site' action and 'update-status' hook of unit '0': + + juju debug-hooks hello-kubecon/0 pull-site update-status + + +## Details + +The command launches a tmux session that will intercept matching hooks and/or +actions. + +Initially, the tmux session will take you to '/var/lib/juju' or '/home/ubuntu'. +As soon as a matching hook or action is fired, the tmux session will +automatically navigate you to '/var/lib/juju/agents/<unit-id>/charm' with a +properly configured environment. Unlike the 'juju debug-code' command, +the fired hooks and/or actions are not executed directly; instead, the user +needs to manually run the dispatch script inside the charm's directory. + +For more details on debugging charm code, see the charm SDK documentation. + +Valid unit identifiers are: + a standard unit ID, such as mysql/0 or; + leader syntax of the form <application>/leader, such as mysql/leader. + +If no hook or action is specified, all hooks and actions will be intercepted. + +See the "juju help ssh" for information about SSH related options +accepted by the debug-hooks command. + + +--- + +------------------------- + diff --git a/tmp/t/10243.md b/tmp/t/10243.md new file mode 100644 index 000000000..5ffca1708 --- /dev/null +++ b/tmp/t/10243.md @@ -0,0 +1,42 @@ +system | 2024-09-16 15:55:31 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [add-machine](/t/10071) + +## Summary +Show a machine's status. + +## Usage +```juju show-machine [options] ...``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `--color` | false | Force use of ANSI color codes | +| `--format` | yaml | Specify output format (json|tabular|yaml) | +| `-m`, `--model` | | Model to operate in. Accepts [<controller name>:]<model name>|<model UUID> | +| `-o`, `--output` | | Specify an output file | +| `--utc` | false | Display time as UTC in RFC3339 format | + +## Examples + + juju show-machine 0 + juju show-machine 1 2 3 + + +## Details + +Show a specified machine on a model. Default format is in yaml, +other formats can be specified with the "--format" option. +Available formats are yaml, tabular, and json + + +--- + +------------------------- + diff --git a/tmp/t/10244.md b/tmp/t/10244.md new file mode 100644 index 000000000..35de8579e --- /dev/null +++ b/tmp/t/10244.md @@ -0,0 +1,52 @@ +system | 2024-09-16 15:51:23 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [spaces](/t/10236), [show-space](/t/10095), [show-application](/t/10177) + +## Summary +Change bindings for a deployed application. 
+ +## Usage +```juju bind [options] [] [= ...]``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `--force` | false | Allow endpoints to be bound to spaces that might not be available to all existing units | +| `-m`, `--model` | | Model to operate in. Accepts [<controller name>:]<model name>|<model UUID> | + +## Examples + +To update the default binding for the application and automatically update all +existing endpoint bindings that were referencing the old default, you can use +the following syntax: + + juju bind foo new-default + +To bind individual endpoints to a space you can use the following syntax: + + juju bind foo endpoint-1=space-1 endpoint-2=space-2 + +Finally, the above commands can be combined to update both the default space +and individual endpoints in one go: + + juju bind foo new-default endpoint-1=space-1 + + + +## Details + +In order to be able to bind any endpoint to a space, all machines where the +application units are deployed to are required to be configured with an address +in that space. However, you can use the --force option to bypass this check. + + +--- + +------------------------- + diff --git a/tmp/t/1033.md b/tmp/t/1033.md new file mode 100644 index 000000000..3d0cf17f9 --- /dev/null +++ b/tmp/t/1033.md @@ -0,0 +1,323 @@ +system | 2024-05-29 22:45:06 UTC | #1 + + + +> See also: [Action](/t/6208) + +This document demonstrates how to manage actions. + + +**Contents:** +- [List all actions](#heading--list-all-actions) +- [Show details about an action](#heading--show-details-about-an-action) +- [Run an action](#heading--run-an-action) +- [Manage action tasks](#heading--manage-action-tasks) +- [Manage action operations](#heading--manage-action-operations) +- [Debug an action](#heading--debug-an-action) + + + +

List all actions

+ +[tabs] +[tab version="juju"] + +To list the actions defined for a deployed application, use the `actions` command followed by the deployed charm's name. For example, assuming you've already deployed the `git` charm, you can find out the actions it supports as below: + +```text +juju actions git +``` + +This should output: + +```text +Action Description +add-repo Create a git repository. +add-repo-user Give a user permissions to access a repository. +add-user Create a new user. +get-repo Return the repository's path. +list-repo-users List all users who have access to a repository. +list-repos List existing git repositories. +list-user-repos List all the repositories a user has access to. +list-users List all users. +remove-repo Remove a git repository. +remove-repo-user Revoke a user's permissions to access a repository. +remove-user Remove a user. +``` + +By passing various options, you can also do a number of other things such as specify a model or an output format or request the full schema for all the actions of an application. Below we demonstrate the `--schema` and `--format` options: + +```text +juju actions git --schema --format yaml +``` + +Partial output: + +```text +add-repo: + additionalProperties: false + description: Create a git repository. + properties: + repo: + description: Name of the git repository. + type: string + required: + - repo + title: add-repo + type: object +``` + +[note] +The full schema is under the `properties` key of the root action. Actions rely on [JSON-Schema](http://json-schema.org) for validation. The top-level keys shown for the action (`description` and `properties`) may include future additions to the feature. +[/note] + +> See more: [`juju actions`](/t/10069) + +[/tab] +[tab version="terraform juju"] + +[/tab] +[tab version="python libjuju"] +To list the actions defined for a deployed application, use the `get_actions()` method on the `Application` object to get all the actions defined for this application. + +```python +await my_app.get_actions() +``` + +> See more: [`Application (object)`](https://pythonlibjuju.readthedocs.io/en/latest/narrative/application.html), [`get_actions (method)`](https://pythonlibjuju.readthedocs.io/en/latest/api/juju.application.html#juju.application.Application.get_actions) + +[/tab] +[/tabs] + + +
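+
+For completeness, here is a minimal, self-contained `python-libjuju` sketch that connects to your current model and lists the actions of a deployed application. The `git` application name mirrors the example above; the connection step is an assumption about your environment (it uses whatever model your Juju client is currently pointing at):
+
+```python
+import asyncio
+
+from juju.model import Model
+
+
+async def list_actions():
+    model = Model()
+    await model.connect()  # connect to the currently active controller/model
+    try:
+        app = model.applications["git"]    # assumes the 'git' charm is deployed
+        actions = await app.get_actions()  # mapping of action name -> description
+        for name, description in actions.items():
+            print(f"{name}: {description}")
+    finally:
+        await model.disconnect()
+
+
+asyncio.run(list_actions())
+```
+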

Show details about an action

+ +[tabs] +[tab version="juju"] + +To see detailed information about an application action, use the `show-action` command followed by the name of the charm and the name of the action. For example, the code below will show detailed information about the `backup` action of the `postgresql` application. + +```text +juju show-action postgresql backup +``` + + + +> See more: [`juju show-action`](/t/10219) + +[/tab] +[tab version="terraform juju"] + +[/tab] +[tab version="python libjuju"] +The `python-libjuju` client does not support this. Please use the `juju` client. +[/tab] +[/tabs] + +
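+
+If you are using the `juju` client, you can also ask for machine-readable output, which is handy for scripting; for example (this assumes the standard `--format` flag that Juju's show commands generally accept):
+
+```text
+juju show-action postgresql backup --format json
+```
+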

Run an action

+ +[note type=information] +**Did you know?** When you run an action, how the action is run depends on the type of the charm. If your charm is a machine charm, actions are executed on the same machine as the application. If your charm is a Kubernetes charm implementing the sidecar pattern, the action is run in the charm container. +[/note] + +[tabs] +[tab version="juju"] + +To run an action on a unit, use the `run` command followed by the name of the unit and the name of the action you want to run. + +```text +juju run mysql/3 backup +``` + + + +By using various options, you can choose to run the action in the background, specify a timeout time, pass a list of actions in the form of a YAML file, etc. See the command reference doc for more. + +Running an action returns the overall operation ID as well as the individual task ID(s) for each unit. + + + +> See more: [`juju run`](/t/10052) (before `juju v.3.0`, `run-action`) + +[/tab] +[tab version="terraform juju"] + +[/tab] +[tab version="python libjuju"] +To run an action on a unit, use the `run_action()` method on a Unit object of a deployed application. + +Note that "running" an action on a unit, enqueues an action to be performed. The result will be an Action object to interact with. You will need to call `action.wait()` on that object to wait for the action to complete and retrieve the results. + +```python +# Assume we deployed a git application +my_app = await model.deploy('git', application_name='git', channel='stable') +my_unit = my_app.units[0] + +action = await my_unit.run_action('add-repo', repo='myrepo') +await action.wait() # will return the result for the action +``` +> See more: [`Unit (object)`](https://pythonlibjuju.readthedocs.io/en/latest/narrative/unit.html), [`Action (object)`](https://pythonlibjuju.readthedocs.io/en/latest/api/juju.action.html#juju.action.Action), [`Unit.run_action (method)`](https://pythonlibjuju.readthedocs.io/en/latest/api/juju.unit.html#juju.unit.Unit.run_action), [`Action.wait() (method)`](https://pythonlibjuju.readthedocs.io/en/latest/api/juju.action.html#juju.action.Action.wait) + +[/tab] +[/tabs] + +
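+
+With the `juju` client, a couple of common variations look like the sketch below (flag names as listed by `juju run --help` in Juju 3.x; treat them as an assumption for your exact version):
+
+```text
+# Enqueue the action and return immediately, without waiting for the result
+juju run mysql/3 backup --background
+
+# Wait up to two minutes for the task to finish before giving up
+juju run mysql/3 backup --wait=2m
+```
+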

Manage action tasks

+> See also: [Task](/t/7933) + +- [Show details about a task](#heading--show-details-about-a-task) +- [Cancel a task](#heading--cancel-a-task) + +

Show details about a task

+ +[tabs] +[tab version="juju"] + +To drill down to the result of running an action on a specific unit (the stdout, stderror, log messages, etc.), use the `show-task` command followed by the task ID (returned by the `run` command). For example, + +```text +juju show-task 1 +``` + +> See more: [`juju show-task`](/t/10129) + +[/tab] +[tab version="terraform juju"] +The `terraform juju` client does not support this. Please use the `juju` client. +[/tab] +[tab version="python libjuju"] +The `python-libjuju` client does not support this. Please use the `juju` client. +[/tab] +[/tabs] + +

Cancel a task

+ +[tabs] +[tab version="juju"] + +Suppose you've run an action but would now like to cancel the resulting pending or running task. You can do so using the `cancel-task` command. For example: + +```text +juju cancel-task 1 +``` + +> See more: [`juju cancel-task`](/t/10053) + +[/tab] +[tab version="terraform juju"] +The `terraform juju` client does not support this. Please use the `juju` client. +[/tab] +[tab version="python libjuju"] +The `python-libjuju` client does not support this. Please use the `juju` client. +[/tab] +[/tabs] + +
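+
+If you have several tasks to cancel, the command accepts more than one task ID at a time (an assumption based on the command's usage string; check `juju cancel-task --help` for your version):
+
+```text
+juju cancel-task 1 2 3
+```
+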

Manage action operations

+> See also: [Operation](/t/7934) + +- [View the pending, running, or completed operations](#heading--view-the-pending-running-or-completed-operations) +- [Show details about an operation](#heading--show-details-about-an-operation) + +

View the pending, running, or completed operations

+ +[tabs] +[tab version="juju"] + +To view the pending, running, or completed status of each `juju run ... ` invocation, run the `operations` command: + +```text +juju operations +``` + +This will show the operations corresponding to the actions for all the application units. You can filter this by passing various options (e.g., `--actions backup`, `--units mysql/0`, `--machines 0,1`, `--status pending,completed`, etc.). + +> See more: [`juju operations`](/t/10203) + +[/tab] +[tab version="terraform juju"] +The `terraform juju` client does not support this. Please use the `juju` client. +[/tab] +[tab version="python libjuju"] +The `python-libjuju` client does not support this. Please use the `juju` client. +[/tab] +[/tabs] + +
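+
+With the `juju` client, combining the filters mentioned above might look like this:
+
+```text
+# Only operations that are still pending or running
+juju operations --status pending,running
+
+# Only 'backup' actions that ran on mysql/0
+juju operations --units mysql/0 --actions backup
+```
+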

Show details about an operation

+ +[tabs] +[tab version="juju"] + +To see the status of the individual tasks belonging to a given operation, run the `show-operation` command followed by the operation ID. + +```text +juju show-operation 1 +``` + +As usual, by adding various options, you can specify an output format, choose to watch indefinitely or specify a timeout time, etc. + +> See more: [`juju show-operation`](/t/10083) + +[/tab] +[tab version="terraform juju"] +The `terraform juju` client does not support this. Please use the `juju` client. +[/tab] +[tab version="python libjuju"] +The `python-libjuju` client does not support this. Please use the `juju` client. +[/tab] +[/tabs] + +
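+
+With the `juju` client, for example, to get machine-readable output for scripting (assuming the standard `--format` flag):
+
+```text
+juju show-operation 1 --format json
+```
+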

Debug an action

+ +[tabs] +[tab version="juju"] + +To debug an action (or more), use the `debug-hooks` command followed by the name of the unit and the name(s) of the action(s). For example, if you want to check the `add-repo` action of the `git` charm, use: + +```text +juju debug-hooks git/0 add-repo +``` + +> See more: [`juju debug-code`](/t/10048), [`juju debug-hooks`](/t/10242), [Charm SDK | How to debug a charm](https://juju.is/docs/sdk/debug-a-charm) + +[/tab] +[tab version="terraform juju"] +The `terraform juju` client does not support this. Please use the `juju` client. +[/tab] +[tab version="python libjuju"] +The `python-libjuju` client does not support this. Please use the `juju` client. +[/tab] +[/tabs] + +
+ +> **Contributors:** @cderici, @pedroleaoc, @pmatulis, @tmihoc, @wallyworld + +------------------------- + +pedroleaoc | 2021-06-08 18:06:35 UTC | #2 + + + +------------------------- + +pedroleaoc | 2022-10-14 11:32:01 UTC | #3 + + + +------------------------- + diff --git a/tmp/t/1041.md b/tmp/t/1041.md new file mode 100644 index 000000000..4798f24ab --- /dev/null +++ b/tmp/t/1041.md @@ -0,0 +1,170 @@ +system | 2023-05-26 13:57:46 UTC | #1 + +> See also: +> - [File 'icon.svg'](/t/7153) +> - [How to publish your charm on Charmhub](/t/how-to-publish-your-charm-on-charmhub/4462) +> - [How to add docs to your charm page on Charmhub](/t/3784) + +You've released your charm to the `stable` channel on Charmhub. This document shows you how to make it more stand out by adding a unique and recognisable icon for it. + +**Contents:** + +- [Icon specifications](#heading--icon-specifications) +- [Creating an icon](#heading--creating-an-icon) +- [Open the template](#heading--open-the-template) +- [Add colour](#heading--add-colour) +- [Draw something](#heading--draw-something) +- [Validate your icon](#validate-icon) +- [And finally... some quick Dos and Don'ts](#heading--and-finally-some-quick-dos-and-donts) + +

Icon specifications

+ + +Before we start actually making the icon though, we should be aware of the specifications required by the charm store. This is to ensure a consistent experience for the users, and icons failing to meet this spec will be rejected. + +A charm icon is an SVG format image where the canvas size is 100x100 pixels. It consists of a circle with a flat color and a logo. +It has to be saved as `icon.svg` in your charm's root directory. + +There is no specification to design the logo: it can be a white (or black) monochromatic symbol, a colored logo, or whatever is best. However, it's best to leave some padding between the edges of the circle and the logo. + +

Creating an icon

+ + +If meeting the above spec seems more complicated than creating your charm in the first place, then fear not, because we have an easy step-by-step guide for you. Before you start you will need: + +- A vector graphic editor. We strongly recommend the cross-platform and most excellent [Inkscape](http://www.inkscape.org) for all your vector graphic needs. +- [The template file.](https://assets.ubuntu.com/v1/fc0260eb-icon.svg) (right-click > Save link as...) +- An existing logo you can import, or the ability to draw one in Inkscape. + +Once you have those, fire up Inkscape and we can begin! + +

Open the template

+ + +From Inkscape load the **icon.svg** file. Select the Layer called "Background Circle", either from the drop down at the bottom, or from the layer dialog. + +![Step one](https://assets.ubuntu.com/v1/067f88a5-author-charm-icons-1.png) + +

Add colour

+ +Select **Object** and then **Fill and Stroke** from the menu to adjust the color. + +![Step two](https://assets.ubuntu.com/v1/0bff03c4-author-charm-icons-2.png) + + +

Draw something

+
+
+Draw your shape within the circle. If you already have a vector logo, you can import it and scale it within the guides. Inkscape also has plenty of drawing tools for creating complex images.
+
+If you import a bitmap image to use, be sure to convert it into a vector file and delete the bitmap.
+
+![Step four](https://assets.ubuntu.com/v1/2ef5c7f5-author-charm-icons-3.png)
+
+*Cloud icon: "Cloud by unlimicon from the Noun Project" [CC BY]*
+
+[note type=positive]
+To add the icon to the charm's Charmhub page, save it as `icon.svg`,
+place it in the root directory of the charm, and then publish the charm to `latest/stable`.
+[/note]
+
+

Validate your icon

+ +You can validate your icon at [charmhub.io/icon-validator](https://charmhub.io/icon-validator). The page checks the most basic issues that prevent icons working. + +![Icon Validator screenshot](upload://lQbz7TOCLHnq1dEBe98qxPMxWkK.png) + +

And finally... some quick Dos and Don'ts

+ +Icons should not be overly complicated. Charm icons are displayed in various sizes (from 160x160 to 32x32 pixels) and they should be always legible. In Inkscape, the ‘Icon preview’ tool can help you to check the sharpness of your icons at small sizes. + +Symbols should have a similar weight on all icons: avoid too thin strokes and use the whole space available to draw the symbol within the limits defined by the padding. However, if the symbol is much wider than it is high, it may overflow onto the horizontal padding area to ensure its weight is consistent. + +Do not use glossy materials unless they are parts of a logo that you are not allowed to modify. + +[note] +BEWARE: unless your charm has (or has had at some point) a release in the `stable` channel, the icon will not be visible. That is because charmhub only updates the metadata for a charm on stable channel releases [(by design)](https://snapcraft.io/blog/better-snap-metadata-handling-coming-your-way-soon). +So either release to `stable` and then roll it back, or wait until your charm is ready for a "stable" `stable` release. +[/note] + +------------------------- + +riccardo-magrini | 2020-04-17 19:46:16 UTC | #2 + +hi there, +I try to realize an icon as you've suggested us but after to run the build of my local charm and deploy that on Juju Gui I don't see that, is there another thing to make? thanks + +------------------------- + +timClicks | 2020-04-19 21:01:06 UTC | #3 + +Have you saved the icon as icon.svg in the root of the charm directory? + +------------------------- + +riccardo-magrini | 2020-04-20 09:19:45 UTC | #4 + +Hi @timClicks + +yes it has icon.svg format... on another post I've received this answer + +> There is an authentication limitation in how the GUI interacts with Juju around displaying icons for local charms which is probably what you’re seeing there. We won’t be addressing this in the GUI but it’s on our roadmap to resolve in the upcoming replacement for the GUI, the Juju Dashboard. + +Being that a my proof to create a charm, maybe the issue is that. + +![Screenshot%20from%202020-04-18%2000-01-47|690x514](upload://zW2eJXb840ircyTUu58qjzVlShe.png) + +------------------------- + +timClicks | 2020-04-26 20:21:00 UTC | #5 + +That looks like a scaling issue. Are you embedding a bitmap (png/json) into the SVG file? + +------------------------- + +addyess | 2021-05-12 21:25:24 UTC | #6 + +Awesome tutorial. Thank you so much + +------------------------- + +pedroleaoc | 2021-06-08 18:06:23 UTC | #7 + + + +------------------------- + +ppasotti | 2022-06-17 06:30:43 UTC | #10 + +Added a clarification that the icon won't show unless you've had a stable release with the icon. + +------------------------- + +pedroleaoc | 2022-10-14 11:30:29 UTC | #11 + + + +------------------------- + +jugmac00 | 2023-07-26 08:47:09 UTC | #12 + +Thanks a lot for this tutorial! + +The one thing I was missing is how to actually configure the charm to use it or where to put it so that charmcraft automatically uses it. + +This was especially confusing, as the existing charms we have do not have an `icon.svg` in their root, but nonetheless show one on charmhub.io. + +I did not find anything in the documentation, but at some point my colleagues mentioned that we have an `icon.svg` in a base charm which is used in all the other charms - so we have some kind of default icon. + +Would it be possible to include some information about how this all works? + +Thank you! 
+ +------------------------- + +jugmac00 | 2023-07-26 09:30:20 UTC | #13 + +It looks like this is documented here https://charm-tools.readthedocs.io/en/latest/tactics.html + +------------------------- + diff --git a/tmp/t/1051.md b/tmp/t/1051.md new file mode 100644 index 000000000..86db89117 --- /dev/null +++ b/tmp/t/1051.md @@ -0,0 +1,425 @@ +system | 2024-09-26 07:33:32 UTC | #1 + +To add integration capabilities to a charm, you’ll have to define the relation in your charm’s charmcraft.yaml file and then add relation event handlers in your charm’s `src/charm.py` file. + +> See first: [Juju | Relation (integration)](https://juju.is/docs/juju/relation), [Juju | How to manage relations](https://juju.is/docs/juju/manage-relations) + +## Implement the relation + +### Declare the relation in charmcraft.yaml + +To integrate with another charm, or with itself (to communicate with other units of the same charm), declare the required and optional relations in your charm’s `charmcraft.yaml` file. + +[note type=caution] + +**If you're using an existing interface:** + +Make sure to consult [the `charm-relations-interfaces` repository](https://github.com/canonical/charm-relation-interfaces) for guidance about how to implement them correctly. + +**If you're defining a new interface:** + +Make sure to add your interface to [the `charm-relations-interfaces` repository](https://github.com/canonical/charm-relation-interfaces). + +[/note] + +To exchange data with other units of the same charm, define one or more `peers` endpoints including an interface name for each. Each peer relation must have an endpoint, which your charm will use to refer to the relation (as [`ops.Relation.name`](https://ops.readthedocs.io/en/latest/#ops.Relation.name)). + +```yaml +peers: + replicas: + interface: charm_gossip +``` + +To exchange data with another charm, define a `provides` or `requires` endpoint including an interface name. By convention, the interface name should be unique in the ecosystem. Each relation must have an endpoint, which your charm will use to refer to the relation (as [`ops.Relation.name`](https://ops.readthedocs.io/en/latest/#ops.Relation.name)). + +```yaml +provides: + smtp: + interface: smtp +``` + +```yaml +requires: + db: + interface: postgresql + limit: 1 +``` + +Note that implementing a cross-model relation is done in the same way as one between applications in the same model. The ops library does not distinguish between data from a different model or from the same model as the one the charm is deployed to. + +Which side of the relation is the “provider” or the “requirer” is often arbitrary, but if one side has a workload that is a server and the other a client, then the server side should be the provider. This becomes important for how Juju sets up network permissions in cross-model relations. + +> See more: [File ‘charmcraft.yaml’](https://juju.is/docs/sdk/charmcraft-yaml) + +If the relation is with a subordinate charm, make sure to set the `scope` field to `container`. + +```yaml +requires: + log-forwarder: + interface: rsyslog-forwarder + scope: container +``` + +Other than this, implement a subordinate relation in the same way as any other relation. Note however that subordinate units cannot see each other’s peer data. 
+ +> See also: [Charm taxonomy](https://juju.is/docs/sdk/charm-taxonomy#heading--subordinate-charms) + +### Add code to use the integration + +[tabs] + +[tab version="Using a Charm Library"] + +For most integrations, you will now want to progress with using the charm library recommended by the charm that you are integrating with. Read the documentation for the other charm on Charmhub and follow the instructions, which will typically involve adding a requirer object in your charm’s `__init__` and then observing custom events. + +In most cases, the charm library will handle observing the Juju relation events, and your charm will only need to interact with the library’s custom API. Come back to this guide when you are ready to add tests. + +> See more: [Charmhub](https://charmhub.io) + +[/tab] + +[tab version="Implementing your own interface"] + +If you are developing your own interface - most commonly for charm-specific peer data exchange, then you will need to observe the Juju relation events and add appropriate handlers. + +#### Set up a relation + +To do initial setup work when a charm is first integrated with another charm (or, in the case of a peer relation, when a charm is first deployed) your charm will need to observe the relation-created event. For example, a charm providing a database relation might need to create the database and credentials, so that the requirer charm can use the database. In the `src/charm.py` file, in the `__init__` function of your charm, set up `relation-created` event observers for the relevant relations and pair those with an event handler. + +The name of the event to observe is combined with the name of the endpoint. With an endpoint named “db”, to observe `relation-created`, our code would look like: + +```python +framework.observe(self.on.db_relation_created, self._on_db_relation_created) +``` + +Now, in the body of the charm definition, define the event handler. In this example, if we are the leader unit, then we create a database and pass the credentials to use it to the charm on the other side via the relation data: + +```python +def _on_db_relation_created(self, event: ops.RelationCreatedEvent): + if not self.unit.is_leader(): + return + credentials = self.create_database(event.app.name) + event.relation.data[event.app].update(credentials) +``` + +The event object that is passed to the handler has a `relation` property, which contains an [`ops.Relation`](https://ops.readthedocs.io/en/latest/#ops.Relation) object. Your charm uses this object to find out about the relation (such as which units are included, in the [`.units` attribute](https://ops.readthedocs.io/en/latest/#ops.Relation.units), or whether the relation is broken, in the [`.active` attribute](https://ops.readthedocs.io/en/latest/#ops.Relation.active)) and to get and set data in the relation databag. + +> See more: [ops.RelationCreatedEvent](https://ops.readthedocs.io/en/latest/#ops.RelationCreatedEvent) + +To do additional setup work when each unit joins the relation (both when the charms are first integrated and when additional units are added to the charm), your charm will need to observe the `relation-joined` event. In the `src/charm.py` file, in the `__init__` function of your charm, set up `relation-joined` event observers for the relevant relations and pair those with an event handler. For example: + +```python +framework.observe(self.on.smtp_relation_joined, self._on_smtp_relation_joined) +``` + +Now, in the body of the charm definition, define the event handler. 
In this example, a “smtp_credentials” key is set in the unit data with the ID of a secret:
+
+```python
+def _on_smtp_relation_joined(self, event: ops.RelationJoinedEvent):
+    smtp_credentials_secret_id = self.create_smtp_user(event.unit.name)
+    event.relation.data[event.unit]["smtp_credentials"] = smtp_credentials_secret_id
+```
+
+> See more: [ops.RelationJoinedEvent](https://ops.readthedocs.io/en/latest/#ops.RelationJoinedEvent)
+
+#### Exchange data with other units
+
+To use data received through the relation, have your charm observe the `relation-changed` event. In the `src/charm.py` file, in the `__init__` function of your charm, set up `relation-changed` event observers for each of the defined relations. For example:
+
+```python
+framework.observe(self.on.replicas_relation_changed, self._update_configuration)
+```
+
+> See more: [ops.RelationChangedEvent](https://ops.readthedocs.io/en/latest/#ops.RelationChangedEvent), [`<relation name>`-relation-changed event](https://discourse.charmhub.io/t/relation-name-relation-changed-event/6475), [Juju | Relation (integration)](https://juju.is/docs/juju/relation#heading--permissions-around-relation-databags)
+
+Most of the time, you should use the same holistic handler as when receiving other data, such as `secret-changed` and `config-changed`. To access the relation(s) in your holistic handler, use the [`ops.Model.get_relation`](https://ops.readthedocs.io/en/latest/#ops.Model.get_relation) method or [`ops.Model.relations`](https://ops.readthedocs.io/en/latest/#ops.Model.relations) attribute.
+
+> See also: [Juju | Holistic vs Delta Charms](https://juju.is/docs/sdk/holistic-vs-delta-charms)
+
+If your charm will have at most one relation on the endpoint, to get the `Relation` object use `Model.get_relation`; for example:
+
+```python
+rel = self.model.get_relation("db")
+if not rel:
+    # Handle the case where the relation does not yet exist.
+    ...
+```
+
+If your charm may have multiple relations on the endpoint, to get the relation objects use `Model.relations` rather than `Model.get_relation` with the relation ID; for example:
+
+```python
+for rel in self.model.relations.get('smtp', ()):
+    # Do something with the relation object.
+    ...
+```
+
+Once your charm has the relation object, it can be used in exactly the same way as when received from an event.
+
+Now, in the body of the charm definition, define the holistic event handler. In this example, we check whether the relation exists yet and whether a secret ID has been provided in the relation data; if we have both, we push the credentials into the workload configuration:
+
+```python
+def _update_configuration(self, _: ops.EventBase):
+    # This handles secret-changed and relation-changed.
+    db_relation = self.model.get_relation('db')
+    if not db_relation:
+        # We’re not integrated with the database charm yet.
+        return
+    secret_id = db_relation.data[self.model.app]['credentials']
+    if not secret_id:
+        # The credentials haven’t been added to the relation by the remote app yet.
+        return
+    secret_contents = self.model.get_secret(id=secret_id).get_content(refresh=True)
+    self.push_configuration(
+        username=secret_contents['username'],
+        password=secret_contents['password'],
+    )
+```
+
+#### Exchange data across the various relations
+
+To add data to the relation databag, use the [`.data` attribute](https://ops.readthedocs.io/en/latest/#ops.Relation.data) much as you would a dictionary, after selecting whether to write to the app databag (leaders only) or unit databag.
For example, to copy a value from the charm config to the relation data:
+
+```python
+def _on_config_changed(self, event: ops.ConfigChangedEvent):
+    if relation := self.model.get_relation('ingress'):
+        relation.data[self.app]["domain"] = self.model.config["domain"]
+```
+
+To read data from the relation databag, again use the `.data` attribute, selecting the appropriate databag, and then using it as if it were a regular dictionary.
+
+The charm can inspect the contents of the remote unit databags:
+
+```python
+def _on_database_relation_changed(self, event: ops.RelationChangedEvent):
+    remote_units_databags = {
+        event.relation.data[unit] for unit in event.relation.units if unit.app is not self.app
+    }
+```
+
+Or the peer unit databags:
+
+```python
+def _on_database_relation_changed(self, event: ops.RelationChangedEvent):
+    peer_units_databags = {
+        event.relation.data[unit] for unit in event.relation.units if unit.app is self.app
+    }
+```
+
+Or the remote leader databag:
+
+```python
+def _on_database_relation_changed(self, event: ops.RelationChangedEvent):
+    remote_app_databag = event.relation.data[event.relation.app]
+```
+
+Or the local application databag:
+
+```python
+def _on_database_relation_changed(self, event: ops.RelationChangedEvent):
+    local_app_databag = event.relation.data[self.app]
+```
+
+Or the local unit databag:
+
+```python
+def _on_database_relation_changed(self, event: ops.RelationChangedEvent):
+    local_unit_databag = event.relation.data[self.unit]
+```
+
+If the charm does not have permission to do an operation (e.g. because it is not the leader unit), an exception will be raised.
+
+#### Clean up when a relation is removed
+
+To do clean-up work when a unit in the relation is removed (for example, removing per-unit credentials), have your charm observe the `relation-departed` event. In the `src/charm.py` file, in the `__init__` function of your charm, set up `relation-departed` event observers for the relevant relations and pair those with an event handler. For example:
+
+```python
+framework.observe(self.on.smtp_relation_departed, self._on_smtp_relation_departed)
+```
+
+Now, in the body of the charm definition, define the event handler. For example:
+
+```python
+def _on_smtp_relation_departed(self, event: ops.RelationDepartedEvent):
+    if self.unit != event.departing_unit:
+        self.remove_smtp_user(event.unit.name)
+```
+
+> See more: [ops.RelationDepartedEvent](https://ops.readthedocs.io/en/latest/#ops.RelationDepartedEvent)
+
+To clean up after a relation is entirely removed, have your charm observe the `relation-broken` event. In the `src/charm.py` file, in the `__init__` function of your charm, set up `relation-broken` event observers for the relevant relations and pair those with an event handler. For example:
+
+```python
+framework.observe(self.on.db_relation_broken, self._on_db_relation_broken)
+```
+
+Now, in the body of the charm definition, define the event handler. For example:
+
+```python
+def _on_db_relation_broken(self, event: ops.RelationBrokenEvent):
+    if not self.unit.is_leader():
+        return
+    self.drop_database(event.app.name)
+```
+
+> See more: [ops.RelationBrokenEvent](https://ops.readthedocs.io/en/latest/#ops.RelationBrokenEvent)
+
+[/tab]
+
+[/tabs]
+
+## Test the relation
+
+### Write unit tests
+
+To write unit tests covering your charm’s behaviour when working with relations, in your `unit/test_charm.py` file, create a `Harness` object and use it to simulate adding and removing relations, or the remote app providing data.
For example:
+
+```python
+import pytest
+from ops import testing
+
+from charm import MyCharm  # the charm class under test (src/charm.py)
+
+
+@pytest.fixture()
+def harness():
+    harness = testing.Harness(MyCharm)
+    yield harness
+    harness.cleanup()
+
+def test_new_smtp_relation(harness):
+    # Before the test begins, we have integrated a remote app
+    # with this charm, so we call add_relation() before begin().
+    relation_id = harness.add_relation('smtp', 'consumer_app')
+    harness.begin()
+    # For the test, we simulate a unit joining the relation.
+    harness.add_relation_unit(relation_id, 'consumer_app/0')
+    assert 'smtp_credentials' in harness.get_relation_data(relation_id, 'consumer_app/0')
+
+def test_db_relation_broken(harness):
+    relation_id = harness.add_relation('db', 'postgresql')
+    harness.begin()
+    harness.remove_relation(relation_id)
+    assert harness.charm.get_db() is None
+
+def test_receive_db_credentials(harness):
+    relation_id = harness.add_relation('db', 'postgresql')
+    harness.begin()
+    harness.update_relation_data(relation_id, 'postgresql', {'credentials-id': 'secret:xxx'})
+    assert harness.charm.db_tables_created()
+```
+
+> See more: [ops.testing.Harness](https://ops.readthedocs.io/en/latest/harness.html#ops.testing.Harness)
+
+### Write scenario tests
+
+For each relation event that your charm observes, write at least one Scenario test. Create a `Relation` object that defines the relation, include that in the input state, run the relation event, and assert that the output state is what you’d expect. For example:
+
+```python
+import scenario
+
+ctx = scenario.Context(MyCharm)
+relation = scenario.Relation(id=1, endpoint='smtp', remote_units_data={1: {}})
+state_in = scenario.State(relations=[relation])
+state_out = ctx.run(relation.joined_event(remote_unit_id=1), state=state_in)
+assert 'smtp_credentials' in state_out.relations[0].remote_units_data[1]
+```
+
+> See more: [Scenario Relations](https://github.com/canonical/ops-scenario/#relations)
+
+### Write integration tests
+
+Write an integration test that verifies that your charm behaves as expected in a real Juju environment. Unless you are testing a peer relation, the test needs to deploy a second application alongside your own charm, either the real charm or a facade that provides enough functionality to test against.
+
+The pytest-operator plugin provides methods to deploy multiple charms. For example:
+
+```python
+# This assumes that your integration tests already include the standard
+# build and deploy test that the charmcraft profile provides.
+
+@pytest.mark.abort_on_fail
+async def test_active_when_deploy_db_facade(ops_test: OpsTest):
+    await ops_test.model.deploy('facade')
+    await ops_test.model.integrate(APP_NAME + ':postgresql', 'facade:provide-postgresql')
+
+    present_facade('postgresql', model=ops_test.model_name,
+                   app_data={
+                       'credentials': 'secret:abc',
+                   })
+
+    await ops_test.model.wait_for_idle(
+        apps=[APP_NAME],
+        status='active',
+        timeout=600,
+    )
+```
+
+> See more: [pytest-operator](https://pypi.org/project/pytest-operator/)
+
+> Contributors: @benhoyt, @florezal, @ghibourg, @jameinel, @jnsgruk, @ppasotti, @rbarry, @rwcarlsen, @sed-i, @tony-meyer, @tmihoc, @toto
+
+-------------------------
+
+erik-lonroth | 2020-02-10 06:10:32 UTC | #2
+
+[quote="system, post:1, topic:1051"]
+Below is a list of the interfaces for which we have compiled documentation and reference implementations:
+[/quote]
+
+@timClicks is the only documented interface mysql?
+
+-------------------------
+
+timClicks | 2020-02-10 18:24:28 UTC | #3
+
+This is old documentation.
My understanding is the Juju team (at the time) decided to move away from documenting interfaces, so that each layer would be the only implementation. I've made a few small steps to change that with the #docs:interfaces category. + +https://discourse.jujucharms.com/t/interface-http/2392 + +https://discourse.jujucharms.com/t/interface-tls-certificates/2395 + +https://discourse.jujucharms.com/t/interface-pgsql/2393 + +@zicklag has also contributed an interface for communicating with RethinkDB: + +https://discourse.jujucharms.com/t/interface-reql/2569 + +------------------------- + +facundo | 2020-03-20 18:56:09 UTC | #4 + +[quote="system, post:1, topic:1051"] +and `relation- set` to pass information ba +[/quote] + +Typo: extra space in 'relation-set' + +------------------------- + +timClicks | 2020-03-21 02:06:37 UTC | #5 + +Fixed. Thanks :slight_smile: + +------------------------- + +emcp | 2021-07-17 19:03:25 UTC | #6 + +is it out of date to think of adding an interface for apache thrift ? should I just use layers and relate the IP's and Ports ? + +https://thrift.apache.org/ + +------------------------- + +erik-lonroth | 2021-07-19 21:08:21 UTC | #7 + +It depends if your services is meant to be able to run on different vms/containers - or - would be considered an integrated part of your core service. + +It's not easy to know always how much or little to manage with a single charm. It's YOUR decision as a developer really to decide. + +I normally refractor as I learn more about my every day operations of my service. + +I might start with less components and increase as needed. Or allow for different scenarios using the charm I develop. + +------------------------- + +pedroleaoc | 2022-04-07 09:24:50 UTC | #8 + + + +------------------------- + +pedroleaoc | 2022-10-14 11:31:50 UTC | #9 + + + +------------------------- + diff --git a/tmp/t/1058.md b/tmp/t/1058.md new file mode 100644 index 000000000..d005bdec6 --- /dev/null +++ b/tmp/t/1058.md @@ -0,0 +1,176 @@ +system | 2024-03-21 14:40:56 UTC | #1 + +> See also: [Bundle](/t/5645) + +**Contents:** + +- [Create a bundle](#heading--create-a-bundle) +- [Pack a bundle](#heading--pack-a-bundle) +- [Publish a bundle on Charmhub](#heading--publish-a-bundle-on-charmhub) + + +

Create a bundle

+ + +To create a bundle, create a `.yaml` file with your desired configuration. + +[note type=positive] +If you don't want to start from scratch, export the contents of your model to a `.yaml` file via `juju export-bundle --filename .yaml` or download the `.yaml` of an existing bundle from Charmhub. + +> See more: [Juju | How to compare and export the contents of a model to a bundle](https://juju.is/docs/juju/manage-models#heading--compare-and-export-the-contents-of-a-model-to-a-bundle) + +[/note] + +> See more: [File `.yaml`](/t/5679) + +
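+
+As a point of reference, a minimal `bundle.yaml` might look like the sketch below. The charm names, channels, and endpoint names are purely illustrative; substitute the applications you actually want to deploy:
+
+```yaml
+# bundle.yaml: a minimal two-application bundle (illustrative names)
+applications:
+  wordpress:
+    charm: wordpress
+    num_units: 1
+  mysql:
+    charm: mysql
+    channel: 8.0/stable
+    num_units: 1
+relations:
+  - - wordpress:db
+    - mysql:db
+```
+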

Pack a bundle

+ +To pack a bundle, in the directory where you have your `bundle.yaml` file (and possibly other files, e.g., a `README.md` file), create a `charmcraft.yaml` file suitable for a bundle (at the minimum: `type: bundle`), then run `charmcraft pack` to pack the bundle. The result is a `.zip` file. + +> See more: [`charmcraft pack`](/t/6129) + + +
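+
+Concretely, the minimal `charmcraft.yaml` mentioned above is just:
+
+```yaml
+# charmcraft.yaml, placed in the same directory as bundle.yaml
+type: bundle
+```
+
+With that file in place, running `charmcraft pack` from the same directory produces the bundle's `.zip` archive.
+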

Publish a bundle on Charmhub

+ +The process is identical to that for a simple charm except that, at the step where you register the name, for bundles the command is `register-bundle`. + + +> See more: [How to publish a charm on Charmhub](/t/4462) + +------------------------- + +jlosito | 2020-03-18 15:09:38 UTC | #2 + +This page makes use of both the services and applications key. I think the services key is deprecated from what I understand. Should the instances where the services key is used be changed to applications just to be consistent? + +------------------------- + +thumper | 2020-03-31 04:57:36 UTC | #3 + +Yes it should. Applications is the correct key to use. + +------------------------- + +jlosito | 2020-03-31 18:04:59 UTC | #4 + +Made the change from services to applications. + +------------------------- + +hloeung | 2020-06-19 05:32:32 UTC | #5 + +[quote="system, post:1, topic:1058"] +storage: database: 20M,mariadb-pv +[/quote] + +Is this right? Or should it be **mariadb-pv,20M** (so pool,size)? + +------------------------- + +timClicks | 2020-06-19 05:36:30 UTC | #6 + +Yes the pool should be first + +------------------------- + +aurelien-lourot | 2021-10-08 13:58:20 UTC | #8 + +Discussed with @jameinel and @simonrichardson: since Juju 2.9 it is necessary to set `arch=amd64` on the applications too, not only on the machines, otherwise deploying on lxd containers will fail with + +``` +0/lxd/3 down pending focal need agent binaries for arch arm64, only found [amd64] +``` + +This is because by not having the architecture specified on the application, juju will by default think we are deploying amd64 applications. I believe this has to do with architecture-dependent charms on the new charmhub and I heard this is mentioned in Juju 2.9's release notes. This should be reflected in these examples. Thanks! + +------------------------- + +jameinel | 2021-10-08 13:59:43 UTC | #9 + +You might be able to set a model constraint and thus have things deployed into that model default to the architecture you want. +`juju set-model-constraints arch=s390x` is the sort of thing you should be able to specify. + +------------------------- + +heitor | 2022-02-11 18:52:37 UTC | #10 + +Is it possible to specify a file with configuration options for a charm when using bundles? + +For example: + +```yaml +applications: + mediawiki: + charm: "cs:mediawiki-5" + num_units: 1 + options: ./mediawiki.config +``` + +Should be equivalent to `juju deploy cs:mediawiki-5 --config ./mediawiki.config`. + +Is this possible in a bundle? + +------------------------- + +jameinel | 2022-03-10 19:20:28 UTC | #11 + +Not with that exact syntax. I believe you can use 'include-file://' and/or 'include-base64://' +We have a test case for that here: +https://github.com/juju/juju/blob/5c2de081e2d217c5142bd04feff88524288b1a81/cmd/juju/application/bundle/bundle_test.go#L241-L257 + +Now I believe it essentially just includes the file as though you had written the YAML content directly. So I believe it supports objects, but it might be that it only supports explicit keys. (eg the value for a key can be the contents of an included file) + +------------------------- + +heitor | 2022-03-11 14:43:54 UTC | #12 + +This is very handy! I will test that! + +edit: +Unfortunately it is quite limited to reading strings only. 
This totally crashed: +``` +$ cat bundle.yaml +applications: + grafana: + charm: grafana + options: + port: include-file://config + +$ cat config +1234 + +$ juju deploy --verbose ./bundle.yaml +Executing changes: +- upload charm grafana from charm-hub with architecture=amd64 +- set application options for grafana + setting options: + port: "1234\n" +Deploy of bundle completed. +``` + +`grafana` was expecting an int but got a string with a `\n` in the end. + +Should I create a bug report for this? + +Also, I tried using the `include-file` in the `options: include-file://config` but that gave me an error: +``` +ERROR cannot deploy bundle: cannot unmarshal bundle contents: unmarshal document 0: yaml: unmarshal errors: + line 3: cannot unmarshal !!str `include...` into map[string]interface {} +``` + +Opened https://bugs.launchpad.net/juju/+bug/1964616 to track this :slight_smile: + +------------------------- + +pedroleaoc | 2022-04-07 08:32:23 UTC | #13 + + + +------------------------- + +pedroleaoc | 2022-10-14 11:31:19 UTC | #14 + + + +------------------------- + diff --git a/tmp/t/10583.md b/tmp/t/10583.md new file mode 100644 index 000000000..51d4ae6e5 --- /dev/null +++ b/tmp/t/10583.md @@ -0,0 +1,96 @@ +ppasotti | 2023-10-04 01:47:06 UTC | #1 + +> Source: [GitHub](https://github.com/canonical/ops-scenario) +> +> See also: [How to write a functional test with Scenario](/t/10585) + + +Scenario (`scenario`) is a testing framework for charms written with [Ops (`ops`)](/t/5527). It excels at **functional** testing of charm code, making it ideal to write: + +- **state-transition** tests +- **contract** tests + +The core idea of Scenario is that a charm is best conceived of as an opaque input-output function, where: +- the input is a monolithic data structure called [State](/t/10621), representing the data that Juju makes available to the charm (as represented by the [Context](/t/10622) object) at runtime +- the output is the State after the charm has been triggered to execute by some [Event](/t/10623) and has had a chance to interact with the input State. + +![image|690x384](upload://4neRbaEchNEyQroSuSfm0q9UFmB.png) + +Scenario tests are written by declaring an initial state, then running the charm context with that state and an event as parameters to obtain the output state. You can then write assertions to verify that the output state looks like you expect it to; e.g. that a databag has been updated to contain a certain piece of data, or that the application status has been set to `blocked`, etc... + + +# Example + +An example scenario test using [`Relation`](/t/10653) might look like: + +```python +import ops + +from scenario import Relation, State, Context + + +# This charm copies over remote app data to local unit data +class MyCharm(ops.CharmBase): + ... 
+ + def _on_event(self, e): + rel = e.relation + assert rel.app.name == 'remote' + assert rel.data[self.unit]['abc'] == 'foo' + rel.data[self.unit]['abc'] = rel.data[e.app]['cde'] + + +def test_relation_data(): + # use scenario.Context to declare what charm we are testing + ctx = Context(MyCharm, + meta={"name": "foo"}) + + # ARRANGE: declare the input state + state_in = State(relations=[ + Relation( + endpoint="foo", + interface="bar", + remote_app_name="remote", + local_unit_data={"abc": "foo"}, + remote_app_data={"cde": "baz!"}, + ), + ]) + + # ACT: run an event in the context and pass the input state to it + state_out = ctx.run('start', state_in) + + # ASSERT that the output state looks the way you expect it to + assert state_out.relations[0].local_unit_data == {"abc": "baz!"} + # you can directly compare State data structures to assert a certain delta + assert state_out.relations == [ + Relation( + endpoint="foo", + interface="bar", + remote_app_name="remote", + local_unit_data={"abc": "baz!"}, + remote_app_data={"cde": "baz!"}, + ), + ] +``` + +------------------------- + +benhoyt | 2023-05-15 22:32:43 UTC | #2 + +This looks like a good start! I wonder about the term "functional testing", though, as that term usually denotes more system-level tests. ("Unit testing" isn't quite the right fit, either -- hmmm, I'm not sure.) + +There are several very short pages here, so it seemed like lots of jumping around. IMO this guide would be better as a single page doc. + +------------------------- + +ppasotti | 2023-05-16 06:22:03 UTC | #3 + +This is a reference, so each page is meant to define and highlight a single concept. The operator pages are the same. Besides, this leaves room to add more content :) + +About the term 'functional testing', I think @gruyaume or @gruyaume made a good point during the last sprint: +- unit tests exercise a single function or piece of logic +- scenario tests are more end-to-end in that they simulate a single juju event which might mean multiple event handlers are called, custom events are emitted, etc... so they're definitely not **unit**. +- scenario tests are black-box (with some exceptions): state in, state out, check that the state out is what you want it to be. which is [what functional tests do](https://g.co/kgs/dfBv1K) + +------------------------- + diff --git a/tmp/t/10585.md b/tmp/t/10585.md new file mode 100644 index 000000000..290bd10ef --- /dev/null +++ b/tmp/t/10585.md @@ -0,0 +1,81 @@ +ppasotti | 2024-09-13 04:28:55 UTC | #1 + +> See also: [Scenario (`scenario`)](/t/10583) + +First of all, install scenario + +`pip install ops-scenario` + +Then, open a new `test_foo.py` file where you will put the test code. + +```python +# import the necessary objects from scenario and ops +from scenario import State, Context +import ops +``` +> See more: [State (Scenario)](/t/10621). + +Then declare a new charm type: +```python +class MyCharm(ops.CharmBase): + pass +``` +And finally we can write a test function. The test code should use a Context object to encapsulate the charm type being tested (`MyCharm`) and any necessary metadata, then declare the initial `State` the charm will be presented when run, and `run` the context with an `event` and that initial state as parameters. 
+In code: + +```python +def test_charm_runs(): + # arrange: + # create a Context to specify what code we will be running + ctx = Context(MyCharm, meta={'name': 'my-charm'}) + # and create a State to specify what simulated data the charm being run will access + state_in = State() + # act: + # ask the context to run an event, e.g. 'start', with the state we have previously created + state_out = ctx.run(ctx.on.start(), state_in) + # assert: + # verify that the output state looks like you expect it to + assert state_out.status.unit.name == 'unknown' +``` + +> See more: +> - [State](/t/10621) +> - [Context](/t/10622) +> - [Event](/t/10623) + +[note] +If you like using unittest, you should rewrite this as a method of some TestCase subclass. +[/note] + +# Mocking beyond the State + +If you wish to use Scenario to test an existing charm type, you will probably need to mock out certain calls that are not covered by the [State](/t/10621) data structure. +In that case, you will have to manually mock, patch or otherwise simulate those calls on top of what Scenario does for you. + +For example, suppose that the charm we're testing uses the `KubernetesServicePatch`. To update the test above to mock that object, modify the test file to contain: + +```python +import pytest +from unittest import patch + +@pytest.fixture +def my_charm(): + with patch("charm.KubernetesServicePatch"): + yield MyCharm +``` + +Then you should rewrite the test to pass the patched charm type to the Context, instead of the unpatched one. In code: +```python +def test_charm_runs(my_charm): + # arrange: + # create a Context to specify what code we will be running + ctx = Context(my_charm, meta={'name': 'my-charm'}) + # ... +``` + +[note] +If you use pytest, you should put the `my_charm` fixture in a toplevel `conftest.py`, as it will likely be shared between all your scenario tests. +[/note] + +------------------------- + diff --git a/tmp/t/10586.md b/tmp/t/10586.md new file mode 100644 index 000000000..43e45e562 --- /dev/null +++ b/tmp/t/10586.md @@ -0,0 +1,174 @@ +ppasotti | 2023-10-04 07:56:40 UTC | #1 + +In this guide we will go through how to write Scenario tests for a charm library we are developing: + +`/lib/charms/my_charm/v0/my_lib.py` + +The intended behaviour of this library (requirer side) is to copy data from the provider app databags and collate it in the own application databag. +The requirer side library does not interact with any lifecycle event; it only listens to relation events. + +# Setup + +Assuming you have a library file already set up and ready to go (see `charmcraft create-lib` otherwise), you now need to + +`pip install ops-scenario` and create a test file in `/tests/scenario/test_my_lib.py` + + +# Base test + +```python +# `/tests/scenario/test_my_lib.py` +import pytest +import ops +from scenario import Context, State +from lib.charms.my_Charm.v0.my_lib import MyObject + +class MyTestCharm(ops.CharmBase): + META = { + "name": "my-charm" + } + def __init__(self, framework): + super().__init__(framework) + self.obj = MyObject(self) + framework.observe(self.on.start, self._on_start) + + def _on_start(self, _): + pass + + +@pytest.fixture +def context(): + return Context(MyTestCharm, meta=MyTestCharm.META) + +@pytest.mark.parametrize('event', ( + 'start', 'install', 'stop', 'remove', 'update-status', #... 
+)) +def test_charm_runs(context, event): + """Verify that MyObject can initialize and process any event except relation events.""" + # arrange + state_in = State() + # act + context.run(event, state_in) +``` + +# Simple use cases + +## Relation endpoint wrapper lib + +If `MyObject` is a relation endpoint wrapper such as [`traefik's ingress-per-unit`](https://github.com/canonical/traefik-k8s-operator/blob/main/lib/charms/traefik_k8s/v1/ingress_per_unit.py) lib, +a frequent pattern is to allow customizing the name of the endpoint that the object is wrapping. We can write a scenario test like so: + +```python +# `/tests/scenario/test_my_lib.py` +import pytest +import ops +from scenario import Context, State, Relation +from lib.charms.my_Charm.v0.my_lib import MyObject + + +@pytest.fixture(params=["foo", "bar"]) +def endpoint(request): + return request.param + + +@pytest.fixture +def my_charm_type(endpoint): + class MyTestCharm(ops.CharmBase): + META = { + "name": "my-charm", + "requires": + {endpoint: {"interface": "my_interface"}} + } + + def __init__(self, framework): + super().__init__(framework) + self.obj = MyObject(self, endpoint=endpoint) + framework.observe(self.on.start, self._on_start) + + def _on_start(self, _): + pass + + return MyTestCharm + + +@pytest.fixture +def context(my_charm_type): + return Context(my_charm_type, meta=my_charm_type.META) + + +def test_charm_runs(context): + """Verify that the charm executes regardless of how we name the requirer endpoint.""" + # arrange + state_in = State() + # act + context.run('start', state_in) + + +@pytest.mark.parametrize('n_relations', (1, 2, 7)) +def test_charm_runs_with_relations(context, endpoint, n_relations): + """Verify that the charm executes when there are one or more relations on the endpoint.""" + # arrange + state_in = State(relations=[ + Relation(endpoint=endpoint, interface='my-interface', remote_app_name=f"remote_{n}") for n in range(n_relations) + ]) + # act + state_out = context.run('start', state_in) + # assert + for relation in state_out.relations: + assert not relation.local_app_data # remote side didn't publish any data. + + +@pytest.mark.parametrize('n_relations', (1, 2, 7)) +def test_relation_changed_behaviour(context, endpoint, n_relations): + """Verify that the charm lib does what it should on relation changed.""" + # arrange + relations = [Relation( + endpoint=endpoint, interface='my-interface', remote_app_name=f"remote_{n}", + remote_app_data={"foo": f"my-data-{n}"} + ) for n in range(n_relations)] + state_in = State(relations=relations) + # act + state_out: State = context.run(relations[0].changed_event, state_in) + # assert + for relation in state_out.relations: + assert relation.local_app_data == {"collation": ';'.join(f"my-data-{n}" for n in range(n_relations))} +``` + +# Advanced use cases + +## Testing internal (charm-facing) library APIs + +Suppose that `MyObject` has a `data` method that exposes to the charm a list containing the remote databag contents (the `my-data-N` we have seen above). 
+We can use `scenario.Context.manager` to run code within the lifetime of the Context like so: + +```python +import pytest +import ops +from scenario import Context, State, Relation +from lib.charms.my_Charm.v0.my_lib import MyObject + +@pytest.mark.parametrize('n_relations', (1, 2, 7)) +def test_my_object_data(context, endpoint, n_relations): + """Verify that the charm lib does what it should on relation changed.""" + # arrange + relations = [Relation( + endpoint=endpoint, interface='my-interface', remote_app_name=f"remote_{n}", + remote_app_data={"foo": f"my-data-{n}"} + ) for n in range(n_relations)] + state_in = State(relations=relations) + + with context.manager(relations[0].changed_event, state_in) as mgr: + # act + state_out = mgr.run() # this will emit the event on the charm + # control is handed back to us before ops is torn down + + # assert + charm = mgr.charm # the MyTestCharm instance ops is working with + obj: MyObject = charm.obj + assert obj.data == [ + f"my-data-{n}" for n in range(n_relations) + ] +``` + +------------------------- + diff --git a/tmp/t/10621.md b/tmp/t/10621.md new file mode 100644 index 000000000..cbec647ad --- /dev/null +++ b/tmp/t/10621.md @@ -0,0 +1,47 @@ +ppasotti | 2023-06-29 08:02:25 UTC | #1 + +> [Scenario](/t/10583) > State + + +In the context of Scenario, State refers to a monolithic data structure representing all data that is available to a charm at runtime. +Simplifying somewhat, that information can be categorized depending on its source: +- data coming from the juju controller and mediated by hook tools +- data coming from the locally stored unit state (the unit state db) +- data coming from environment variables +- data coming from the workload and mediated by Pebble calls in the case of kubernetes charms + +The State encapsulates all these types of data in a unified, immutable data structure. + +More in detail, the information contained in State can be summarised as follows: in this table you can find +- the name of the State component +- its type in Python (and a link to the reference doc, where available) +- the source of the data at runtime for that component, i.e. what part of the charm runtime this data is meant to mock +- whether the data is writeable by the charm during hook execution + + +| State component | type | source | write | +|------------------|:------|:--------------------------------------------------------------------------------:|------:| +| `config` | Dict | **hook tool(s):** `config-get` | no | +| `relations`| List[[Relation](/t/10653)] | **hook tool(s):** `relation-[get\|set\|list\|ids]` | no* | +| `networks`| List[Network] | **hook tool(s):** `network-get` | no | +| `unit_status`| StatusBase | **hook tool(s):** `status-[get\|set]` | yes | +| `app_status`| StatusBase | **hook tool(s):** `status-[get\|set]` | yes | +| `workload_version`| str | **hook tool(s):** `workload-version-set` | yes | +| `leader`| bool | **hook tool(s):** `is-leader` | no | +| `secrets` | List[[`Secret`](/t/10662)] | **hook tool(s):** `secret-[get\|set\|grant\|revoke\|add\|ids\|info-get\|remove]` | yes* | +| `deferred` | List[[DeferredEvent](/t/10656)] | **local unit state db** | yes | +| `stored_state` | List[[StoredState](/t/10657)] | **local unit state db** | yes | +| `model` | Model | **envvars** | no | +| `unit_id` | int | **envvars** | no | +| `containers` | List[[Container](/t/10654)] | **(kubernetes only:) Pebble API** | yes | + +[note] +All data is readable according to the Juju access control rules: e.g. 
a follower unit cannot read its leader's unit databag, nor write any databag other than its own. +[/note] + +> See more: +> - [hook tools](https://juju.is/docs/sdk/hook-tool) +> - [pebble](https://juju.is/docs/sdk/pebble) + +------------------------- + diff --git a/tmp/t/10622.md b/tmp/t/10622.md new file mode 100644 index 000000000..9f5fef256 --- /dev/null +++ b/tmp/t/10622.md @@ -0,0 +1,113 @@ +ppasotti | 2024-04-25 14:05:21 UTC | #1 + +> [Scenario](/t/10583) > Context + +In Scenario, the `Context` object represents the charm code and all of the metadata that is associated with it. + +Scenario is about simulating events on charms. + +The Context encapsulates: +- what charm type should be instantiated and passed to `ops.main` when the event emission is simulated +- what is in the charm's `charmcraft.yaml` / `metadata.yaml` + `actions.yaml` + `config.yaml` +- what is the charm execution root (a temporary directory and its contents when the charm is executed) +- the 'side-effects' of a charm execution: + - any `stdout/stderr` output that gets pushed in the `juju-log` +- useful historical data about the intermediate steps of the state transition. These allow you peek into the charm execution black-box and write assertions about the path taken by the charm on its way to the 'final' state. + - `unit_status_history: List[StatusBase]` + - `app_status_history: List[StatusBase]` + - `workload_version_history: List[str]` +- the events emitted on the charm. + +# Basic Usage + +```python +from scenario import Context, State +from charm import MyCharm + +ctx = Context(charm_type=MyCharm) +state_out: State = ctx.run('start', State()) +``` + +## Advanced usage + +By default, `Context` will attempt to automatically load the charm metadata from the filesystem position of the module containing the charm type you pass to it. +If you need to customize any of the metadata, you can override this behaviour: + +```python +from scenario import Context +from charm import MyCharm + +ctx = Context(charm_type=MyCharm, meta={"requires": {"foo": {"interface": "bar"}}}) +``` + +## Assertions on outputs and side effects + +The `Context.run()` API is as black-box as it gets: pass in a state, get out another state. +Charm testing however requires at times to inspect the execution flow of the charm. This means being able to introspect transient states such as status changes that are later discarded (a charm sets `maintenance` and then `active`: `active` will be part of the final state, but `maintenance` would be lost). + +Also consider logging output: that data is passed to Juju via the `debug-log` hook tool but is write-only: a charm cannot retrieve the logs it had emitted before. But it can be useful to write testing code to validate debugging output. + +Finally, it is often useful to inspect the execution path a charm took by examining what custom events a charm emitted on itself while executing the 'toplevel' Juju event. + +`Context` captures all these transient 'side effects' of the charm execution. +These are not returned by the `.run()` call, as they do not belong in the `State`. Instead, they are attached to the `Context` itself. 
+ +```python +import ops +from scenario import Context, State +from charm import MyCharm, MyCustomEventType + +ctx = Context(charm_type=MyCharm) +ctx.run('start', State()) + +# assert the charm set these statuses: +assert ctx.unit_status_history[0] == ops.MaintenanceStatus('starting...') +assert ctx.unit_status_history[1] == ops.MaintenanceStatus('workload coming up...') +assert ctx.unit_status_history[2] == ops.ActiveStatus('') + +# assert the contents of the juju-log stack +assert ctx.juju_log[-1] == "setting active status" + +# assert certain (custom) events have been emitted on the charm +assert isinstance(ctx.emitted_events[0], StartEvent) +assert isinstance(ctx.emitted_events[3], MyCustomEventType) +``` + +[note] +These data structures are **not** automatically cleared! If you `.run()` multiple events in a sequence, they will keep adding up. You can call `Context.clear()` to clear all histories and emitted events. +[/note] + + +# White-box testing with Scenario + +When you call `Context.run()`, Scenario will set up the operator framework, emit the event, and tear everything down again. +This means that there is no easy way to get a hold of the charm instance to run tests against it. +In order to support this use case, the `Context` object exposes some API to give you temporarily back control of the framework before the event is emitted. + +```python +from scenario import State, Context +state_in = State( + config={'foo': 'bar'}, + unit_status=BlockedStatus("config not good")) +# suppose the charm is in blocked status because some previous state contained an invalid config. +# the config foo=bar *now* is correct, but the charm hasn't been notified of the config change yet. + +with Context(MyCharm).manager("config-changed", state_in) as mgr: + # if you get mgr.charm now, you will obtain the charm instance before the event has been emitted. So if you inspect the state, it will look exactly as our initial state_in. + charm = mgr.charm + assert charm.unit.status == BlockedStatus("config not good") + # you can also make calls on internal APIs + assert charm._check_config() is False + + # this emits the event + state_out = mgr.run() + + # now the framework has emitted the event and the charm has executed all observers, so the state has changed + assert charm.unit.status == ActiveStatus("config good") + +# you can also run your assertions on the output state as usual +assert state_out.unit_status == ActiveStatus("config good") +``` + +------------------------- + diff --git a/tmp/t/10623.md b/tmp/t/10623.md new file mode 100644 index 000000000..d44070750 --- /dev/null +++ b/tmp/t/10623.md @@ -0,0 +1,105 @@ +ppasotti | 2023-05-17 11:28:34 UTC | #1 + +> [Scenario](/t/10583) > Event + +In Scenario, the `Event` object corresponds to the homonym Juju data structure -- a Juju event. + +> See more: [Event (in Juju)](/t/6361) + +Like a Juju event, the `scenario.Event` data structure encapsulates some environment variables that the charm is being executed with and roughly represents the "reason why" the charm is being executed. + +The fundamental piece of information carried by an Event is therefore the name of the 'hook' that the Juju controller has decided should be executed by this charm. + +# Basic usage + +In many situations, the name of the event is enough for Scenario to work. 
+ +Look at the code snippet below: + +```python +from scenario import Context, State +from charm import MyCharm + +ctx = Context(MyCharm) +ctx.run(event="update-status", state=State()) +``` + +`Context.run` will first cast the string `'update-status'` to an `Event` data structure. The following code is therefore equivalent: + +```python +from scenario import Event +... +ctx.run(event=Event("update-status"), state=State()) +``` + +# Event metadata: generic usage +Depending on the hook, certain additional environment variables need to be set for `ops ` to function, just like the Juju unit agent guarantees them to be present when the charm is run for real. + +[note] +For example, [relation events](/t/6498) require a `JUJU_RELATION_ID` envvar to be set. +[/note] + +In those situations, you cannot simply use strings to refer to events, but you need to provide an +`Event` object to which you can pass some additional required metadata. Scenario's runtime will use that information to populate the envvars that `ops` expects to be set. + +For example, if you want to simulate [relation events](/t/6498) in Scenario, you need to: + +```python +from scenario import Event, Relation +... +relation = Relation('foo') +ctx.run( + event=Event("foo-relation-changed", relation=relation), + state=State(relations=[relation]) +) +``` + +The same holds for [workload events](/t/6468) and [secret events](/t/7191). + +[note] +For workload (`pebble-ready`) events, the reason we need to associate the container with the event is that the Framework uses **an envvar** (and not the event name) to determine which container the event is about. Scenario needs that information, similarly, for injecting that envvar into the charm's runtime. +[/note] + +[note] +For secret events, the reason is similar: the operator framework uses several envvars set by Juju to determine the secret ID the event is about and other required metadata. +[/note] + +# Syntactic sugar +As a way to simplify the user experience, all event types that are associated to some operator data structure can be obtained from the data structure itself. + +So instead of doing + +```python +relation = Relation('foo') +relation_changed_evt = Event('foo_relation_changed', relation=relation) +``` + +you can simply: + +```python +relation_changed_evt = Relation('foo').changed_event +``` + +And similarly for [`Container`](/t/10657) and [`Secret`](/t/10662). + +|[`Relation`](/t/10653) method | event | +|--|--| +|`Relation('foo').changed_event`|`foo_relation_changed`| +|`Relation('foo').joined_event`|`foo_relation_joined`| +|`Relation('foo').broken_event`|`foo_relation_broken`| +|`Relation('foo').created_event`|`foo_relation_created`| +|`Relation('foo').departed_event`|`foo_relation_departed`| + +|[`Container`](/t/10657) method | event| +|--|--| +|`Container('foo').pebble_ready_event`|`foo_pebble_ready`| + +|[`Secret`](/t/10662) method | event| +|--|--| +|`Secret('id').changed_event`|`secret_changed`| +|`Secret('id').rotate_event`|`secret_rotate`| +|`Secret('id').expired_event`|`secret_expired`| +|`Secret('id').remove_event`|`secret_remove`| + +------------------------- + diff --git a/tmp/t/1063.md b/tmp/t/1063.md new file mode 100644 index 000000000..150a74201 --- /dev/null +++ b/tmp/t/1063.md @@ -0,0 +1,189 @@ +system | 2023-04-18 13:42:18 UTC | #1 + +This document clarifies the various Juju commands that can be used to remove things, as well as a couple of options that can be used to force a removal. 
+ +**Contents:** + +- [Removal terms](#heading--removal-terms) +- [Forcing removals](#heading--forcing-removals) + + +

+<a href="#heading--removal-terms"><h2 id="heading--removal-terms">Removal terms</h2></a>

+ +There is a distinction between the similar sounding commands `unregister`, `detach`, `remove`, `destroy`, and `kill`. These commands are ordered such that their effect increases in severity: + +* `Unregister` means to decouple a resource from a logical entity for the client. The effect is local to the client only and does not affect the logical entity in any way. + +* `Detach` means to decouple a resource from a logical entity (such as an application). The resource will remain available and the underlying cloud resources used by it also remain in place. + +* `Remove` means to cleanly remove a single logical entity. This is a destructive process, meaning the entity will no longer be available via Juju, and any underlying cloud resources used by it will be freed (however, this can often be overridden on a case-by-case basis to leave the underlying cloud resources in place). + +* `Destroy` means to cleanly tear down a logical entity, along with everything within these entities. This is a very destructive process. + +* `Kill` means to forcibly tear down an unresponsive logical entity, along with everything within it. This is a very destructive process that does not guarantee associated resources are cleaned up. + + +[note type="information"] +These command terms/prefixes do not apply to all commands in a generic way. The explanations above are merely intended to convey how a command generally operates and what its severity level is. +[/note] + + +
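+To make the ladder concrete, here is an illustrative sketch pairing each term with one representative command; the entity names (`my-controller`, `osd-devices/2`, `haproxy`, `my-model`) are placeholders, and, as the note above says, not every term exists for every kind of entity:
+
+```text
+juju unregister my-controller       # client forgets the controller; the controller itself is untouched
+juju detach-storage osd-devices/2   # decouple storage from its unit; the volume remains available
+juju remove-application haproxy     # cleanly remove one application and free its cloud resources
+juju destroy-model my-model         # cleanly tear down a model and everything in it
+juju kill-controller my-controller  # forcibly tear down an unresponsive controller
+```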

+<a href="#heading--forcing-removals"><h2 id="heading--forcing-removals">Forcing removals</h2></a>

+ +Juju object removal commands do not succeed when there are errors in the multiple steps that are required to remove the underlying object. For instance, a unit will not remove properly if it has a hook error, or a model cannot be removed if application units are in an error state. This is an intentionally conservative approach to the deletion of things. + +However, this policy can also be a source of frustration for users in certain situations (i.e. "I don't care, I just want my model gone!"). Because of this, several commands have a `--force` option. + +Furthermore, even when utilising the `--force` option, the process may take more time than an administrator is willing to accept (i.e. "Just go away as quickly as possible!"). Because of this, several commands that support the `--force` option have, in addition, support for a `--no-wait` option. + +[note type=caution status=Caution] +The `--force` and `--no-wait` options should be regarded as tools to wield as a last resort. Using them introduces a chance of associated parts (e.g., relations) not being cleaned up, which can lead to future problems. +[/note] + +As of `v.2.6.1`, this is the state of affairs for those commands that support at least the `--force` option: + +command | `--force` | `--no-wait` +---------------|---------------|--------------- +`destroy-model` | yes | yes +`detach-storage` | yes | no +`remove-application` | yes | yes +`remove-machine` | yes | yes +`remove-offer` | yes | no +`remove-relation` | yes | no +`remove-storage` | yes | no +`remove-unit` | yes | yes + +When a command has `--force` but not `--no-wait`, this means that the combination of those options simply does not apply. + + + +------------------------- + +mdavistelstra | 2019-04-08 04:04:45 UTC | #2 + +Are all these commands non-blocking? How do I find out whether the removal has finished? + +I want to destroy a charm. Charms are not in the list of objects which can be removed. +I am *assuming* that removing the application will suffice. + +I ran `juju remove-application haproxy` then waited a few minutes. I still see `haproxy` in the GUI and with `juju status`. + +When I run `juju remove-application haproxy --debug` I see some colorful output finishing with "command finished". + +What am I doing wrong? How do I view the status of a removal to ensure that it actually finished successfully? + +------------------------- + +pmatulis | 2019-04-08 13:41:13 UTC | #3 + +[quote="mdavistelstra, post:2, topic:1063"] +I want to destroy a charm. Charms are not in the list of objects which can be removed. +I am *assuming* that removing the application will suffice. +[/quote] + +I've edited this doc page to make things clearer. Thanks for that feedback. + +------------------------- + +mdavistelstra | 2019-04-09 06:27:58 UTC | #4 + +[quote="system, post:1, topic:1063"] +To remove individual units instead of the entire application (i.e. all the units): + +`juju remove-unit ` +[/quote] + + +When i try that, it says: + +``` +$ juju remove-unit ntp/3 +removing unit ntp/3 failed: unit "ntp/3" is a subordinate +``` + +How do I remove a subordinate? (I have already removed the relationship.) + +------------------------- + +mdavistelstra | 2019-04-09 06:45:36 UTC | #5 + +The commands on this page do not work if the unit or application is in an error state. + +When a unit has state `error: hook failed: "install"`, the `juju remove-unit` command does not work. (It doesn't print an error message or return an error code, but it doesn't remove the unit either.) 
+ +So how can I remove a unit which is in an error state? + +The list up the top mentioned "remove", "destroy" and "kill", yet `juju destroy-unit` and `juju kill-unit` are not valid commands. How do I remove or kill a unit? + +------------------------- + +pmatulis | 2019-04-09 14:17:35 UTC | #6 + +[quote="mdavistelstra, post:2, topic:1063"] +Are all these commands non-blocking? How do I find out whether the removal has finished? +[/quote] + +The commands are indeed non-blocking, and are asynchronous. Generally speaking, confirmation of actions taken are confirmed by the output to `juju status`. There could be an issue with the backing cloud. It's hard to say at this point. If the application does not get removed you may look into the [logs](https://docs.jujucharms.com/troubleshooting-logs) and [Troubleshooting removals](https://docs.jujucharms.com/troubleshooting-removals). You can file a software issue [here](https://bugs.launchpad.net/juju/+filebug) if you feel the need to. + +------------------------- + +pmatulis | 2019-04-09 14:05:44 UTC | #7 + +[quote="mdavistelstra, post:4, topic:1063"] +How do I remove a subordinate? +[/quote] + +You would remove its application: + +```bash +juju remove-application ntp +``` + +Subordinate applications don't operate like regular applications in the sense that their units are not independent. They depend on the charm they're associated with (the *principal* charm). + +------------------------- + +pmatulis | 2019-04-09 14:12:20 UTC | #8 + +[quote="mdavistelstra, post:5, topic:1063"] +So how can I remove a unit which is in an error state? + +The list up the top mentioned “remove”, “destroy” and “kill”, yet `juju destroy-unit` and `juju kill-unit` are not valid commands. How do I remove or kill a unit? +[/quote] + +You can use the [Troubleshooting removals](https://docs.jujucharms.com/2.5/en/troubleshooting-removals) page I linked to previously. + +Re the removal prefixes, correct they do not apply to all commands. I will update the doc page to clarify. + +------------------------- + +tiago.pasqualini | 2020-07-28 16:42:47 UTC | #9 + +[quote="system, post:1, topic:1063"] +In the case that the removed unit is the only one running the corresponding machine will also be removed unless any of the following is true for that machine: + +* it was created with `juju add-machine` +* it is not being used as the only controller +* it is not hosting Juju-managed containers (KVM guests or LXD containers) +[/quote] + +The first condition is not true. We should remove that from the doc. + +------------------------- + +pedroleaoc | 2022-04-07 09:25:20 UTC | #10 + + + +------------------------- + +pedroleaoc | 2022-10-14 11:32:29 UTC | #11 + + + +------------------------- + diff --git a/tmp/t/10632.md b/tmp/t/10632.md new file mode 100644 index 000000000..23ed06743 --- /dev/null +++ b/tmp/t/10632.md @@ -0,0 +1,28 @@ +mcjaeger | 2023-10-26 19:56:57 UTC | #1 + +Charms are built for reuse. Having reuse in mind, the design and implementation of a charm needs to be independent from particular use cases or domains. But how can you ensure reuse? + +The best way to enable reuse is to start an open source project. Open source brings experts together; they can participate in the development and contribute their knowledge from their different backgrounds. In addition, the open source approach offers transparency and lets users and developers freely use, modify, analyse and redistribute the software. 
Read more about [reasons to publish your charm in our documentation](https://juju.is/docs/sdk/reasons-to-publish-your-charm-on-charmhub). + +- [Two stages of maturity](#heading--two-stages-of-maturity) +- [Requirements for public listing](#heading--requirements-for-public-listing) + +

+<a href="#heading--two-stages-of-maturity"><h2 id="heading--two-stages-of-maturity">Two stages of maturity</h2></a>

+ + +An open source project is a suitable foundation for reuse. However, providing a reusable charm is also a matter of maturity: high-quality software development and relevant capabilities for operating applications. Accordingly, the development of a charm follows a two-stage approach: + +1. [Stage 1: Important qualities](/t/6930): A quality open source project implements state-of-the-art documentation, testing and automation - this is the foundation for sharing and effective collaboration. + +2. [Stage 2: Important capabilities](/t/7310): Implementing the most relevant capabilities to ensure effective operations. + +For both stages, the reference documents above provide two parts: the first part explains the goals, and the second part lists references to documentation or example code for implementation. The points listed in each stage prioritise the development work and explain how to implement the qualities and capabilities using the [Charm SDK](https://juju.is/docs/sdk). For general best practices for developing a charm, please read the [Charm development best practices](https://juju.is/docs/sdk/styleguide). + +

+<a href="#heading--requirements-for-public-listing"><h2 id="heading--requirements-for-public-listing">Requirements for public listing</h2></a>

+ +Everyone can publish charms to [https://charmhub.io/](https://charmhub.io/). Then, the charm can be accessed for deployments using Juju or via a web browser by its URL. If a charm is published in Charmhub.io and included in search results, the charm entry needs to be switched into the listed mode. To bring your charm into the listing, [reach out to the community](https://discourse.charmhub.io/c/charmhub-requests/46) to announce your charm and ask for a review by an experienced community member. + +The [Stage 1- Important qualities](https://juju.is/docs/sdk/charm-publication-checklist) reference is the requirement for switching a charm into the *listed* mode. The points listed in the first stage ensure a useful charm project is suitable for presentation on [https://charmhub.io/](https://charmhub.io/) and for testing by others. + +------------------------- + diff --git a/tmp/t/1065.md b/tmp/t/1065.md new file mode 100644 index 000000000..c338a8675 --- /dev/null +++ b/tmp/t/1065.md @@ -0,0 +1,285 @@ +system | 2023-11-23 15:41:40 UTC | #1 + +
+ +Fan networking addresses a need raised by the proliferation of container usage in an IPv4 context: the ability to manage the address space such that network connectivity among containers running on separate hosts is achieved. + +Juju integrates with the Fan to provide network connectivity between containers that was hitherto not possible. The typical use case is the seamless interaction between deployed applications running within LXD containers on separate Juju machines. + + +**This page covers the following topics:** + + +* [Fan overview](#fan-overview) +* [Juju model Fan configuration](#juju-model-fan-configuration) +* [Cloud provider requirements](#cloud-provider-requirements) +* [Examples](#examples) + * [Rudimentary confirmation of the Fan using a GCE cloud](#rudimentary-confirmation-of-the-fan-using-a-gce-cloud) + * [Deploying applications with the Fan using an AWS cloud](#deploying-applications-with-the-fan-using-an-aws-cloud) + + + + +## Fan overview + +The Fan is a mapping between a smaller IPv4 address space (e.g. a /16 network) and a larger one (e.g. a /8 network) where **subnets** from the smaller one (the *underlay* network) are assigned to **addresses** on the larger one (the *overlay* network). Connectivity between containers on the larger network is enabled in a simple and efficient manner. + +In the case of the above networks (/16 underlay and /8 overlay), each host address on the underlay "provides" 253 addresses on the overlay. Fan networking can thus be considered a form of "address expansion". + +Further reading on generic (non-Juju) Fan networking: + +* [Fan networking](https://wiki.ubuntu.com/FanNetworking) : general user documentation +* [Container-to-Container Networking](https://insights.ubuntu.com/2015/06/22/container-to-container-networking-the-bits-have-hit-the-fan/) : a less technical overview +* [LXD network configuration](https://github.com/lxc/lxd/blob/master/doc/networks.md) : Fan configuration options at the LXD level +* [`fanctl` man page](http://manpages.ubuntu.com/cgi-bin/search.py?q=fanctl) : configuration information at the operating system level + + +## Juju model Fan configuration + +Juju manages Fan networking at the model level, with the relevant configuration options being `fan-config` and `container-networking-method`. + +First, configure the Fan via `fan-config`. This option can assume a space-separated list of `=`. This option maps the underlay network to the overlay network. + +``` bash +juju model-config fan-config=10.0.0.0/16=252.0.0.0/8 +``` + +Then, enable the Fan with the `container-networking-method` option. It can take on the following values: + +* local : standard LXD; addressing based on the LXD bridge (e.g. lxdbr0) +* provider : addressing based on host bridge; works only with providers with built-in container addressing support (e.g. MAAS with LXD) +* fan : Fan networking; works with any provider, in principle + + +``` bash +juju model-config container-networking-method=fan +``` + +To confirm that a model is properly configured use the following command: + +``` bash +juju model-config | egrep 'fan-config|container-networking-method' +``` + +This example will produce the following output: + +``` bash +container-networking-method model fan +fan-config model 10.0.0.0/16=252.0.0.0/8 +``` + +See [Configuring models](/t/configuring-models/1151) for more details on setting model options. + + +## Cloud provider requirements + +Juju autoconfigures Fan networking for both the AWS and GCE clouds. 
All that is needed is a controller, which does not need any special Fan options passed during its creation. + +In principle, all public cloud types can utilize the Fan. Yet due to the myriad ways a cloud may configure their subnets your mileage may vary. At the very least, if you are using a cloud other than AWS or GCE, manual configuration at the Juju level will be needed (the above model options). Adjustments at the cloud level can also be expected. For guidance, the auto-configured clouds both start with a /16 address space. Juju then maps it onto an /8. + +Note that [MAAS](https://maas.io/) has LXD addressing built-in so there is no point in applying the Fan in such a context. + + +## Examples + +Two examples are provided. Each will use a different cloud: + +* Rudimentary confirmation of the Fan using a GCE cloud +* Deploying applications with the Fan using an AWS cloud + + +### Rudimentary confirmation of the Fan using a GCE cloud + +Fan networking works out-of-the-box with GCE. We'll use a GCE cloud to perform a rudimentary confirmation that the Fan is in working order by creating two machines with a LXD container on each. A network test will then be performed between the two containers to confirm connectivity. + +Here we go: + +``` bash +juju add-machine -n 2 +juju deploy ubuntu --to lxd:0 +juju add-unit ubuntu --to lxd:1 +``` + +After a while, we see the following output to command `juju machines -m default | grep lxd`: + +``` bash +0/lxd/0 started 252.0.63.146 juju-477cfe-0-lxd-0 xenial us-east1-b Container started +1/lxd/0 started 252.0.78.212 juju-477cfe-1-lxd-0 xenial us-east1-c Container started +``` + +So these two containers should be able to contact one another if the Fan is up: + +``` bash +juju ssh -m default 0 sudo lxc exec juju-477cfe-0-lxd-0 '/usr/bin/tracepath 252.0.78.212' +``` + +Output: + +``` bash +1?: [LOCALHOST] pmtu 1410 + 1: 252.0.78.212 1.027ms reached + 1: 252.0.78.212 0.610ms reached + Resume: pmtu 1410 hops 1 back 1 +Connection to 35.196.138.253 closed. +``` + + +### Deploying applications with the Fan using an AWS cloud + +To use Fan networking with AWS a *virtual private cloud* (VPC) is required. Fortunately, a working VPC is provided with every AWS account and is used, by default, when creating regular EC2 instances. + +[note] +You may need to create a new VPC if you are using an old AWS account (the original VPC may be deficient). Some may simply prefer to have a Juju-dedicated VPC. See appendix [Creating an AWS VPC](/t/appendix-creating-an-aws-vpc/1064) for instructions. +[/note] + +Whether you created a secondary VPC out of necessity or preference you will need to inform Juju about it. See [AWS specific features](/t/using-amazon-aws-with-juju/1084#heading--aws-specific-features) for how to do this. + +Here, Fan networking will be leveraged by deploying and relating applications that are running in different LXD containers, where the containers are housed on separate machines. + +``` bash +juju add-machine -n 2 +juju deploy mysql --to lxd:0 +juju deploy wordpress --to lxd:1 +juju add-relation mysql wordpress +``` + +[note] +A VPC may fail to provide the default AWS instance type of 'm3.medium'. See [AWS specific features][anchor__aws-specific-features] for how to request an alternative. 
+[/note] + +A partial output to `juju status` is: + +``` bash +Unit Workload Agent Machine Public address Ports Message +mysql/0* active idle 0/lxd/0 252.0.82.239 3306/tcp Ready +wordpress/0* active executing 1/lxd/0 252.0.169.174 80/tcp +``` + +We can confirm that the MySQL container can contact the WordPress container with: + +``` bash +juju ssh mysql/0 exec nc -vz 252.0.169.174 80 +``` + +This example test was successful by yielding the following output: + +``` bash +Connection to 252.0.169.174 80 port [tcp/http] succeeded! +``` + +------------------------- + +pedroleaoc | 2021-06-08 18:06:50 UTC | #2 + + + +------------------------- + +pedroleaoc | 2022-10-14 11:32:04 UTC | #3 + + + +------------------------- + +hypeitnow | 2023-02-07 21:29:46 UTC | #4 + +Hi all, + +Maybe it would be worth to mention if it is possible to run fan networking with the lxd provider consisting of a cluster with e.g 3 members. What would be needed from lxd networking point of view? Maybe it should work by default? + +I tried to follow the example and ended up in situation where I was not able to execute +``` +juju deploy -n 3 --to lxd:0,lxd:1,lxd:2 --channel latest/edge mysql-innodb-cluster +``` +Where 0,1,2 are located on different hosts and have a FAN network as eth0 on each one. The machines are able to ping each other. +But when I try to spin up a container in each I get(also visible in juju show-machine 0): +``` +message: host machine "0" has no available FAN devices in space(s) "alpha" +``` +EDIT: +MAAS cloud bahaves exactly the same way, I found a line in juju source code that fan devices are validated by checking interface name, right now my machine has interface fan-240 but the error is exectly the same when creating a container(host has no available FAN devices in lub-net space) : +``` +root@juju-client:~/openstack# j show-machine 3 +model: openstack-maas +machines: + "3": + juju-status: + current: started + since: 06 Feb 2023 21:21:12Z + version: 3.1.0 + hostname: openstack-2 + dns-name: 10.10.10.222 + ip-addresses: + - 10.10.10.222 + - 10.10.99.222 + - 10.187.24.2 + - 240.0.11.2 + instance-id: m7wg7a + display-name: openstack-2 + machine-status: + current: running + message: Deployed + since: 06 Feb 2023 21:12:54Z + modification-status: + current: idle + since: 06 Feb 2023 20:56:04Z + base: + name: ubuntu + channel: "22.04" + network-interfaces: + eth1: + ip-addresses: + - 10.187.24.2 + mac-address: 00:16:3e:d8:9c:70 + is-up: true + eth2: + ip-addresses: + - 10.10.10.222 + mac-address: 00:16:3e:db:a9:5c + gateway: 10.10.10.1 + space: lub-net + is-up: true + eth3: + ip-addresses: + - 10.10.99.222 + mac-address: 00:16:3e:ec:c5:71 + space: lub-net + is-up: true + fan-240: + ip-addresses: + - 240.0.11.2 + mac-address: 00:16:3e:db:ee:69 + space: lub-net + is-up: true + lxdbr0: + ip-addresses: + - 10.0.239.1 + mac-address: 00:16:3e:7e:17:d9 + is-up: true + containers: + 3/lxd/0: + juju-status: + current: down + message: agent is not communicating with the server + since: 06 Feb 2023 21:34:32Z + instance-id: pending + machine-status: + current: provisioning error + message: host machine "3" has no available FAN devices in space(s) "lub-net" + since: 06 Feb 2023 21:34:32Z + modification-status: + current: idle + since: 06 Feb 2023 20:56:13Z + base: + name: ubuntu + channel: "22.04" + constraints: arch=amd64 spaces=lub-net + constraints: arch=amd64 mem=1536M tags=openstack + hardware: arch=amd64 cores=4 mem=7680M tags=virtual,openstack availability-zone=default +``` +Can anyone please advise me what I do 
wrong? + +Thanks! + +------------------------- + diff --git a/tmp/t/1066.md b/tmp/t/1066.md new file mode 100644 index 000000000..c488d8ab0 --- /dev/null +++ b/tmp/t/1066.md @@ -0,0 +1,33 @@ +system | 2024-02-09 13:16:39 UTC | #1 + + > See also: +> - [How to make a controller highly available](/t/1111#heading--make-a-controller-highly-available) +> - [How to make an application highly available](/t/5476#heading--make-an-application-highly-available) + +In the context of a cloud deployment in general, **high availability (HA)** is the concept of making software resilient to failures by means of running multiple replicas with shared and synchronised software context -- something usually achieved through coordinated [scaling (horizontally and up)](/t/13137). In Juju, it is supported for controllers on machine clouds and for regular applications on both machine and Kubernetes clouds + + +![Juju - Controller high availability (machines) |690x328](upload://7WEq2ZTtCpfnVQzSC7ULAdRMVub.png) + +***Controller high availability (machine clouds).** Juju controllers can be made highly-available by enabling more than one machine to each run a separate controller unit with a separate controller agent instance, where each machine effectively becomes an instance of the controller. This set of Juju agents collectively use a database replicaset to achieve data synchronisation amongst them.* + +------------------------- + +pedroleaoc | 2021-06-08 18:06:55 UTC | #2 + + + +------------------------- + +pedroleaoc | 2022-10-14 11:30:21 UTC | #4 + + + +------------------------- + +alex | 2023-02-07 14:59:42 UTC | #5 + +Broken link to "How to make a controller highly available", should be: https://juju.is/docs/olm/manage-controllers#heading--make-a-controller-highly-available + +------------------------- + diff --git a/tmp/t/1073.md b/tmp/t/1073.md new file mode 100644 index 000000000..98f98ee9a --- /dev/null +++ b/tmp/t/1073.md @@ -0,0 +1,296 @@ +system | 2024-08-21 07:28:13 UTC | #1 + +> See also: [Relation (integration)](/t/5464) + + +**Contents:** +- [Add a relation](#heading--add-a-relation) +- [View all the current relations](#heading--view-all-the-current-relations) +- [Get the relation ID](#heading--get-the-relation-id) +- [Remove a relation](#heading--remove-a-relation) + +

+<a href="#heading--add-a-relation"><h2 id="heading--add-a-relation">Add a relation</h2></a>

+ + + +The procedure differs slightly depending on whether the applications that you want to integrate are on the same model or rather on different models. + +- [Add a same-model relation](#heading--add-a-same-model-relation) +- [Add a cross-model relation](#heading--add-a-cross-model-relation) + +

+<a href="#heading--add-a-same-model-relation"><h3 id="heading--add-a-same-model-relation">Add a same-model relation</h3></a>

+ +[tabs] + +[tab version="juju"] + +To set up a relation between two applications on the same model, run the `integrate` command followed by the names of the applications. For example: + +``` text +juju integrate mysql wordpress +``` + +This will satisfy WordPress's database requirement where MySQL provides the appropriate schema and access credentials required for WordPress to run properly. + +The code above however works only if there is no ambiguity in what relation the charm _requires_ and what the related charm _provides_. + +If the charms in question are able to establish multiple relation types, Juju may need to be supplied with more information as to how the charms should be joined. For example, if we try instead to relate the 'mysql' charm to the 'mediawiki' charm: + +```text +juju integrate mysql mediawiki +``` + +the result is an error: + +``` text +error: ambiguous relation: "mediawiki mysql" could refer to + "mediawiki:db mysql:db"; "mediawiki:slave mysql:db" +``` + +The solution is to be explicit when referring to an *endpoint*, where the latter has a format of `:`. In this case, it is 'db' for both applications. However, it is not necessary to specify the MySQL endpoint because only the MediaWiki endpoint is ambiguous (according to the error message). Therefore, the command becomes: + +```text +juju integrate mysql mediawiki:db +``` +[note] +The integration endpoints provided or required by a charm are listed in the result of the `juju info` command. They are also listed on the page for the charmed operator at [Charmhub](https://charmhub.io). +[/note] + +> See more: [`juju integrate`](/t/10207) + +[/tab] + +[tab version="terraform juju"] + +To add a same-model relation, create a resource of the `juju_integration` type, give it a label (below, `this`), and in its body add: +- a `model` attribute specifying the name of the model where you want to create the relation; +- two `application` blocks, specifying the names of the applications that you want to integrate (and, if necessary, their endpoints_; +- a `lifecycle` block with the `replace_triggered_by` argument specifying the list of application attributes (always the name, model, constraints, placement, and charm name) for which, if they are changed = destroyed and recreated, the relation must be recreated as well. + +[note type=caution] +**To avoid complications (e.g., race conditions) related to how Terraform works:** + +Make sure to always specify resources and data sources by reference rather than directly by name. + +For example, for a resource / data source of type `juju_model` with label `development` and name `mymodel`, do not specify it as `mymodel` but rather as `juju_model.development.name` / `data.juju_model.development.name`. + +[/note] + + +```terraform +resource "juju_integration" "this" { + model = juju_model.development.name + via = "10.0.0.0/24,10.0.1.0/24" + + application { + name = juju_application.wordpress.name + endpoint = "db" + } + + application { + name = juju_application.percona-cluster.name + endpoint = "server" + } + + # Add any RequiresReplace schema attributes of + # an application in this integration to ensure + # it is recreated if one of the applications + # is Destroyed and Recreated by terraform. 
E.G.: + lifecycle { + replace_triggered_by = [ + juju_application.wordpress.name, + juju_application.wordpress.model, + juju_application.wordpress.constraints, + juju_application.wordpress.placement, + juju_application.wordpress.charm.name, + juju_application.percona-cluster.name, + juju_application.percona-cluster.model, + juju_application.percona-cluster.constraints, + juju_application.percona-cluster.placement, + juju_application.percona-cluster.charm.name, + ] + } +} +``` + +> See more: [`juju_integration` (resource)](https://registry.terraform.io/providers/juju/juju/latest/docs/resources/integration), [Terraform | `lifecycle` > `replace_triggered_by`](https://developer.hashicorp.com/terraform/language/meta-arguments/lifecycle#replace_triggered_by) + +[/tab] + +[tab version="python libjuju"] +To integrate two applications, on a connected Model, use the `integrate()` method. + +```python +await my_model.integrate('mysql', 'mediawiki') + +# Integrate with particular endpoints +await my_model.integrate('mysql', 'mediawiki:db') +``` + +> See more: [`integrate()`](https://pythonlibjuju.readthedocs.io/en/latest/api/juju.model.html#juju.model.Model.integrate) +[/tab] +[/tabs] + + +

+<a href="#heading--add-a-cross-model-relation"><h3 id="heading--add-a-cross-model-relation">Add a cross-model relation</h3></a>

+> See also: [Cross-model relation](/t/5464#heading--cross-model) + + +In a cross-model relation there is also an 'offering' model and a 'consuming' model. The admin of the 'offering' model 'offers' an application for consumption outside of the model and grants an external user access to it. The user on the 'consuming' model can then find an offer to use, consume the offer, and integrate an application on their model with the 'offer' via the same `integrate` command as in the same-model case (just that the offer must be specified in terms of its offer URL or its consume alias). This creates a local proxy for the offer in the consuming model, and the application is subsequently treated as any other application in the model. + +> See more: [How to manage offers > Integrate with an offer](/t/1150#heading--integrate-with-an-offer) + +
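+For illustration, a minimal sketch of this workflow with the `juju` client might look as follows (the user, model, application, and alias names are placeholders, and the offer URL takes the form `<user>/<model>.<offer name>`):
+
+```text
+# On the offering model:
+juju offer mysql:db                          # offer the 'db' endpoint of mysql (offer URL: admin/prod.mysql)
+juju grant bob consume admin/prod.mysql      # grant the external user 'bob' access to the offer
+
+# On the consuming model, as user 'bob':
+juju consume admin/prod.mysql hosted-mysql   # create a local proxy for the offer, aliased 'hosted-mysql'
+juju integrate wordpress hosted-mysql        # integrate with it like any other application
+```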

+<a href="#heading--view-all-the-current-relations"><h2 id="heading--view-all-the-current-relations">View all the current relations</h2></a>

+ +[tabs] +[tab version="juju"] + +To view the current relations in the model, run `juju status --relations`. The example below shows a peer relation and a regular relation: + +```text +[...] +Relation provider Requirer Interface Type Message +mysql:cluster mysql:cluster mysql-ha peer +mysql:db mediawiki:db mysql regular +``` + +To view just a specific relation and the applications it integrates, run `juju status --relations` followed by the provider and the requirer application (and endpoint). For example, based on the output above, `juju status --relations mysql mediawiki` would output: + +```text +[...] +Relation provider Requirer Interface Type Message +mysql:db mediawiki:db mysql regular +``` + +> See more: [`juju status --relations`](/t/10173) + +[/tab] + +[tab version="terraform juju"] + +The `terraform juju` client does not support this. Please use the `juju` client. + +[/tab] + +[tab version="python libjuju"] +To view the current relations in a model, directly access the Model's `relations` property. + +```python +my_model.relations +``` + +> See more: [`Model.relations (property)`](https://pythonlibjuju.readthedocs.io/en/latest/api/juju.model.html#juju.model.Model.relations) +[/tab] +[/tabs] + +

+<a href="#heading--get-the-relation-id"><h2 id="heading--get-the-relation-id">Get the relation ID</h2></a>

+ +[tabs] +[tab version="juju"] +To get the ID of a relation, for any unit participating in the relation, run the `show-unit` command -- the output will also include the relation ID. For example: + +```text +$ juju show-unit synapse/0 + +... + - relation-id: 7 + endpoint: synapse-peers + related-endpoint: synapse-peers + application-data: + secret-id: secret://1234 + local-unit: + in-scope: true +``` + + +[/tab] + +[tab version="terraform juju"] +The `terraform juju` client does not support this. Please use the `juju` client. +[/tab] + +[tab version="python libjuju"] +The `python-libjuju` client does not support this. Please use the `juju` client. +[/tab] +[/tabs] + + +

+<a href="#heading--remove-a-relation"><h2 id="heading--remove-a-relation">Remove a relation</h2></a>

+ +[tabs] +[tab version="juju"] + +Regardless of whether the relation is same-model or cross-model, to remove a relation, run the `remove-relation` command followed by the names of the two applications involved in the integration: + +`juju remove-relation <application 1> <application 2>` + +For example: + +```text +juju remove-relation mediawiki mysql +``` + +In cases where there is more than one relation between the two applications, specify the interface at least for one of the applications: + +```text +juju remove-relation mediawiki mysql:db +``` + +> See more: [`juju remove-relation`](/t/10110) + +[/tab] + +[tab version="terraform juju"] +To remove a relation, in your Terraform plan, remove its resource definition. + +> See more: [`juju_integration` (resource)](https://registry.terraform.io/providers/juju/juju/latest/docs/resources/integration) + +[/tab] + +[tab version="python libjuju"] +To remove a relation, use the `remove_relation()` method on an Application object. + +```python +await my_app.remove_relation('mediawiki', 'mysql:db') +``` + +> See more: [`remove_relation()`](https://pythonlibjuju.readthedocs.io/en/latest/api/juju.application.html#juju.application.Application.remove_relation) +[/tab] +[/tabs] + + + +
+ +> **Contributors:** @amandahla, @aurelien-lourot , @cderici, @danieleprocida, @evilnick , @hmlanigan, @nottrobin , @pedroleaoc, @pmatulis, @tmihoc + +------------------------- + +thogarre | 2021-02-12 18:39:45 UTC | #2 + +Is there another page with more information about relations? specifically, around gathering relation data from the cli (relation-get, relation-list, relation-ids)? + +------------------------- + +eric-chen | 2021-08-16 03:07:46 UTC | #3 + +[quote="system, post:1, topic:1073"] +star typology +[/quote] + +Typo here => star topology + +------------------------- + +pedroleaoc | 2022-04-07 09:24:49 UTC | #4 + + + +------------------------- + +pedroleaoc | 2022-10-14 11:31:46 UTC | #5 + + + +------------------------- + diff --git a/tmp/t/10811.md b/tmp/t/10811.md new file mode 100644 index 000000000..8359e85bd --- /dev/null +++ b/tmp/t/10811.md @@ -0,0 +1,13 @@ +tmihoc | 2023-06-02 08:52:27 UTC | #1 + +This document describes the Juju execution flow for a Kubernetes / machine [charm](https://juju.is/docs/sdk/charmed-operators). + +The Juju [controller](https://juju.is/docs/olm/controller) sends an [event](https://juju.is/docs/sdk/event) to a unit [agent](https://juju.is/docs/olm/agent) that is in the charm container / VM. The unit agent executes the charm according to certain environment variables. When this happens, the environment variables are translated by the operator framework [Ops](https://juju.is/docs/sdk/ops) into the events in the charm code, and the charm then responds with the event handlers in the charm code. All of this is represented schematically in the diagram below, where the top depicts the situation for a Kubernetes charm and the bottom -- for a machine charm. + +![image|690x660](upload://5OdbdBmJR6RDKSHp8pcdwfQ1Ay6.png) + + +> See more: https://juju.is/docs/sdk/talking-to-a-workload-control-flow-from-a-to-z + +------------------------- + diff --git a/tmp/t/10813.md b/tmp/t/10813.md new file mode 100644 index 000000000..cd9d489aa --- /dev/null +++ b/tmp/t/10813.md @@ -0,0 +1,104 @@ +0x12b | 2024-04-04 19:41:14 UTC | #1 + + + +Whenever Traefik is used to ingress your Kubernetes workloads, you risk encountering the dreaded "Gateway Address Unavailable" message. In this article, we'll go through what you can do to remediate it. + +[note] +#### :exclamation: Assumes MicroK8s + +In this article, we will assume that you are running MicroK8s on either a bare-metal or virtual machine. If your setup differs from this, parts of the how-to may still apply, although you will need to tailor the exact steps and commands to your setup. +[/note] + +## Checklist +- The [metallb microk8s addon](https://microk8s.io/docs/addon-metallb) is enabled. +- Traefik's service type is `LoadBalancer`. +- An external IP address is assigned to traefik. + +## The metallb addon isn't enabled +Check with: +```bash +microk8s status -a metallb +``` + +If it is disabled, you can enable it with: +```bash +IPADDR=$(ip -4 -j route get 2.2.2.2 | jq -r '.[] | .prefsrc') +microk8s enable metallb:$IPADDR-$IPADDR +``` + +This command will fetch the IPv4 address assigned to your host, and hand it to MetalLB as an assignable IP. If the address range you want to hand to MetalLB differs from your host ip, alter the `$IPADDR` variable to instead specify the range you want to assign, for instance `IPADDR=10.0.0.1-10.0.0.100`. + +## No external IP address is assigned to the traefik service +Does the traefik service have an external IP assigned to it? 
Check with: + +```bash +JUJU_APP_NAME="traefik" +kubectl get svc -A -o wide | grep -E "^NAMESPACE|$JUJU_APP_NAME" +``` + + +## No available IP in address pool +This frequently happens when: +- Metallb has only one IP in its range but you deployed two instances of traefik, or traefik is forcefully removed (`--force --no-wait`) and then a new traefik app is deployed immediately after. +- The [`ingress`](https://microk8s.io/docs/ingress) addon is enabled. It's possible that nginx from the ingress addon has claimed the ExternalIP. Disable nginx and re-enable metallb. + +Check with: +```bash +kubectl get ipaddresspool -n metallb-system -o yaml && kubectl get all -n metallb-system +``` +You could add more IPs to the range: +``` +FROM_IP="..." +TO_IP="..." +microk8s enable metallb:$FROM_IP-$TO_IP +``` + +## Juju reverted the service type to `ClusterIP` +Juju controller cycling may cause the type to revert from `LoadBalancer` back to `ClusterIP`. + +Check with: + +```bash +kubectl get svc -A -o wide | grep -E "^NAMESPACE|LoadBalancer" +``` + +If traefik isn't listed (it's not `LoadBalancer`), then recreate the pod to have it retrigger the assignment of the external IP with `kubectl delete` . It should be `LoadBalancer` when kubernetes brings it back. + +## Integration tests pass locally but fail on GitHub runners +This used to happen when the github runners were at peak usage, making the already small 2cpu7gb runners run even slower. +As much of a bad answer as this is, the best response may be to increase timeouts or try to move CI jobs to internal runners. + + + +## Verification + +Verify that the Traefik Kubernetes service now has been assigned an external IP: + +``` +$ microk8s.kubectl get services -A + +NAMESPACE NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) +cos traefik LoadBalancer 10.152.183.130 10.70.43.245 80:32343/TCP,443:30698/TCP 4d3h + 👆 - This one! + +``` + +Verify that Traefik is functioning correctly by trying to trigger one of your ingressed paths. If you have COS Lite deployed, you may check that if works as expected using the Catalogue charm: + +``` +$ curl http:///-catalogue/ + +# for example... + +$ curl http://10.70.43.245/cos-catalogue/ +``` + +This command should return a long HTML code block if everything works as expected. + +------------------------- + diff --git a/tmp/t/1083.md b/tmp/t/1083.md new file mode 100644 index 000000000..0105710b1 --- /dev/null +++ b/tmp/t/1083.md @@ -0,0 +1,645 @@ +system | 2024-04-16 12:49:49 UTC | #1 + +> See also: [Client](/t/13146) + +This document shows how to install and manage your Juju client -- whether it is the `juju` client, the `terraform juju` client, or the `python-libjuju` client. + +> See more: [`juju`](/t/5465), [`terraform juju`](/t/13086), [`python-libjuju`](/t/13089) + +[note type=information] +Here and throughout the how-to guides, to view content specific to your client of interest, in each section select the tab for that client. The decision will stick until you select another client. +[/note] + +**Contents:** + +- [Install the client](#heading--install-the-client) +- [Use the client](#heading--use-the-client) +- [Back up the client](#heading--back-up-the-client) +- [Upgrade the client](#heading--upgrade-the-client) +- [Uninstall the client](#heading--uninstall-the-client) + +

+<a href="#heading--install-the-client"><h2 id="heading--install-the-client">Install the client</h2></a>

+ +[tabs] +[tab version="juju" view="Linux"] + +**Install from snap.** + +[note type=information] +**Why install from snap?** Snaps get updated automatically. Thus, your client will be updated automatically as soon as a new Juju release becomes available. + +**Snap command not available on your system?** Visit [snapcraft.io](https://snapcraft.io) for instructions on how to install `snapd`. +[/note] + +To install the latest `juju` client from snap, run: + +```text +sudo snap install juju +``` + +To select a particular version, run `snap info juju` to find out what versions are available, then `sudo snap install juju --channel=` to install the version of your choice (e.g., `sudo snap install juju --channel=3.4/stable`). + +--- +[details=Example] + +```text +$ snap info juju +name: juju +summary: Juju - a model-driven operator lifecycle manager for K8s and + machines +publisher: Canonical✓ +store-url: https://snapcraft.io/juju +contact: https://canonical.com/ +license: AGPL-3.0 +description: | + A model-driven **universal operator lifecycle manager** for multi cloud and + hybrid cloud application management on K8s and machines. + + **What is an operator lifecycle manager?** + Kubernetes operators are containers with operations code, that drive your + applications on K8s. Juju is an operator lifecycle manager that manages the + installation, integration and configuration of operators on the cluster. + Juju also extends the idea of operators to traditional application + management on Linux and Windows servers, or cloud instances. + + **Model-driven operations and integration** + Organise your operators into models, which group together applications that + can be tightly integrated on the same substrate and operated by the same + team. Capture resource allocation, storage, networking and integration + information in the model to simplify ongoing operations. + + **Better day-2 operations** + Each operator code package, called a charm, declares methods for actions + like back, restore, or security audit. Calling these methods provides + remote administration of the application with no low-level access required. 
+ + **Learn more** + + - https://juju.is/ + - https://discourse.charmhub.io/ + - https://github.com/juju/juju +commands: + - juju +services: + juju.fetch-oci: oneshot, disabled, inactive +snap-id: e2CPHpB1fUxcKtCyJTsm5t3hN9axJ0yj +tracking: 3.1/stable +refresh-date: 2024-01-03 +channels: + 3/stable: 3.4.0 2024-03-07 (26548) 99MB - + 3/candidate: ↑ + 3/beta: ↑ + 3/edge: ↑ + 4.0/stable: – + 4.0/candidate: – + 4.0/beta: 4.0-beta2 2024-01-11 (25984) 98MB - + 4.0/edge: 4.0-beta3-ec9b93b 2024-02-19 (26600) 98MB - + 4/stable: – + 4/candidate: – + 4/beta: 4.0-beta2 2024-01-17 (25984) 98MB - + 4/edge: ↑ + 3.5/stable: – + 3.5/candidate: – + 3.5/beta: – + 3.5/edge: 3.5-beta1-c3de749 2024-03-12 (26766) 98MB - + 3.4/stable: 3.4.0 2024-02-15 (26548) 99MB - + 3.4/candidate: ↑ + 3.4/beta: ↑ + 3.4/edge: 3.4.1-14d5608 2024-03-13 (26783) 98MB - + 3.3/stable: 3.3.3 2024-03-06 (26652) 99MB - + 3.3/candidate: ↑ + 3.3/beta: ↑ + 3.3/edge: 3.3.4-65b78cd 2024-03-13 (26779) 99MB - + 3.2/stable: 3.2.4 2023-11-22 (25443) 95MB - + 3.2/candidate: ↑ + 3.2/beta: ↑ + 3.2/edge: 3.2.5-9e20221 2023-11-17 (25455) 95MB - + 3.1/stable: 3.1.7 2024-01-03 (25751) 95MB - + 3.1/candidate: ↑ + 3.1/beta: ↑ + 3.1/edge: 3.1.8-1a8d6a3 2024-03-12 (26750) 95MB - + 2.9/stable: 2.9.46 2023-12-05 (25672) 120MB classic + 2.9/candidate: 2.9.47 2024-03-07 (26724) 120MB classic + 2.9/beta: ↑ + 2.9/edge: 2.9.48-dfd7fee 2024-03-07 (26740) 120MB classic + 2.8/stable: 2.8.13 2021-11-11 (17665) 74MB classic + 2.8/candidate: ↑ + 2.8/beta: ↑ + 2.8/edge: ↑ +installed: 3.1.7 (25751) 95MB - + +$ sudo snap install juju --channel=3.4/stable + + +``` +[/details] +--- + +To install multiple versions of `juju` via snap, enable `snap`'s experimental parallel-install feature, reboot, then install a different version with a different name. + +> See more: [Snap | Channels](https://snapcraft.io/docs/channels) + +---- +[details=Example] +```text + +# Enable snap's experimental parallel-install feature: +sudo snap set system experimental.parallel-instances=true` + +# Reboot. + +# Install juju 2.9 under the name 'juju_29' +sudo snap install --channel 2.9/stable juju_29 --classic + +# Install juju 3.3 under the name 'juju_33' +sudo snap install --channel 3.3/stable juju_33 + +# Test your 2.9 client: +juju_29 status + +# Test your 3.3 client: +juju_33 status + +[/details] +---- + +> See more: [Snap | Parallel installs](https://snapcraft.io/docs/parallel-installs) + +**Install from binary.** + +This method allows you to install the Juju client on systems that do not support snaps. + +1. Visit the project's [downloads](https://launchpad.net/juju/+download) page and select the binary that matches your system's architecture and the version that you want to install. + +For example, to download the 2.9.38 client for amd64: + +```text +curl -LO https://launchpad.net/juju/2.9/2.9.38/+download/juju-2.9.38-linux-amd64.tar.xz +``` + +2. Validate the downloaded binary archive (optional) + +Download the md5 checksum that matches the binary you just downloaded: + +[note type="positive"] +The link to the `md5` signature can be constructed by appending `/+md5` to the end of the link you just downloaded. 
+[/note] + +```text +curl -L https://launchpad.net/juju/2.9/2.9.38/+download/juju-2.9.38-linux-amd64.tar.xz/+md5 -o juju.md5 +``` + +Validate the downloaded binary archive against the checksum file: + +```text +cat juju.md5 | md5sum --check +``` + +If the checksum check succeeds, the output will be: + +``` +juju-2.9.38-linux-amd64.tar.xz: OK +``` + +If the check fails, md5sum exits with nonzero status and prints output similar to: + +``` +juju-2.9.38-linux-amd64.tar.xz: FAILED +md5sum: WARNING: 1 computed checksum did NOT match +``` + +3. Unpack and install client binary + +```text +tar xf juju-2.9.38-linux-amd64.tar.xz +sudo install -o root -g root -m 0755 juju /usr/local/bin/juju +``` + +4. Test that the version of the client you installed is up to date + +```text +juju version +``` + +**Build from source.** + +Visit the [downloads section](https://launchpad.net/juju/+download) of the [Launchpad project](https://launchpad.net/juju/) to download a tar.gz with Juju source code. For build instructions refer to the [contributing to Juju](https://github.com/juju/juju/blob/develop/CONTRIBUTING.md) documentation on Github. + +[/tab] +[tab version="juju" view="macOS"] + + +The Juju client is available on [Homebrew](https://brew.sh/) and can be installed as follows: + +```text +brew install juju +``` + +[/tab] + +[tab version="juju" view="Windows"] + +Visit the project's [downloads](https://launchpad.net/juju/+download) page and select the signed installer for the Juju version you wish to install. + +[/tab] + +[tab version="terraform juju" view="Linux"] + +To install the `terraform juju` client, install the `terraform` CLI: + +> See: [Hashicorp | Install Terraform](https://developer.hashicorp.com/terraform/install) + +For example, on a Linux that supports snaps: + +```text +sudo snap install terraform +``` + +[/tab] + + +[tab version="terraform juju" view="macOS"] + +[TO BE ADDED] + +[/tab] + +[tab version="terraform juju" view="Windows"] +> See: [Hashicorp | Install Terraform](https://developer.hashicorp.com/terraform/install) +[/tab] + + +[tab version="python libjuju" view="Linux"] + +In PyPI, which is the Python repository that `pip` is drawing modules from, `python-libjuju` is simply referred to as `juju`. You can install it directly via `pip`: + +```text +pip3 install juju +``` + +[/tab] + +[tab version="python libjuju" view="macOS"] + +[TO BE ADDED] + +[/tab] + +[tab version="python libjuju" view="Windows"] + +[TO BE ADDED] + +[/tab] + + +[/tabs] + + +

+## Use the client

+
+[tabs]
+[tab version="juju"]
+
+Use the `juju` client reference and the Juju how-to guides to build up your deployment.
+
+> See:
+> - [`juju help`](/t/1729)
+> - [`juju` CLI commands](/t/10045)
+> - [Juju | How-to guides](/t/5334)
+
+[/tab]
+
+[tab version="terraform juju"]
+
+Once you've installed the `terraform` CLI, to start using the `terraform juju` client:
+
+1. **Require the `juju` provider.** In your Terraform plan, under `required_providers`, specify the `juju` provider:
+
+```text
+terraform {
+  required_providers {
+    juju = {
+      version = "~> 0.10.0"
+      source = "juju/juju"
+    }
+  }
+}
+```
+
+2. **Configure the provider to use a pre-existing controller.** There are 3 ways you can do this:
+
+[note type=information status="For all methods: To view your controller's details, run"]
+`juju show-controller --show-password`
+[/note]
+
+-----------
+
+[details="Configure the provider using static credentials"]
+
+In your Terraform plan, in your provider specification, use the various keywords to provide your controller information statically:
+
+```text
+provider "juju" {
+  controller_addresses = "10.225.205.241:17070,10.225.205.242:17070"
+  username = "jujuuser"
+  password = "password1"
+  ca_certificate = file("~/ca-cert.pem")
+}
+```
+
+> See [Terraform | `juju` provider](https://registry.terraform.io/providers/juju/juju/latest/docs)
+
+[/details]
+
+-----------
+
+[details="Configure the provider using environment variables"]
+
+In your Terraform plan, leave the `provider` specification empty:
+
+```text
+provider "juju" {}
+```
+
+Then, in a terminal, export the controller environment variables with your controller's values. For example:
+
+```text
+export JUJU_CONTROLLER_ADDRESSES="10.225.205.241:17070,10.225.205.242:17070"
+export JUJU_USERNAME="jujuuser"
+export JUJU_PASSWORD="password1"
+export JUJU_CA_CERT="$(cat ~/ca-cert.pem)"
+```
+[/details]
+
+-----------
+
+[details="Configure the provider using the `juju` client"]
+
+In your Terraform plan, leave the `provider` specification empty:
+
+```text
+provider "juju" {}
+```
+
+Then, in a terminal, use the `juju` client to switch to the desired controller: `juju switch <controller>`. Your Terraform plan will be interpreted relative to that controller.
+
+[/details]
+
+-----------
+
+3. Use the `terraform juju` client reference and the Juju how-to guides to build up your deployment.
+
+> See more:
+> - [Terraform Juju](https://registry.terraform.io/providers/juju/juju/latest/docs)
+> - [Juju | How-to guides](/t/5334)
+
+4. Once you're done, in a terminal, run:
+
+   a. (just the first time) `terraform init` to initialise your project;
+
+   b. `terraform plan` to stage the changes; and
+
+   c. `terraform apply` to apply the changes to your Juju deployment.
+
+[/tab]
+
+[tab version="python libjuju"]
+
+1. After installing `python-libjuju`, import it into your Python script as follows:
+
+`import juju`
+
+You can also import specific modules to use, depending on your use case:
+
+`from juju import model`
+
+or
+
+`from juju import controller`
+
+Examples of different use cases of this client can be found in the docs, as well as in the [examples directory in the repository](https://github.com/juju/python-libjuju/tree/master/examples), which can be run using ``tox``. For example, to run ``examples/connect_current_model.py``, use:
+
+```text
+ tox -e example -- examples/connect_current_model.py
+```
+
+Or you can run it directly with Python:
+
+```
+ $ python3 examples/connect_current_model.py
+```
+
+To experiment with the library in a REPL, launch the Python REPL with the asyncio module loaded, as follows:
+
+```text
+ $ python3 -m asyncio
+```
+
+and then, for example, to connect to the current model and fetch its status:
+
+```
+ >>> from juju.model import Model
+ >>> model = Model()
+ >>> await model.connect_current()
+ >>> status = await model.get_status()
+```
+
+Whichever method you choose, use the `python-libjuju` client reference and the Juju how-to guides to build up your deployment.
+
+> See more:
+> - [`python-libjuju`](https://pythonlibjuju.readthedocs.io/en/latest/)
+> - [Juju | How-to guides](/t/5334)
+
+[/tab]
+[/tabs]
+
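+For the `juju` client, a minimal first pass might look like the following. This is illustrative only -- the model and charm names here are examples, not part of the official reference:
+
+```text
+# Create a workspace (model) on the current controller:
+juju add-model demo
+
+# Deploy an example charm from Charmhub:
+juju deploy ubuntu
+
+# Watch the deployment settle:
+juju status --watch 5s
+```
+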

+## Back up the client

+
+
+[tabs]
+[tab version="juju"]
+
+[note type="information"]
+A backup of the client enables one to regain management control of one's controllers and associated cloud environments.
+[/note]
+
+**Create a backup of the `juju` client.** Making a copy of the client directory is sufficient for backing up the client. This is normally done with backup software that compresses the data into a single file (archive). On a Linux/Ubuntu system, the `tar` program is a common choice:
+
+``` text
+cd ~
+tar -cpzf juju-client-$(date "+%Y%m%d-%H%M%S").tar.gz .local/share/juju
+```
+
+[note]
+For Microsoft Windows, any native Windows backup tool will do.
+[/note]
+
+The above invocation embeds a timestamp in the generated archive's filename, which is useful for knowing **when** a backup was made. You may, of course, call it what you wish.
+
+The archive should normally be transferred to another system (or at the very least to a different physical drive) for safekeeping.
+
+[note type="caution"]
+Whoever has access to a client backup will have access to its associated environments. Appropriate steps should be taken to protect it (e.g. encryption; see the example after this section).
+[/note]
+
+**Restore the `juju` client from a backup.** To restore your client from a backup, extract the backup created earlier. E.g., on Ubuntu:
+
+[note type="caution"]
+This command will extract the contents of the archive and overwrite any existing files in the Juju directory. Make sure that this is what you want.
+[/note]
+
+``` text
+cd ~
+tar -xzf juju-client-yyyymmdd-hhmmss.tar.gz
+```
+
+[/tab]
+
+[tab version="terraform juju"]
+The `terraform juju` client does not support this.
+[/tab]
+
+[tab version="python libjuju"]
+The `python-libjuju` client does not support this.
+[/tab]
+[/tabs]
+
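+As a minimal illustration of the encryption suggested in the caution above -- any tool you trust will do, `gpg` is just one option, and the filename below is an example:
+
+```text
+# Encrypt the archive with a passphrase before copying it elsewhere:
+gpg --symmetric --cipher-algo AES256 juju-client-20240101-120000.tar.gz
+
+# Decrypt it again when you need to restore:
+gpg --output juju-client-20240101-120000.tar.gz --decrypt juju-client-20240101-120000.tar.gz.gpg
+```
+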

+## Upgrade the client

+
+> See also: [Upgrading things](/t/1199)
+
+[tabs]
+[tab version="juju" view="Linux"]
+**If you've installed via `snap`.**
+
+[note type="caution"]
+Ensure you've created a backup of your `~/.local/share/juju` directory before starting the upgrade process for the client.
+[/note]
+
+If the Juju client was installed via snap, the updates to the client should be handled automatically. Run `snap info juju` to view a list of releases and `juju version` to view the current release.
+
+If there has been a new release but the `juju` snap hasn't been refreshed, you can manually trigger this with `sudo snap refresh juju`. To refresh to a specific version, run the `refresh` command with the `--channel=` option, e.g.:
+
+```text
+sudo snap refresh juju --channel 3/stable
+```
+
+> See more: [Snap | Managing updates](https://snapcraft.io/docs/managing-updates), [Snap | Channels](https://snapcraft.io/docs/channels)
+[/tab]
+
+[tab version="juju" view="macOS"]
+[TO BE ADDED]
+[/tab]
+
+[tab version="juju" view="Windows"]
+[TO BE ADDED]
+[/tab]
+
+[tab version="terraform juju" view="Linux"]
+
+To upgrade the `terraform juju` client, in your Terraform plan update the version constraint, then run `terraform init` with the `--upgrade` flag (see the example after this section).
+
+> See more: [Terraform | Version constraints](https://developer.hashicorp.com/terraform/language/providers/requirements#version-constraints), [`terraform init --upgrade`](https://developer.hashicorp.com/terraform/cli/commands/init#upgrade-1)
+
+[/tab]
+
+[tab version="terraform juju" view="macOS"]
+[TO BE ADDED]
+[/tab]
+
+[tab version="terraform juju" view="Windows"]
+[TO BE ADDED]
+[/tab]
+
+[tab version="python libjuju" view="Linux"]
+[TO BE ADDED]
+[/tab]
+
+[tab version="python libjuju" view="macOS"]
+[TO BE ADDED]
+[/tab]
+
+[tab version="python libjuju" view="Windows"]
+[TO BE ADDED]
+[/tab]
+
+[/tabs]
+
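+For the `terraform juju` case, a short sketch -- the version number below is an example only; pick whichever provider release you actually want:
+
+```text
+# In your plan, bump the provider version constraint, e.g.
+#   version = "~> 0.12.0"
+# then re-initialise so Terraform downloads the newer provider:
+terraform init -upgrade
+```
+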

+## Uninstall the client

+ +[tabs] +[tab version="juju" view="Linux"] +**If you've installed `juju` via `snap`:** To uninstall, run: + +```text +sudo snap remove juju +``` +[/tab] + +[tab version="juju" view="macOS"] +[TO BE ADDED] +[/tab] + +[tab version="juju" view="Windows"] +[TO BE ADDED] +[/tab] + +[tab version="terraform juju" view="Linux"] +[TO BE ADDED] +[/tab] + +[tab version="terraform juju" view="macOS"] +[TO BE ADDED] +[/tab] + +[tab version="terraform juju" view="Windows"] +[TO BE ADDED] +[/tab] + +[tab version="python libjuju" view="Linux"] +[TO BE ADDED] +[/tab] + +[tab version="python libjuju" view="macOS"] +[TO BE ADDED] +[/tab] + +[tab version="python libjuju" view="Windows"] +[TO BE ADDED] +[/tab] + +[/tabs] + + +
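+In addition to the `sudo snap remove juju` step above, you may also want to delete the client's local data (controller registrations, accounts, cached SSH keys). This is optional and only safe if you no longer need that data -- consider taking a backup first, as described earlier:
+
+```text
+rm -r ~/.local/share/juju
+```
+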
+ +> **Contributors:** @cderici, @hmlanigan, @simonrichardson, @timclicks, @tmihoc + +------------------------- + +pedroleaoc | 2022-04-07 08:32:00 UTC | #2 + + + +------------------------- + +pedroleaoc | 2022-10-14 11:30:59 UTC | #3 + + + +------------------------- + diff --git a/tmp/t/10835.md b/tmp/t/10835.md new file mode 100644 index 000000000..a98a5d820 --- /dev/null +++ b/tmp/t/10835.md @@ -0,0 +1,22 @@ +tmihoc | 2024-06-06 12:22:52 UTC | #1 + +In the context of a charm or a bundle, just as in the context of a snap, **promotion** refers to the association of a [revision](/t/10837) to a higher-ranking [channel](/t/6562) risk level of the same track. + +For example, in the (partial) output of `juju info mongodb` below, revision `100` has been promoted from `3.6/edge` through `3.6/beta` and `3.6/candidate` all the way to `3.6/stable`. (The up arrow next to `3.6/beta` indicates that that channel has been closed and, if you try `juju deploy --channel 3.6/beta`, what you'll get is the next higher-ranking risk level of the same track, that is, `3.6/candidate`.) + +```text +channels: | + 5/stable: 117 2023-04-20 (117) 12MB amd64 ubuntu@22.04 + 5/candidate: 117 2023-04-20 (117) 12MB amd64 ubuntu@22.04 + 5/beta: ↑ + 5/edge: 118 2023-05-03 (118) 13MB amd64 ubuntu@22.04 + 3.6/stable: 100 2023-04-28 (100) 860kB amd64 ubuntu@20.04, ubuntu@18.04 + 3.6/candidate: 100 2023-04-13 (100) 860kB amd64 ubuntu@20.04, ubuntu@18.04 + 3.6/beta: ↑ + 3.6/edge: 100 2023-02-03 (100) 860kB amd64 ubuntu@20.04, ubuntu@18.04 +``` + +Charm promotion is done at release time by specifying the revision number and the channel `[track/]risk` level that you want to associate it with (e.g., `charmcraft release --revision 118 --channel=5/candidate`). + +------------------------- + diff --git a/tmp/t/10837.md b/tmp/t/10837.md new file mode 100644 index 000000000..69fac2935 --- /dev/null +++ b/tmp/t/10837.md @@ -0,0 +1,33 @@ +tmihoc | 2023-06-06 12:07:25 UTC | #1 + +> See also: [How to register, upload, publish, and release a charm](/t/4462) + +In the context of a charm, a **revision** is a number that uniquely identifies the published charm. + +The revision increases with every new version of the charm being published. + +[note type=caution] +This can lead to situations of mismatch between the semantic version of a charm and its revision number. That is, whether the changes you make to the charm are for a semantically newer or older version, the revision number always goes up. +[/note] + +Charm revisions are not published for anybody else until you release them into a [channel](/t/6562). Once you release them, though, users will be able to see them at `charmhub.io/` or access them via `juju info ` or `juju deploy `. + + + + +------------------------- + diff --git a/tmp/t/1084.md b/tmp/t/1084.md new file mode 100644 index 000000000..d1a05bdf9 --- /dev/null +++ b/tmp/t/1084.md @@ -0,0 +1,139 @@ +system | 2024-07-04 14:57:32 UTC | #1 + +> [List of supported clouds](/t/6665) > Amazon EC2 + + + + +This document describes details specific to using your existing Amazon EC2 cloud with Juju. + +> See more: [Amazon EC2](https://docs.aws.amazon.com/ec2/?icmpid=docs_homepage_featuredsvcs) + +When using the Amazon EC2 cloud with Juju, it is important to keep in mind that it is a (1) [machine cloud](/t/5454#heading--machine-clouds-vs--kubernetes-clouds) and (2) [not some other cloud](/t/5454#heading--cloud-foo-vs--cloud-bar). 
+ +> See more: [Cloud differences in Juju](/t/5454#heading--cloud-differences) + +As the differences related to (1) are already documented generically in our [Tutorial](/t/6559), [How-to guides](/t/5334), and [Reference](/t/5348) docs, here we record just those that follow from (2). + +|Juju points of variation|Notes for the Amazon EC2 cloud| +|---|---| +|**setup (chronological order):**|| +|[CLOUD](/t/5454)| | +|requirements:| TBA| +|[definition:](/t/5454#heading--cloud-definition)|:information_source: Juju automatically defines a cloud of this type.| +|- name:|`aws` or user-defined| +|- type:|`ec2`| +|- authentication types:|`[access-key, secret-key]`| +|- regions:|[TO BE ADDED]| +|- cloud-specific model configuration keys:|**`vpc-id`** (string)
Sets a specific AWS VPC ID. Optional. When not specified, Juju requires a default VPC or EC2-Classic features to be available for the account/region. :warning: If your AWS account was created before 04-12-2013: Your account does not have a default VPC. As a result, Juju may select a much larger instance type than what is required. To remedy this, create a [default VPC](https://docs.aws.amazon.com/vpc/latest/userguide/default-vpc.html).

**`vpc-id-force`** (boolean)
Forces Juju to use the AWS VPC ID specified with `vpc-id`, when it fails the minimum validation criteria. :warning: Not accepted without `vpc-id`. | +|[CREDENTIAL](/t/6006)|| +|definition: | `auth-type`: `access-key`, which requires you to provide your access key and your secret key. See more: [Amazon \| AWS security credentials](https://docs.aws.amazon.com/general/latest/gr/aws-sec-cred-types.html#access-keys-and-secret-access-keys)

**If you want to use a YAML file:**

`credentials:`
 `aws:`
  `:`
   `auth-type: access-key`
   `access-key: `
   `secret-key: `

**If you want to use environment variables:**

`AWS_ACCESS_KEY_ID=""`

`AWS_SECRET_ACCESS_KEY=""`|
+|[CONTROLLER](/t/5455)||
+|notes on bootstrap: | You can authenticate the controller with the cloud using instance profiles: Use the cloud CLI to create an instance profile, then pass the instance profile to the controller during bootstrap via the `instance-role` constraint: `juju bootstrap --bootstrap-constraints="instance-role="`. See more: `instance-role` below or [Discourse \| Using AWS instance profiles with Juju](/t/5185).|
+|||
+|||
+|**other (alphabetical order):**||
+|[CONSTRAINT](/t/6184)||
+|conflicting:|`[instance-type]` vs. `[cores, cpu-power, mem]`|
+|supported?||
+|- [`allocate-public-ip`](/t/6184#heading--allocate-public-ip)|:white_check_mark:|
+|- [`arch`](/t/6184#heading--arch)|:white_check_mark:|
+|- [`container`](/t/6184#heading--container)|:white_check_mark:|
+|- [`cores`](/t/6184#heading--cores)|:white_check_mark:|
+|- [`cpu-power`](/t/6184#heading--cpu-power)|:white_check_mark:|
+|- [`image-id`](/t/6184#heading--image-id)|:white_check_mark: (Starting with Juju 3.3)<br>
Type: String.
Valid values: An AMI.| +|- [`instance-role`](/t/6184#heading--instance-role)|:white_check_mark:
Value: `auto` or an [instance profile](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_switch-role-ec2_instance-profiles.html) name. | +|- [`instance-type`](/t/6184#heading--instance-type)|:white_check_mark:
Valid values: See cloud provider.
Default value: `m3.medium`.| +|- [`mem`](/t/6184#heading--mem)|:white_check_mark:| +|- [`root-disk`](/t/6184#heading--root-disk)|:white_check_mark:| +|- [`root-disk-source`](/t/6184#heading--root-disk-source)|:white_check_mark:| +|- [`spaces`](/t/6184#heading--spaces)|:white_check_mark:| +|- [`tags`](/t/6184#heading--tags)|❌| +|- [`virt-type`](/t/6184#heading--virt-type)|❌| +|- [`zones`](/t/6184#heading--zones)|:white_check_mark:| +|[PLACEMENT DIRECTIVE](/t/6187)|| +|[``](/t/6187#heading--machine)|:white_check_mark:| +|[`subnet=...`](/t/6187#heading--subnet)|:white_check_mark:| +|[`system-id=...`](/t/6187#heading--system-id)|:negative_squared_cross_mark:| +|[`zone=...`](/t/6187#heading--zone)|:white_check_mark:
If the query looks like a CIDR, then this will match subnets with the same CIDR. If it follows the syntax of a "subnet-XXXX", this will match the Subnet ID. Everything else is just matched as a Name.| +|[RESOURCE (cloud)](/t/1102)

Consistent naming, tagging, and the ability to add user-controlled tags to created instances.|:white_check_mark:| + + + + + +------------------------- + +tvansteenburgh | 2020-04-08 21:09:41 UTC | #2 + +[quote="system, post:1, topic:1084"] +Here we’ve chosen `AdministratorAccess` , which is the most privileged policy available: +[/quote] + +What is the least privileged policy that would still give Juju the permissions it needs to perform all of its functions? I want to create an IAM account for Juju to use, and I don't want to give it AdministratorAccess. + +------------------------- + +timClicks | 2020-04-08 21:15:04 UTC | #3 + +At this stage, I'm afraid that we don't have a definitive answer for this. AdministratorAccess is far too broad, you're correct. + +One issue to reckon with is the `juju trust` model. We don't know in advance what charms will want to do with the provider's API. + +------------------------- + +timClicks | 2020-04-26 22:14:28 UTC | #4 + +Update: AmazonEC2FullAccess is sufficient for deploying Charmed Kubernetes, so I've updated the text to indicate that it's the recommended policy. + +------------------------- + +emcp | 2021-12-07 19:44:58 UTC | #5 + +any guidance on how to use Juju in conjunction with aws reserved instances? + +We've setup and run in a VPC , used spaces... this is the final question about Juju we've had though .. if we're going to use it in AWS to manage a webapp + +Thanks + +Edit: seems after reading some similar questions around other tools.. I merely need to call out for a certain instance type.. and if none is taken yet.. whatever instance I create with Juju will count towards my quota.. is this correct ? I don't do anything in juju other than constraint on instance type? + +------------------------- + +pmatulis | 2021-12-08 00:32:38 UTC | #6 + +As long as your VPC is set up to provision instances and you provide the correct credentials to Juju then Juju will spawn those instances upon request. I don't think you need to specify any special constraint. + +------------------------- + +ca-scribner | 2022-01-13 19:30:30 UTC | #7 + +Not sure if something has changed, but I don't think AmazonEC2FullAccess is sufficient. I just tried to install without any changes to the bundle, etc, and cannot get a charmed kubernetes deployed without a permission error for `CreateRole`. AdministratorAccess of course still works + +------------------------- + +emcp | 2022-01-16 22:52:41 UTC | #8 + +on non-k8s I've only used AmazonEC2FullAccess.. would be curious what more is needed for the k8s charms + +------------------------- + +pedroleaoc | 2022-04-07 08:35:20 UTC | #9 + + + +------------------------- + +pedroleaoc | 2022-10-14 11:31:32 UTC | #10 + + + +------------------------- + diff --git a/tmp/t/1086.md b/tmp/t/1086.md new file mode 100644 index 000000000..98a08d1a3 --- /dev/null +++ b/tmp/t/1086.md @@ -0,0 +1,484 @@ +system | 2024-10-17 14:51:36 UTC | #1 + + + +> [List of supported clouds](/t/6665) > Microsoft Azure + +This document describes details specific to using your existing Microsoft Azure cloud with Juju. + +> See more: [Microsoft Azure](https://azure.microsoft.com/en-us) + +When using the Microsoft Azure cloud with Juju, it is important to keep in mind that it is a (1) [machine cloud](/t/5454#heading--machine-clouds-vs--kubernetes-clouds) and (2) [not some other cloud](/t/5454#heading--cloud-foo-vs--cloud-bar). 
+ +> See more: [Cloud differences in Juju](/t/5454#heading--cloud-differences) + +As the differences related to (1) are already documented generically in our [Tutorial](/t/6559), [How-to guides](/t/5334), and [Reference](/t/5348) docs, here we record just those that follow from (2). + + + +## Requirements + +**If you're in a locked-down environment:**
Permissions:

- `Microsoft.Compute/skus (read)`

- `Microsoft.Resources/subscriptions/resourceGroups (read, write, delete)`

- `Microsoft.Resources/deployments/ (write/read/delete/cancel/validate)`

- `Microsoft.Network/networkSecurityGroups (write, read, delete, other - join)`

- `Microsoft.Network/virtualNetworks/ (write, read, delete)`

- `Microsoft.Compute/virtualMachineScaleSets/ (write, read, delete, other - start action, other - deallocate action, other - restart action, other powerOff action)`

- `Microsoft.Network/virtualNetworks/subnets/ (read, write, delete, other - join)`

- `Microsoft.Compute/availabilitySets (write, read, delete)`

- `Microsoft.Network/publicIPAddresses (write, read, delete, other - join - optional for public services)`

- `Microsoft.Network/networkInterfaces (write, read, delete, other - join)`

- `Microsoft.Compute/virtualMachines (write, read, delete, other - start, power off, restart, deallocate)`

- `Microsoft.Compute/disks (write, read, delete)` + + +## Notes on `juju add-cloud` + +Type in Juju: `azure`. + +Name in Juju: `azure`. + + +## Notes on `juju add-credential` +[note type=caution status="If your credential stops working"] +Credentials for the `azure` cloud have been reported to occasionally stop working over time. If this happens, try `juju update-credential` (passing as an argument the same credential) or `juju add-credential` (passing as an argument a new credential) + `juju default-credential`. +[/note] + +[note type=positive status="For some example workflows"] +See Appendix: Example authentication workflows. + +[/note] + +### Authentication types + +#### `managed-identity` (preferred) +> *Requirements:* +> - Juju 3.6+. +> - A managed identity. See more: Appendix: How to create a managed identity. +> - The managed identity and the Juju resources must be created on the same subscription. +> - The `add-credential` steps must be run from either [the Azure Cloud Shell^](https://shell.azure.com/) or a jump host running in Azure in order to allow the cloud metadata endpoint to be reached. + +This is the recommended way to authenticate with Azure as this way you are never touching your cloud credentials directly. + +> See more: [Appendix: Example authentication workflows > Workflow 1](/t/1086/1#appendix-example-authentication-workflows-15) + + +#### `interactive` = "service-principal-secret-via-browser" + +This is the recommended way to authenticate with Azure if you want to use a service principal secret. + +When you add the credential in this way and provide the subscription ID, Juju will open up a browser and you’ll be prompted to log in to Azure. + +:information_source: If you are using the unconfined `juju` snap `/snap/juju/current/bin/juju add-credential azure` and have the `azure` CLI and you are logged in and you want to use the currently logged in user: You may leave the subscription ID empty -- Juju will fill it in for you. + +:warning: If you decide to fill in the optional fields as well: Make sure to set them to unique values (i.e., the `application-name` and `role-definition-name` fields cannot be the same). + +:white_check_mark: Starting with Juju 3.6, you can also combine this authentication type with a managed identity by bootstrapping with the `instance-role` constraint. + +> See more: [Appendix: Example authentication workflows > Workflows 2-3](/t/1086/1#appendix-example-authentication-workflows-15) + + +#### `service-principal-secret` (dispreferred) + +Starting with Juju 3.6, you can also combine this with a managed identity by bootstrapping with the `instance-role` constraint. + +> See more: [Appendix: Example authentication workflows > Workflows 2-3](/t/1086/1#appendix-example-authentication-workflows-15) + + +## Notes on `juju bootstrap` + +If during `juju add-credential` you chose `interactive` (= "service-principal-secret-via-browser") or `service-principal-secret`: You can still combine this with a managed identity by running `juju bootstrap` with `--constraints instance-role=...`. + +> See more: [Appendix: Example authentication workflows > Workflow 2](/t/1086/1#appendix-example-authentication-workflows-15), [Supported constraints](/t/1086#supported-constraints-13) + + +## Cloud-specific model configuration keys + + +### `load-balancer-sku-name` +Mirrors the LoadBalancerSkuName type in the Azure SDK. 
+ +| | | +|-|-| +| type | string | +| default value | "Standard" | +| immutable | false | +| mandatory | true | + +### `resource-group-name` +If set, use the specified resource group for all model artefacts instead of creating one based on the model UUID. + +| | | +|-|-| +| type | string | +| default value | schema.omit{} | +| immutable | true | +| mandatory | false | + +### `network` +If set, use the specified virtual network for all model machines instead of creating one. + +| | | +|-|-| +| type | string | +| default value | schema.omit{} | +| immutable | true | +| mandatory | false | + +## Supported constraints + +||| +|-|-| +|[CONSTRAINT](/t/6184)|| +|conflicting:|`[instance-type]` vs `[arch, cores, mem]`| +|supported?|| +|- [`allocate-public-ip`](/t/6184#heading--allocate-public-ip)|:white_check_mark:| +|- [`arch`](/t/6184#heading--arch)|:white_check_mark:
Valid values: `amd64`.| +|- [`container`](/t/6184#heading--container)|:white_check_mark:| +|- [`cores`](/t/6184#heading--cores)|:white_check_mark:| +|- [`cpu-power`](/t/6184#heading--cpu-power)|❌| +|- [`image-id`](/t/6184#heading--image-id)|❌| +|- [`instance-role`](/t/6184#heading--instance-role)|*Starting with Juju 3.6:* :white_check_mark:

Valid values: `auto` (Juju creates a managed identity for you) or a [managed identity^](https://www.google.com/url?q=https://learn.microsoft.com/en-us/entra/identity/managed-identities-azure-resources/overview&sa=D&source=docs&ust=1720105912478784&usg=AOvVaw2eioSYvtSn1pn-BWstI6AU) name in one of the following formats:

- **If the managed identity is created in a resource group on the same subscription:**
`/`

- **If the managed identity is created in a resource group on a different subscription:**
`//`

- **If the managed identity is created in a resource group and that resource group is used to host the controller model:**
``
e.g., `juju bootstrap azure --config resource-group-name= --constraints instance-role=`

Note: If you want your controller to be in the same resource group as the one used for the managed identity, during bootstrap also specify `--config resource-group-name=`.

> See more: Appendix: Supported authentication types: Example workflows.| +|- [`instance-type`](/t/6184#heading--instance-type)|:white_check_mark:
Valid values: See cloud provider.| +|- [`mem`](/t/6184#heading--mem)|:white_check_mark:| +|- [`root-disk`](/t/6184#heading--root-disk)|:white_check_mark:| +|- [`root-disk-source`](/t/6184#heading--root-disk-source)|:white_check_mark:
Represents the juju [`storage pool`](/t/7183) for the root disk. By specifying a storage pool, the root disk can be configured to use encryption.| +|- [`spaces`](/t/6184#heading--spaces)|❌| +|- [`tags`](/t/6184#heading--tags)|❌| +|- [`virt-type`](/t/6184#heading--virt-type)|❌| +|- [`zones`](/t/6184#heading--zones)|:white_check_mark:| + +## Supported placement directives + +||| +|-|-| +|[PLACEMENT DIRECTIVE](/t/6187)|| +|[``](/t/6187#heading--machine)|TBA| +|[`subnet=...`](/t/6187#heading--subnet)|:white_check_mark:| +|[`system-id=...`](/t/6187#heading--system-id)|❌| +|[`zone=...`](/t/6187#heading--zone)|TBA| + + +## Appendix: Example authentication workflows + +### Worflow 1 -- Managed identity only (recommended) +> *Requirements:* +> - Juju 3.6+. +> - A managed identity. See more: Appendix: How to create a managed identity. +> - The managed identity and the Juju resources must be created on the same subscription. +> - The `add-credential` steps must be run from either [the Azure Cloud Shell^](https://shell.azure.com/) or a jump host running in Azure in order to allow the cloud metadata endpoint to be reached. + +1. Create a managed identity. See more: Appendix: How to create a managed identity. +1. Run `juju add-credential azure`; choose `managed-identity`; supply the requested information (the“managed-identity-path” must be of the form `/`). +1. Bootstrap as usual. + +[note type=positive] +**Did you know?** With this workflow where you provide the managed identity during `add-credential` you avoid the need for either your Juju client or your Juju controller to store your credential secrets. Relatedly, the user running `add-credential` / `bootstrap` doesn't need to have any credential secrets supplied to them. +[/note] + +### Workflow 2 -- Service principal secret + managed identity +> *Requirements:* +> - Juju 3.6+. +> - A managed identity. See more: Appendix: How to create a managed identity. + +1. Create a managed identity. +1. Add a service-principal-secret: + - `interactive` = "service-principal-via-browser" (recommended): + - If you have the `azure` CLI and you are logged in and you want to use the currently logged in user: Run `/snap/juju/current/bin/juju add-credential azure`; choose `interactive`, then leave the subscription ID field empty -- Juju will fill this in for you. + - Otherwise: Run `juju add-credential azure`, choose `interactive`, then provide the subscription ID -- Juju will open up a browser and you’ll be prompted to log in to Azure. + - `service-principal-secret`: Run `juju add-credential azure`, then choose `service-principal-secret` and supply all the requested information. +1. During bootstrap, provide the managed identity to the controller by using the `instance-role` constraint. + +[note type=positive] +**Did you know?** With this workflow where you provide the managed identity during `bootstrap` you avoid the need for your Juju controller to store your credential secrets. Relatedly, the user running / `bootstrap` doesn't need to have any credential secrets supplied to them. +[/note] + +### Workflow 3 -- Service principal secret only (dispreferred) + +1. Add a service-principal-secret: + - `interactive` = "service-principal-via-browser" (recommended): + - If you have the `azure` CLI and you are logged in and you want to use the currently logged in user: Run `/snap/juju/current/bin/juju add-credential azure`; choose `interactive`, then leave the subscription ID field empty -- Juju will fill this in for you. 
+ - Otherwise: Run `juju add-credential azure`, choose `interactive`, then provide the subscription ID -- Juju will open up a browser and you’ll be prompted to log in to Azure. + - `service-principal-secret`: Run `juju add-credential azure`, then choose `service-principal-secret` and supply all the requested information. +1. Bootstrap as usual. + + +## Appendix: How to create a managed identity + +[note type=caution] +This is just an example. For more information please see the upstream cloud documentation. See more: [Microsoft Azure | Managed identities](https://learn.microsoft.com/en-us/entra/identity/managed-identities-azure-resources/overview). +[/note] + +To create a managed identity for Juju to use, you will need to use the Azure CLI and be logged in to your account. This is a set up step that can be done ahead of time by an administrator. + +The 4 values below need to be filled in according to your requirements. + +```text +$ export group=someresourcegroup +$ export location=someregion +$ export role=myrolename +$ export identityname=myidentity +$ export subscription=mysubscription_id +``` + +The role definition and role assignment can be scoped to either the subscription or a particular resource group. If scoped to a resource group, this group needs to be provided to Juju when bootstrapping so that the controller resources are also created in that group. + +For a subscription scoped managed identity: + +```text +$ az group create --name "${group}" --location "${location}" +$ az identity create --resource-group "${group}" --name "${identityname}" +$ mid=$(az identity show --resource-group "${group}" --name "${identityname}" --query principalId --output tsv) +$ az role definition create --role-definition "{ + \"Name\": \"${role}\", + \"Description\": \"Role definition for a Juju controller\", + \"Actions\": [ + \"Microsoft.Compute/*\", + \"Microsoft.KeyVault/*\", + \"Microsoft.Network/*\", + \"Microsoft.Resources/*\", + \"Microsoft.Storage/*\", + \"Microsoft.ManagedIdentity/userAssignedIdentities/*\" + ], + \"AssignableScopes\": [ + \"/subscriptions/${subscription}\" + ] + }" +$ az role assignment create --assignee-object-id "${mid}" --assignee-principal-type "ServicePrincipal" --role "${role}" --scope "/subscriptions/${subscription}" +``` + +A resource scoped managed identity is similar except: +- the role definition assignable scopes becomes +``` + \"AssignableScopes\": [ + \"/subscriptions/${subscription}/resourcegroups/${group}\" + ] +``` +- the role assignment scope becomes + +`--scope "/subscriptions/${subscription}/resourcegroups/${group}"` + + + + + + + +
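+As a rough illustration of how the pieces above fit together (a sketch only, reusing the shell variables from the script above and assuming the resource-group-scoped variant, with the same resource group hosting the controller model):
+
+```text
+juju bootstrap azure \
+    --config resource-group-name="${group}" \
+    --constraints "instance-role=${identityname}"
+```
+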
+ +> **Contributors:** @kylerhornor , @taurus , @tmihoc, @wallyworld + +------------------------- + +evilnick | 2020-03-12 15:38:35 UTC | #2 + +@timClicks I added a note here about the add-credential operation re-requesting authentication, which seems to happen sometimes. The rest of the instructions seem good as of today :slight_smile: + +------------------------- + +timClicks | 2020-03-12 21:38:51 UTC | #3 + +Thanks for taking the time to make the change :) + +------------------------- + +robgibbon | 2022-01-23 13:19:08 UTC | #4 + +Suggest removing the email address in the cloud credentials initialisation config example. + +------------------------- + +pedroleaoc | 2022-04-07 08:33:16 UTC | #5 + + + +------------------------- + +pedroleaoc | 2022-10-14 11:31:31 UTC | #6 + + + +------------------------- + +kylerhornor | 2024-02-01 16:01:45 UTC | #7 + +``` +- allocate-public-id +``` +is a typo. Should be `ip` + +------------------------- + +tmihoc | 2024-02-01 16:07:31 UTC | #8 + +@kylerhornor Fixed, thanks! (PS Added you to the Contributors list on the bottom of the doc.) + +------------------------- + +alitvinov | 2024-08-16 10:57:45 UTC | #9 + +Few comments on this doc: + +>notes on bootstrap: +(1) (recommended) (1a) Create the managed identity. + +We are saying that recommended way is the one that currently exists only in juju 3.6-beta2. + Should we recommend something that exists in stable juju as most of the users would use a stable version? +Also, there are no notes on bootstrap of the ‘service-principal-secret’ way at all, which is the main option for stable juju now. +Would be good to add those. + +> notes on bootstrap: +> +> (1) (recommended) +> (1a) Create the managed identity. ... +> +> (2) (2a) Create the managed identity yourself. .. +> +> (3) (3a) Create a credential type service-principal-secret ... + +This section is the same is in the [other doc](https://discourse.charmhub.io/t/new-3-6-feature-support-for-azure-managed-identities/14757) by wallyfworld, however sections are rephrased and options 2 and 3 are switched places comparing to wallyfworld’s doc. +This creates confusion. +I suggest having the same names for options and having them in the same order. + +------------------------- + +tmihoc | 2024-08-16 11:50:15 UTC | #10 + +I'll add more information on the service-principal-secret. + +About the recommendation to use an option that is only available in beta: Fair point. I'll keep the recommendation but caveat that it requires at least 3.6 beta. + +About the other post: It was not intended as a doc (i.e., permanent content) -- just an announcement. But I can see how having two sources can be confusing. I'll work with you to update this doc and then make the announcement merely point to the doc (instead of trying to give the instructions again). + +Thanks! + +------------------------- + +taurus | 2024-10-15 10:44:03 UTC | #11 + +Hi @tmihoc , + +Thank you for describing Azure here. I am trying to follow it using interactive/recommended mode: +```shell +... Run /snap/juju/current/bin/juju add-credential azure; +choose interactive, +then leave the subscription ID field empty – Juju will fill this in for you. +``` + +but I cannot leave subscription field empty, Juju enforce me to fill `subscription-id`: +```shell +/snap/juju/current/bin/juju add-credential azure +... 
+Auth Types + interactive + service-principal-secret + managed-identity + +Select auth type [interactive]: interactive + +Enter subscription-id: + +Enter subscription-id: + +Enter subscription-id: + +Enter subscription-id: +``` +The juju version is latest from edge: +``` +Name Version Rev Tracking +juju 3.6-rc1-079cfe6 28862 3.6/edge +``` + +Any hints? Thank you! + +------------------------- + +tmihoc | 2024-10-15 12:06:54 UTC | #12 + +@taurus Are all the conditions met: + +> If you have the `azure` CLI and you are logged in and you want to use the currently logged in user: + +? + +------------------------- + +taurus | 2024-10-15 21:41:10 UTC | #13 + +OK, I am logged in now and I see it optional `Enter subscription-id (optional)`. Nice! + +After trying all the possible options and combination I cannot go through the following error right after the successful auth on Azure WEB `devicelogin` page: +```shell +To sign in, use a web browser to open the page https://microsoft.com/devicelogin and enter the code DELETED to authenticate. +ERROR finalizing credential: creating role definition: failed to create role definition: PUT https://management.azure.com/subscriptions/DELETED/providers/Microsoft.Authorization/roleDefinitions/DELETED +-------------------------------------------------------------------------------- +RESPONSE 409: 409 Conflict +ERROR CODE: RoleDefinitionWithSameNameExists +-------------------------------------------------------------------------------- +{ + "error": { + "code": "RoleDefinitionWithSameNameExists", + "message": "A custom role with the same name already exists in this directory. Use a different name." + } +} +-------------------------------------------------------------------------------- +``` + +Any hints here? Thank you! + +------------------------- + +wallyworld | 2024-10-16 09:14:05 UTC | #14 + +By default, Juju creates a role definition called `Juju Application Role Definition`. I assume you are not entering something different during add-credential workflow. + +It has code which queries existing role definitions and if it finds one with this name, it will not try and create a new one. + +We've not seen any issues with this workflow in our testing. However, it seems there's a problem that needs to be diagnosed. + +Can you see if the role definition exists? + +`az role definition list --name "Juju Application Role Definition"` + +And if it does, try deleting it? You'll need to delete any role assignments first. + +``` +az role assignment delete --role "/subscriptions/xxxxx/providers/Microsoft.Authorization/roleDefinitions/yyy" +az role definition delete --name "Juju Application Role Definition" --scope "/subscriptions/xxx" +``` + +where the `--role` arg is the role id from the role definition. + +If this works,we need to figure out why the query to get an existing role is failing. +``` +roleDefinitionClient := clientFactory.NewRoleDefinitionsClient() +pager := roleDefinitionClient.NewListPager(roleScope, &armauthorization.RoleDefinitionsClientListOptions{ + Filter: to.Ptr(fmt.Sprintf("roleName eq '%s'", roleName)), +}) +... +``` + +------------------------- + +taurus | 2024-10-17 13:32:35 UTC | #15 + +TL;DR: successfully boostrapped Juju 3.6-rc1 on Azure, +steps-to-reproduce: https://charmhub.io/postgresql/docs/h-deploy-azure + +The key problem was in (optional) fields. Hint: always fill them with unique values. 
:-) + +------------------------- + +tmihoc | 2024-10-17 14:52:04 UTC | #16 + +Updated the doc to reflect this (and also added you to the list of contributors on the bottom of the doc -- thanks again!). + +------------------------- + +wallyworld | 2024-10-18 04:30:51 UTC | #17 + +To follow up, the fact that a manually entered role name was needed is a bug + +https://bugs.launchpad.net/juju/+bug/2084858 + +A fix has already been committed. +The issue was the query to look for existing roles was not working as expected. + +We expect this fix will make it to 3.6.1. When adding an Azure credential, you should be able to accept all the defaults for app name, role etc unless you have specific requirements. + +------------------------- + diff --git a/tmp/t/1087.md b/tmp/t/1087.md new file mode 100644 index 000000000..919360b89 --- /dev/null +++ b/tmp/t/1087.md @@ -0,0 +1,833 @@ +system | 2024-10-29 14:45:27 UTC | #1 + +[note type=positive] +This is the documentation for the latest Juju version, with in-line notes about version differences. + +To find out what's new, see [Roadmap & Releases](/t/5064). + +To upgrade, see [How to upgrade your deployment](/t/7530). +[/note] + +Welcome to Juju, your entrypoint into the Juju universe! + +![JujuLandscape|690x285](upload://kbm3OgYEv554GSK2oXJ8HujmwCU.jpeg) + + + +Juju is an open source orchestration engine for software operators that enables the deployment, integration and lifecycle management of applications at any scale, on any infrastructure, using special software operators called 'charms'. + +Juju provides a model-driven way to install, provision, maintain, update, upgrade, and integrate applications on and across Kubernetes containers, Linux containers, virtual machines, and bare metal machines, on public or private cloud. + +As such, Juju makes it simple, intuitive, and efficient to manage the full lifecycle of complex applications in hybrid cloud. + + + + +For system operators and DevOps who manage applications in the cloud, Juju simplifies code; for CIOs, it helps align code with business decisions. + + + +> For a collection of existing charms, see [Charmhub](https://charmhub.io/). To build your own charm, see the [Charm SDK docs](https://juju.is/docs/sdk). + + +----------------------------- + +## In this documentation + +| | | +|---------------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------| +| [Tutorial](/t/6559)
Get started - a hands-on introduction to Juju for new users
| [How-to guides](/t/5334)
Step-by-step guides covering key operations and common tasks | +| [Explanation](/t/5358)
Discussion and clarification of key topics | [Reference](/t/5348)
Technical information - specifications, APIs, architecture | + + +----------- + +## Project and community + +Juju is an open source project that warmly welcomes community projects, contributions, suggestions, fixes and constructive feedback. + +- Learn about the [Roadmap & Releases](/t/5064) +- Read our [Code of Conduct](https://ubuntu.com/community/code-of-conduct) +- Join our [Matrix chat](https://matrix.to/#/#charmhub-juju:ubuntu.com) +- Join the [Discourse forum](https://discourse.charmhub.io/t/welcome-to-the-charmed-operator-community/8) to talk about [Juju](https://discourse.charmhub.io/tags/c/juju/6/community-workshop), [charms](https://discourse.charmhub.io/c/charm/41), [docs](https://discourse.charmhub.io/c/doc/22), or [to meet the community](https://discourse.charmhub.io/tag/community-workshop) +- Report a bug on [Launchpad](https://bugs.launchpad.net/juju) (for code) or [GitHub](https://github.com/juju/docs/issues) (for docs) +- Contribute to the documentation on [Discourse](https://discourse.charmhub.io/t/documentation-guidelines-for-contributors/1245) +- Contribute to the code on [GitHub](https://github.com/juju/juju/blob/develop/CONTRIBUTING.md) +- Visit the [Juju careers page](https://juju.is/careers) + + +## Navigation + +[details=Navigation] +| Level | Path | Navlink | +|-------|-----------------------------------------|------------------------------------------------------| +| 1 | | [Juju documentation](/t/1087) | +| 1 | tutorial | [Get started with Juju](/t/6559) | +| 1 | how-to | [How-to guides](/t/how-to-guides/5334) | +| 2 | | Manage your deployment | +| 3 | set-up--tear-down-your-test-environment | [Set up / Tear down your test environment](/t/13403) | +| 3 | harden-your-deployment | [Harden your deployment](/t/12248) | +| 3 | upgrade-your-juju-deployment | [Upgrade your deployment](/t/7530) | +| 3 | troubleshoot-your-deployment | [Troubleshoot your deployment](/t/1187) | +| 4 | debug-bootstrapmachine-failures | [Debug bootstrap/machine failures](/t/6835) | +| 3 | take-your-deployment-offline | [Take your deployment offline](/t/14367) | +| 2 | install-and-manage-the-client | [Install and manage the client](/t/1083) | +| 2 | manage-the-juju-dashboard | [Manage the dashboard](/t/5896) | +| 2 | manage-clouds | [Manage clouds](/t/1100) | +| 2 | manage-credentials | [Manage credentials](/t/1112) | +| 2 | manage-controllers | [Manage controllers](/t/1111) | +| 2 | manage-models | [Manage models](/t/1155) | +| 2 | manage-applications | [Manage applications](/t/5476) | +| 2 | manage-relations | [Manage relations](/t/1073) | +| 2 | manage-offers | [Manage offers](/t/1150) | +| 2 | manage-charms-or-bundles | [Manage charms or bundles](/t/11351) | +| 2 | manage-charm-resources | [Manage charm resources](/t/11313) | +| 2 | manage-machines | [Manage machines](/t/5886) | +| 2 | manage-storage | [Manage storage](/t/5892) | +| 2 | manage-storage-pools | [Manage storage pools](/t/7184) | +| 2 | manage-subnets | [Manage subnets](/t/6663) | +| 2 | manage-spaces | [Manage spaces](/t/6664) | +| 2 | manage-logs | [Manage agent logs](/t/9151) | +| 2 | manage-units | [Manage units](/t/5891) | +| 2 | manage-actions | [Manage actions](/t/1033) | +| 2 | manage-ssh-keys | [Manage SSH keys](/t/7219) | +| 2 | manage-users | [Manage users](/t/1156) | +| 2 | manage-secrets | [Manage secrets](/t/7287) | +| 2 | manage-secret-backends | [Manage secret backends](/t/8701) | +| 2 | manage-metadata | [Manage metadata](/t/13276) | +| 2 | manage-plugins | [Manage plugins](/t/2203) | +| 2 | unsorted | 
Unsorted | +| 3 | define-instance-tags | [Define instance tags in a cloud](/t/1102) | +| 3 | fan-container-networking | [Fan container networking](/t/1065) | +| 3 | supported-features | [Supported features](/t/supported-features/5451) | +| 1 | reference | [Reference](/t/reference/5348) | +| 2 | action | [Action](/t/6208) | +| 2 | agent | [Agent](/t/5466) | +| 3 | commands-available-on-a-juju-machine | [Commands available on a Juju machine](/t/2999) | +| 4 | agent-introspection | [Agent introspection](/t/117) | +| 5 | agent-introspection-juju-engine-report | [juju_engine_report](/t/146) | +| 5 | agent-introspection-juju-goroutines | [juju_goroutines](/t/118) | +| 5 | agent-introspection-juju-heap-profile | [juju_heap_profile](/t/6640) | +| 5 | agent-introspection-juju-machine-lock | [juju_machine_lock](/t/116) | +| 5 | agent-introspection-juju-metrics | [juju_metrics](/t/6641) | +| 5 | agent-introspection-juju-start-unit | [juju_start_unit](/t/5667) | +| 5 | agent-introspection-juju-stop-unit | [juju_stop_unit](/t/5668) | +| 5 | agent-introspection-juju-unit-status | [juju_unit_status](/t/5666) | +| 2 | application | [Application](/t/5471) | +| 2 | base | [Base](/t/8819) | +| 2 | binding | [Binding](/t/6488) | +| 2 | bootstrapping | [Bootstrapping](/t/6209) | +| 2 | bundle | [Bundle](/t/1158) | +| 2 | channel | [Channel](/t/6562) | +| 2 | charmed-operator | [Charm](/t/5457) | +| 3 | charm-environment-variables | [Charm environment variables](/t/6450) | +| 2 | client | [Client](/t/13146) | +| 2 | cloud | [Cloud](/t/5454) | +| 3 | juju-supported-clouds | [List of supported clouds](/t/6665) | +| 4 | amazon-ec2 | [Amazon AWS](/t/1084) | +| 4 | amazon-eks | [Amazon EKS](/t/3352) | +| 4 | equinix-metal | [Equinix Metal](/t/4988) | +| 4 | google-gce | [Google GCE](/t/1088) | +| 4 | google-gke | [Google GKE](/t/3341) | +| 4 | lxd | [LXD](/t/1093) | +| 4 | maas | [MAAS](/t/1094) | +| 4 | manual | [Manual setup](/t/1095) | +| 4 | microk8s | [MicroK8s](/t/1194) | +| 4 | microsoft-azure | [Microsoft Azure](/t/1086) | +| 4 | microsoft-aks | [Microsoft AKS](/t/3301) | +| 4 | openstack | [OpenStack](/t/1097) | +| 4 | oracle-oci | [Oracle](/t/1096) | +| 4 | vmware-vsphere | [VMware vSphere](/t/1099) | +| 3 | kubernetes-clouds-and-juju | [Kubernetes clouds and Juju](/t/15621) | +| 2 | configuration | [Configuration](/t/6659) | +| 3 | list-of-controller-configuration-keys | [List of controller configuration keys](/t/7059) | +| 4 | audit-log-exclude-methods | [audit-log-exclude-methods](/t/7064) | +| 4 | juju-ha-space | [juju-ha-space](/t/7065) | +| 4 | juju-mgmt-space | [juju-mgmt-space](/t/7066) | +| 3 | list-of-model-configuration-keys | [List of model configuration keys](/t/7068) | +| 2 | constraint | [Constraint](/t/6184) | +| 2 | containeragent-binary | [`containeragent` (binary)](/t/11672) | +| 2 | controller | [Controller](/t/5455) | +| 2 | credential | [Credential](/t/6006) | +| 2 | deployment | [Deploying](/t/11285) | +| 2 | endpoint | [Endpoint](/t/5462) | +| 2 | high-availability | [High-availability](/t/1066) | +| 2 | hook | [Hook](/t/6464) | +| 2 | hook-tool | [Hook tool](/t/1163) | +| 2 | | Juju | +| 3 | roadmap | [Juju roadmap & releases](/t/5064) | +| 3 | cross-version-compatibility-in-juju | [Cross-version compatibility in Juju](/t/14612) | +| 2 | juju-client | [`juju` CLI (Juju client)](/t/5465) | +| 3 | juju-cli-commands | [`juju` CLI commands](/t/10045) | +| 4 | juju-actions | [juju actions](/t/10069) | +| 4 | juju-add-cloud | [juju add-cloud](/t/10162) | +| 4 | juju-add-credential | 
[juju add-credential](/t/10136) | +| 4 | juju-add-k8s | [juju add-k8s](/t/10049) | +| 4 | juju-add-machine | [juju add-machine](/t/10071) | +| 4 | juju-add-model | [juju add-model](/t/10145) | +| 4 | juju-add-secret | [juju add-secret](/t/11144) | +| 4 | juju-add-secret-backend | [juju add-secret-backend](/t/10062) | +| 4 | juju-add-space | [juju add-space](/t/10117) | +| 4 | juju-add-ssh-key | [juju add-ssh-key](/t/10238) | +| 4 | juju-add-storage | [juju add-storage](/t/10159) | +| 4 | juju-add-unit | [juju add-unit](/t/10141) | +| 4 | juju-add-user | [juju add-user](/t/10193) | +| 4 | juju-agree | [juju agree](/t/10161) | +| 4 | juju-agreements | [juju agreements](/t/10064) | +| 4 | juju-attach-resource | [juju attach-resource](/t/10124) | +| 4 | juju-attach-storage | [juju attach-storage](/t/10126) | +| 4 | juju-autoload-credentials | [juju autoload-credentials](/t/10230) | +| 4 | juju-bind | [juju bind](/t/10244) | +| 4 | juju-bootstrap | [juju bootstrap](/t/10132) | +| 4 | juju-cancel-task | [juju cancel-task](/t/10053) | +| 4 | juju-change-user-password | [juju change-user-password](/t/10118) | +| 4 | juju-charm-resources | [juju charm-resources](/t/10099) | +| 4 | juju-clouds | [juju clouds](/t/10182) | +| 4 | juju-collect-metrics | [juju collect-metrics](/t/10085) | +| 4 | juju-config | [juju config](/t/10139) | +| 4 | juju-constraints | [juju constraints](/t/10060) | +| 4 | juju-consume | [juju consume](/t/10213) | +| 4 | juju-controller-config | [juju controller-config](/t/10237) | +| 4 | juju-controllers | [juju controllers](/t/10152) | +| 4 | juju-create-backup | [juju create-backup](/t/10197) | +| 4 | juju-create-storage-pool | [juju create-storage-pool](/t/10093) | +| 4 | juju-credentials | [juju credentials](/t/10054) | +| 4 | juju-dashboard | [juju dashboard](/t/10091) | +| 4 | juju-debug-code | [juju debug-code](/t/10048) | +| 4 | juju-debug-hook | [juju debug-hook](/t/10229) | +| 4 | juju-debug-hooks | [juju debug-hooks](/t/10242) | +| 4 | juju-debug-log | [juju debug-log](/t/10116) | +| 4 | juju-default-credential | [juju default-credential](/t/10055) | +| 4 | juju-default-region | [juju default-region](/t/10082) | +| 4 | juju-deploy | [juju deploy](/t/10074) | +| 4 | juju-destroy-controller | [juju destroy-controller](/t/10113) | +| 4 | juju-destroy-model | [juju destroy-model](/t/10190) | +| 4 | juju-detach-storage | [juju detach-storage](/t/10089) | +| 4 | juju-diff-bundle | [juju diff-bundle](/t/10142) | +| 4 | juju-disable-command | [juju disable-command](/t/10205) | +| 4 | juju-disable-user | [juju disable-user](/t/10198) | +| 4 | juju-disabled-commands | [juju disabled-commands](/t/10220) | +| 4 | juju-documentation | [juju documentation](/t/10101) | +| 4 | juju-download | [juju download](/t/10134) | +| 4 | juju-download-backup | [juju download-backup](/t/10240) | +| 4 | juju-enable-command | [juju enable-command](/t/10111) | +| 4 | juju-enable-destroy-controlle | [juju enable-destroy-controller](/t/10086) | +| 4 | juju-enable-ha | [juju enable-ha](/t/10206) | +| 4 | juju-enable-user | [juju enable-user](/t/10241) | +| 4 | juju-exec | [juju exec](/t/10195) | +| 4 | juju-export-bundle | [juju export-bundle](/t/10046) | +| 4 | juju-expose | [juju expose](/t/10109) | +| 4 | juju-find | [juju find](/t/10187) | +| 4 | juju-find-offers | [juju find-offers](/t/10097) | +| 4 | juju-firewall-rules | [juju firewall-rules](/t/10061) | +| 4 | juju-grant | [juju grant](/t/10196) | +| 4 | juju-grant-cloud | [juju grant-cloud](/t/10164) | +| 4 | juju-grant-secret | [juju 
grant-secret](/t/11290) | +| 4 | juju-help | [juju help](/t/1729) | +| 4 | juju-help-tool | [juju help-tool](/t/10050) | +| 4 | juju-import-filesystem | [juju import-filesystem](/t/10047) | +| 4 | juju-import-ssh-key | [juju import-ssh-key](/t/10167) | +| 4 | juju-info | [juju info](/t/10103) | +| 4 | juju-integrate | [juju integrate](/t/10207) | +| 4 | juju-kill-controller | [juju kill-controller](/t/10233) | +| 4 | juju-list-actions | [juju list-actions](/t/10232) | +| 4 | juju-list-agreements | [juju list-agreements](/t/10200) | +| 4 | juju-list-charm-resources | [juju list-charm-resources](/t/10234) | +| 4 | juju-list-clouds | [juju list-clouds](/t/10199) | +| 4 | juju-list-controllers | [juju list-controllers](/t/10079) | +| 4 | juju-list-credentials | [juju list-credentials](/t/10150) | +| 4 | juju-list-disabled-commands | [juju list-disabled-commands](/t/10094) | +| 4 | juju-list-firewall-rules | [juju list-firewall-rules](/t/10114) | +| 4 | juju-list-machines | [juju list-machines](/t/10181) | +| 4 | juju-list-models | [juju list-models](/t/10107) | +| 4 | juju-list-offers | [juju list-offers](/t/10170) | +| 4 | juju-list-operations | [juju list-operations](/t/10158) | +| 4 | juju-list-payloads | [juju list-payloads](/t/10070) | +| 4 | juju-list-regions | [juju list-regions](/t/10131) | +| 4 | juju-list-resources | [juju list-resources](/t/10056) | +| 4 | juju-list-secret-backends | [juju list-secret-backends](/t/10072) | +| 4 | juju-list-secrets | [juju list-secrets](/t/10108) | +| 4 | juju-list-spaces | [juju list-spaces](/t/10100) | +| 4 | juju-list-ssh-keys | [juju list-ssh-keys](/t/10133) | +| 4 | juju-list-storage | [juju list-storage](/t/10138) | +| 4 | juju-list-storage-pools | [juju list-storage-pools](/t/10211) | +| 4 | juju-list-subnets | [juju list-subnets](/t/10076) | +| 4 | juju-list-users | [juju list-users](/t/10154) | +| 4 | juju-login | [juju login](/t/10157) | +| 4 | juju-logout | [juju logout](/t/10183) | +| 4 | juju-machines | [juju machines](/t/10078) | +| 4 | juju-metrics | [juju metrics](/t/10143) | +| 4 | juju-migrate | [juju migrate](/t/10121) | +| 4 | juju-model-config | [juju model-config](/t/10096) | +| 4 | juju-model-constraints | [juju model-constraints](/t/10137) | +| 4 | juju-model-default | [juju model-default](/t/10178) | +| 4 | juju-model-defaults | [juju model-defaults](/t/10057) | +| 4 | juju-models | [juju models](/t/10090) | +| 4 | juju-move-to-space | [juju move-to-space](/t/10192) | +| 4 | juju-offer | [juju offer](/t/10080) | +| 4 | juju-offers | [juju offers](/t/10051) | +| 4 | juju-operations | [juju operations](/t/10203) | +| 4 | juju-payloads | [juju payloads](/t/10120) | +| 4 | juju-refresh | [juju refresh](/t/10189) | +| 4 | juju-regions | [juju regions](/t/10112) | +| 4 | juju-register | [juju register](/t/10160) | +| 4 | juju-relate | [juju relate](/t/10140) | +| 4 | juju-reload-spaces | [juju reload-spaces](/t/10063) | +| 4 | juju-remove-application | [juju remove-application](/t/10067) | +| 4 | juju-remove-cloud | [juju remove-cloud](/t/10216) | +| 4 | juju-remove-credential | [juju remove-credential](/t/10201) | +| 4 | juju-remove-k8s | [juju remove-k8s](/t/10098) | +| 4 | juju-remove-machine | [juju remove-machine](/t/10163) | +| 4 | juju-remove-offer | [juju remove-offer](/t/10235) | +| 4 | juju-remove-relation | [juju remove-relation](/t/10110) | +| 4 | juju-remove-saas | [juju remove-saas](/t/10087) | +| 4 | juju-remove-secret | [juju remove-secret](/t/11414) | +| 4 | juju-remove-secret-backend | [juju 
remove-secret-backend](/t/10194) | +| 4 | juju-remove-space | [juju remove-space](/t/10084) | +| 4 | juju-remove-ssh-key | [juju remove-ssh-key](/t/10119) | +| 4 | juju-remove-storage | [juju remove-storage](/t/10066) | +| 4 | juju-remove-storage-pool | [juju remove-storage-pool](/t/10068) | +| 4 | juju-remove-unit | [juju remove-unit](/t/10125) | +| 4 | juju-remove-user | [juju remove-user](/t/10130) | +| 4 | juju-rename-space | [juju rename-space](/t/10135) | +| 4 | juju-resolve | [juju resolve](/t/10146) | +| 4 | juju-resolved | [juju resolved](/t/10144) | +| 4 | juju-resources | [juju resources](/t/10218) | +| 4 | juju-resume-relation | [juju resume-relation](/t/10123) | +| 4 | juju-retry-provisioning | [juju retry-provisioning](/t/10209) | +| 4 | juju-revoke | [juju revoke](/t/10077) | +| 4 | juju-revoke-cloud | [juju revoke-cloud](/t/10104) | +| 4 | juju-revoke-secret | [juju revoke-secret](/t/11291) | +| 4 | juju-run | [juju run](/t/10052) | +| 4 | juju-scale-application | [juju scale-application](/t/10171) | +| 4 | juju-scp | [juju scp](/t/10128) | +| 4 | juju-secret-backends | [juju secret-backends](/t/10149) | +| 4 | juju-secrets | [juju secrets](/t/10214) | +| 4 | juju-set-application-base | [juju set-application-base](/t/10174) | +| 4 | juju-set-constraints | [juju set-constraints](/t/10210) | +| 4 | juju-set-credential | [juju set-credential](/t/10169) | +| 4 | juju-set-default-credentials | [juju set-default-credentials](/t/10180) | +| 4 | juju-set-default-region | [juju set-default-region](/t/10092) | +| 4 | juju-set-firewall-rule | [juju set-firewall-rule](/t/10151) | +| 4 | juju-set-meter-status | [juju set-meter-status](/t/10166) | +| 4 | juju-set-model-constraints | [juju set-model-constraints](/t/10208) | +| 4 | juju-show-action | [juju show-action](/t/10219) | +| 4 | juju-show-application | [juju show-application](/t/10177) | +| 4 | juju-show-cloud | [juju show-cloud](/t/10215) | +| 4 | juju-show-controller | [juju show-controller](/t/10156) | +| 4 | juju-show-credential | [juju show-credential](/t/10105) | +| 4 | juju-show-credentials | [juju show-credentials](/t/10147) | +| 4 | juju-show-machine | [juju show-machine](/t/10243) | +| 4 | juju-show-model | [juju show-model](/t/10191) | +| 4 | juju-show-offer | [juju show-offer](/t/10168) | +| 4 | juju-show-operation | [juju show-operation](/t/10083) | +| 4 | juju-show-secret | [juju show-secret](/t/10172) | +| 4 | juju-show-secret-backend | [juju show-secret-backend](/t/10059) | +| 4 | juju-show-space | [juju show-space](/t/10095) | +| 4 | juju-show-status-log | [juju show-status-log](/t/10204) | +| 4 | juju-show-storage | [juju show-storage](/t/10184) | +| 4 | juju-show-task | [juju show-task](/t/10129) | +| 4 | juju-show-unit | [juju show-unit](/t/10239) | +| 4 | juju-show-user | [juju show-user](/t/10212) | +| 4 | juju-spaces | [juju spaces](/t/10236) | +| 4 | juju-ssh | [juju ssh](/t/10153) | +| 4 | juju-ssh-keys | [juju ssh-keys](/t/10202) | +| 4 | juju-status | [juju status](/t/10173) | +| 4 | juju-storage | [juju storage](/t/10075) | +| 4 | juju-storage-pools | [juju storage-pools](/t/10228) | +| 4 | juju-subnets | [juju subnets](/t/10186) | +| 4 | juju-suspend-relation | [juju suspend-relation](/t/10179) | +| 4 | juju-switch | [juju switch](/t/10102) | +| 4 | juju-sync-agent-binary | [juju sync-agent-binary](/t/10106) | +| 4 | juju-trust | [juju trust](/t/10088) | +| 4 | juju-unexpose | [juju unexpose](/t/10221) | +| 4 | juju-unregister | [juju unregister](/t/10165) | +| 4 | juju-update-cloud | [juju 
update-cloud](/t/10081) | +| 4 | juju-update-credential | [juju update-credential](/t/10065) | +| 4 | juju-update-credentials | [juju update-credentials](/t/10231) | +| 4 | juju-update-k8s | [juju update-k8s](/t/10155) | +| 4 | juju-update-public-clouds | [juju update-public-clouds](/t/10115) | +| 4 | juju-update-secret | [juju update-secret](/t/11413) | +| 4 | juju-update-secret-backend | [juju update-secret-backend](/t/10176) | +| 4 | juju-update-storage-pool | [juju update-storage-pool](/t/10217) | +| 4 | juju-upgrade-controller | [juju upgrade-controller](/t/10058) | +| 4 | juju-upgrade-machine | [juju upgrade-machine](/t/10188) | +| 4 | juju-upgrade-model | [juju upgrade-model](/t/10073) | +| 4 | juju-users | [juju users](/t/10175) | +| 4 | juju-wait-for | [juju wait-for](/t/10122) | +| 4 | juju-wait-for-application | [juju wait-for application](/t/11181) | +| 4 | juju-wait-for-machine | [juju wait-for machine](/t/11183) | +| 4 | juju-wait-for-model | [juju wait-for model](/t/11182) | +| 4 | juju-wait-for-unit | [juju wait-for unit](/t/11184) | +| 4 | juju-whoami | [juju whoami](/t/10148) | +| 3 | environment-variables | [`juju` environment variables](/t/1162) | +| 2 | the-juju-dashboard | [`juju-dashboard` (the Juju dashboard)](/t/4898) | +| 2 | the-juju-web-cli | [`juju` web CLI](/t/3723) | +| 2 | jujuc-binary | [`jujuc` (binary)](/t/13130) | +| 2 | jujud-binary | [`jujud` (binary)](/t/7319) | +| 2 | leader | [Leader](/t/5461) | +| 2 | log | [Log](/t/1184) | +| 2 | machine | [Machine](/t/5459) | +| 2 | metric | [Metric](/t/6463) | +| 2 | model | [Model](/t/5456) | +| 2 | offer | [Offer](/t/13132) | +| 2 | operation | [Operation (script execution)](/t/7934) | +| 2 | placement-directive | [Placement directive](/t/6187) | +| 2 | plugins | [Plugin](/t/1145) | +| 3 | list-of-known-plugins | [List of known plugins](/t/5202) | +| 4 | juju-metadata | [juju-metadata](/t/6877) | +| 4 | juju-stash | [juju-stash](/t/1593) | +| 3 | plugin-flags | [Plugin flags](/t/7171) | +| 2 | python-libjuju-client | [`python-libjuju` (Juju client)](/t/13089) | +| 2 | removing-things | [Removing things](/t/1063) | +| 2 | relation | [Relation (integration)](/t/5464) | +| 2 | charm-resource | [Resource (charm)](/t/11312) | +| 2 | scaling | [Scaling](/t/13137) | +| 2 | secret | [Secret](/t/7286) | +| 3 | secret-backend | [Secret backend](/t/8702) | +| 2 | network-spaces | [Space](/t/1157) | +| 2 | ssh-key | [SSH key](/t/7218) | +| 2 | status | [Status](/t/1168) | +| 2 | storage | [Storage](/t/6185) | +| 3 | storage-constraint | [Storage constraint](/t/7180) | +| 3 | storage-pool | [Storage pool](/t/7183) | +| 3 | storage-provider | [Storage provider](/t/7181) | +| 3 | dynamic-storage | [Dynamic storage](/t/7186) | +| 3 | storage-support | [Storage support](/t/154) | +| 2 | subnet | [Subnet](/t/6234) | +| 2 | task | [Task (script execution)](/t/7933) | +| 2 | telemetry-and-juju | [Telemetry](/t/5188) | +| 2 | terraform-juju-client | [`terraform` CLI (Juju client)](/t/13086) | +| 2 | unit | [Unit](/t/5460) | +| 2 | upgrading | [Upgrading things](/t/1199) | +| 2 | user | [User](/t/6186) | +| 3 | user-permissions | [User access levels](/t/6864) | +| 2 | worker | [Worker](/t/11298) | +| 2 | availability-zone | [Zone](/t/6695) | +| 1 | explanation | [Explanation](/t/5358) | +| 2 | application-modelling | [Application modelling](/t/5524) | +| 2 | juju-performance | [Performance with Juju](/t/12246) | +| 2 | security-with-juju | [Security with Juju](/t/15684) | +| 2 | kubernetes-in-juju | [Kubernetes in Juju](/t/13249) | 
+| | logfile-varlogjujumachine-locklog | [Logfile: /var/log/juju/machine-lock.log](/t/112) | +[/details] + +## Redirects + +[details=Mapping table] +| Location | Path | +|-------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------| +| /docs/juju/manage-relations | /docs/juju/relations | +| /docs/juju/manage-controllers | /docs/juju/controllers | +| /docs/juju/manage-clouds | /docs/juju/clouds | +| /docs/juju/manage-models | /docs/juju/models | +| /docs/juju/the-juju-web-cli | /docs/juju/using-the-juju-web-cli | +| /docs/juju/the-juju-dashboard | /docs/juju/using-the-dashboard | +| /docs/juju/constraints | /docs/juju/about-constraints | +| /docs/juju/user-types-and-abilities | /docs/juju/about-user-types-and-abilities | +| /docs/juju/manage-the-client | /docs/juju/upgrading-client | +| /docs/juju/migrate-a-model | /docs/juju/migrating-models | +| /docs/juju/configure-a-model | /docs/juju/configuring-models | +| /docs/juju/additional-how-to-guides | /docs/juju/other-tutorials | +| /docs/sdk | /t/charm-bundles/1058 | +| /docs/sdk | /t/juju-resources/1074 | +| /docs/sdk | /t/writing-a-kubernetes-v1-charm-updated/3976 | +| /docs/sdk | /t/charm-writing/1260 | +| /docs/sdk | /t/tools/1181 | +| /docs/sdk | /t/hook-tools/1163 | +| /docs/sdk | /t/charm-metadata/1043 | +| /docs/sdk | /t/bundle-reference/1158 | +| /t/advanced-application-deployment | /t/deploying-advanced-applications | +| http://discourse.charmhub.io/ | /docs/juju/community-help | +| http://discourse.charmhub.io/ | /docs/contact-us | +| http://discourse.charmhub.io/ | /docs/juju/community-help | +| /docs/juju/accessing-the-dashboard | /docs/juju/accessing-juju’s-web-interface | +| /docs/juju/basic-concepts | /docs/juju/reference | +| /docs/juju/quick-reference | /docs/juju/reference | +| /docs/juju/glossary | /docs/juju/reference | +| /docs/juju/deploying-applications | /docs/juju/manage-applications/ | +| /t/5458 | /t/1158 | +| /docs/juju/get-started-on-a-localhost | /docs/juju/tutorial | +| /docs/juju/get-started-on-kubernetes | /docs/juju/tutorial | +| /docs/juju/add-a-relation | /docs/juju/manage-same-model-relations | +| /docs/juju/remove-a-relation | /docs/juju/manage-same-model-relations | +| /docs/juju/cross-model-relations | /docs/juju/manage-cross-model-relations | +| /docs/juju/juju-update-clouds | /docs/juju/juju-update-cloud | +| /docs/juju/working-with-multiple-users | /docs/juju/manage-users | +| /docs/juju/applications-and-charmed-operators | /docs/juju/applications | +| /docs/juju/deploy-charms-offline | /docs/juju/manage-applications | +| /docs/juju/juju-sync-agent-binaries | /docs/juju/juju-sync-agent-binary | +| /docs/juju/list-of-available-plugins | /docs/juju/list-of-known-plugins | +| /docs/juju/plugins-wait-for | /docs/juju/juju-wait-for | +| /docs/juju/configure-a-controller#heading--excluding-information-from-the-audit-log | /docs/juju/audit-log-exclude-methods | +| /docs/juju/defining-and-using-persistent-storage | /docs/juju/manage-storage | +| /docs/juju/remove-storage | /docs/juju/manage-storage | +| /docs/juju/juju-show-units | /docs/juju/juju-show-unit | +| /docs/juju/remove-units | /docs/juju/manage-units | +| /docs/juju/working-with-actions | /docs/juju/manage-actions | +| /docs/juju/juju-add-subnet | /docs/juju/add-unit | +| /docs/juju/commands | /docs/juju/juju-cli-commands | +| /docs/juju/juju-add-relation | /docs/juju/juju-cli-commands | +| /docs/juju/juju-add-subnet | 
/docs/juju/juju-cli-commands | +| /docs/juju/juju-budget | /docs/juju/juju-cli-commands | +| /docs/juju/juju-cached-images | /docs/juju/juju-cli-commands | +| /docs/juju/juju-cancel-action | /docs/juju/juju-cli-commands | +| /docs/juju/juju-create-wallet | /docs/juju/juju-cli-commands | +| /docs/juju/juju-get-constraints | /docs/juju/juju-cli-commands | +| /docs/juju/juju-get-model-constraints | /docs/juju/juju-cli-commands | +| /docs/juju/juju-gui | /docs/juju/juju-cli-commands | +| /docs/juju/juju-hook-tool | /docs/juju/juju-cli-commands | +| /docs/juju/juju-hook-tools | /docs/juju/juju-cli-commands | +| /docs/juju/juju-list-cached-images | /docs/juju/juju-cli-commands | +| /docs/juju/juju-list-plans | /docs/juju/juju-cli-commands | +| /docs/juju/juju-plans | /docs/juju/juju-cli-commands | +| /docs/juju/juju-remove-cached-images | /docs/juju/juju-cli-commands | +| /docs/juju/juju-run-action | /docs/juju/juju-cli-commands | +| /docs/juju/juju-set-plan | /docs/juju/juju-cli-commands | +| /docs/juju/juju-set-series | /docs/juju/juju-cli-commands | +| /docs/juju/juju-set-wallet | /docs/juju/juju-cli-commands | +| /docs/juju/juju-show-action-output | /docs/juju/juju-cli-commands | +| /docs/juju/juju-show-action-status | /docs/juju/juju-cli-commands | +| /docs/juju/juju-show-status | /docs/juju/juju-cli-commands | +| /docs/juju/juju-show-wallet | /docs/juju/juju-cli-commands | +| /docs/juju/juju-sla | /docs/juju/juju-cli-commands | +| /docs/juju/juju-sync-tools | /docs/juju/juju-cli-commands | +| /docs/juju/juju-upgrade-charm | /docs/juju/juju-cli-commands | +| /docs/juju/juju-upgrade-dashboard | /docs/juju/juju-cli-commands | +| /docs/juju/juju-upgrade-gui | /docs/juju/juju-cli-commands | +| /docs/juju/juju-upgrade-series | /docs/juju/juju-cli-commands | +| /docs/juju/juju-wallets | /docs/juju/juju-cli-commands | +| /docs/juju/juju-attach | /docs/juju/juju-cli-commands | +| /docs/juju/juju-charm | /docs/juju/juju-cli-commands | +| /docs/juju/juju-remove-consumed-application | /docs/juju/juju-cli-commands | +| /docs/juju/juju-upgrade-juju | /docs/juju/juju-cli-commands | +| /docs/juju/manage-relations | /docs/juju/manage-integrations | +| /docs/juju/manage-same-model-relations | /docs/juju/manage-same-model-integrations | +| /docs/juju/manage-cross-model-relations | /docs/juju/manage-cross-model-integrations | +| /docs/juju/relations | /docs/juju/integration | +| /docs/juju cross-model-relation | /docs/juju cross-model-integration | +| /docs/juju/install-snaps-offline | /docs/juju/install-and-manage-the-client | +| /docs/juju/manage-the-client | /docs/juju/manage-juju | +| /docs/juju/installing-juju | /docs/juju/install-and-manage-the-client | +| /docs/juju/use-the-client | /docs/juju/use-juju | +| /docs/juju/back-up-the-juju-client | /docs/juju/back-up-juju | +| /docs/juju/upgrade-the-juju-client | /docs/juju/upgrade-juju | +| /docs/juju/accessing-individual-machines-with-ssh | /docs/juju/manage-machines | +| /docs/juju/set-constraints-for-a-machine | /docs/juju/manage-machines | +| /docs/juju/upgrade-a-machines-series | /docs/juju/manage-machines | +| /docs/juju/remove-a-machine | /docs/juju/manage-machines | +| /docs/juju/juju-set-default-credential | /docs/juju/juju-set-default-credentials | +| /docs/juju/upgrade-the-dashboard | /docs/juju/manage-the-juju-dashboard | +| /docs/juju/accessing-the-dashboard | /docs/juju/manage-the-juju-dashboard | +| /docs/juju/restore-a-controller-from-a-backup | /docs/juju/controller-backups | +| /docs/juju/manage-constraints | /docs/juju/constraint | +| 
/docs/juju/constraints | /docs/juju/constraint | +| /docs/juju/controllers | /docs/juju/controller | +| /docs/juju/credentials | /docs/juju/credential | +| /docs/juju/endpoints | /docs/juju/endpoint | +| /docs/juju/agents | /docs/juju/agent | +| /docs/juju/bundles | /docs/juju/bundle | +| /docs/juju/clouds | /docs/juju/cloud | +| /docs/juju/leaders | /docs/juju/leader | +| /docs/juju/machines | /docs/juju/machine | +| /docs/juju/models | /docs/juju/model | +| /docs/juju/units | /docs/juju/unit | +| /docs/juju/create-a-controller | /docs/juju/manage-controllers | +| /docs/juju/configure-a-controller | /docs/juju/manage-controllers | +| /docs/juju/set-constraints-for-a-controller | /docs/juju/manage-controllers | +| /docs/juju/controller-backups | /docs/juju/manage-controllers | +| /docs/juju/high-availability-juju-controller | /docs/juju/manage-controllers | +| /docs/juju/remove-a-controller | /docs/juju/manage-controllers | +| /docs/juju/applications | /docs/juju/application | +| /docs/juju/add-a-model | /docs/juju/manage-models | +| /docs/juju/get-information-about-a-model | /docs/juju/manage-models | +| /docs/juju/configure-a-model | /docs/juju/manage-models | +| /docs/juju/set-constraints-for-a-model | /docs/juju/manage-models | +| /docs/juju/switch-to-a-different-model | /docs/juju/manage-models | +| /docs/juju/migrate-a-model | /docs/juju/manage-models | +| /docs/juju/upgrade-models | /docs/juju/manage-models | +| /docs/juju/remove-a-model | /docs/juju/manage-models | +| /docs/juju/disabling-commands | /docs/juju/manage-models | +| /docs/juju/monitor-elasticsearch-with-elasticsearch-and-kibana | /docs/juju/how-to | +| /docs/juju/additional-how-to-guides | /docs/juju/how-to | +| /docs/juju/deploy-postgres-on-ubuntu-server | /docs/juju/how-to | +| /docs/juju/deploy-rabbitmq-cluster-on-ubuntu-server | /docs/juju/how-to | +| /docs/juju/get-started-charmed-kubernetes | /docs/juju/how-to | +| /docs/juju/using-gitlab-as-a-container-registry | /docs/juju/how-to | +| /docs/juju/streaming-hadoop-analysis | /docs/juju/how-to | +| /docs/juju/charmed-kubernetes-kata-containers | /docs/juju/how-to | +| /docs/juju/get-started-hadoop-spark | /docs/juju/how-to | +| /docs/juju/kubeapps-on-canonical-kubernetes | /docs/juju/how-to | +| /docs/juju/deploying-storageos-on-kubernetes | /docs/juju/how-to | +| /docs/juju/charmed-osm-get-started | /docs/juju/how-to | +| /docs/juju/lma-light | /docs/juju/how-to | +| /docs/juju/deploy-a-charm-from-charmhub | /docs/juju/manage-applications | +| /docs/juju/deploy-an-application-from-a-local-charm | /docs/juju/manage-applications | +| /docs/juju/deploy-to-a-lxd-container | /docs/juju/manage-applications | +| /docs/juju/deploy-an-application-with-a-specific-series | /docs/juju/manage-applications | +| /docs/juju/deploy-to-a-specific-machine | /docs/juju/manage-applications | +| /docs/juju/deploy-to-a-specific-availability-zone | /docs/juju/manage-applications | +| /docs/juju/deploy-to-a-network-space | /docs/juju/manage-applications | +| /docs/juju/trust-an-application-with-a-credential | /docs/juju/manage-applications | +| /docs/juju/expose-a-deployed-application | /docs/juju/manage-applications | +| /docs/juju/configure-an-application | /docs/juju/manage-applications | +| /docs/juju/set-constraints-for-an-application | /docs/juju/manage-applications | +| /docs/juju/scale-an-application | /docs/juju/manage-applications | +| /docs/juju/upgrade-applications | /docs/juju/manage-applications | +| /docs/juju/remove-an-application | /docs/juju/manage-applications | +| 
/docs/juju/debug-charm-hooks | [https://juju.is/docs/sdk/debug-a-charm](https://juju.is/docs/sdk/debug-a-charm) | +| /docs/juju/test | /docs/sdk/debug-a-charm | +| /docs/juju/juju-logs | /docs/juju/log | +| /docs/juju/adding-clouds | /docs/juju/manage-clouds | +| /docs/juju/view-the-available-clouds | /docs/juju/manage-clouds | +| /docs/juju/view-the-available-cloud-regions | /docs/juju/manage-clouds | +| /docs/juju/change-the-default-region-for-a-cloud | /docs/juju/manage-clouds | +| /docs/juju/view-detailed-information-about-a-cloud | /docs/juju/manage-clouds | +| /docs/juju/update-the-definition-of-a-cloud | /docs/juju/manage-clouds | +| /docs/juju/remove-a-cloud | /docs/juju/manage-clouds | +| /docs/juju/charmed-operators | /docs/juju/charmed-operator | +| /docs/juju/user-types-and-abilities | /docs/juju/user | +| /docs/juju/add-credentials | /docs/juju/manage-credentials | +| /docs/juju/list-credentials | /docs/juju/manage-credentials | +| /docs/juju/set-the-default-credential-for-a-cloud | /docs/juju/manage-credentials | +| /docs/juju/relate-a-credential-to-a-model | /docs/juju/manage-credentials | +| /docs/juju/query-a-credential-related-to-a-model | /docs/juju/manage-credentials | +| /docs/juju/update-a-credential | /docs/juju/manage-credentials | +| /docs/juju/remove-a-credential | /docs/juju/manage-credentials | +| /docs/juju/log-in-to-a-controller | /docs/juju/manage-users | +| /docs/juju/remove-a-user-from-a-controller | /docs/juju/manage-users | +| /docs/juju/status-values | /docs/juju/status | +| /docs/juju/deployment-of-juju-agents | /docs/juju/deployment | +| /docs/juju/agent-version | /docs/juju/list-of-model-configuration-keys | +| /docs/juju/apt-mirror | /docs/juju/list-of-model-configuration-keys | +| /docs/juju/automatically-retry-hooks | /docs/juju/list-of-model-configuration-keys | +| /docs/juju/cloudinit-userdata | /docs/juju/list-of-model-configuration-keys | +| /docs/juju/container-inherit-properties | /docs/juju/list-of-model-configuration-keys | +| /docs/juju/disable-network-management | /docs/juju/list-of-model-configuration-keys | +| /docs/juju/enable-os-refresh-update | /docs/juju/list-of-model-configuration-keys | +| /docs/juju/enable-os-upgrade | /docs/juju/list-of-model-configuration-keys | +| /docs/juju/firewall-mode | /docs/juju/list-of-model-configuration-keys | +| /docs/juju/image-stream | /docs/juju/list-of-model-configuration-keys | +| /docs/juju/provisioner-harvest-mode | /docs/juju/list-of-model-configuration-keys | +| /docs/juju/amazon-aws | /docs/juju/amazon-ec2 | +| /docs/juju/amazon-elastic-kubernetes-service-(amazon-eks) | /docs/juju/amazon-eks | +| /docs/juju/google-kubernetes-engine-(gke) | /docs/juju/google-gke | +| /docs/juju/azure | /docs/juju/microsoft-azure | +| /docs/juju/azure-kubernetes-service-(azure-aks) | /docs/juju/microsoft-aks | +| /docs/juju/oracle | /docs/juju/oracle-oci | +| /docs/juju/manual-setup | /docs/juju/manual | +| /docs/juju/agent-binary | /docs/juju/jujud-binary | +| /docs/juju/other-clusters | /docs/juju/tutorial | +| /docs/aws-cloud | /docs/juju/amazon-ec2 | +| /docs/juju/tutorials | /docs/juju/tutorial | +| /docs/juju/get-started-with-juju | /docs/juju/tutorial | +| /docs/juju/use-lxd-clustering | /docs/juju/lxd | +| /docs/juju/manage-a-lxd-cloud | /docs/juju/lxd | +| /docs/juju/use-lxd-profiles | [https://juju.is/docs/sdk/lxd-profile-yaml](https://juju.is/docs/sdk/lxd-profile-yaml) | +| /docs/juju/control-application-network-ingress | /docs/juju/manage-applications | +| /docs/juju/manage-same-model-integrations 
| /docs/juju/manage-integrations | +| /docs/juju/manage-cross-model-integrations | /docs/juju/manage-offers | +| /docs/juju/controller-agent | /docs/juju/agent | +| /docs/juju/cross-model-integration | /docs/juju/relation | +| /docs/juju/integration | /docs/juju/relation | +| /docs/juju/manage-integrations | /docs/juju/manage-relations | +| /docs/juju/cloud-image-metadata | /docs/juju/manage-metadata | +| /docs/juju/install-juju | /docs/juju/install-and-manage-the-client | +| /docs/juju/use-juju | /docs/juju/install-and-manage-the-client | +| /docs/juju/back-up-juju | /docs/juju/install-and-manage-the-client | +| /docs/juju/upgrade-juju | /docs/juju/install-and-manage-the-client | +| /docs/juju/manage-juju | /docs/juju/install-and-manage-the-client | +| /docs/juju/the-juju-client | /docs/juju/juju-client | +| /docs/juju/terraform-cli-client | /docs/juju/terraform-juju-client | +| /docs/juju/manage-charm-bundles | /docs/juju/manage-charms | +| /docs/juju/manage-charms | /docs/juju/manage-charms-or-bundles | +| /docs/juju/upgrade-your-juju-deployment-from-2-9-to-3-x | /docs/juju/upgrade-your-juju-deployment | +| /docs/juju/collecting-juju-metrics | /docs/juju/manage-controllers | +| /docs/juju/juju-security | /docs/juju/harden-your-deployment | +| /docs/juju/troubleshooting | /docs/juju/troubleshoot-your-deployment | +| /docs/juju/troubleshoot-additions | /docs/juju/troubleshoot-your-deployment | +| /docs/juju/troubleshoot-clouds | /docs/juju/troubleshoot-your-deployment | +| /docs/juju/working-offline | /docs/juju/take-your-deployment-offline | +| /docs/juju/use-the-localhost-cloud-offline | /docs/juju/take-your-deployment-offline | +| /docs/juju/configure-juju-for-offline-usage | /docs/juju/take-your-deployment-offline | +| /docs/juju/offline-mode-strategies | /docs/juju/take-your-deployment-offline | +| /docs/juju/troubleshoot-model-upgrades | /docs/juju/troubleshoot-your-deployment | +| /docs/juju/troubleshoot-removals | /docs/juju/troubleshoot-your-deployment | +| /docs/juju/juju-version-compatibility-matrix | /docs/juju/cross-version-compatibility-in-juju | +| /docs/juju/agent-introspection-juju-leases | /docs/juju/agent-introspection | +| /docs/juju/agent-introspection-juju-revoke-lease | /docs/juju/agent-introspection | +[/details] + +------------------------- + +nottrobin | 2019-02-20 12:31:18 UTC | #2 + + + +------------------------- + +timClicks | 2019-07-02 20:59:59 UTC | #7 + +Question for @anthonydillon, @nottrobin & @pmatulis - would changing the title of this post impact negatively downstream? If it's being pinned globally, I believe it should be inviting. (Also, why is it being pinned globally?) + +"Juju documentation" makes it hard to know whether it's worth clicking on. If it's changed to "Need help? Links to Juju's documentation". + +The introduction should also be more welcoming, in my opinion. It reads to me as "you've come to the wrong place": + +[quote="system, post:1, topic:1087"] +Most of this documentation can be discussed and changed on a per-page basis in the Juju forum. +See the [Documentation guidelines ](/t/documentation-guidelines) if you’d like to contribute. +[/quote] + +------------------------- + +nottrobin | 2019-07-04 07:27:23 UTC | #8 + +Yes, you can happily change the title, that won't have any impact on the system. + +The thinking behind it being pinned is that it provides effectively the navigation table for the documentation system. But I don't think it needs to be pinned globally, that seems like a mistake. 
+ +But I consider it very much your call whether it's the right decision to pin it at all. You can happily unpin it, again without having any impact on the documentation system. + +The content above the "Navigation" title is also currently not used by the docs site, so you can change it to whatever you like for the time being. However, in other documentation sets - docs.snapcraft.io and the soon-to-be created maas.io/docs, the content above the "Navigation" title in that topic forms the content of the docs homepage, so we might want to move this way in Juju too before too long. + +Does that all make sense? + +------------------------- + +timClicks | 2019-07-05 02:14:00 UTC | #9 + +Thanks for the response Robin. I'll pin this thread to the #docs category, rather than globally. That way it can still be useful as an index for people writing documentation. + +------------------------- + +szeestraten | 2019-11-15 07:08:41 UTC | #12 + +Heads up, the following pages under the **Reference** section are missing actual links: +* Juju commands +* Environment variables + +------------------------- + +zicklag | 2020-01-27 19:03:03 UTC | #14 + +FYI, I very much like this page pinned to the Docs category. Helps me a lot. :) + +------------------------- + +doismellburning | 2020-07-14 10:44:37 UTC | #15 + +[quote="system, post:1, topic:1087"] +Getting started on Kubernetes +[/quote] + +This currently links to https://discourse.charmhub.io/t/getting-started-with-juju/1970 - the same target as "Getting started on traditional infrastructure" which is highly confusing until you notice it. + +As a minimum it should probably link to https://discourse.charmhub.io/t/using-kubernetes-with-juju/1090 and be moved lower (because that document references https://discourse.charmhub.io/t/getting-started-with-juju/1970 as a prerequisite) + +------------------------- + +timClicks | 2020-07-21 02:27:42 UTC | #16 + +Thanks for raising this @doismellburning. I will get this addressed very shortly :slight_smile: + +Edit: updated + +------------------------- + +gomboli | 2020-07-21 08:55:29 UTC | #17 + +I found these 404 broken links: +* https://juju.is/docs/kubernetes/azure-aks +* https://juju.is/docs/kubernetes/amazon-eks +* https://juju.is/docs/kubernetes/google-gke +@timClicks + +------------------------- + +timClicks | 2020-07-30 03:16:48 UTC | #18 + +That's strange.. I must have edited the index incorrectly. I will take a look now. + +Edit: the problem was that I had edited the titles of those URLs. Discourse is not sensitive to these changes (it only cares about the post ID). + +------------------------- + +romaincout | 2021-06-17 15:31:44 UTC | #19 + +relate link is broken +(4. Relate applications:) +I believe it should point to https://discourse.charmhub.io/t/managing-relations/1073 + +------------------------- + +pedroleaoc | 2022-04-07 09:24:57 UTC | #20 + + + +------------------------- + +pedroleaoc | 2022-10-14 11:28:01 UTC | #21 + + + +------------------------- + +ismailkayi | 2024-07-10 11:18:53 UTC | #22 + +Hello all, should we post our improvement recommendations here or should the GitHub page be used? I believe the initial drawing is a bit confusing, it seems like there is a flow starting from the left (user) to the right (application) which is not the case. I would suggest a new drawing if it is possible? 
+ +https://discourse-charmhub-io.s3.eu-west-2.amazonaws.com/original/2X/8/8d7419018f04c4106df07b19fea05e052934066c.jpeg + +------------------------- + +tmihoc | 2024-07-10 11:31:58 UTC | #23 + +@ismailkayi Either here or on https://github.com/juju/docs/issues is fine. + +What about this picture do you think is wrong? + +------------------------- + +ismailkayi | 2024-07-10 11:38:09 UTC | #24 + +Hİ @tmihoc , thanks for the reply. It is not wrong but I think it is a bit confusing. When I first look at it from a newbie perspective, it is like flow starting with the user and will trigger juju to connect Charmhub and then CharmSDK and ends up with Application. I believe we can segregate Juju and Charm creation parts more obviously. + +------------------------- + +ismailkayi | 2024-07-10 11:52:57 UTC | #25 + +Just a quick example of what I meant. I am not saying this one is better btw :) but this kind of placement and 2 way arrows might be a bit better for all unexperienced users of Juju. Just an idea. + + +https://discourse-charmhub-io.s3.eu-west-2.amazonaws.com/original/2X/7/7a96fdecba28aa84691a7eccf337615a2296d3d5.png + +![image|690x362](upload://5WXSXe2XEKRyzmKmpfD0DHGP0b4.png) + +------------------------- + +tmihoc | 2024-07-10 18:17:40 UTC | #26 + +Thank you for your suggestion! + +Regarding 2-way arrows: We deliberately chose not to use them. 1-way arrows following a data or a dependency flow result in much easier to understand stories. + +Regarding the arrow from Juju to the Cloud Environment -- I'm not sure I understand the suggestion. We try to make sure that going from one entity to another you get a full sentence, but "Juju deploy and manage cloud cloud environment" doesn't do that (compare with "Juju manages charms from Charmhub"). + +Regarding the fact that the bigger diagram unites the Juju and charm SDK story, with a direct line from User to Application: In a sense that is exactly the point we want to make, that with Juju you the user can manage applications. A user typing `juju deploy mattermost-k8s` in their CLI will in fact cause Juju to connect to Charmhub and get the `mattermost-k8s` charm, and that charm lands there through the charm SDK after being released there by charm authors who spent time charming the application. Should we segregate the stories? We do currently but we are also considering bringing them closer together, so this diagram would be a step in that direction. + +That said, I am always looking to improve things further (e.g., I can see how the arrows from Charmhub to Charm Developer make some leaps...). Happy to discuss further! + +------------------------- + diff --git a/tmp/t/1088.md b/tmp/t/1088.md new file mode 100644 index 000000000..3787609ac --- /dev/null +++ b/tmp/t/1088.md @@ -0,0 +1,83 @@ +system | 2024-05-29 12:36:18 UTC | #1 + + + +> [List of supported clouds](/t/6665) > Google GCE + +This document describes details specific to using your existing Google GCE cloud with Juju. + +> See more: [Google GCE](https://cloud.google.com/compute) + +When using the Google GCE cloud with Juju, it is important to keep in mind that it is a (1) [machine cloud](/t/5454#heading--machine-clouds-vs--kubernetes-clouds) and (2) [not some other cloud](/t/5454#heading--cloud-foo-vs--cloud-bar). 
+ +> See more: [Cloud differences in Juju](/t/5454#heading--cloud-differences) + +As the differences related to (1) are already documented generically in our [Tutorial](/t/6559), [How-to guides](/t/5334), and [Reference](/t/5348) docs, here we record just those that follow from (2). + +|Juju points of variation|Notes for the Google GCE cloud| +|---|---| +|**setup (chronological order):**|| +|[CLOUD](/t/5454)|| +|supported versions:|| +|requirements:|Permissions: Service Account Key Admin, Compute Instance Admin, and Compute Security Admin.
See more: [Google \| Compute Engine IAM roles and permissions](https://cloud.google.com/compute/docs/access/iam).| +|[definition:](/t/5454#heading--cloud-definition)|:information_source: Juju automatically defines a cloud of this type.| +|- name:|`google` or user-defined| +|- type:|`gce`| +|- authentication types:|`[oauth2, jsonfile]`| +|- regions:|[TO BE ADDED]| +|- cloud-specific model configuration keys:|**`base-image-path`** (string)
Sets the base path to look for machine disk images.| +|[CREDENTIAL](/t/6006)|| +|definition:|`auth-type`: `jsonfile` or `oauth2`
> See more: [Google \| Authenticate to Compute Engine](https://cloud.google.com/compute/docs/authentication), [Google \| Create and delete service account keys](https://cloud.google.com/iam/docs/keys-create-delete#iam-service-account-keys-create-gcloud)

**If you want to use environment variables:**

- `CLOUDSDK_COMPUTE_REGION`

- `GOOGLE_APPLICATION_CREDENTIALS=`| +|[CONTROLLER](/t/5455)|| +|notes on bootstrap:|--| +||| +||| +|**other (alphabetical order:)**|| +| [CONSTRAINT](/t/6184)|| +|conflicting:|`[instance-type]` vs. `[arch, cores, cpu-power, mem]`| +|supported?|| +|- [`allocate-public-ip`](/t/6184#heading--allocate-public-ip)|:white_check_mark:| +|- [`arch`](/t/6184#heading--arch)|:white_check_mark:| +|- [`container`](/t/6184#heading--container)|:white_check_mark:| +|- [`cores`](/t/6184#heading--cores)|:white_check_mark:| +|- [`cpu-power`](/t/6184#heading--cpu-power)|:white_check_mark:| +|- [`image-id`](/t/6184#heading--image-id)|❌ | +|- [`instance-role`](/t/6184#heading--instance-role)|❌| +|- [`instance-type`](/t/6184#heading--instance-type)|:white_check_mark:| +|- [`mem`](/t/6184#heading--mem)|:white_check_mark:| +|- [`root-disk`](/t/6184#heading--root-disk)|:white_check_mark:| +|- [`root-disk-source`](/t/6184#heading--root-disk-source)|❌| +|- [`spaces`](/t/6184#heading--spaces)|❌| +|- [`tags`](/t/6184#heading--tags)|❌| +|- [`virt-type`](/t/6184#heading--virt-type)|❌| +|- [`zones`](/t/6184#heading--zones)|:white_check_mark:| +|[PLACEMENT DIRECTIVE](/t/6187)|| +|[``](/t/6187#heading--machine)|TBA| +|[`subnet=...`](/t/6187#heading--subnet)|❌ | +|[`system-id=...`](/t/6187#heading--system-id)|❌| +|[`zone=...`](/t/6187#heading--zone)|:white_check_mark:| +|[MACHINE](/t/5459)|| +|[RESOURCE (cloud)](/t/1102)

Consistent naming, tagging, and the ability to add user-controlled tags to created instances.|❌| + +------------------------- + +pedroleaoc | 2021-06-08 18:06:33 UTC | #2 + + + +------------------------- + +pedroleaoc | 2022-10-14 11:31:58 UTC | #3 + + + +------------------------- + +nuccitheboss | 2023-08-15 14:53:03 UTC | #4 + +Hello hello Juju team - quick question for yinz. I was wondering what happened to the HOWTO documentation for setting up GCP with Juju? I noticed that this page has been recently updated to remove the set up information, so I have been going into the revision history to find the commands that I am looking for. + +Could you please point me to where the GCP set up instructions has moved to in the Juju documentation? Thank you! + +------------------------- + diff --git a/tmp/t/1093.md b/tmp/t/1093.md new file mode 100644 index 000000000..447850316 --- /dev/null +++ b/tmp/t/1093.md @@ -0,0 +1,270 @@ +system | 2024-07-29 11:40:43 UTC | #1 + + + +> [List of supported clouds](/t/6665) > LXD + + + +This document describes details specific to using your existing LXD cloud with Juju. + +---- +[details=Expand to view how to get a LXD cloud quickly on Ubuntu] + +Your Ubuntu likely comes with LXD preinstalled. Configure it as below. Juju will then recognize it as the `localhost` cloud. + +```text +lxd init --auto +lxc network set lxdbr0 ipv6.address none +``` +[/details] +--- +> See more: [LXD](https://documentation.ubuntu.com/lxd/en/latest/) + +When using the LXD cloud with Juju, it is important to keep in mind that it is a (1) [machine cloud](/t/5454#heading--machine-clouds-vs--kubernetes-clouds) and (2) [not some other cloud](/t/5454#heading--cloud-foo-vs--cloud-bar). + +> See more: [Cloud differences in Juju](/t/5454#heading--cloud-differences) + +As the differences related to (1) are already documented generically in our [Tutorial](/t/6559), [How-to guides](/t/5334), and [Reference](/t/5348) docs, here we record just those that follow from (2). + +--- +[details=Expand to view some reasons to use a LXD cloud] +The LXD cloud, especially when used locally, is great for:

- creating a repeatable deployment: Juju enables you to quickly iterate to construct the optimal deployment for your situation, then distribute that across your team

- local development: Juju's localhost cloud can mirror the production ops environment (without incurring the costs involved with duplicating it)

- learning Juju: LXD is a lightweight tool for exploring Juju and how it operates

- rapid prototyping: LXD is great for when you're creating a new charm and want to be able to quickly provision capacity and tear it down +[/details] + +---- +[details=Expand to find out why Docker wouldn't work] +Juju expects to see an operating system-like environment, so a LXD system container fits the bill. Docker containers are laid out for a singular application process, with a self-contained filesystem rather than a base userspace image. +[/details] +----- + + + +|Juju points of variation|Notes for the LXD cloud| +|---|---| +|**setup (chronological order):**|| +|[CLOUD](/t/5454)|| +|supported versions:|Juju `2.9.x`: LXD `5.0`

Juju `3.x.x`: LXD `5.x`| +|requirements:|TBA| +|[definition:](/t/5454#heading--cloud-definition)|:information_source: Juju automatically defines a cloud of this type.| +|- name:|`localhost` or user-defined| +|- type:|`lxd`| +|- authentication types:|`[certificate, interactive]`| +|- regions:|[TO BE ADDED]| +|- cloud-specific model configuration keys:|**project** (string)
The LXD project name to use for Juju's resources.| +|[CREDENTIAL](/t/6006)|| +|definition:|**local LXD cloud:** If you are a Juju admin user: Already known to Juju. Run `juju bootstrap`, then `juju credentials` to confirm. (Pre-defined credential name in Juju: `localhost`.) Otherwise: Add manually as you would a remote.

**clustered LXD cloud**: In Juju, this counts as a remote cloud. You must add its definition to Juju explicitly.

**remote LXD cloud:** Requires the API endpoint URL for the remote LXD server.
> See more: [LXD \| How to add remote servers](https://documentation.ubuntu.com/lxd/en/latest/remotes/)

**If you want to use a YAML file:**

(Pro tip: If you define a trust password, you can just use a `trust-password` key, and that will retrieve the certificates for you.)

`credentials:`
 `:`
   `:`
    `auth-type: certificate`
    `client-key: \|`
     `-----BEGIN RSA PRIVATE KEY---`
     ``
     `-----END RSA PRIVATE KEY-----`
    `client-cert: \|`
     `-----BEGIN CERTIFICATE-----`
     ``
     `-----END CERTIFICATE-------`
    `server-cert: \|`
     `-----BEGIN CERTIFICATE-----`
     ``
     `-----END CERTIFICATE-------` | +|[CONTROLLER](/t/5455)|| +|notes on bootstrap:|--| +||| +||| +|**other (alphabetical order:)**|| +| [CONSTRAINT](/t/6184)|With LXD system containers, constraints are interpreted as resource *maximums* (as opposed to *minimums*).

There is a 1:1 correspondence between a Juju machine and a LXD container. Compare `juju machines` and `lxc list`.| +|conflicting:|TBA| +|supported?|| +|- [`allocate-public-ip`](/t/6184#heading--allocate-public-ip)|❌| +|- [`arch`](/t/6184#heading--arch)|:white_check_mark:
Valid values: `[host arch]`.| +|- [`container`](/t/6184#heading--container)|❌| +|- [`cores`](/t/6184#heading--cores)|:white_check_mark:| +|- [`cpu-power`](/t/6184#heading--cpu-power)|❌| +|- [`image-id`](/t/6184#heading--image-id)|❌ | +|- [`instance-role`](/t/6184#heading--instance-role)|❌| +|- [`instance-type`](/t/6184#heading--instance-type)|| +|- [`mem`](/t/6184#heading--mem)|The maximum amount of memory that a machine/container will have.| +|- [`root-disk`](/t/6184#heading--root-disk)|| +|- [`root-disk-source`](/t/6184#heading--root-disk-source)|:white_check_mark:
`root-disk-source` is the LXD storage pool for the root disk. The default LXD storage pool is used if root-disk-source is not specified.| +|- [`spaces`](/t/6184#heading--spaces)|❌| +|- [`tags`](/t/6184#heading--tags)|❌| +|- [`virt-type`](/t/6184#heading--virt-type)|❌| +|- [`zones`](/t/6184#heading--zones)|❌| +|[PLACEMENT DIRECTIVE](/t/6187)|| +|[``](/t/6187#heading--machine)|TBA| +|[`subnet=...`](/t/6187#heading--subnet)|❌ | +|[`system-id=...`](/t/6187#heading--system-id)|❌| +|[`zone=...`](/t/6187#heading--zone)|:white_check_mark:
If there's no '=' delimiter, assume it's a node name.| +|[MACHINE](/t/5459)|| +|[RESOURCE (cloud)](/t/1102)

Consistent naming, tagging, and the ability to add user-controlled tags to created instances.|❌ | + + + +## Other notes + + + +

Simple bootstrap of a remote LXD server

+ +From Juju 2.9.5, the easiest method for bootstrapping a remote LXD server is to add the remote to your local LXC config, then bootstrap with `juju`. + +On the remote server: +```bash +# ensure the LXD daemon is listening on an accessible IP +lxc config set core.https_address '[::]' +# give the LXD daemon a trust password so the client can register credentials +lxc config set core.trust_password mytrustpassword +``` + +On the bootstrapping client: +```bash +# add the remote LXD server to the local LXC config +lxc remote add myremote 11.22.33.44 --password mytrustpassword +# bootstrap juju using the remote name in LXC +juju bootstrap myremote +``` + +[note]The bootstrapping client must be able to reach the remote LXD containers. This may require the setup of a bridge device with the host's ethernet device.[/note] + + + +

Non-admin user credentials

+ + +See [Credentials](/t/credentials/1112) for more details on how Juju credentials are used to share a bootstrapped controller. + +To share a LXD server with other users on the same machine or remotely, the best method is to use LXC remotes. See [Simple bootstrap of a remote LXD server](https://discourse.charmhub.io/t/lxd/1093#heading--simple-bootstrap-of-a-remote-lxd-server) above. + +
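+As a sketch only (reusing the hypothetical address and trust password from the section above; the remote name `shared-lxd` is arbitrary), another user would register the same remote under their own account and bootstrap as usual:
+
+```bash
+# run as the other user, on the same machine or a different one
+lxc remote add shared-lxd 11.22.33.44 --password mytrustpassword
+# bootstrap their own controller against the shared LXD server
+juju bootstrap shared-lxd
+```
+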

Add resilience via LXD clustering

+ + +LXD clustering provides the ability for applications to be deployed in a high-availability manner. In a clustered LXD cloud, Juju will deploy units across its nodes. For more, see [Using LXD clustering with Juju](https://discourse.charmhub.io/t/using-lxd-clustering-with-juju/1091). + +
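+A clustered LXD cloud counts as a remote cloud (see the credential notes in the table above), so its definition has to be added to Juju explicitly. A minimal sketch, assuming one cluster member is reachable at the hypothetical address `10.0.0.1`:
+
+```yaml
+# clouds.yaml -- hypothetical definition for a clustered LXD cloud
+clouds:
+  my-lxd-cluster:
+    type: lxd
+    auth-types: [certificate]
+    endpoint: https://10.0.0.1:8443
+```
+
+Adding it with `juju add-cloud my-lxd-cluster ./clouds.yaml` and then running `juju bootstrap my-lxd-cluster` creates a controller on the cluster, after which deployed units are spread across the cluster's nodes.
+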

Use LXD profiles from a charm

+ + +LXD profiles allow you to define a configuration that can be applied to any instance. Juju can apply those profiles during the creation or modification of a LXD container. For more, see [Using LXD profiles with Juju](https://discourse.charmhub.io/t/using-lxd-profiles-with-juju/4453). + +
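+For illustration only, a minimal sketch of what such a profile can look like when shipped as `lxd-profile.yaml` at the root of a charm (the particular config and device entries here are hypothetical examples, not requirements):
+
+```yaml
+# lxd-profile.yaml -- a hypothetical profile that Juju would apply to the charm's containers
+config:
+  security.nesting: "true"
+devices:
+  kvm:
+    path: /dev/kvm
+    type: unix-char
+```
+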

LXD images

+ + +LXD is image based: All LXD containers come from images and any LXD daemon instance (also called a "remote") can serve images. When LXD is installed a locally-running remote is provided (Unix domain socket) and the client is configured to talk to it (named 'local'). The client is also configured to talk to several other, non-local, ones (named 'ubuntu', 'ubuntu-daily', and 'images'). + +An image is identified by its fingerprint (SHA-256 hash), and can be tagged with multiple aliases. + +For any image-related command, an image is specified by its alias or by its fingerprint. Both are shown in image lists. An image's *filename* is its *full* fingerprint, while an image *list* displays its *partial* fingerprint. Either type of fingerprint can be used to refer to images. + +Juju pulls official cloud images from the 'ubuntu' remote (http://cloud-images.ubuntu.com) and creates the necessary alias. Any subsequent requests will be satisfied by the LXD cache (`/var/lib/lxd/images`). + +Image cache expiration and image synchronization mechanisms are built-in. + +
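+For example, the standard LXC client commands below (a sketch; nothing here is specific to Juju) show the configured remotes and the images currently cached by the local daemon:
+
+```bash
+# list the image remotes the local client knows about ('local', 'ubuntu', 'ubuntu-daily', 'images', ...)
+lxc remote list
+# list the images cached by the local LXD daemon, with aliases and partial fingerprints
+lxc image list local:
+```
+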
+ +**Contributors: @barryprice , @danieleprocida , @hpidcock, @jameinel , @pedroleaoc , @pmatulis , @timClicks , @tmihoc** + +------------------------- + +erik-lonroth | 2021-05-24 18:11:13 UTC | #2 + +[quote="system, post:1, topic:1093"] +On the LXD host generate the certificate with the `autoload-credentials` command. Use the below sample session as a guide: +[/quote] + +This information is misleading since it is phrased that this command comes from the LXD host, which is not the case. + +I would like to see link to a clear instruction on how to generate those LXD server side certificates and how to transfer it over to the juju client. + +The command is: "juju autoload-credentials" and now-a-days with juju2.9, the output from that command is not like the one presented... + +I'm not confident enough yet to give the full description on how to achieve it or I would have done it already. @pedroleaoc + +------------------------- + +pedroleaoc | 2021-05-25 13:52:46 UTC | #3 + +Thanks, Erik. I will add this to the list of documents that require reviewing and ask someone from the team to check this info. + +------------------------- + +simonrichardson | 2021-05-28 14:28:35 UTC | #4 + +@erik-lonroth is the confusion between host and admin here? + +------------------------- + +pedroleaoc | 2021-06-08 18:06:19 UTC | #5 + + + +------------------------- + +pedroleaoc | 2022-10-14 11:31:52 UTC | #6 + + + +------------------------- + +txiao | 2023-03-23 13:44:35 UTC | #7 + +Configure networking section was removed in a previous revision, but its (broken) [link](https://discourse.charmhub.io/t/how-to-use-lxd-with-juju/1093#heading--configure-networking) is still in the table of content. + +------------------------- + +jameinel | 2023-03-30 18:27:48 UTC | #8 + +removed, thanks for letting us know + +------------------------- + +seth-arnold | 2023-05-02 12:57:07 UTC | #9 + +These instructions look suspect: + +``` +newgrp lxd +sudo adduser $USER lxd +``` + +The first command is likely to fail if the second hasn't already been run. + +This is actually good, as the first command would also set the *primary* group to `lxd` for the spawned process, which will infect files that the user creates in that shell and all descendant processes. `newgrp -` is better. + +Thanks + +------------------------- + +dparv | 2023-08-09 06:00:21 UTC | #10 + +Can you please change the green :negative_squared_cross_mark: with a red one? It's quite confusing for people seeing green color on both symbols: :negative_squared_cross_mark: and :white_check_mark: Also on the docs here [juju.is](https://juju.is/docs/juju/lxd#heading--use-lxd-profiles-from-a-charm) + +------------------------- + +barryprice | 2024-07-18 02:25:54 UTC | #11 + +I think there's a bug in the generated page here, specifically the YAML example for certificate-based credentials. + +Above, and on [the live page](https://juju.is/docs/juju/lxd), you have: + +```yaml +credentials: + : + : + auth-type: certificate + client-key: + -----BEGIN RSA PRIVATE KEY--- + + -----END RSA PRIVATE KEY----- + `client-cert: +``` + +It looks as if there ought to be more after that line, perhaps caused by the stray backtick - probably a placeholder for the certificate data, and perhaps a `server-cert:` section as well? + +I'm trying to set up a credential for an LXD cluster using certificates (without a `trust-password` key), and getting parsing errors from the example given: + +`ERROR parsing credentials file: credentials.lxd-microcloud. 
lxd-microcloud: expected map, got nothing` + +------------------------- + +tmihoc | 2024-07-19 09:08:19 UTC | #12 + +It was a rendering issue. Thanks for noticing it and taking the time to point it out to us. I've fixed it now and also added you to the list of contributors on the bottom of the doc. Thanks again! + +------------------------- + +jameinel | 2024-07-29 11:41:40 UTC | #13 + +BTW, since we pointed out `client-cert:` it was also clear that it was missing the YAML `|` to indicate that what followed was a multi line (ignore surrounding whitespace) string. I believe I fixed it correctly. + +------------------------- + diff --git a/tmp/t/1094.md b/tmp/t/1094.md new file mode 100644 index 000000000..573ff108c --- /dev/null +++ b/tmp/t/1094.md @@ -0,0 +1,124 @@ +system | 2024-04-25 11:59:22 UTC | #1 + + + +> [List of supported clouds](/t/6665) > MAAS + +This document describes details specific to using your existing MAAS cloud with Juju. + +> See more: [MAAS](https://maas.io/) + +When using the MAAS cloud with Juju, it is important to keep in mind that it is a (1) [machine cloud](/t/5454#heading--machine-clouds-vs--kubernetes-clouds) and (2) [not some other cloud](/t/5454#heading--cloud-foo-vs--cloud-bar). + +> See more: [Cloud differences in Juju](/t/5454#heading--cloud-differences) + +As the differences related to (1) are already documented generically in our [Tutorial](/t/6559), [How-to guides](/t/5334), and [Reference](/t/5348) docs, here we record just those that follow from (2). + +|Juju points of variation|Notes for the MAAS cloud| +|---|---| +|**setup (chronological order):**|| +|[CLOUD](/t/5454)|| +|supported versions:|Starting with `juju v.3.0`, versions of MAAS <2 are no longer supported.| +|requirements:|TBA| +|[definition:](/t/5454#heading--cloud-definition)|| +|- name:|user-defined| +|- type:|`maas`| +|- authentication types:|`[oauth1]`| +|- regions:|[TO BE ADDED]| +|- cloud-specific model configuration keys:|-| +|[CREDENTIAL](/t/6006)|| +|definition:|`auth-type`: `oauth1`, which requires you to provide your `maas-oauth`, i.e., your MAAS API key.
See more: [`MAAS` \| How to add an API key for a user](https://maas.io/docs/how-to-manage-user-accounts#heading--api-key)
| +|[CONTROLLER](/t/5455)|| +|notes on bootstrap:|--| +||| +||| +|**other (alphabetical order:)**|| +| [CONSTRAINT](/t/6184)|| +|conflicting:|TBA| +|supported?|| +|- [`allocate-public-ip`](/t/6184#heading--allocate-public-ip)|❌| +|- [`arch`](/t/6184#heading--arch)|:white_check_mark:
Valid values: See cloud provider.| +|- [`container`](/t/6184#heading--container)|:white_check_mark:| +|- [`cores`](/t/6184#heading--cores)|:white_check_mark:| +|- [`cpu-power`](/t/6184#heading--cpu-power)|❌| +|- [`image-id`](/t/6184#heading--image-id)|:white_check_mark: (Starting with Juju 3.2)
Type: String.
Valid values: An image name from MAAS.| +|- [`instance-role`](/t/6184#heading--instance-role)|❌ | +|- [`instance-type`](/t/6184#heading--instance-type)|❌| +|- [`mem`](/t/6184#heading--mem)|:white_check_mark:| +|- [`root-disk`](/t/6184#heading--root-disk)|:white_check_mark:| +|- [`root-disk-source`](/t/6184#heading--root-disk-source)|❌| +|- [`spaces`](/t/6184#heading--spaces)|:white_check_mark:| +|- [`tags`](/t/6184#heading--tags)|:white_check_mark:| +|- [`virt-type`](/t/6184#heading--virt-type)|❌| +|- [`zones`](/t/6184#heading--zones)|:white_check_mark:| +|[PLACEMENT DIRECTIVE](/t/6187)|| +|[``](/t/6187#heading--machine)|TBA| +|[`subnet=...`](/t/6187#heading--subnet)|❌ | +|[`system-id=...`](/t/6187#heading--system-id)|:white_check_mark:| +|[`zone=...`](/t/6187#heading--zone)|:white_check_mark:
If there's no '=' delimiter, assume it's a node name.| +|[MACHINE](/t/5459)|| +|[RESOURCE (cloud)](/t/1102)

Consistent naming, tagging, and the ability to add user-controlled tags to created instances.|❌ | + +
+ +> **Contributors:** @anthonydillon, @jadonn, @pedroleaoc, @pmatulis, @sparkiegeek, @timclicks, @tmihoc, @toaksoy, @wallyworld + +------------------------- + +dasm | 2021-02-26 16:35:55 UTC | #2 + +I believe `--local` is outdated +``` +--local (= false) + DEPRECATED (use --client): Local operation only; controller not affected +``` + +Can it be updated? +Thank you. + +------------------------- + +pedroleaoc | 2022-04-07 09:25:13 UTC | #3 + + + +------------------------- + +pedroleaoc | 2022-10-14 11:32:22 UTC | #4 + + + +------------------------- + +addyess | 2023-09-26 21:02:56 UTC | #5 + +consider adding an example to this doc: + +``` +clouds: + this-maas: + type: maas + auth-types: [oauth1] + endpoint: http://10.10.0.16:5240/MAAS/api/2.0 +``` + +------------------------- + +jadonn | 2024-01-18 21:02:16 UTC | #6 + +I updated the example YAML config for the clouds configuration to use correctly formatted YAML. I had problems myself when trying to use the old example in the documentation. + +------------------------- + +tmihoc | 2024-01-18 21:53:47 UTC | #7 + +@jadonn Thanks! PS Adding you to the Contributors to this doc (bottom of the doc). + +------------------------- + +jadonn | 2024-01-19 13:07:49 UTC | #8 + +It's my pleasure @tmihoc ! Thank you for adding me as a contributor! + +------------------------- + diff --git a/tmp/t/1095.md b/tmp/t/1095.md new file mode 100644 index 000000000..813d9467a --- /dev/null +++ b/tmp/t/1095.md @@ -0,0 +1,138 @@ +system | 2024-02-28 09:29:18 UTC | #1 + + + +> [List of supported clouds](/t/6665) > Manual + +This document describes details specific to using the Manual (`manual`) cloud with Juju. + +[note type=information] +The Manual (`manual`) cloud is a cloud you create with Juju from existing machines. + +The purpose of the Manual cloud is to cater to the situation where you have machines (of any nature) at your disposal and you want to create a backing cloud out of them. + +If this collection of machines is composed solely of bare metal you might opt for a [MAAS cloud](/t/using-maas-with-juju/1094). However, recall that such machines would also require [IPMI hardware](https://docs.maas.io/en/nodes-power-types) and a MAAS infrastructure. In contrast, the Manual cloud can make use of a collection of disparate hardware as well as of machines of varying natures (bare metal or virtual), all without any extra overhead/infrastructure. +[/note] + + +When using the Manual cloud with Juju, it is important to keep in mind that it is a (1) [machine cloud](/t/5454#heading--machine-clouds-vs--kubernetes-clouds) and (2) [not some other cloud](/t/5454#heading--cloud-foo-vs--cloud-bar). + +> See more: [Cloud differences in Juju](/t/5454#heading--cloud-differences) + +As the differences related to (1) are already documented generically in our [Tutorial](/t/6559), [How-to guides](/t/5334), and [Reference](/t/5348) docs, here we record just those that follow from (2). + +|Juju points of variation|Notes for the Manual cloud| +|---|---| +|**setup (chronological order):**|| +|[CLOUD](/t/5454)|| +|supported versions:| N/A| +|requirements:|- At least two pre-existing machines (one for the controller and one where charms will be deployed).
- The machines must be running on Ubuntu.
- The machines must be accessible over SSH, using public key authentication, from the terminal where you're running the Juju client (in whichever way you want to make that possible using generic Linux mechanisms; note that password-based SSH authentication is not enough).

(`sudo` rights will suffice if this provides root access. If a password is required for `sudo`, juju will ask for it on the command line.)

- The machines must be able to `ping` one another.| +|[definition:](/t/5454#heading--cloud-definition)|You will need to supply a name you wish to call your cloud and the SSH connection string for the controller machine: either the `username@host` form or just the host (a hostname or IP address).| +|- name:|user-defined| +|- type:|`manual`| +|- authentication types:|No preset auth-types. Just make sure you can SSH into the controller machine.| +|- regions:|[TO BE ADDED]| +|- cloud-specific model configuration keys:|N/A| +|[CREDENTIAL](/t/6006)|| +|definition:|Credentials should already have been set up via SSH. Nothing to do!| +|[CONTROLLER](/t/5455)|| +|notes on bootstrap:|The machine that will run the controller is the one specified during the `add-cloud` step.

**If you encounter an error of the form "initializing ubuntu user: subprocess encountered error code 255 (ubuntu@{IP}: Permission denied (publickey).)":**
Edit your `~/.ssh/config` to include the following:
`Host `     
    `IdentityFile ~/.ssh/id_ed25519`
     `ControlMaster no`
See more: https://bugs.launchpad.net/juju/+bug/2030507 | +||| +||| +|**other (alphabetical order:)**|| +| [CONSTRAINT](/t/6184)|| +|conflicting:|| +|supported?|| +|- [`allocate-public-ip`](/t/6184#heading--allocate-public-ip)|❌| +|- [`arch`](/t/6184#heading--arch)|:white_check_mark:
Valid values: For controller: `[host arch]`. For other machines: arch from machine hardware.| +|- [`container`](/t/6184#heading--container)|:white_check_mark:| +|- [`cores`](/t/6184#heading--cores)|:white_check_mark:| +|- [`cpu-power`](/t/6184#heading--cpu-power)|❌| +|- [`image-id`](/t/6184#heading--image-id)|❌ | +|- [`instance-role`](/t/6184#heading--instance-role)|❌| +|- [`instance-type`](/t/6184#heading--instance-type)|❌| +|- [`mem`](/t/6184#heading--mem)|:white_check_mark:| +|- [`root-disk`](/t/6184#heading--root-disk)|:white_check_mark:| +|- [`root-disk-source`](/t/6184#heading--root-disk-source)|❌| +|- [`spaces`](/t/6184#heading--spaces)|❌| +|- [`tags`](/t/6184#heading--tags)|❌| +|- [`virt-type`](/t/6184#heading--virt-type)|❌| +|- [`zones`](/t/6184#heading--zones)|:white_check_mark:| +|[PLACEMENT DIRECTIVE](/t/6187)|| +|[``](/t/6187#heading--machine)|TBA| +|[`subnet=...`](/t/6187#heading--subnet)|❌ | +|[`system-id=...`](/t/6187#heading--system-id)|❌| +|[`zone=...`](/t/6187#heading--zone)|TBA| +|[MACHINE](/t/5459)|With any other cloud, the Juju client can trigger the creation of a backing machine (e.g. a cloud instance) as they become necessary. In addition, the client can also cause charmed operators to be deployed automatically onto those newly-created machines. However, with a Manual cloud the machines must pre-exist and they must also be specifically targeted during charmed operator deployment.

(Note: A MAAS cloud must also have pre-existing backing machines. However, Juju, by default, can deploy charmed operators onto those machines, or add a machine to its pool of managed machines, without any extra effort.)

Machines must be added manually, unless they are LXD. Example:

`juju add-machine ssh:bob@10.55.60.93`
`juju add-machine lxd -n 2`

Further notes:
- Juju machines are always managed on a per-model basis. With a Manual cloud the `add-machine` process will need to be repeated if the model hosting those machines is destroyed.
- To improve the performance of provisioning newly-added machines, consider running an APT proxy or an APT mirror. See more: [Offline mode strategies](/t/offline-mode-strategies/1071). | +|[RESOURCE (cloud)](/t/1102)

Consistent naming, tagging, and the ability to add user-controlled tags to created instances.| N/A | + +> Contributors: @swalladge , @whershberger, @hmlanigan + +------------------------- + +pedroleaoc | 2021-06-08 18:06:29 UTC | #2 + + + +------------------------- + +pedroleaoc | 2022-10-14 11:31:59 UTC | #3 + + + +------------------------- + +swalladge | 2023-08-16 01:35:46 UTC | #4 + +[quote="system, post:1, topic:1095"] +:negative_squared_cross_mark: +[/quote] + +For the tables of constraints and placement directives, could we use emojis that are less similar for supported and not supported? Maybe ❌ for not supported for example? + +Currently the table is difficult to quickly scan, because at first glance they all appear to be green squares. + +------------------------- + +whershberger | 2023-12-06 20:48:20 UTC | #5 + +Hi! + +The "requirements" section is unclear about which ssh key needs to be authorized on the machines in the cloud. I initially took "contactable over SSH with public key authentication" to mean "accessible with a key located in the standard `~/.ssh/` dir"; It looks like it actually means "accessible over SSH with `~/.local/share/juju/ssh/juju_id_rsa.pub`". + +------------------------- + +tmihoc | 2023-12-07 10:39:50 UTC | #6 + +Updated the doc, thanks! PS Also added you as a contributor to this doc -- thanks again for your contribution! + +------------------------- + +tmihoc | 2023-12-14 11:05:58 UTC | #7 + +Having looked into this a little bit more with @hmlanigan , I've reverted the change in the docs. As @hmlanigan clarified, `~/.local/share/juju/ssh/juju_id_rsa.pub` is used by Juju automatically for Juju machines, *after provisioning*. + +As a general rule, if as a Linux user + +- you are able to do `ssh ubuntu@x.x.x.x`, then you don't have to specify a key. + +- you have to do `ssh -i ubuntu@x.x.x.x`, then you'll have to provide a key. You can do so with the `--private-key` flag of the `juju add-machine` command. + +------------------------- + +whershberger | 2023-12-13 22:39:28 UTC | #8 + +You're right; I spun a reproducer this afternoon and checked `auth.log` after bootstrapping the manual cloud. The key signatures are the ones I expect, not the ones from `~/.local/share/juju`. + +That said, I'm still not able to bootstrap a manual cloud based on the information on this page. I opened a [new topic](https://discourse.charmhub.io/t/bootstrap-manual-cloud-fails/12812) with more details. + +I think it would still be good to call out public-key authentication in the doc here, passwords won't work: "...must be accessible over SSH from the terminal you’re running the Juju client from using public key authentication" + +------------------------- + +tmihoc | 2023-12-14 11:04:12 UTC | #9 + +@whershberger Thanks for continuing to try and make it work. I'll keep an eye on the other issue and update the docs accordingly, when I know how. + +------------------------- + diff --git a/tmp/t/1096.md b/tmp/t/1096.md new file mode 100644 index 000000000..353ce77d9 --- /dev/null +++ b/tmp/t/1096.md @@ -0,0 +1,73 @@ +system | 2024-07-18 21:10:41 UTC | #1 + + + +> [List of supported clouds](/t/6665) > Oracle OCI + +This document describes details specific to using your existing Oracle OCI cloud with Juju. 
+ +> See more: [Oracle OCI](https://docs.oracle.com/en-us/iaas/Content/home.htm) + +When using the Oracle OCI cloud with Juju, it is important to keep in mind that it is a (1) [machine cloud](/t/5454#heading--machine-clouds-vs--kubernetes-clouds) and (2) [not some other cloud](/t/5454#heading--cloud-foo-vs--cloud-bar). + +> See more: [Cloud differences in Juju](/t/5454#heading--cloud-differences) + +As the differences related to (1) are already documented generically in our [Tutorial](/t/6559), [How-to guides](/t/5334), and [Reference](/t/5348) docs, here we record just those that follow from (2). + +|Juju points of variation|Notes for the Oracle OCI cloud| +|---|---| +|**setup (chronological order):**|| +|[CLOUD](/t/5454)|| +|supported versions:|| +|requirements:|| +|[definition:](/t/5454#heading--cloud-definition)|:information_source: Juju automatically defines a cloud of this type.| +|- name:|`oracle` or user-defined| +|- type:|`oci`| +|- authentication types:|`[httpsig]`| +|- regions:|[TO BE ADDED]| +|- cloud-specific model configuration keys:|**`address-space`** (string)
The CIDR block to use when creating default subnets. The subnet must have at least a /16 size.

**`compartment-id`** (string)
The OCID of the compartment in which juju has access to create resources.
| +|[CREDENTIAL](/t/6006)|| +|definition:| :warning: Starting with Juju 3.3.1, the region field is ignored.

`auth-type`: `httpsig`. You will be asked to provide your SSL private key fingerprint, SSL private key, a cloud region, your SSL private key passphrase, and your user, tenancy, and compartment OCID.
> See more: [Oracle OCI `|` Account and access concepts](https://docs.oracle.com/en-us/iaas/Content/GSG/Concepts/concepts-account.htm)

**If you want to use a YAML file:**

`credentials:`
 `oracle:`
  `:`
   `auth-type: httpsig`
   `fingerprint: `
   `key: `
   `region: `
   `pass-phrase: `
   `tenancy: `
   `user: `| +|[CONTROLLER](/t/5455)|| +|notes on bootstrap:|You have to specify the compartment OCID via the cloud-specific `compartment-id` model configuration key (see below).
Example: `juju bootstrap --config compartment-id= oracle oracle-controller`| +|**other (alphabetical order:)**|| +|[CONSTRAINT](/t/6184)|| +|conflicting:|TBA| +|supported?|| +|- [`allocate-public-ip`](/t/6184#heading--allocate-public-ip)|| +|- [`arch`](/t/6184#heading--arch)|:white_check_mark:
Valid values: `[amd64, arm64]`.| +|- [`container`](/t/6184#heading--container)|❌| +|- [`cores`](/t/6184#heading--cores)|:white_check_mark:| +|- [`cpu-power`](/t/6184#heading--cpu-power)|:white_check_mark:| +|- [`image-id`](/t/6184#heading--image-id)|❌| +|- [`instance-role`](/t/6184#heading--instance-role)|❌| +|- [`instance-type`](/t/6184#heading--instance-type)|:white_check_mark:| +|- [`mem`](/t/6184#heading--mem)|:white_check_mark:| +|- [`root-disk`](/t/6184#heading--root-disk)|:white_check_mark:| +|- [`root-disk-source`](/t/6184#heading--root-disk-source)|❌| +|- [`spaces`](/t/6184#heading--spaces)|❌| +|- [`tags`](/t/6184#heading--tags)|❌| +|- [`virt-type`](/t/6184#heading--virt-type)|❌| +|- [`zones`](/t/6184#heading--zones)|:white_check_mark:| +|[PLACEMENT DIRECTIVE](/t/6187)|| +|[``](/t/6187#heading--machine)|TBA| +|[`subnet=...`](/t/6187#heading--subnet)|❌| +|[`system-id=...`](/t/6187#heading--system-id)|❌| +|[`zone=...`](/t/6187#heading--zone)|TBA| +|[MACHINE](/t/5459)|| +|[RESOURCE (cloud)](/t/1102)

Consistent naming, tagging, and the ability to add user-controlled tags to created instances.|❌ | + +------------------------- + +pedroleaoc | 2021-06-08 18:06:29 UTC | #2 + + + +------------------------- + +pedroleaoc | 2022-10-14 11:31:57 UTC | #3 + + + +------------------------- + diff --git a/tmp/t/1097.md b/tmp/t/1097.md new file mode 100644 index 000000000..b9770966a --- /dev/null +++ b/tmp/t/1097.md @@ -0,0 +1,262 @@ +system | 2024-10-22 09:37:50 UTC | #1 + + + +> [List of supported clouds](/t/6665) > OpenStack + +This document describes details specific to using your existing OpenStack cloud with Juju. + +> See more: [OpenStack](https://www.openstack.org/) + +When using the OpenStack cloud with Juju, it is important to keep in mind that it is a (1) [machine cloud](/t/5454#heading--machine-clouds-vs--kubernetes-clouds) and (2) [not some other cloud](/t/5454#heading--cloud-foo-vs--cloud-bar). + +> See more: [Cloud differences in Juju](/t/5454#heading--cloud-differences) + +As the differences related to (1) are already documented generically in our [Tutorial](/t/6559), [How-to guides](/t/5334), and [Reference](/t/5348) docs, here we record just those that follow from (2). + +## Supported cloud versions + +Any version that supports:
- compute v2 (Nova)
- network v2 (Neutron) (optional)
- volume v2 (Cinder) (optional)
- identity v2 or v3 (Keystone) + +## Notes on `juju add-cloud` + +Type in Juju: `openstack`. + +Name in Juju: User-defined. + +**If you want to use the novarc file (recommended):**
Source the OpenStack RC file (`source `). This will allow Juju to detect values from preset OpenStack environment variables. Run `add-cloud` in interactive mode and accept the suggested defaults. + +## Notes on `juju add-credential` + +[note type=information] +**If you want to use environment variables (recommended):**
Source the OpenStack RC file (see above). Run `add-credential` and accept the suggested defaults. +[/note] + +### Authentication types + +#### `userpass` + +Attributes: + +- username: The username to authenticate with. (required) +- password: The password for the specified username. (required) +- tenant-name: The OpenStack tenant name. (optional) +- tenant-id: The Openstack tenant ID (optional) +- version: The Openstack identity version (optional) +- domain-name: The OpenStack domain name. (optional) +- project-domain-name: The OpenStack project domain name. (optional) +- user-domain-name: The OpenStack user domain name. (optional) + + +## Notes on `juju bootstrap` + +You will need to create an OpenStack machine metadata. If the metadata is available locally, you can pass it to Juju via `juju bootstrap ... --metadata-source > See more: [How to manage metadata](/t/13276)
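For example, here is a minimal sketch of generating local image metadata and passing it at bootstrap time; the Glance image ID, region name, and Keystone endpoint are hypothetical placeholders for your own values:

```text
# Generate simplestreams image metadata for an image that already exists in Glance
# (the image ID, region, and Keystone URL below are placeholders).
mkdir -p ~/simplestreams
juju metadata generate-image -d ~/simplestreams -i <glance-image-id> --base ubuntu@22.04 -r RegionOne -u https://keystone.example.com:5000/v3
# Point bootstrap at the locally generated metadata.
juju bootstrap --metadata-source ~/simplestreams my-openstack my-openstack-controller
```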

**If your cloud has multiple private networks:** You will need to specify the one that you want the instances to boot from via `juju bootstrap ... --model-default network=`.
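For instance, assuming the cloud was added as `my-openstack` and instances should boot from a private network named `internal-net` (both names hypothetical):

```text
juju bootstrap my-openstack --model-default network=internal-net
```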

**If your cloud's topology requires that its instances are accessed via floating IP addresses:** Pass the `allocate-public-ip=true` (see constraints below) as a bootstrap constraint. + + +## Cloud-specific model configuration keys + +### external-network +The network label or UUID to create floating IP addresses on when multiple external networks exist. + +| | | +|-|-| +| type | string | +| default value | "" | +| immutable | false | +| mandatory | false | + +### use-openstack-gbp +Whether to use Neutrons Group-Based Policy + +| | | +|-|-| +| type | bool | +| default value | false | +| immutable | false | +| mandatory | false | + +### policy-target-group +The UUID of Policy Target Group to use for Policy Targets created. + +| | | +|-|-| +| type | string | +| default value | "" | +| immutable | false | +| mandatory | false | + +### use-default-secgroup +Whether new machine instances should have the "default" Openstack security group assigned in addition to juju defined security groups. + +| | | +|-|-| +| type | bool | +| default value | false | +| immutable | false | +| mandatory | false | + +### network +The network label or UUID to bring machines up on when multiple networks exist. + +| | | +|-|-| +| type | string | +| default value | "" | +| immutable | false | +| mandatory | false | + +## Supported constraints + +|Juju points of variation|Notes for the OpenStack cloud| +| --- | --- | +|[CONSTRAINT](/t/6184)|| +|conflicting:|`[instance-type]` vs. `[mem, root-disk, cores]`| +|supported?|| +|- [`allocate-public-ip`](/t/6184#heading--allocate-public-ip)|:white_check_mark:| +|- [`arch`](/t/6184#heading--arch)|:white_check_mark:| +|- [`container`](/t/6184#heading--container)|:white_check_mark:| +|- [`cores`](/t/6184#heading--cores)|:white_check_mark:| +|- [`cpu-power`](/t/6184#heading--cpu-power)|❌| +|- [`image-id`](/t/6184#heading--image-id)|:white_check_mark: (Starting with Juju 3.3)
Type: String.
Valid values: An OpenStack image ID.| +|- [`instance-role`](/t/6184#heading--instance-role)|❌| +|- [`instance-type`](/t/6184#heading--instance-type)|:white_check_mark:
Valid values: Any (cloud admin) user defined OpenStack flavor.| +|- [`mem`](/t/6184#heading--mem)|:white_check_mark:| +|- [`root-disk`](/t/6184#heading--root-disk)|:white_check_mark:| +|- [`root-disk-source`](/t/6184#heading--root-disk-source)|:white_check_mark:
`root-disk-source` is either `local` or `volume`.| +|- [`spaces`](/t/6184#heading--spaces)|❌| +|- [`tags`](/t/6184#heading--tags)|❌| +|- [`virt-type`](/t/6184#heading--virt-type)|:white_check_mark:
Valid values: `[kvm, lxd]`.| +|- [`zones`](/t/6184#heading--zones)|:white_check_mark:| +|[PLACEMENT DIRECTIVE](/t/6187)|| +|[``](/t/6187#heading--machine)|TBA| +|[`subnet=...`](/t/6187#heading--subnet)|❌| +|[`system-id=...`](/t/6187#heading--system-id)|❌| +|[`zone=...`](/t/6187#heading--zone)|:white_check_mark:| +|[MACHINE](/t/5459)|--| +|[RESOURCE (cloud)](/t/1102)

Consistent naming, tagging, and the ability to add user-controlled tags to created instances.|:white_check_mark:| + + +> Contributors: @acsgn, @gerdner, @hallback, @tmihoc, @wallyworld + +------------------------- + +aurelien-lourot | 2019-12-01 20:55:32 UTC | #2 + +We should tell people to do + +``` +juju bootstrap ... --model-default use-floating-ip=true +``` + +instead of + +``` +juju bootstrap ... --config use-floating-ip=true +``` + +because the latter only affects the `default` model, whereas the former will affect all models the user will create. See [Configuring models](https://jaas.ai/docs/configuring-models). I personally wasted a lot of time because of this. I was creating my own model right after bootstrapping the controller and couldn't understand why that option had no effect. + +------------------------- + +timClicks | 2019-12-01 21:57:13 UTC | #3 + +Oh wow, I feel your pain. Thanks for taking the time to comment. I'll update the recommendation. [Edit: I've made several changes to the document. Hopefully it's easier to follow now.] + +------------------------- + +aurelien-lourot | 2019-12-02 08:48:39 UTC | #4 + +@timClicks Thanks for the quick reaction! It's actually `--model-default`, not `--model-defaults` (or do both work?). It's easy to mix up with `juju model-defaults` which takes an 's' ;) + +------------------------- + +timClicks | 2019-12-02 09:11:02 UTC | #5 + +Fixed! Thanks for taking another look :) + +------------------------- + +nikolay.vinogradov | 2021-08-20 14:49:42 UTC | #6 + +[quote="system, post:1, topic:1097"] +OpenStack networks (public and private) can be listed with: +[/quote] + +Perhaps "$ openstack network list " is missing here. + +------------------------- + +hmlanigan | 2021-08-20 18:07:41 UTC | #7 + +Thank you! I've made the change. + +------------------------- + +pedroleaoc | 2022-04-07 09:24:57 UTC | #8 + + + +------------------------- + +pedroleaoc | 2022-10-14 11:31:49 UTC | #9 + + + +------------------------- + +gerdner | 2024-09-09 14:31:59 UTC | #10 + +"access-key" seems to be a v2 auth method and is not supported since 2018. The only working auth-method for up to date openstack deployments is userpass. See https://bugs.launchpad.net/juju/+bug/1834433 + +------------------------- + +tmihoc | 2024-09-09 15:00:21 UTC | #11 + +Thanks for raising this -- I'll investigate! + +PS At first glance, the `access-key` does still seem to be around: https://github.com/juju/juju/blob/3aec73a1c7bab405b04c0ff97eac042fda55f178/provider/openstack/credentials.go#L80 I'll see how those bugs impact its usability though -- thanks again. + +------------------------- + +gerdner | 2024-09-09 15:03:41 UTC | #12 + +Juju client uses goose under the hood. The access-key auth type will fall back to a v2 request see: https://github.com/go-goose/goose/blob/v5/client/client.go#L302 and will fail with a 404 not found (old v2 url) the error is missleading since v3 was specified in url and config but not used. + +------------------------- + +tmihoc | 2024-09-09 15:05:22 UTC | #13 + +Thanks for the extra detail -- I'll definitely investigate! + +------------------------- + +wallyworld | 2024-09-10 06:10:32 UTC | #14 + +`access-key` auth was done almost 10 years ago now back when Openstack offered an AWS compatibility shim. As stated above, my understanding is also that it's no longer supported and it should be removed from the Juju code base. 
+ +------------------------- + +gerdner | 2024-09-10 11:23:42 UTC | #15 + +I don't want to sound harsh, but this also means that juju currently does not support openstack clouds with external auth via saml/oidc like most public openstack clouds and most private openstack clouds. I mention this because this was a not expected limitation, I found out after debugging for hours. I guess this is not the right place to push this feature request, but it could help others looking for juju+openstack support to know about this limitation. + +------------------------- + +tmihoc | 2024-09-10 11:32:21 UTC | #16 + +I'll raise this with the developers. Thanks! + +------------------------- + +tmihoc | 2024-09-12 07:16:17 UTC | #17 + +@gerdner Updated authentication types to exclude `access-key` (and added you to the list of contributors on the bottom of the doc -- thanks for making me aware of the issue). + +Also filed a bug to have related info removed from the codebase: https://bugs.launchpad.net/juju/+bug/2080203 . + +Also raised the issue of what this means for OpenStack clouds with external authentication via SAML/OIDC with the developers: Turns out this has been discussed before and there is indeed already a feature request to this effect. I've added your comment to the existing request to strengthen the case. + +Thanks again! + +------------------------- + diff --git a/tmp/t/1099.md b/tmp/t/1099.md new file mode 100644 index 000000000..9c23f604e --- /dev/null +++ b/tmp/t/1099.md @@ -0,0 +1,338 @@ +system | 2024-07-22 13:59:42 UTC | #1 + + + +> [List of supported clouds](/t/6665) > VMware vSphere + +This document describes details specific to using your existing VMware vSphere cloud with Juju. + +> See more: [VMware vSphere](https://docs.vmware.com/) + +When using the VMware vSphere cloud with Juju, it is important to keep in mind that it is a (1) [machine cloud](/t/5454#heading--machine-clouds-vs--kubernetes-clouds) and (2) [not some other cloud](/t/5454#heading--cloud-foo-vs--cloud-bar). + +> See more: [Cloud differences in Juju](/t/5454#heading--cloud-differences) + +As the differences related to (1) are already documented generically in our [Tutorial](/t/6559), [How-to guides](/t/5334), and [Reference](/t/5348) docs, here we record just those that follow from (2). + +|Juju points of variation|Notes for the VMware vSphere cloud| +|---|---| +|**setup (chronological order):**|| +|[CLOUD](/t/5454)|| +|supported versions:|| +|requirements:|In order to add a vSphere cloud you will need to have an existing vSphere installation which supports, or has access to, the following:

- VMware Hardware Version 8 (or greater)

- ESXi 5.0 (or greater)

- Internet access

- DNS and DHCP

Juju supports both high-availability vSAN deployments and standard deployments. | +|[definition:](/t/5454#heading--cloud-definition)|| +|- cloud name: | user-defined| +|- type:|`vsphere`| +|- endpoint | vSphere endpoint | +|- region | Datacenter| +|- authentication types:|`[userpass]`| +|- cloud-specific model configuration keys:|**`datastore`**
The datastore in which to create VMs. If this is not specified, the process will abort unless there is only one datastore available.

**`disk-provisioning-type`**
This dictates how template VM disks should be cloned when creating a new machine.
Valid values:

- *thin* - Sparse provisioning, only written blocks will take up disk space on the datastore

- *thick* - The entire size of the virtual disk will be deducted from the datastore, but unwritten blocks will not be zeroed out. This adds 2 potential pitfalls. See comments in provider/vsphere/internal/vsphereclient/client.go regarding DiskProvisioningType.

- *thickEagerZero (default)* - The entire size of the virtual disk is deducted from the datastore, and unwritten blocks are zeroed out. Improves latency when committing to disk, as no extra step needs to be taken before writing data.

**`external-network`**
An external network that VMs will be connected to. The resulting IP address for a VM will be used as its public address. An external network provides the interface to the internet for virtual machines connected to external organization vDC networks.

**`force-vm-hardware-version`** (integer)
Allows operators to set a newer compatibility version for the instances that get spawned by Juju. E.g., `juju bootstrap vsphere --config force-vm-hardware-version=17`

**`primary-network`**
The primary network that VMs will be connected to. If this is not specified, Juju will look for a network named `VM Network`.| +|[CREDENTIAL](/t/6006)|| +|definition:|`auth-type:` userpass. You will have to provide your username, password and, optionally, the vmfolder.

:warning: **If your credential stops working:** Credentials for the vSphere cloud have been reported to occasionally stop working over time. If this happens, try `juju update-credential` (passing as an argument the same credential) or `juju add-credential` (passing as an argument a new credential) + `juju default-credential`. | +|[CONTROLLER](/t/5455)|| +|notes on bootstrap:|Recommended: Bootstrap with the following cloud-specific model-configuration keys: `datastore` and `primary-network`. See more below.

**Pro tip:** When creating a controller with vSphere, a cloud image is downloaded to the client and then uploaded to the ESX host. This depends on your network connection and can take a while. Using [templates](#heading--using-templates) can speed up bootstrap and machine deployment.| +||| +||| +|**other (alphabetical order:)**|| +| [CONSTRAINT](/t/6184)|| +|conflicting:|| +|supported?|| +|- [`allocate-public-ip`](/t/6184#heading--allocate-public-ip) |❌| +|- [`arch`](/t/6184#heading--arch)|:white_check_mark:
Valid values: `[amd64]`.| +|- [`container`](/t/6184#heading--container)|:white_check_mark:| +|- [`cores`](/t/6184#heading--cores)|:white_check_mark:| +|- [`cpu-power`](/t/6184#heading--cpu-power)|:white_check_mark:| +|- [`image-id`](/t/6184#heading--image-id)|❌ | +|- [`instance-role`](/t/6184#heading--instance-role)|❌| +|- [`instance-type`](/t/6184#heading--instance-type)|:white_check_mark:| +|- [`mem`](/t/6184#heading--mem)|:white_check_mark:| +|- [`root-disk`](/t/6184#heading--root-disk)|:white_check_mark:| +|- [`root-disk-source`](/t/6184#heading--root-disk-source)|:white_check_mark:
`root-disk-source` is the datastore for the root disk| +|- [`spaces`](/t/6184#heading--spaces)|❌| +|- [`tags`](/t/6184#heading--tags)|❌| +|- [`virt-type`](/t/6184#heading--virt-type)|❌| +|- [`zones`](/t/6184#heading--zones)|:white_check_mark:

Use this to specify resource pools within a host or cluster, e.g.

`juju deploy myapp --constraints zones=myhost`

`juju deploy myapp --constraints zones=myfolder/myhost`

`juju deploy myapp --constraints zones=mycluster/mypool`

`juju deploy myapp --constraints zones=mycluster/myparent/mypool`| +|[PLACEMENT DIRECTIVE](/t/6187)|| +|[``](/t/6187#heading--machine)| :white_check_mark: | +|[`subnet=...`](/t/6187#heading--subnet)|❌ | +|[`system-id=...`](/t/6187#heading--system-id)|❌| +|[`zone=...`](/t/6187#heading--zone)|:white_check_mark:
Valid values: ``.

:warning: If your topology has a cluster without a host, Juju will see this as an availability zone and may fail silently. To solve this, either make sure the host is within the cluster, or use a placement directive: `juju bootstrap vsphere/ --to zone=`.| +|[MACHINE](/t/5459)|| +|[RESOURCE (cloud)](/t/1102)

Consistent naming, tagging, and the ability to add user-controlled tags to created instances.|❌ | + + +## Other notes + +

### Using templates

+ +To speed up bootstrap and deploy, you can use VM templates, already created in your vSphere. Templates can be created by hand on your vSphere, or created from an existing VM. + +Examples assume that the templates are in directory $DATA_STORE/templates. + +Via simplestreams: +```text +mkdir -p $HOME/simplestreams +juju-metadata generate-image -d $HOME/simplestreams/ -i "templates/juju-focal-template" --base ubuntu@22.04 -r $DATA_STORE -u $CLOUD_ENDPOINT +juju-metadata generate-image -d $HOME/simplestreams/ -i "templates/juju-noble-template" --base ubuntu@24.04 -r $DATA_STORE -u $CLOUD_ENDPOINT +juju bootstrap --metadata-source $HOME/image-streams vsphere +``` + +Bootstrap juju with the controller on a VM running focal: +```text +juju bootstrap vsphere --bootstrap-image="templates/focal-test-template" --bootstrap-base ubuntu@22.04 --bootstrap-constraints "arch=amd64" +``` + +Using [add-image](https://discourse.charmhub.io/t/new-feature-in-juju-2-8-add-custom-machine-images-with-the-juju-metadata-command/3171): +```text +juju metadata add-image templates/bionic-test-template --base ubuntu@22.04 +``` + +------------------------- + +erik-lonroth | 2019-11-27 00:21:27 UTC | #2 + +Question: "How do I add a "centos7" image to vsphere? + +Question: "How do I make use of additional datastores apart from the one configured in the controller or model?" + +Question: "Can juju handle NFS type datastores which exists in vsphere?" Very useful for providing NAS to juju units. + +Question: "How do I make use of networks in vsphere?" - Is spaces available yet for vsphere? + +------------------------- + +panda | 2020-02-11 15:15:13 UTC | #3 + +What is a correct example for +``` +juju deploy myapp --constraints zones=mycluster/mygroup +juju deploy myapp --constraints zones=mycluster/myparent/mygroup +``` + +Because I cannot get it to work + +------------------------- + +erik-lonroth | 2020-02-11 20:20:13 UTC | #4 + +We have a mapping as: + +(vsphere) datacenter = (juju) region + +According to docs for vsphere, this mapping can be different depending on your vsphere configuration. + +Hope it helps. + +------------------------- + +nniehoff | 2020-03-02 16:06:14 UTC | #5 + +It would be nice to explain the minimum required privileges need in vSphere to bootstrap/deploy a juju environment to a vSphere cloud. Based on this commit [1] we know at least System.Read is required on the Datacenter entity. Clearly you need more than just read. + +Thanks, +Nick + +[1] https://github.com/juju/juju/commit/c9eeea11702d1172e5f68deff2c298b9199e8299#diff-4f35754aeeeeb70bd4f4e59b409336ab + +------------------------- + +erik-lonroth | 2020-03-03 19:11:38 UTC | #6 + +Hey Nick + +We are running juju with vsphere and have some experience. + +The latest release of juju came with alot of changes to improve the situation with vsphere but I agree that it's unclear just exactly how to setup any cloud for a proper juju situation. Lxd might be the best documented cloud in my opinion. + +We have experimented our way forward and are not super happy about that. + +Perhaps we missed something, but, yeah. + +@rick_h + +------------------------- + +nniehoff | 2020-03-03 19:30:42 UTC | #7 + +Erik, + When giving juju an admin account everything is fine, but in the spirit of security and least privileged access it would be good to know exactly what that least privilege is. We also have customers that have this requirement and would like to see this documentation. 
+ +Nick + +------------------------- + +erik-lonroth | 2020-03-03 20:22:24 UTC | #8 + +@nniehoff I feel with you. + +I am not an expert in VMware, so I rely on some people at work to deal with these things which in our case also relates to Active directory I think. + +But, the main issue for me at the moment is to be able to properly separate users views on provisioned vms etc. This is what seems to be partially solved/addressed/mitigated with later versions of juju. + +The previous situation was that every user had a view of all vms for everyone since user access was enabled for the whole datacenter in vsphere. + +------------------------- + +timClicks | 2020-03-04 07:40:35 UTC | #9 + +[quote="erik-lonroth, post:6, topic:1099"] +unclear just exactly how to setup any cloud for a proper juju situation +[/quote] + +There certainly is missing information for best practice. I particularly hate that we ask people to give Juju full admin privileges on AWS: https://discourse.jujucharms.com/t/using-amazon-aws-with-juju/1084 + +To address some of these concerns, I hope to expand the post below into a full guide. It sounds like guide should be supplemented for guides for each cloud. + +https://discourse.jujucharms.com/t/what-are-your-tips-for-running-juju-in-production/2573 + +------------------------- + +nobuto | 2020-10-12 05:36:19 UTC | #11 + +[quote="system, post:1, topic:1099"] +Here is an example: + +``` +clouds: + vsp-cloud: + type: vsphere + auth-types: [userpass] + endpoint: 178.X.Y.Z + regions: + dc0: {} # these empty maps + dc1: {} # are necessary +``` +[/quote] + +That IP address is real. Can you please change it to something else? e.g., `192.0.2.1` from [RFC 5737](https://tools.ietf.org/html/rfc5737). + +------------------------- + +benhoyt | 2020-10-11 23:39:34 UTC | #12 + +Good call -- updated. + +------------------------- + +nobuto | 2020-10-12 01:07:58 UTC | #13 + +[quote="system, post:1, topic:1099"] +Here is an example: + +``` +clouds: + vsp-cloud: + type: vsphere + auth-types: [userpass] + endpoint: 192.0.2.1 + regions: + dc0: {} # these empty maps + dc1: {} # are necessary +``` +[/quote] + +[quote="system, post:1, topic:1099"] +juju bootstrap vsphere/ --to zone= +[/quote] + +The description assumes that datacenters are the top level resources. However, we saw some cases where a datacenter was under a folder, for example in [LP: #1884490](https://bugs.launchpad.net/juju/+bug/1884490). + +It would be nice if the doc covers such a scenario and the usecase is validated. I'm talking about the case `govc` returns the following. + +``` +$ govc ls +/MyFolder/MyDC/vm +/MyFolder/MyDC/network +/MyFolder/MyDC/host +/MyFolder/MyDC/datastore +``` + +and Juju config syntax would look like the following: + +``` + regions: + dc0: {} # these empty maps + dc1: {} # are necessary + MyFolder/MyDC: {} # datacenter under a folder +``` + +``` +juju bootstrap vsphere// --to zone= +``` + +------------------------- + +erik-lonroth | 2020-10-28 14:04:04 UTC | #14 + +What would the "primary-network" typically look like from a IP point of view? 172.X.X.X or would it be something else? + +Will juju automatically create VM:s with 2 NICs connected to these networks (primary-network + external-network) if they are available? + +[Update #1] Yes, I see from experimentation that juju seem to spawn nodes with a second NIC if I set `"juju config external-network=NETWORK_NAME_2"` and attaches that to the running VM:s. 
So, I guess that the assignment if their IP:s now needs a second DHCP server also running on that network? + +------------------------- + +sssler-scania | 2021-09-30 05:44:11 UTC | #15 + +[quote="nobuto, post:13, topic:1099"] +`govc ` +[/quote] + +I didn't know there was such a tool "govc" is this a supported tool or what can you tell me about it? It looks very interesting to use in conjunction with juju for vsphere cloud. + +@jamesbeedy @hallback @heitor + +------------------------- + +pedroleaoc | 2022-04-07 09:24:56 UTC | #16 + + + +------------------------- + +pedroleaoc | 2022-10-14 11:31:44 UTC | #17 + + + +------------------------- + +sombrafam | 2023-11-24 20:53:00 UTC | #18 + +I cant get this to work. I did the following: + +```sh +DATA_STORE="datastore1" +CLOUD_ENDPOINT=10.230.65.170 +mkdir -p $HOME/simplestreams +/home/ubuntu/internal_git/juju/_build/linux_amd64/bin/juju-metadata generate-image -d $HOME/simplestreams/ -i "cloud-images/ubuntu-20.04-server-cloudimg-amd64.ova" -s focal -r $DATA_STORE -u $CLOUD_ENDPOINT + +/home/ubuntu/internal_git/juju/_build/linux_amd64/bin/juju --debug bootstrap vsphere --metadata-source $HOME/simplestreams --bootstrap-image="cloud-images/ubuntu-20.04-server-cloudimg-amd64.ova" --bootstrap-series=focal --bootstrap-constraints "arch=amd64" --config datastore=$DATA_STORE + +``` + +I'm getting the following error: + +```log +20:27:43 DEBUG juju.environs.simplestreams simplestreams.go:491 skipping index "file:///home/ubuntu/simplestreams/images/streams/v1/index.json" because of missing information: "image-downloads" data not found +``` + +It seems that Juju is still searching for 'image-url' in order to download the data, when it should not do any downloads. In the end, the bootstrap works but because it falls back to download from the internet. + +[1] Full logs: https://bit.ly/3urzDe9 + +------------------------- + +hmlanigan | 2023-12-01 18:27:01 UTC | #19 + +@sombrafam, + + How did you create the template? + +Juju does inject some things into the general templates when one is not specified. I've had the best experience copying a template created by juju to a folder which can be shared within the datastore. + +Also double check the location specified in the `$HOME/simplestreams/images/streams/v1/com.ubuntu.cloud-released-imagemetadata.json` file. Each image has an id like ` "id": "juju-ci-root/templates/focal-test-template",` that you should be able to find in the datastore. `juju-ci-root` is a directory in the datastore. I also use it as the `vmfolder` specified in my vsphere credentials. + +A second thought is there is a conflict by specifying both `--metadata-source $HOME/simplestreams --bootstrap-series=focal` and `--bootstrap-image="cloud-images/ubuntu-20.04-server-cloudimg-amd64.ova" ` at bootstrap. You're giving juju seemingly 2 sources of truth. Please try again, leaving out the `--bootstrap-image` flag. + +------------------------- + diff --git a/tmp/t/1100.md b/tmp/t/1100.md new file mode 100644 index 000000000..97107266e --- /dev/null +++ b/tmp/t/1100.md @@ -0,0 +1,504 @@ +system | 2024-08-09 13:41:31 UTC | #1 + + + +> See also: [Cloud](/t/5454), [List of supported clouds](/t/6665) + +This document shows how to manage your existing cloud(s) with Juju. 
+ +**Contents:** + +- [Add a cloud](#heading--add-a-cloud) +- [View all the known clouds](#heading--view-all-the-known-clouds) +- [View details about a cloud](#heading--view-details-about-a-cloud) +- [Manage cloud regions](#heading--manage-cloud-regions) +- [Manage cloud credentials](#heading--manage-cloud-credentials) +- [Update a cloud](#heading--update-a-cloud) +- [Remove a cloud](#heading--remove-a-cloud) + + +

## Add a cloud

+> See also: [List of supported clouds > \ > CLOUD](/t/6665) + + +[note type=information] +This step is typically required only for non-local private clouds. +[/note] + +[tabs] +[tab version="juju"] + + + +The procedure for how to add a cloud definition to Juju depends on whether the cloud is a machine (traditional, non-Kubernetes) cloud or rather a Kubernetes cloud. + +[note type=information] + +In either case, the cloud definition is saved to directory defined in the `JUJU_DATA` environment variable (default path: `~/.local/share/juju/`), in a file called `clouds.yaml`. +[/note] + +- [Add a machine cloud](#heading--add-a-machine-cloud) +- [Add a Kubernetes cloud](#heading--add-a-kubernetes-cloud) + +

### Add a machine cloud

+ +[note type=positive] +**If your cloud is a public cloud or a local LXD cloud:**
+Juju likely already knows about it, so you can skip this step. Run `juju clouds` to confirm. +[/note] + +To add a machine cloud to Juju, run the `add-cloud` command: + +```text +juju add-cloud +``` + +This will start an interactive session where you'll be asked to choose a cloud type (from a given list), the name that you want to use for your cloud, the API endpoint, the authentication type, etc. Juju will use this to create a cloud definition. + + + +The command also has a manual mode where you can specify the desired cloud name and cloud definition file in-line; whether you want this definition to be known just to the Juju client or also to an existing controller (the latter creates what is known as a multi-cloud controller); etc. + +> See more: [`juju add-cloud`](/t/10162), [Cloud definition](/t/5454#heading--cloud-definition), [`juju` environment variables > `JUJU_DATA`](/t/1162#heading--jujudata) + +
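As a sketch of that manual mode, the following defines a hypothetical OpenStack-type cloud named `my-openstack` in a local file and registers it with the client only (the region and endpoint are placeholders):

```text
cat > my-openstack.yaml <<EOF
clouds:
  my-openstack:
    type: openstack
    auth-types: [userpass]
    regions:
      RegionOne:
        endpoint: https://keystone.example.com:5000/v3
EOF
juju add-cloud my-openstack ./my-openstack.yaml --client
```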

### Add a Kubernetes cloud

+ +[note type=positive] +**If your cloud is a local MicroK8s cloud:**
+Juju likely already knows about it, so you can skip this step. Run `juju clouds` to confirm. +[/note] + +To add a Kubernetes cloud to Juju: + +1. Prepare your kubeconfig file. + +2. Run the `add-k8s` command followed by the desired cloud name: + +[note type=caution status="If your cloud is a public Kubernetes cloud (Amazon EKS, Google GKE, or Microsoft AKS)"] +You must replace the client from the strictly confined snap (`juju`) with its 'raw' version from the snap directory (`/snap/juju/current/bin/juju`). So, instead of the command below, run `/snap/juju/current/bin/juju add-k8s `. + + +This is required because, starting with Juju 3.0, the `juju` CLI client is a strictly confined snap, whereas the public cloud CLIs are not (see [discussion](https://bugs.launchpad.net/juju/+bug/2007575)), and it is only necessary for this step – for any other step you can go back to using the client from the strictly confined snap (so, you can keep typing just `juju`). + +[/note] + + +```text +juju add-k8s +``` + +Juju will check the default location for the kubeconfig file and use the information in there to create a cloud definition. + +The command also allows you to specify a non-default kubeconfig file path (via the `KUBECONFIG` environment variable); in the case where you have multiple cluster definitions and credentials in your kubeconfig file, which cluster and credential to use; what name you want to assign to your new cloud; whether you want to make this cloud known just to the client or also to an existing controller (the latter gives rise to what is known as a multi-cloud controller); etc. + +> See more: [`juju add-k8s`](/t/10049) + +[/tab] +[tab version="terraform juju"] + +Terraform Juju does not support managing clouds. Please use the `juju` CLI instead. + +The cloud for your Terraform plan will be whichever cloud the controller in the plan's provider definition is associated with. + +> See more: [How to manage the client](/t/1083) + +If the controller is associated with more than one cloud (i.e., if it is a multi-cloud controller), you can decide which cloud a specific model in your plan should be associated with by specifying the cloud attribute. + +> See more: [How to add a model](/t/1155#heading--add-a-model) + +[/tab] + +[tab version="python libjuju"] + +> Note that, with `python-libjuju`, you can only add a cloud definition to a controller you've already bootstrapped with the `juju` client. + +To add a cloud, you may use the `Controller.add_cloud()` method on a connected `Controller` object. +```python +from juju.client import client as jujuclient + +await my_controller.add_cloud("my-cloud", + jujuclient.Cloud( + auth_types=["userpass"], + endpoint="http://localhost:1234", + type_="kubernetes", + )) + +``` + + + +> See more: [`add_cloud` (method)](https://pythonlibjuju.readthedocs.io/en/latest/api/juju.controller.html#juju.controller.Controller.add_cloud), [`Cloud` (object)](https://pythonlibjuju.readthedocs.io/en/latest/api/juju.client.html#juju.client._definitions.Cloud) + +[/tab] +[/tabs] + + +
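As a minimal sketch of the `juju` CLI flow above, assuming a hypothetical kubeconfig path and cloud name:

```text
# Use a non-default kubeconfig file and register the cloud with the local client only.
KUBECONFIG=~/kubeconfigs/dev-cluster.yaml juju add-k8s my-k8s-cloud --client
```

Passing `--controller <controller name>` instead of `--client` would also make the new cloud known to an existing controller.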

## View all the known clouds

+ +[tabs] +[tab version="juju"] + +To get a list of all the clouds that your Juju client is currently aware of, run the `clouds` command with the `--client` and `-all` flags: + +```text +juju clouds --client --all +``` + +This will return something similar to: + +```text +You can bootstrap a new controller using one of these clouds... + +Clouds available on the client: +Cloud Regions Default Type Credentials Source Description +aws 22 us-east-1 ec2 0 public Amazon Web Services +aws-china 2 cn-north-1 ec2 0 public Amazon China +aws-gov 2 us-gov-west-1 ec2 0 public Amazon (USA Government) +azure 43 centralus azure 0 public Microsoft Azure +azure-china 4 chinaeast azure 0 public Microsoft Azure China +equinix 25 px equinix 0 public +google 25 us-east1 gce 0 public Google Cloud Platform +localhost 1 localhost lxd 1 built-in LXD Container Hypervisor +microk8s 1 localhost k8s 1 built-in A Kubernetes Cluster +oracle 4 us-phoenix-1 oci 0 public Oracle Compute Cloud Service +``` + +where each line represents a cloud that Juju can interact with -- the cloud name (that you will have to use to interact with the cloud), the number of cloud regions Juju is aware of, the default region (for the current Juju client), the type/API used to control it, the number of credentials associated with a cloud, the source of the cloud, and a brief description. + +By omitting the flags, you will see a list of the clouds available on the client for which you have also registered the credentials. Alternatively, by passing other flags you can specify an output format or file, etc. + +> See more: [`juju clouds`](/t/10182) + +[/tab] + +[tab version="terraform juju"] +The `terraform juju` client does not support this. Please use the `juju` client. +[/tab] + +[tab version="python libjuju"] + +To get all clouds known to the controller, you may use the `Controller.clouds()` method on a connected `Controller` object. It will return a list of Cloud objects. +```python + +await my_controller.clouds() + +``` + +> See more: [`clouds` (method)](https://pythonlibjuju.readthedocs.io/en/latest/api/juju.controller.html#juju.controller.Controller.clouds), [`Cloud` (object)](https://pythonlibjuju.readthedocs.io/en/latest/api/juju.client.html#juju.client._definitions.Cloud) + +[/tab] +[/tabs] + +

## View details about a cloud

+ +[tabs] +[tab version="juju"] + +To get more detail about a particular cloud, run the `show-cloud` command followed by the cloud name, e.g., + +```text +juju show-cloud azure +``` + +The command also has flags that allow you to specify whether you want this information from the client or rather a controller; whether you want the output to include the configuration options specific to the cloud; an output format or file; etc. + +> See more: [`juju show-cloud`](/t/10215) + +[/tab] + +[tab version="terraform juju"] +The `terraform juju` client does not support this. Please use the `juju` client. +[/tab] + +[tab version="python libjuju"] + +To get more detail about a particular cloud, you may use the `Controller.cloud()` method on a connected `Controller` object. It will return a Cloud object. +```python + +await my_controller.cloud() + +``` + +> See more: [`cloud` (method)](https://pythonlibjuju.readthedocs.io/en/latest/api/juju.controller.html#juju.controller.Controller.cloud), [`Cloud` (object)](https://pythonlibjuju.readthedocs.io/en/latest/api/juju.client.html#juju.client._definitions.Cloud) + + +[/tab] +[/tabs] + +

## Manage cloud regions

+ +[tabs] +[tab version="juju"] + +- [View all the known regions](#heading--view-all-the-known-regions) +- [Manage the default region](#heading--manage-the-default-region) + +

### View all the known regions

+ + +To see which regions Juju is aware of for any given cloud, use the `regions` command. For example, for the 'aws' cloud, run: + +```text +juju regions aws +``` + +This should output something similar to: + +```text +Client Cloud Regions +us-east-1 +us-east-2 +us-west-1 +us-west-2 +ca-central-1 +eu-west-1 +eu-west-2 +eu-west-3 +eu-central-1 +eu-north-1 +eu-south-1 +af-south-1 +ap-east-1 +ap-south-1 +ap-southeast-1 +ap-southeast-2 +ap-southeast-3 +ap-northeast-1 +ap-northeast-2 +ap-northeast-3 +me-south-1 +sa-east-1 +``` + +The command also has flags that allow you to select a specific controller, choose an output format or file, etc. + +> See more: [`juju regions`](/t/10112) + +

### Manage the default region

+ +**Set the default region.** To set the default region for a cloud, run the `default-region` command followed by the name of the cloud and the name of the region that you want to start using as a default. For example: + +```text + +juju default-region aws eu-central-1 +``` + +If at any point you want to reset this value, drop the region argument and pass the `--reset` flag. + +> See more: [`juju default-region`](/t/10082) + + +**Get the default region.** To get the current default region for a cloud, run the `default-region` command followed by the name of the cloud. For example: + +```text +juju default-region azure-china +``` + +> See more: [`juju default-region`](/t/10082) + +[/tab] + +[tab version="terraform juju"] +The `terraform juju` client does not support this. Please use the `juju` client. +[/tab] + +[tab version="python libjuju"] +The `python-libjuju` client does not support this. Please use the `juju` client. +[/tab] +[/tabs] + +

## Manage cloud credentials

+ +> See: [How to manage credentials](/t/1112) + + +

## Update a cloud

+ +[tabs] +[tab version="juju"] + +The procedure for how to update a cloud on Juju depends on whether the cloud is public or private. + +- [Update a public cloud](#heading--update-a-public-cloud) +- [Update a private cloud](#heading--update-a-private-cloud) + +

### Update a public cloud

+ +To synchronise the Juju client with changes occurring on public clouds (e.g. cloud API changes, new cloud regions) or on Juju's side (e.g. support for a new cloud), run the `update-public-clouds` command: + +```text +juju update-public-clouds +``` + +The command also allows you to specify whether you want this update to happen on the client or rather a controller. + +> See more: [`juju update-public-clouds`](/t/10115) + + +

### Update a private cloud

+ +[tabs] +[tab version="juju"] + +To update Juju's definition for a private cloud, run the `update-cloud` command followed by the cloud name and the `-f` flag followed by the path to the new cloud definition file. For example: + +```text +juju update-cloud mymaas -f path/to/maas.yaml +``` + +The command also allows you to indicate whether the update should happen on the client or the controller; to to update the definition on a controller to match the one on the client; etc. + +> See more: [`juju update-cloud`](/t/10081) + + + +[/tab] + +[tab version="terraform juju"] +The `terraform juju` client does not support this. Please use the `juju` client. +[/tab] + +[tab version="python libjuju"] +The `python-libjuju` client does not support this. Please use the `juju` client. +[/tab] +[/tabs] + +

## Remove a cloud

+> See also: [Removing things reference](/t/1063) + +[note type=information] +This only applies to cloud definitions added explicitly via `add-cloud` or `add-k8s`. It removes the cloud definition from the client and/or the controller. +[/note] + +To remove a cloud definition from Juju, run the `remove-cloud` command followed by the name of the cloud. For example: + +```text +juju remove-cloud lxd-remote +``` + +The command also allows you to specify whether this operation should be performed on the client or rather on a specific controller. + +> See more: [`juju remove-cloud`](/t/10216) + +[/tab] + +[tab version="terraform juju"] +The `terraform juju` client does not support this. Please use the `juju` client. +[/tab] + +[tab version="python libjuju"] +To remove a cloud definition, you may use the `Controller.remove_cloud()` method on a connected `Controller` object. +```python + +await my_controller.remove_cloud() + +``` + +> See more: [`remove_cloud` (method)](https://pythonlibjuju.readthedocs.io/en/latest/api/juju.controller.html#juju.controller.Controller.remove_cloud) +[/tab] +[/tabs] + + +
+ +> **Contributors:** @anthonydillon, @cderici, @danieleprocida, @hmlanigan, @nottrobin , @pedroleaoc, @pmatulis, @timclicks, @tmihoc + +------------------------- + +sabdfl | 2020-04-04 10:23:34 UTC | #2 + +How can I ask Juju to tell me the zones in a particular cloud? I have a particular interest in LXD for this (in other words, from the Juju command-line and API I want to determine how many zones are in a LXD cloud and what they are called). The concept, however, is general to all clouds so I expect the mechanism would be general too. + +------------------------- + +timClicks | 2020-04-05 02:58:56 UTC | #3 + +Good question. This should be an extension of `juju show-cloud`, but Juju doesn't yet automatically load this information, as far as I'm aware. Happy to be corrected here by others in the team. + +Juju requests availability zone info at deploy-time via the `zone` constraint. + +At this stage, our recommendation is use provider-based tools for each cloud, such as `openstack availability zone list`. + +------------------------- + +sabdfl | 2020-04-05 10:24:56 UTC | #4 + +Yes, this should be part of show-cloud. It should also be updated with update-clouds. And we will need API and Python libjuju access to the data too. + +------------------------- + +sabdfl | 2020-04-05 10:30:22 UTC | #5 + +I don't think it makes sense to require external tooling (even though this would work) because we need API access to the data anyway, and it would be most useful to do show-cloud before you do the deployment with zone= constraints. In other words, since you need to use the right zones in a juju command, you should be able to get the available zones through a juju command. + +Thinking about it, we maybe want a show-region command too, which would give you just the details of say azure/norwayeast. + +------------------------- + +pmatulis | 2020-04-06 15:45:00 UTC | #6 + +[quote="sabdfl, post:5, topic:1100"] +In other words, since you need to use the right zones in a juju command, you should be able to get the available zones through a juju command. +[/quote] + +This would be very appreciated for OpenStack clouds where zones are an integral part of setting up HA. + +------------------------- + +pedroleaoc | 2022-04-07 09:25:37 UTC | #7 + + + +------------------------- + +pedroleaoc | 2022-10-14 11:32:31 UTC | #8 + + + +------------------------- + diff --git a/tmp/t/11012.md b/tmp/t/11012.md new file mode 100644 index 000000000..7ffe3a534 --- /dev/null +++ b/tmp/t/11012.md @@ -0,0 +1,207 @@ +ppasotti | 2024-07-25 12:55:12 UTC | #1 + +[note] +This howto has been incorporated [in the tempo docs](/t/14046). +[/note] + + +In order to instrument a charm with tracing telemetry, you will need to: + +1. Set up a model with `cos-lite`: + + +```bash +juju add-model clite +juju deploy cos-lite --trust +``` + +> See more: [Charmhub | `cos-lite`](https://charmhub.io/cos-lite) + +2. Deploy `tempo`: + +```bash +juju deploy tempo-k8s +``` + +> See more: [Charmhub | `tempo-k8s`](https://charmhub.io/tempo-k8s) + +3. 
Integrate tempo with `cos-lite`: + +```bash +jhack imatrix fill +``` + +> See more: [`jhack`](/t/8047), [GitHub | `jhack` > `imatrix` > `fill`](https://github.com/PietroPasotti/jhack#fill) + +Alternatively, you can integrate manually by: +``` +juju integrate tempo:logging loki:logging +juju integrate tempo:ingress traefik:ingress +juju integrate grafana:grafana-source tempo:grafana-source +juju integrate prometheus:metrics-endpoint, tempo:metrics-endpoint +juju integrate grafana:grafana-dashboard, tempo:grafana-dashboard +juju integrate traefik:tracing, tempo:tracing +juju integrate prometheus:tracing, tempo:tracing +``` + +[note] At some point there will be an overlay bundle to deploy `cos-lite` + `tempo` and integrate them; see [this PR](https://github.com/canonical/cos-lite-bundle/pull/79) to follow the progress on the overlay. [/note] + +At this point you should have a working `cos-lite` plus `tempo` deployment; something like this: +![image|690x333](upload://7ohHj2JBT9Zaezsgx6p3a56n5cO.png) + +4. Fetch the `charm_tracing` and the `tracing` libs: +```bash +charmcraft fetch-lib charms.tempo-k8s.v1.charm_tracing +charmcraft fetch-lib charms.tempo-k8s.v2.tracing +``` + +> See more: [`charmcraft fetch-lib`](/t/6123), [Charmhub | `tempo-k8s` > Libraries > `charm-tracing`](https://charmhub.io/tempo-k8s/libraries/charm_tracing), [Charmhub | `tempo-k8s` > Libraries > `tracing`](https://charmhub.io/tempo-k8s/libraries/tracing) + +5. Add opentelemetry exporter dependency used by `charm_tracing` to your `requirements.txt`: + +``` +opentelemetry-exporter-otlp-proto-http>=1.21.0 +``` + +6. Add an integration with `tempo`: + +```yaml +# in charmcraft.yaml +provides: + tracing: + interface: tracing + limit: 1 +``` + +> See more: [File `charmcraft.yaml` > `provides`](/t/7132#heading--peers-provides-requires) + +7. Instrument your charm code: + +```python +# in /your/charm/project/src/charm.py +from charms.tempo_k8s.v1.charm_tracing import trace_charm +from charms.tempo_k8s.v2.tracing import TracingEndpointRequirer + +@trace_charm(tracing_endpoint="tracing_endpoint") +class MyCharm(CharmBase): + def __init__(self, *args): + super().__init__(*args) + # add a provider wrapper for the tracing endpoint + self.tracing = TracingEndpointRequirer(self, protocols=["otlp_http"]) + + @property + def tracing_endpoint(self) -> Optional[str]: + """Tempo endpoint for charm tracing.""" + if self.tracing.is_ready(): + return self.tracing.get_endpoint("otlp_http") + return None +``` +[note] +By default, the traces generated by this charm will be tagged with service name equal to the class name of the charm, so `"MyCharm"` in this case. You override this default by passing +`service_name="foo"` to `trace_charm`. +[/note] + + +8. Pack, deploy, and integrate your charm with `cos-lite`: + +```bash +charmcraft pack +juju deploy ./my-charm-operator_ubuntu-20.04-amd64.charm mycharm \ + $(yq eval '.resources | to_entries | map(select(.value.upstream-source != null) | "--resource " + .key + "=" + .value.upstream-source) | join(" ")' charmcraft.yaml) +juju integrate mycharm:tracing tempo-k8s:tracing +``` + +[note] +`cos-lite` is a kubernetes bundle. `tempo` is a Kubernetes charm. + [/note] + +> See more: [`charmcraft pack`](6129), [Juju | `juju deploy`](https://juju.is/docs/juju/juju-deploy), [Juju | `juju integrate`](https://juju.is/docs/juju/juju-integrate) + +9. View the traces for the charm. 
+ +Open the Grafana dashboard in a browser ([see here](https://github.com/canonical/grafana-k8s-operator) for more detailed instructions). + +Next, navigate to the traces for your charm: +- go to `Explore` and select the Tempo datasource. +- pick the `service_name` you gave to MyCharm above (the default is the application name) to see the traces for that charm +![image|690x505](upload://rAZOurgeYtFfymdB1V0qzKg7HcW.png) +- click on a trace ID to visualize it. For example, this is the trace for an `update-status` event on the Tempo charm itself: +![image|690x492](upload://6lufy3oZUYDkwxrnufQJKf5tpGN.png) + + +## Mapping events to traces with jhack tail +`jhack tail` supports a `-t` option to show the trace IDs associated with a charm execution: + +![image|607x326](upload://1UUxRnTkBR3hkCPfWd0sd4AXqfJ.png) + +This means that you can tail a charm, grab the trace id from tail, put it in the grafana dashboard query and get to the trace in no time. + +------------------------- + +ghibourg | 2023-07-06 22:43:22 UTC | #2 + +The instructions are missing the fetching and importing of `tempo_scrape`. + +```shell +charmcraft fetch-lib charms.tempo_k8s.v0.tempo_scrape + +```python +from charms.tempo_k8s.v0.tempo_scrape import TracingEndpointProvider +``` + +Also, the deploy command for tempo does not have the right charm file name. It should be `tempo-k8s_ubuntu-22.04-amd64.charm` according to my testing today. + +The tempo charm also goes into error in `logging-relation-changed`, failing to start `promtail` because the port 9095 seems already in use. + +I did not get the chance to debug more. + +I think this looks interesting overall, I will definitely try to play with it a bit more. + +------------------------- + +ppasotti | 2023-07-07 06:36:41 UTC | #3 + +thanks for the feedback, will polish it up a bit today + +------------------------- + +ppasotti | 2023-07-07 07:10:14 UTC | #4 + +I'm locked out of the tempo charm while IS handles giving us back ownership after I transferred the repo to the Canonical org, so I can't merge a fix for that one, but [the branch is up](https://github.com/canonical/tempo-k8s-operator/pull/8) + +------------------------- + +ppasotti | 2023-07-24 09:41:48 UTC | #5 + + + +------------------------- + +mthaddon | 2024-01-23 15:43:14 UTC | #6 + +[quote="ppasotti, post:1, topic:11012"] +Integrate tempo with `cos-lite`: +[/quote] + +Would it be possible to include the regular `juju integrate` commands here for those that might not have `jhack` installed (e.g. on a production environment)? + +------------------------- + +ppasotti | 2024-01-23 15:57:06 UTC | #7 + +good point! we have no official docs on that yet. + +------------------------- + +ppasotti | 2024-01-23 16:00:46 UTC | #8 + +done @mthaddon ! + +------------------------- + +ppasotti | 2024-07-26 11:23:30 UTC | #9 + + + +------------------------- + diff --git a/tmp/t/1102.md b/tmp/t/1102.md new file mode 100644 index 000000000..36646a52b --- /dev/null +++ b/tmp/t/1102.md @@ -0,0 +1,93 @@ +system | 2024-06-13 08:07:15 UTC | #1 + +Juju now tags instances and volumes created in supported clouds with the Juju model UUID, and related Juju entities. This document describes the default instance naming and tagging scheme and then shows you how you can define your own tags. + +**Contents:** + +- [The default naming and tagging scheme](#heading--the-default-naming-and-tagging-scheme) +- [How to define your own tags](#heading--how-to-define-your-own-tags) + + +

## The default naming and tagging scheme

+ +Instances and volumes are now named consistently across EC2 and OpenStack, using the scheme: + +``` text +juju--- +``` + +...where `` is the given name of the model; `` is the type of the resource ("machine" or "volume") and `` is the numeric ID of the Juju machine or volume corresponding to the IaaS resource. + +Tagging also follows a scheme: Instances will be tagged with the names of units initially assigned to the machine. Volumes will be tagged with the storage-instance name, and the owner (unit or service) of said storage. + +For example, names in Amazon AWS appear like this: ![named instances in Amazon](https://assets.ubuntu.com/v1/0261cc58-config-tagging-named.png) +...and tags like this: +![tagged instances in Amazon](https://assets.ubuntu.com/v1/f480625d-config-tagging-tagged.png) + +

## How to define your own tags

+ + +Juju also adds any user-specified tags set via the "resource-tags" model setting to instances and volumes. The format of this setting is a space-separated list of key=value pairs. + +``` text +resource-tags: key1=value1 [key2=value2 ...] +``` + +Alternatively, you can change the tags allocated to new machines in a bootstrapped model by using the `juju model-config` command + +``` text +juju model-config resource-tags="origin=v2 owner=Canonical" +``` + +![user tagged instances in Amazon](https://assets.ubuntu.com/v1/1fac4427-config-tagging-user.png) + +You can change the tags back by running the above command again with different values. Changes will not be made to existing machines, but the new tags will apply to any future machines created. + +> See more: [List of model configuration keys > `resource-tags`](/t/7068#heading--resource-tags) + +These tags may be used, for example, to set up chargeback accounting. + +Any tags that Juju manages will be prefixed with "juju-"; users must avoid modifying these, and for safety, it is recommended none of your own tags start with "juju". + +
+ +> **Contributors:** @alejdg, @timClicks , @tmihoc + +------------------------- + +pedroleaoc | 2022-04-07 08:35:29 UTC | #2 + + + +------------------------- + +pedroleaoc | 2022-10-14 11:31:43 UTC | #3 + + + +------------------------- + +alejdg | 2024-06-06 00:58:26 UTC | #4 + +Since which version is this feature available? + +------------------------- + +alejdg | 2024-06-12 20:05:26 UTC | #5 + +The title says instance tags but the command is *"juju model-config **resource-tags** [...]"*. I suggest that the documentation matches the command. + +------------------------- + +tmihoc | 2024-06-13 08:08:31 UTC | #6 + +> Since which version is this feature available? + +Apparently since https://github.com/juju/juju/pull/9194 + +> The title says instance tags but the command is *“juju model-config **resource-tags** […]”* . I suggest that the documentation matches the command. + +Updated the title (and also added you to the list of contributors on the bottom of the doc -- thanks!). + +------------------------- + diff --git a/tmp/t/11102.md b/tmp/t/11102.md new file mode 100644 index 000000000..3f2311e59 --- /dev/null +++ b/tmp/t/11102.md @@ -0,0 +1,107 @@ +tmihoc | 2023-07-10 08:29:13 UTC | #1 + +> See also: [Channel > Track](/t/6562#heading--track) + +This document shows how to create a track for your charm. + +**Contents:** + +1. [Request a track guardrail](#heading--request-a-track-guardrail) +1. [Create the track](#heading--create-the-track) + + +

Request a track guardrail

+> See also: [Channel > Track > Guardrail](/t/6562#heading--track-guardrail) + +To request a track guardrail, contact a Charmhub admin by creating a post on Discourse under the **charmhub requests** category, that is, here: https://discourse.charmhub.io/c/charmhub-requests/46 . + +

Create the track

+ +Once you've requested a track guardrail, there are two ways to create a new track for your charm -- you can keep contacting a Charmhub admin every time or you can self-service. For most cases the latter option is likely to be more convenient and faster. + + + + +- [Contact a Charmhub admin](#heading--contact-a-charmhub-admin) +- [Self-service](#heading--self-service) + +

Contact a Charmhub admin

+ +To create a new track by contacting a Charmhub admin, create a post on Discourse under the **charmhub requests** category, that is, here: https://discourse.charmhub.io/c/charmhub-requests/46 . The admin will create the new track that fits within the track guardrail you’ve set up for your charm. + +

Self-service

+
+To create a new track yourself, follow the steps below:
+
+[note type=information]
+As you might notice, this path is currently a little hacky. In the long term it should become a lot smoother, as there are plans to support it through the Charmcraft CLI.
+[/note]
+
+[note type=information]
+As you will see, this method currently relies on `charmcraft` + `curl`. We recommend the Charmcraft bit because Charmcraft already understands the authentication mechanism used by Charmhub and can generate a suitable authentication token (macaroon) that will make it possible to then use `curl` directly to interact with the Charmhub API. This method also has the advantage that it can be adapted to use any HTTP client or library as long as it can pass custom headers.
+[/note]
+
+
+**1. Enable `curl` access to the Charmhub API.**
+
+First, install `curl` and `jq`.
+
+[note type=information]
+You might already have both.
+[/note]
+
+Then, use Charmcraft to log in to Charmhub and export your Charmhub credentials / token (macaroon) to a file:
+
+```text
+charmcraft login --export charmhub-creds.dat
+```
+
+Next, decode and extract the macaroon from the `.dat` file and place it in a header in an environment variable:
+
+```text
+export CHARMHUB_MACAROON_HEADER="Authorization: Macaroon $(cat charmhub-creds.dat | base64 -d | jq -r .v)"
+```
+
+At this point you can use this variable in `curl` commands -- just make sure to specify the correct `Content-Type`.
+
+**2. Use `curl` to view the existing guardrails and tracks.** To view the guardrails and tracks associated with your charm, issue an HTTP `GET` request to `/v1/<namespace>/<name>`. For example, for a charm named `hello-world-charm`:
+
+```text
+curl https://api.charmhub.io/v1/charm/hello-world-charm -H'Content-type: application/json' -H "$CHARMHUB_MACAROON_HEADER"
+```
+
+The guardrails and tracks of the package will be under the `track-guardrails` and `tracks` keys of `metadata`. Now you know what the new track may look like.
+
+> See more: [Charmhub API docs > `package_metadata`](https://api.charmhub.io/docs/default.html#package_metadata)
+
+[note type=information]
+**If you want to view the guardrails and tracks for *all* published charms:** Issue an HTTP `GET` request to `/v1/<namespace>`, as below:
+
+```text
+curl https://api.charmhub.io/v1/charm -H'Content-type: application/json' -H "$CHARMHUB_MACAROON_HEADER"
+```
+
+> See more: [Charmhub API docs > `list_registered_names`](https://api.charmhub.io/docs/default.html#list_registered_names)
+
+
+[/note]
+
+
+**3. Use `curl` to create a new track.** Finally, to create a new track for your charm, issue an HTTP `POST` request to `/v1/<namespace>/<name>/tracks`, where `<namespace>` and `<name>` refer to the type and name of the package, respectively. For example, given a charm named `hello-world-charm`, one can create two tracks `v.1` and `v.2` as follows:
+
+```text
+curl https://api.charmhub.io/v1/charm/hello-world-charm/tracks -X POST -H'Content-type: application/json' -H "$CHARMHUB_MACAROON_HEADER" -d '[{"name": "v.1"}, {"name": "v.2"}]'
+```
+
+Of course, the tracks must conform to the existing guardrail for the charm.
+
+
+> See more: [Charmhub API docs > `create_tracks`](https://api.charmhub.io/docs/default.html#create_tracks)
+
+
+That's it, you now have a new track for your charm!
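+
+To double-check the result, you can repeat the `GET` request from step 2 and filter the response with `jq`. This is only a sketch -- the exact response shape may differ slightly, but per the above the tracks live under the `metadata` key:
+
+```text
+curl https://api.charmhub.io/v1/charm/hello-world-charm -H'Content-type: application/json' -H "$CHARMHUB_MACAROON_HEADER" | jq '.metadata.tracks'
+```
+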
+ +------------------------- + diff --git a/tmp/t/1111.md b/tmp/t/1111.md new file mode 100644 index 000000000..bc46afcc9 --- /dev/null +++ b/tmp/t/1111.md @@ -0,0 +1,1016 @@ +system | 2024-09-06 11:25:01 UTC | #1 + +> See also: [Controller](/t/5455) + +[note type=information] +To be able to manage a controller, a user must have a [controller `superuser`](/t/6864#heading--controller-superuser) access level. +[/note] + +This document demonstrates various ways in which you can interact with a controller. + +**Contents:** + +- [Bootstrap a controller](#heading--bootstrap-a-controller) +- [View all the known controllers](#heading--view-all-the-known-controllers) +- [View details about a controller](#heading--view-details-about-a-controller) +- [Switch to a different controller](#heading--switch-to-a-different-controller) +- [Configure a controller](#heading--configure-a-controller) +- [Manage constraints for a controller](#heading--manage-constraints-for-a-controller) +- [Share a controller with other users](#heading--share-a-controller-with-other-users) +- [Manage a controller's connection to the client](#heading--manage-a-controllers-connection-to-the-client) +- [Make a controller highly available](#heading--make-a-controller-highly-available) +- [Collect metrics about a controller](#heading--collect-metrics-about-a-controller) +- [Back up a controller](#heading--back-up-a-controller) +- [Upgrade a controller](#heading--upgrade-a-controller) +- [Remove a controller](#heading--remove-a-controller) + + +

Bootstrap a controller

+ +> See also: [Bootstrapping](/t/6209), [List of supported clouds > \ > CLOUD](/t/6665) + + +[tabs] +[tab version="juju"] + + +To create a `juju` controller in a cloud, use the `bootstrap` command: + +[note type=information] +**On Kubernetes:** The Juju controller needs two container images (one for the controller agent container and one for the database container). These are by default downloaded from Docker Hub, but can also be downloaded from `public.ecr.aws/juju` or `https://ghcr.io/juju` if you pass them to the `caas-image-repo` bootstrap configuration key. **We currently recommend you get them from `public.ecr.aws/juju`: `juju bootstrap mycloud --config caas-image-repo="public.ecr.aws/juju"`.** + +> See more: [List of controller configuration keys > `caas-image-repo`](/t/7059#heading--caas-image-repo)
:warning: While this key *can* technically be changed after bootstrap, that is only for a very specific use case (adjusting credentials used for a custom registry). For most cases it is safe to assume you can only set it during bootstrap. +[/note] + + +```text +juju bootstrap +``` +This will start an interactive session where you will be asked for the name of the cloud and the name you want to give the controller. + +Alternatively, you can specify these things directly by adding the name of the cloud and of the controller right after the `bootstrap` command. For example, below we bootstrap a controller with the name `aws-controller` into our aws cloud: + + +```text +juju bootstrap aws aws-controller +``` + +When you use the bootstrap command in this way (non-interactively), you can also add many different options, to specify the cloud credentials to be used, to select a specific cloud region, to specify a storage pool, to constrain the controller or workload machines, to configure the deployment in various ways, to pass a cloud-specific setting, to choose a specific `juju` agent version, etc. + +> See more: [`juju bootstrap`](/t/10132) + +[/tab] + +[tab version="terraform juju"] +The `terraform juju` client does not support this. Please use the `juju` client. + +[note type=information] +With the `terraform juju` client, in your Terraform plan, you can only connect to a pre-existing controller. You do that by configuring the `juju` provider to point to the pre-existing controller. + +> See more: [How to manage the client](/t/1083) +[/note] + +[/tab] + +[tab version="python libjuju"] + +The `python-libjuju` client does not support this. Please use the `juju` client. + +[note type=information] +With the `python-libjuju` client, you can only connect to a pre-existing controller. See the section Switch to a different controller for more details. + +> See more: [How to manage the client](/t/1083) +[/note] + +[/tab] +[note type=positive] +**Tips for production:** + +**- Machines:** Make sure to bootstrap with no less than 50 GB disk, 2 CPUs, and 4 GB RAM (e.g., + `juju bootstrap aws/us-east-1 mymachinecontroller --bootstrap-constraints "root-disk=50G cores=2 mem=4G"`). Bootstrapping a controller like this allows you to manage a few hundred units. However, if your needs go beyond this, consider making the controller highly available. + +> See more: [How to manage machine constraints for a controller](/t/1111#heading--manage-machine-constraints-for-a-controller), [How to make the controller highly available](/t/1111#heading--make-a-controller-highly-available) + +**- Kubernetes:** Juju does not currently support high-availability and backup and restore for Kubernetes controllers. Consider bootstrapping your controller on a machine cloud and then adding your Kubernetes cloud(s) to it, in a multi-cloud controller setup (`juju add-k8s myk8scloud --controller mymachinecontroller`). + +> See more: [How to add a cloud](/t/1100#heading--add-a-cloud) + +[/note] + +[note type=information status="Troubleshooting"] + +- **Machines:** + +Bootstrap on machines consists of the following steps: + +1. Provision resources/a machine M from the relevant cloud, via cloud-init write a nonce file to verify we’ve found the machine we’ve provisioned. +1. Poll the newly created instance for an IP address, and attempt to connect to M. +1. Run the machine configuration script for M, which downloads, e.g., the `jujud` binaries, sets up networking, and starts jujud. 
+ +For failure at any point, retry the `bootstrap` command with the `--debug`, `--verbose`, and `keep-broken` flags: + +```text +juju bootstrap --debug --verbose --keep-broken +``` + +> See more: [`juju bootstrap --keep-broken`](/t/10132) + +~5% of the time bootstrap failure is due to some mirror server; in that case, retrying should succeed, and the flags won't matter. However, ~95% of the time bootstrap failure is due to something else; in that case, `keep-broken` will ensure that the machine isn't destroyed, so you can connect to it and examine the logs. + +> See more: [How to manage logs > View the log files](/t/9151#heading--view-the-log-files), [How to troubleshoot your deployment](/t/1187) + +- **Kubernetes:** + +Bootstrap on Kubernetes includes creating a Kubernetes pod called `controller-0` containing a container called `api-server`. Matching this, the output of the bootstrap command includes `Creating k8s resources for controller `, where `` is something like `controller-foobar`. To troubleshoot, inspect this `api-server` container with `kubectl`: + +```text + +kubectl exec controller-0 -itc api-server -n [namespace] -- bash +``` + +[/note] + +[/tabs] + +
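+If the `api-server` container is not up yet (so `kubectl exec` fails), its logs are usually the next place to look. A possible invocation, assuming the namespace reported in the bootstrap output:
+
+```text
+kubectl logs controller-0 -c api-server -n <namespace> --tail=100
+```
+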

View all the known controllers

+ +[tabs] +[tab version="juju"] + + +To see a list of all the controllers known to the `juju` client, run the `controllers` command: + +```text +juju controllers +``` + +Sample output for a case where there is just a single controller boostrapped into the `localhost` cloud: + + +```text +Use --refresh option with this command to see the latest information. + +Controller Model User Access Cloud/Region Models Nodes HA Version +localhost-controller* controller admin superuser localhost/localhost 1 1 none 3.0.0 +``` + +By specifying various options you can also choose a specific output format, an output file, etc. + +> See more: [`juju controllers`](/t/10152) + +[/tab] + +[tab version="terraform juju"] +The `terraform juju` client does not support this. Please use the `juju` client. +[/tab] + +[tab version="python libjuju"] + +The `python-libjuju` client does not support this. Please use the `juju` client. + +[/tab] +[/tabs] + +
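+If you need the list in machine-readable form (for scripting, say), the refresh and output-format options mentioned above can be combined, for example:
+
+```text
+juju controllers --refresh --format yaml
+```
+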

View details about a controller

+ +[tabs] +[tab version="juju"] + + +To view detailed information about a controller, use the `show-controller` command, optionally followed by one or more controller names. For example, below we examine a controller called `localhost-controller`: + +```text +juju show-controller localhost-controller +``` + +By specifying various options you can also choose an output format, an output file, or get an output that includes the password for the logged in user. + +> See more: [`juju show-controller`](/t/10156) + +[/tab] + +[tab version="terraform juju"] +The `terraform juju` client does not support this. Please use the `juju` client. +[/tab] + +[tab version="python libjuju"] + +To view details about a controller in `python-libjuju`, with a connected controller object (below, `controller`), you can call the `Controller.info()` function to retrieve information about the connected controller: + +```python +await controller.info() +``` +> See more: [`Controller.info()`](https://pythonlibjuju.readthedocs.io/en/latest/api/juju.controller.html#juju.controller.Controller.info) + + +[/tab] +[/tabs] + +

Switch to a different controller

+ +[tabs] +[tab version="juju"] + + +To switch from one controller to another, use the `switch` command followed by the name of the controller. For example, below we switch to a controller called `localhost-controller-prod`: + +```text +juju switch localhost-controller-prod +``` + +[note type=caution] +The `switch` command can also be used to switch to a different model. To remove any ambiguity, in some cases it may be safer to specify the model name explicitly on the template `:` + +[/note] + +> See more: [`juju switch`](/t/10102) + +[/tab] + +[tab version="terraform juju"] +The `terraform juju` client does not support this. Please use the `juju` client. +[/tab] + +[tab version="python libjuju"] + +To switch to a different controller with `python-libjuju`, simply connect to the controller you want to work with, which is done by calling `connect` on the [Controller](https://pythonlibjuju.readthedocs.io/en/latest/narrative/controller.html) object (below, `controller`): + +```python +from juju.model import Controller + +controller = Controller() +await controller.connect() # will connect to the "current" controller + +await controller.connect('mycontroller') # will connect to the controller named "mycontroller" +``` + +Note that if the `controller` object is already connected to a controller, then that connection will be closed before making the new connection. + +> See more: [`Controller.connect()`](https://pythonlibjuju.readthedocs.io/en/latest/api/juju.model.html#juju.model.Model.connect), [Connect with Authentication](https://pythonlibjuju.readthedocs.io/en/latest/narrative/controller.html#connecting-with-authentication), [Connect with explicit endpoints](https://pythonlibjuju.readthedocs.io/en/latest/narrative/controller.html#connecting-with-an-explicit-endpoint) + +[/tab] +[/tabs] + +

Configure a controller

+> See also: [Configuration](/t/6659), [List of controller configuration keys](/t/7059) +> +> See related: [How to configure a model](/t/1155#heading--configure-a-model) + +[tabs] +[tab version="juju"] + + +**Set values.** +A controller configuration key can be assigned a value during controller-creation time or post-creation time. The vast majority of keys are set in the former way. + +- To set a controller's configuration at controller-creation time, use the `bootstrap` command with the `--config` followed by the relevant `=` pair(s). For example, the code below creates a controller `localhost` on a cloud `lxd` and at the same time configures the controller such that the `bootstrap-timeout` key is 700 seconds: + +``` text +juju bootstrap --config bootstrap-timeout=700 localhost lxd +``` + +- To set a controller's configuration once it's already been created, use the `controller-config` command followed by the relevant `=` pair(s). For example, the code below configures an existing controller named `aws` so as to record auditing information, with the number of old audit log files to keep being set at 5. + +``` text +juju controller-config -c aws auditing-enabled=true audit-log-max-backups=5 +``` + +> See more: [`juju bootstrap --config`](/t/10132), [`juju controller-config`](/t/10237) + +**Get values.** To get a controller's current configuration, run: + +``` text +juju controller-config +``` + +This will output a list of configuration keys and their values. This will include those that were set during controller creation, inherited as a default value, or dynamically set by Juju. + +> See more: [`juju controller-config`](/t/10237) + +[/tab] + +[tab version="terraform juju"] +The `terraform juju` client does not support this. Please use the `juju` client. +[/tab] + +[tab version="python libjuju"] + +The `python-libjuju` client does not support this. Please use the `juju` client. + +[/tab] +[/tabs] + +
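+To read back a single key rather than the full list, you can pass just the key name to the same command -- here reusing the `aws` controller and the `audit-log-max-backups` key from the example above:
+
+``` text
+juju controller-config -c aws audit-log-max-backups
+```
+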

Manage constraints for a controller

+> See also: [Constraint](/t/6184) + +[tabs] +[tab version="juju"] + +To manage constraints for the controller, manage them for the `controller` model or the `controller` application. + + + +[note type=information] +**If you want to set both types of constraints at the same time, and they are different:**
+You can. While the model-level constraints will apply to the entire `controller` model, the application-level constraints will make sure to override them for the `controller` application. +[/note] + + +> See more: +> - [How to manage constraints for a model](/t/1155#heading--manage-constraints-for-a-model) +> - [How to manage constraints for an application](/t/5476#heading--manage-constraints-for-an-application) + + +[/tab] + +[tab version="terraform juju"] +The `terraform juju` client does not support this. Please use the `juju` client. +[/tab] + +[tab version="python libjuju"] + +The `python-libjuju` client does not support this. Please use the `juju` client. + +[/tab] +[/tabs] + +
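+For reference, a minimal sketch of what that looks like in practice with the `juju` client (illustrative values; both commands target the `controller` model):
+
+```text
+# Model-level constraints for the controller model:
+juju set-model-constraints -m controller cores=2 mem=4G
+
+# Application-level constraints for the controller application (these take precedence):
+juju set-constraints -m controller controller cores=4 mem=8G
+```
+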

Share a controller with other users

+> See also: [User](/t/6186) + +[tabs] +[tab version="juju"] + + +The procedure for how to share a controller with other users depends on whether your controller is private or public. + +**Share a private controller.** To share a private controller with other users: + +1. Create the users. + +> See more: [How to add a user](/t/1156#heading--add-a-user) + +2. Send the users the information they need to register your controller with their client and to set up their login information for the controller. + +> See more: [How to register a private controller ](/t/1111#heading--register-a-controller) + +**Share a public controller.** + +[TO BE ADDED] + +[/tab] + +[tab version="terraform juju"] + +[/tab] + +[tab version="python libjuju"] + +[/tab] +[/tabs] + +

Manage a controller's connection to the client

+ +[tabs] +[tab version="juju"] + +To add / remove details of a controller to / from your Juju client, you need to register / unregister the controller. + +- [Register a controller](#heading--register-a-controller) +- [Unregister a controller](#heading--unregister-a-controller) + + +

Register a controller

+ +[note type=information] +**If you are the creator of the controller:** You can skip this step. It only applies for cases where you are trying to connect to an external controller. +[/note] + +The procedure for how to register a controller with the local system varies slightly depending on whether the controller is private or public. + +**Register a private controller.** To register a private controller, use the `register` command followed by your unique registration key -- that is, copy-paste and run the line of code provided to you by the person who has added you to the controller via the `juju add-user` command. For example: + +```text +juju register MFATA3JvZDAnExMxMDQuMTU0LjQyLjQ0OjE3MDcwExAxMC4xMjguMC4yOjE3MDcwBCBEFCaXerhNImkKKabuX5ULWf2Bp4AzPNJEbXVWgraLrAA= + +``` + +This will start an interactive session prompting you to supply a local name for the controller as well as a username and a password for you as a new `juju` user on the controller. + +------- +[details=Example session] +Admin adding a new user 'alex' to the controller: + +```text +# Add a user named `alex`: +$ juju add-user alex +User "alex" added +Please send this command to alex: + juju register MFUTBGFsZXgwFRMTMTAuMTM2LjEzNi4xOToxNzA3MAQghBj6RLW5VgmCSWsAesRm5unETluNu1-FczN9oVfNGuYTFGxvY2FsaG9zdC1jb250cm9sbGVy + +"alex" has not been granted access to any models. You can use "juju grant" to grant access. +``` + +New user 'alex' accessing the controller: + +```text +$ juju register MFUTBGFsZXgwFRMTMTAuMTM2LjEzNi4xOToxNzA3MAQghBj6RLW5VgmCSWsAesRm5unETluNu1-FczN9oVfNGuYTFGxvY2FsaG9zdC1jb250cm9sbGVy +Enter a new password: ******** +Confirm password: ******** +Enter a name for this controller [localhost-controller]: localhost-controller +Initial password successfully set for alex. + +Welcome, alex. You are now logged into "localhost-controller". + +There are no models available. You can add models with +"juju add-model", or you can ask an administrator or owner +of a model to grant access to that model with "juju grant". + +``` +[/details] + +----- + +The command also has a flag that allows you to overwrite existing information, for cases where you need to reregister a controller. + +> See more: [`juju register`](/t/10160), [How to add a user](/t/1156#heading--add-a-user) + +**Register a public controller.** + +[note type=information] +**Network requirements:** The client must be able to connect to the controller API over port `17070`. Juju takes care of everything else. (And in most cases it takes care of this requirement too: for all clouds except for OpenStack Juju defaults to provisioning the controller with a public IP, and even for OpenStack you can choose to bootstrap with a floating IP as well.) +[/note] + + + +To register a public controller, use the `register` command followed by the DNS host name of the public controller. For example: + +```text +juju register public-controller.example.com +``` + +This will open a login window in your browser. + +By specifying various flags you can also use this to reregister a controller or to type in your login information in your terminal rather than the browser. + +> See more: [`juju register`](/t/10160) + + +

Unregister a controller

+ +To remove knowledge of the controller from the `juju` client, run the `unregister` command followed by the name of the controller. For example: + +```text +juju unregister localhost-controller-prod +``` + +Note that this does not destroy the controller (though, to regain access to it, you will have to re-register it). + +> See more: [`juju unregister`](/t/10165) + +[/tab] + +[tab version="terraform juju"] +The `terraform juju` client does not support this. Please use the `juju` client. +[/tab] + +[tab version="python libjuju"] +The `python-libjuju` client does not support this. Please use the `juju` client. +[/tab] +[/tabs] + +

Make a controller highly available

+> See also: [High availability](/t/1066) + +[tabs] +[tab version="juju"] + +To make a controller highly available, use the `enable-ha` command: + +[note type=caution] +Currently only supported for controllers on a machine cloud. +[/note] + +```text +juju enable-ha +``` + +This will make sure that the number of controllers increases to the default minimum of 3. Sample output: + +```text +maintaining machines: 0 +adding machines: 1, 2 +``` + +Optionally, you can also mention a specific controller and also the number of controller machines you want to use for HA, among other things (e.g., constraints). + +[note type=information] +The number of controllers must be an odd number in order for a master to be "voted in" amongst its peers. A cluster with an even number of members will cause a random member to become inactive. This latter system will become a "hot standby" and automatically become active should some other member fail. Furthermore, due to limitations of the underlying database in an HA context, that number cannot exceed seven. All this means that a cluster can only have three, five, or seven **active** members. +[/note] + +---- + +If a controller is misbehaving, or if you've decided that you don't need as many controllers for HA after all, you can remove them. To remove a controller, remove its machine from the controller model via the `remove-machine` command. + + +[note type=information] +The `enable-ha` command cannot be used to remove machines from the cluster. +[/note] + +For example, below we remove controller 1 by removing machine 1 from the controller model: + +```text +juju remove-machine -m controller 1 +``` + +[note type=information] +If the removal of a controller will result in an **even** number of systems then one will act as a "hot standby".
+ If the removal of a controller will result in an **odd** number of systems then each one will actively participate in the cluster. +[/note] + + +> See more: [`juju enable-ha`](/t/10206) + +[/tab] + +[tab version="terraform juju"] +The `terraform juju` client does not support this. Please use the `juju` client. +[/tab] + +[tab version="python libjuju"] +The `python-libjuju` client does not support this. Please use the `juju` client. +[/tab] +[/tabs] + +
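+As a concrete (illustrative) example of the optional arguments mentioned above, the following asks a specific controller for five controller machines, each with at least 8 GB of RAM:
+
+```text
+juju enable-ha -n 5 -c mycontroller --constraints mem=8G
+```
+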

Collect metrics about a controller

+ +Each controller provides an HTTPS endpoint to expose Prometheus metrics. To feed these metrics into Prometheus, configure Prometheus to scrape the controller's metrics. You can do that automatically via Juju relations or manually. + +- [Configure Prometheus automatically](#heading--configure-prometheus-automatically) +- [Configure Prometheus manually](#heading--configure-prometheus-manually) + +

Configure Prometheus automatically

+ +> Available starting with Juju 3.3. +> +> Whether your controller is on machines or Kubernetes, requires a Kubernetes cloud. (That is because the required Prometheus charm is only available for Kubernetes.) +> +> If you're on a Kubernetes cloud: While it is possible to deploy Prometheus directly on the controller model, it's always best to keep your observability setup on a different model (and ideally also a different controller and a different cloud region or cloud). + +To configure Prometheus to scrape the controller for metrics automatically, on a Kubernetes cloud add a model; on it, deploy `prometheus-k8s`, either directly or through the `cos-lite` bundle; offer `prometheus-k8s`' `metrics-endpoint` for cross-model relations; switch to the controller model and integrate the controller application with the offer; run `juju status --relations` to verify that the relation is up and running; and query Prometheus for your metric of interest. + + +--- +[details=Sample session] + +Assumes your controller application and Prometheus are on different models on the same Kubernetes cloud and that you are deploying Prometheus (`prometheus-k8s`) through the Canonical Observability Stack bundle (`cos-lite`). However, the logic would be entirely the same if they were on the same controller but different clouds (multi-cloud controller setup) or on different controllers on different clouds (except in some cases you may also have to explicitly grant access to the offer). + +```text + +$ juju add-model observability + +$ juju deploy cos-lite + +$ juju status -m cos-lite --watch 1s + +$ juju offer prometheus:metrics-endpoint + +$ juju switch controller + +$ juju integrate controller admin/cos-lite.prometheus + +$ juju status --relations + +# Query Prometheus: +# (where the bit before `9090` is the Prometheus unit's IP address +# and juju_apisever_request_duration_seconds` is an example metric) +$ curl 10.1.170.185:9090/api/v1/query?query=juju_apiserver_request_duration_seconds + +``` +[/details] + +------ + + +> See more: +> - [Charmhub | `juju-controller` > `metrics-endpoint | prometheus-scrape`](https://charmhub.io/juju-controller/integrations#metrics-endpoint) +> - [Charmhub | `juju-controller` > Endpoint `metrics-endpoint`: List of metrics](https://charmhub.io/juju-controller/docs/endpoint-metrics-endpoint-metrics) +> - [Charmhub | `prometheus-k8s` > `metrics-endpoint`](https://charmhub.io/prometheus-k8s/integrations#metrics-endpoint) +> - [Charmhub | `cos-lite`](https://charmhub.io/cos-lite) +> - [How to switch to a different model](/t/1155#heading--switch-to-a-different-model) +> - [How to add a cross-model relation](/t/1073#heading--add-a-cross-model-relation) + + +

Configure Prometheus manually

+
+> Useful if your Prometheus is outside of Juju.
+>
+> The Prometheus server must be able to contact the controller's API address/port `17070`. (Juju controllers are usually set up to allow this automatically.)
+
+To configure Prometheus to scrape the controller for metrics manually:
+
+1. On the Juju side, create a user for Prometheus and grant the user read access to the controller model (e.g., `juju add-user prometheus`, `juju change-user-password prometheus`, `juju grant prometheus read controller` -- where `prometheus` is just the name we've assigned to our Juju user for Prometheus).
+
+2. Either: On the Prometheus side, configure Prometheus to skip TLS certificate validation. Or: On the Juju side, configure the controller to store its CA certificate in a file that Prometheus can then use to verify the server's certificate against (`juju controller-config ca-cert > /path/to/juju-ca.crt`).
+
+3. Add a scrape target to Prometheus by configuring your `prometheus.yaml` with the following:
+
+[note type="caution"]
+:warning: In the `username` field, the `user-` portion in front of the name we've assigned to the Juju user for Prometheus is required.
+[/note]
+
+```text
+scrape_configs:
+  - job_name: juju
+    metrics_path: /introspection/metrics
+    scheme: https
+    static_configs:
+      - targets: ['<controller-address>:17070']
+    basic_auth:
+      username: user-<name-of-the-juju-user-for-prometheus>
+      password: <password-of-that-user>
+    tls_config:
+      ca_file: /path/to/juju-ca.crt
+```
+
+
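+Before pointing Prometheus at the controller, it can be worth checking the endpoint and credentials by hand. A possible spot check, substituting your own controller address, user name, and password:
+
+```text
+curl --cacert /path/to/juju-ca.crt \
+  -u "user-prometheus:<password>" \
+  https://<controller-address>:17070/introspection/metrics
+```
+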

Back up a controller

+[tabs] +[tab version="juju"] + + +This section demonstrates the various steps involved in backing up a controller. + + +[note type=caution] +The procedure documented below is currently supported only for machine (non-Kubernetes) controllers. +[/note] + +- [Create a controller backup](#heading--create-a-controller-backup) +- [Download a controller backup](#heading--download-a-controller-backup) +- [Restore a controller from a backup](#heading--restore-a-controller-from-a-backup) + + +

Create a controller backup

+ +To create a backup of a controller configuration / metadata, use the `create-backup` followed by the `-m` flag and the name of the target controller model. For example, assuming a controller called `localhost-controller`, and the standard controller model name (`controller`), we will do: + +```text +juju create-backup -m localhost-controller:controller +``` +[note type=information] +Alternatively, you can switch to the controller model and use this command without any arguments or use the `-m` flag followed by just `controller`. However, due to the delicate nature of data backups, the verbose but explicit method demonstrated above is highly recommended. +[/note] + + +Sample output: + +```text +backup format version: 1 +juju version: 3.0.0 +base: ubuntu@22.04 + +controller UUID: ca60f7e9-647b-4460-8232-fe75749e17c7 +model UUID: a04d7604-3073-45b7-871f-030ac0360fb4 +machine ID: 0 +created on host: juju-360fb4-0 + +checksum: BrOGsXIK375529xlXJHX7m23Amk= +checksum format: SHA-1, base64 encoded +size (B): 114919198 +stored: 0001-01-01 00:00:00 +0000 UTC +started: 2022-11-09 09:06:46.800165238 +0000 UTC +finished: 2022-11-09 09:07:05.133077079 +0000 UTC + +notes: + +Downloaded to juju-backup-20221109-090646.tar.gz +``` + + +The backup is downloaded to a default location on your computer (e.g., `/home/user`). A backup of a fresh (empty) environment, regardless of cloud type, is approximately 75 MiB in size. + +The `create-backup` command also allows you to specify a custom filename for the backup file (`--filename `). Note: You can technically also choose to save the backup on the controller (`--no-download`), but starting with `juju v.3.0` this flag is deprecated. + +> See more: [`juju create-backup`](/t/10197) + + +

Download a controller backup

+ +Suppose you've created a backup with the `--no-download` option, as shown below (where `controller` is the name of the controller model). + +[note type=caution] +Starting with `juju v.3.0`, this flag is deprecated. +[/note] + +```text +$ juju create-backup -m controller --no-download +WARNING --no-download flag is DEPRECATED. + +backup format version: 1 +juju version: 3.0.0 +base: ubuntu@22.04 + +controller UUID: ca60f7e9-647b-4460-8232-fe75749e17c7 +model UUID: a04d7604-3073-45b7-871f-030ac0360fb4 +machine ID: 0 +created on host: juju-360fb4-0 + +checksum: tjqEvlspc88mYQmjV9u/m4i+prg= +checksum format: SHA-1, base64 encoded +size (B): 114919131 +stored: 0001-01-01 00:00:00 +0000 UTC +started: 2022-11-09 09:08:51.314128218 +0000 UTC +finished: 2022-11-09 09:09:10.296320799 +0000 UTC + +notes: + +Remote backup stored on the controller as /tmp/juju-backup-20221109-090851.tar.gz +``` + +As you can see from the output, this has resulted in the backup being saved remotely on the controller as `/tmp/juju-backup-20221109-090851.tar.gz`. + +To download the backup, use the `download-backup` command followed by the remote location of the backup. In our case: + +```text +juju download-backup /tmp/juju-backup-20221109-090851.tar.gz +``` + +This will output the name of the downloaded backup file. In our case: + +```text +juju-backup-20221109-090851.tar.gz +``` + +This file will have been downloaded to a temporary location (in our case, `/home/user`). + +> See more: [`juju download-backup`](/t/10240) + + +

Restore a controller from a backup

+
+
+To restore a controller from a backup, you can use the [stand-alone `juju-restore` tool](https://github.com/juju/juju-restore).
+
+
+First, download the `juju-restore` tool and copy it to the target controller's `ha-primary` machine (typically, machine 0). To identify the primary controller machine, you can use the `juju show-controller` command -- its output will list all the machines, and the primary will contain `ha-primary: true`:
+
+```text
+juju show-controller
+...
+  controller-machines:
+    "0":
+      instance-id: i-073443a840f1a3626
+      ha-status: ha-enabled
+      ha-primary: true
+    "1":
+      instance-id: i-0be2c1b818e54a2ba
+      ha-status: ha-enabled
+    "2":
+      instance-id: i-0b4705ede7d3c0faa
+      ha-status: ha-enabled
+...
+```
+
+Then you can copy the restore tool:
+
+```text
+# Download the latest release binary (Linux, AMD64):
+wget https://github.com/juju/juju-restore/releases/latest/download/juju-restore
+chmod +x juju-restore
+
+# Switch to the controller model:
+juju switch controller
+
+# Copy juju-restore to the primary controller machine:
+juju scp juju-restore 0:
+```
+
+Second, assuming that during the `create-backup` step you chose to save a local copy (the default option), use `juju scp` to copy the backup file to the same controller machine, as shown below.
+
+```text
+juju scp <path-to-backup-file> 0:
+```
+
+[note type=information]
+If you've used `create-backup` with the `--no-download` option, you can skip this step -- the backup is already on the primary controller machine.
+[/note]
+
+Now, SSH into this machine and run `./juju-restore` followed by the path to the backup file, as shown below. All replica set nodes need to be healthy and in `PRIMARY` or `SECONDARY` state.
+
+
+```text
+# SSH into the controller machine
+juju ssh 0
+
+# Start the restore! (it will ask for confirmation)
+./juju-restore <path-to-backup-file>
+```
+
+
+The `juju-restore` tool also provides several options, among which:
+
+* `--yes`: answer "yes" to confirmation prompts (for non-interactive mode)
+* `--include-status-history`: restore the status history collection for machines and units (which can be large, and usually isn't needed)
+* `--username`, `--password`, and related options: override the defaults for connecting to MongoDB
+* `--allow-downgrade`: restore from a backup created with an earlier `juju` version
+* `--manual-agent-control`: (in the case of restoring backups to [high availability controllers](/t/1066#heading--controller-high-availability)) stop and restart `juju` agents and Mongo daemons on the secondary controller machines manually
+* `--copy-controller`: clone the configuration of an old controller into a new controller (download the latest `juju-restore` to see this option).
+
+For the full list of options, type: `./juju-restore --help`
+
+> See more: [`juju-restore`](https://github.com/juju/juju-restore)
+
+[/tab]
+
+[tab version="terraform juju"]
+The `terraform juju` client does not support this. Please use the `juju` client.
+[/tab]
+
+[tab version="python libjuju"]
+The `python-libjuju` client does not support this. Please use the `juju` client.
+[/tab]
+[/tabs]
+
+

Upgrade a controller

+
+[tabs]
+[tab version="juju"]
+
+The procedure depends on whether you're upgrading your controller's patch version (e.g., 2.9.25 → 2.9.48) or rather its minor or major version (e.g., 3.1 → 3.4 or 2.9 → 3.0).
+
+- [Upgrade your controller's patch version](#heading--upgrade-your-controllers-patch-version)
+- [Upgrade your controller's minor or major version](#heading--upgrade-your-controllers-minor-or-major-version)
+

Upgrade your controller's patch version

+
+To upgrade your controller's patch version, on the target controller, use the `juju upgrade-controller` command with the `--agent-version` flag followed by the desired patch version (of the same major and minor):
+
+```text
+juju upgrade-controller --agent-version <version>
+```
+
+For example, assuming a controller version `3.0.0`, to upgrade to `3.0.2`:
+
+```text
+juju upgrade-controller --agent-version 3.0.2
+```
+
+

Upgrade your controller's minor or major version

+ + +It is not possible to upgrade a controller's minor or major version. + + Instead, you should +- use a client upgraded to the desired version to bootstrap a new controller of that version; +- recreate your old controller's configuration (settings, users, clouds, and models -- for machine clouds you can use the `juju create-backup` command and the standalone `juju-restore` tool) in the new controller; +- migrate your models from the old controller to the new (`juju migrate`) and upgrade them to match the new controller's version; +- help your users connect to the new controller (run `juju change-user-password`, then send them the new registration string). + +[note type=information] +See [How to upgrade your deployment](/t/7530) for a full demo! +[/note] + +> See more: +> - [How to back up a controller > Create a backup, Restore from a backup](/t/1111#heading--back-up-a-controller) +> - [How to migrate a workload model to another controller](/t/1155#heading--migrate-a-workload-model-to-another-controller) +> - [How to manage a user's login details](/t/1156#heading--manager-a-users-login-details) + + + +[/tab] + +[tab version="terraform juju"] +The `terraform juju` client does not support this. Please use the `juju` client. +[/tab] + +[tab version="python libjuju"] +The `python-libjuju` client does not support this. Please use the `juju` client. +[/tab] +[/tabs] + + +
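+In outline, and using illustrative names only (the walkthrough linked in the note above covers the details), the sequence might look roughly like this:
+
+```text
+# With a client on the new version, bootstrap a replacement controller:
+juju bootstrap aws newcontroller
+
+# Migrate each model across from the old controller, then upgrade it:
+juju switch oldcontroller
+juju migrate mymodel newcontroller
+juju upgrade-model -m newcontroller:mymodel
+
+# Reset each user's login details on the new controller, then send them
+# the new registration string:
+juju change-user-password alice -c newcontroller
+```
+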

Remove a controller

+> See also: [Removing things](/t/1063) + +[tabs] +[tab version="juju"] + +There are two ways to remove a controller. Below we demonstrate each, in order of severity. + + +- [Destroy a controller](#heading--destroy-a-controller) +- [Kill a controller](#heading--kill-a-controller) + +[note type=information] +For how to remove *knowledge* about a controller from a `juju` client, see [Unregister a controller](#heading--unregister-a-controller). +[/note] + + +

Destroy a controller

+
+A controller can be destroyed with:
+
+`juju destroy-controller <controller-name>`
+
+You will always be prompted to confirm this action. Use the `-y` option to override this.
+
+As a safety measure, if there are any models (besides the 'controller' model) associated with the controller you will need to pass the `--destroy-all-models` option.
+
+Additionally, if there is persistent [storage](/t/using-juju-storage/1079) in any of the controller's models you will be prompted to either destroy or release the storage, using the `--destroy-storage` or `--release-storage` options respectively.
+
+For example:
+
+```bash
+juju destroy-controller -y --destroy-all-models --destroy-storage aws
+```
+
+[note type=information]
+Any model in the controller that has disabled commands will block the controller from being destroyed. A controller administrator is able to enable all the commands across all the models in a Juju controller so that the controller can be destroyed if desired. This can be done via the [`enable-destroy-controller`](/t/1717) command: `juju enable-destroy-controller`.
+[/note]
+
+> See more: [`juju destroy-controller`](/t/10113)
+
+
+Use the `kill-controller` command as a last resort if the controller is not accessible for some reason.
+
+

Kill a controller

+ +The `kill-controller` command deserves some attention as it is very destructive and also has exceptional behaviour modes. This command will first attempt to remove a controller and its models in an orderly fashion. That is, it will behave like `destroy-controller`. If this fails, usually due the controller itself being unreachable, then the controller machine and the workload machines will be destroyed by having the client contact the backing cloud's API directly. + +> See more: [`juju kill-controller`](/t/10233) + +[/tab] + + +[tab version="terraform juju"] +The `terraform juju` client does not support this. Please use the `juju` client. +[/tab] + +[tab version="python libjuju"] +The `python-libjuju` client does not support this. Please use the `juju` client. +[/tab] +[/tabs] + + +
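+For completeness, a typical invocation looks like this (illustrative controller name; the `--timeout` option caps how long the orderly, `destroy-controller`-like path is attempted before falling back to the cloud API):
+
+```text
+juju kill-controller --timeout 5m mycontroller
+```
+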
+ + +> **Contributors:** @cderici, @hmlanigan, @pmatulis, @tmihoc + +------------------------- + +elias1884 | 2019-09-27 22:46:14 UTC | #2 + +As the juju controller occupies one machine all by itself and if redundancy is neede (which most certainly always is) 3 full machines are "lost" to juju, it would be beneficial to learn, how to conserve resources in different scenarios. + +E.g., if I have a MAAS cloud with multiple regions, do I need a dedicated juju controller in each region, or can I make one work for all regions combined - makes quite a difference. + +Also important for this scenario is, if juju controller only manages state and is therefore bandwidth extensive or if it acts as a sort of proxy for packages installed also, which would make the scenario above greatly depend on the bandwidth of the interconnection between regions. + +------------------------- + +timClicks | 2019-10-01 05:11:29 UTC | #3 + +[quote="elias1884, post:2, topic:1111"] +As the juju controller occupies one machine all by itself and if redundancy is neede (which most certainly always is) 3 full machines are “lost” to juju, it would be beneficial to learn, how to conserve resources in different scenarios. +[/quote] + +One option is to use JAAS, which is a hosted controller provided for the Juju community. At this stage, no billing is associated with it. + +------------------------- + +tartley | 2021-12-03 15:17:29 UTC | #4 + +I propose adding a short para to this page showing how to switch the active controller. Unlike the other paragraphs in this page, I don't think it needs to include enough content that it's worth linking to another page. We should just put the content here. Something like the following (Caveat: I'm just figuring this out myself, so feel free to override if this is inaccurate or a bad idea): + +**Switch active controller** + +If you have more than one controller, commands will act on the one that is currently active. `juju switch` will output the name of the active controller in the format `:/`, eg. "`mycontroller:admin/default`". You can pass in a controller name to set the currently active controller, eg: `juju switch mycontroller`. The given controller name can be qualified with a user and model name, eg: `juju switch mycontroller:admin/default`. Discover the available controller, user and model names using `juju controllers`. + +------------------------- + +pedroleaoc | 2022-04-07 08:35:28 UTC | #5 + + + +------------------------- + +pedroleaoc | 2022-10-14 11:31:49 UTC | #6 + + + +------------------------- + diff --git a/tmp/t/1112.md b/tmp/t/1112.md new file mode 100644 index 000000000..630113e5d --- /dev/null +++ b/tmp/t/1112.md @@ -0,0 +1,426 @@ +system | 2024-07-25 10:24:00 UTC | #1 + +> See also: [Credential](/t/6006) + +This document shows how to manage credentials in Juju. + +**Contents:** + +- [Add a credential](#heading--add-a-credential) +- [View all the known credentials](#heading--view-all-the-known-credentials) +- [View details about a credential](#heading--view-details-about-a-credential) +- [Set the default credential](#heading--set-the-default-credential) +- [Add a credential to a model](#heading--add-a-credential-to-a-model) +- [Update a credential](#heading--update-a-credential) +- [Remove a credential](#heading--remove-a-credential) + + + +

Add a credential

+ +> See also: [Credential definition](/t/6006#heading--credential-definition), [List of supported clouds > \ > CREDENTIAL](/t/6665) + +[tabs] +[tab version="juju"] + +The procedure for how to add a cloud credential to Juju depends on whether the cloud is a machine (traditional, non-Kubernetes) cloud or rather a Kubernetes cloud. + +- [Add a credential for a machine cloud](#heading--add-a-credential-for-a-machine-cloud) +- [Add a credential for a Kubernetes cloud](#heading--add-a-credential-for-a-kubernetes-cloud) + +

Add a credential for a machine cloud

+ +[note type=positive] + **If your cloud is a local LXD cloud and if you are a Juju admin user:** +Your cloud credential is set up and retrieved automatically for you, so you can skip this step. Run `juju credentials` to confirm. (If you are not a Juju admin user, run `autoload-credentials`.) +[/note] + +**1.** Choose a cloud authentication type and collect the information required for that type from your cloud account. + +[note type=caution] +The authentication types and the information needed for each type depend on your chosen cloud. Run `juju show-cloud` or consult the cloud Reference doc to find out. + +> See more: [List of supported clouds](/t/6665) +[/note] + +**2.** Provide this information to Juju. You may do so in three ways -- interactively, by specifying a YAML file, or automatically, by having Juju check your local YAML files or environment variables. + +[note type=caution] +In general, we recommend the interactive method -- the latter two are both error-prone, and the last one is not available for all clouds. +[/note] + + +**2a.** To add a credential interactively, run the `add-credential` command followed by the name of your machine cloud. For example: + +```text +juju add-credential aws +``` + +This will start an interactive session where you’ll be asked to choose a cloud region (if applicable), specify a credential name (you can pick any name you want), and then provide the credential information (e.g., access key, etc.) + +The command also offers various flags that you can use to provide all this information in one go (e.g., the path to a YAML file containing the credential definition) as an alternative to the interactive session. + +> See more: [`juju add-credential`](/t/10136) + + +**2b.** To add a credential by specifying a YAML file, use your credential information to prepare a `credentials.yaml` file, then run the `add-credential` command with the `-f` flag followed by the path to this file. + +> See more: [`juju add-credential -f`](/t/10136) + + +**2c.** To add a credential automatically, use your credential information to prepare a `credentials.yaml` file / environment variables, then run the `autoload-credentials` command: + +```text +juju autoload-credentials +``` + +Juju will scan your local credentials files / environment variables / rc files and, if it detects something suitable for the present cloud, it will display a prompt asking you to confirm the addition of the credential and to specify a name for it. + +The command also allows you to restrict the search to a specific cloud, a specific controller, etc. + +> See more: [`juju autoload-credentials`](/t/10230) + +
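+For option **2b**, a minimal sketch of what such a `credentials.yaml` file can look like for an AWS access-key credential (all names and key values here are placeholders):
+
+```text
+credentials:
+  aws:
+    my-aws-creds:
+      auth-type: access-key
+      access-key: <access-key-id>
+      secret-key: <secret-access-key>
+```
+
+You would then add it with `juju add-credential aws -f ./credentials.yaml`.
+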

Add a credential for a Kubernetes cloud

+ +For a Kubernetes cloud, credential definitions are added automatically when you add the cloud definition to Juju. Run `juju credentials` to verify. + +> See more: [How to add a Kubernetes cloud](/t/1100#heading--add-a-kubernetes-cloud) + +[/tab] + +[tab version="terraform juju"] + +To add a credential, in your Terraform plan create a resource of the `juju_credential` type, specifying the credential's name, cloud, authentication type, and the attributes associated with the authentication type. + + +```text +resource "juju_credential" "this" { + name = "creddev" + + cloud { + name = "localhost" + } + + auth_type = "certificate" + + attributes = { + client-cert = "/srv/cert.crt" + client-key = "/srv/cert.key" + trust-password = "S0m3P@$$w0rd" + } +} +``` + +> See more: [`juju_credential` (resource)](https://registry.terraform.io/providers/juju/juju/latest/docs/resources/credential) +[/tab] + +[tab version="python libjuju"] +To add a credential, you may use the `Controller.add_credential()` method on a connected `Controller` object. `add_credential` is an upsert method (where it inserts if the given credential is new, and updates if the given credential name already exists). + +```python +from juju.client import client as jujuclient + +my_controller.add_credential("my-credential", + jujuclient.CloudCredential(auth_type="jsonfile", attrs={'file':'path_to_cred_file'}) + +``` + +> See more: [`add_credential` (method)](https://pythonlibjuju.readthedocs.io/en/latest/api/juju.controller.html#juju.controller.Controller.add_credential) + +[/tab] +[/tabs] + +

View all the known credentials

+ +[tabs] +[tab version="juju"] + +To see a list of all the known credentials, run the `credentials` command: + +```text +juju credentials +``` + +This should output something similar to this: + +```text +Controller Credentials: +Cloud Credentials +lxd localhost* + +Client Credentials: +Cloud Credentials +aws bob*, carol +google wayne +``` + +where the asterisk denotes the default credential for a given cloud. + +By passing various flags, you can also choose to view just the credentials known to the client, or just those for a particular controller; you can select a different output format or an output file (and also choose to include secrets); etc. + +> See more: [`juju credentials`](/t/10054) + +[/tab] + +[tab version="terraform juju"] +The `terraform juju` client does not support this. Please use the `juju` client. +[/tab] + +[tab version="python libjuju"] +The `python-libjuju` client does not support this. Please use the `juju` client. +[/tab] +[/tabs] + +

View details about a credential

+ +[tabs] +[tab version="juju"] + +You can view details about all your credentials at once or just about a specific credential. + +**All credentials.** To view details about all your credentials at once, run the `show-credential` command with no argument: + +```text +juju show-credential +``` + +By passing various flags you can filter by controller, select an output format or an output file, etc. + +> See more: [`juju show-credential`](/t/1822) + + +**A specific credential.** To view details about just one specific credential, run the `show-credential` command followed by the name of the cloud and the name of the credential. For example: + +```text +juju show-credential mycloud mycredential +``` + +By passing various flags you can specify an output format or an output file, display secret attributes, etc. + +> See more: [`juju show-credential`](/t/10105) + +[/tab] + +[tab version="terraform juju"] +The `terraform juju` client does not support this. Please use the `juju` client. + +[/tab] + +[tab version="python libjuju"] +The `python-libjuju` client does not support this. Please use the `juju` client. +[/tab] +[/tabs] + +

Set the default credential

+
+[tabs]
+[tab version="juju"]
+
+**Set.** To set the default credential for a cloud on the current client, run the `default-credential` command followed by the name of the cloud and the name of the credential. For example:
+
+```text
+juju default-credential aws carol
+```
+
+> See more: [`juju default-credential`](/t/6330)
+
+**Get.** To view the currently set default credential for a cloud, run the `default-credential` command followed by the name of the cloud. For example:
+
+```text
+juju default-credential aws
+```
+This should display the default credential.
+
+
+
+> See more: [`juju default-credential`](/t/10055)
+
+[/tab]
+
+[tab version="terraform juju"]
+The `terraform juju` client does not support this. Please use the `juju` client.
+
+[/tab]
+
+[tab version="python libjuju"]
+The `python-libjuju` client does not support this. Please use the `juju` client.
+[/tab]
+[/tabs]
+

Add a credential to a model

+
+[note type=caution]
+You can only do this if you are a controller admin or a model owner.
+[/note]
+
+[tabs]
+[tab version="juju"]
+
+To add a controller credential to a model, run the `set-credential` command followed by a flag for the intended model, the host cloud, and the name of the credential. For example:
+
+```text
+juju set-credential -m trinity aws bob
+```
+
+[note type=information]
+If the credential is only known to the client, this will first upload it to the controller and then relate it to the model.
+[/note]
+
+[note type=positive]
+This command does not affect any existing relations between the credential and other models. If the credential is already related to a single model, this operation will just cause the credential to be related to two models.
+[/note]
+
+> See more: [`juju set-credential`](/t/10169)
+
+[/tab]
+
+[tab version="terraform juju"]
+To add a controller credential to a model, in your Terraform plan, specify it as an attribute to the model definition. For example:
+
+```text
+resource "juju_model" "this" {
+  name = "development"
+
+  cloud {
+    name   = "aws"
+    region = "eu-west-1"
+  }
+
+  credential = juju_credential.this.name
+}
+```
+
+Here `juju_credential.this.name` refers to the `juju_credential` resource defined earlier (named `this` in the example above).
+
+[/tab]
+
+[tab version="python libjuju"]
+The `python-libjuju` client does not support this. Please use the `juju` client.
+[/tab]
+[/tabs]
+

Update a credential

+ +[tabs] +[tab version="juju"] + +To update a credential, run the `update-credential` command followed by the name of the cloud and the name of the credential. For example: + +```text +juju update-credential mycloud mycredential +``` + +This will start an interactive session where you will be asked to specify various parameters for the update. + +By passing various flags, you can also perform this operation in-line. And by dropping the credential (and the cloud) argument and passing a flag with a credential YAMl file, you can also update all your credentials at once. + +> See more: [`juju update-credential`](/t/10065) + +[/tab] + +[tab version="terraform juju"] +To update a credential, in your Terraform plan, update its resource definition. + +> See more: [Resource `juju_credential`](https://registry.terraform.io/providers/juju/juju/latest/docs/resources/credential) +[/tab] + +[tab version="python libjuju"] +To update a credential, you may use the `Controller.add_credential()` method on a connected `Controller` object. `add_credential` is an upsert method (where it inserts if the given credential is new, and updates if the given credential name already exists). + +```python +from juju.client import client as jujuclient + +my_controller.add_credential("my-credential", + jujuclient.CloudCredential(auth_type="jsonfile", attrs={'file':'path_to_cred_file'}) + +``` + +> See more: [`add_credential` (method)](https://pythonlibjuju.readthedocs.io/en/latest/api/juju.controller.html#juju.controller.Controller.add_credential) +[/tab] +[/tabs] + +

Remove a credential

+ +[tabs] +[tab version="juju"] + +To remove a credential, run the `remove-credential` command followed by the name of the cloud and the name of the credential. For example: + +``` text +juju remove-credential mycloud mycredential +``` + +This will start an interactive session where you will be asked to choose whether to apply this operation for the client or a specific controller or both. You can bypass this by using the client and controller flags in-line. + +> See more: [`juju remove-credential`](/t/10201) + +[/tab] + +[tab version="terraform juju"] + +To remove a credential, remove its resource definition from your Terraform plan. + +> See more: [Resource `juju_credential`](https://registry.terraform.io/providers/juju/juju/latest/docs/resources/credential) +[/tab] + +[tab version="python libjuju"] +The `python-libjuju` client does not support this. Please use the `juju` client. +[/tab] +[/tabs] + + +
+
+> **Contributors:** @cderici, @erik-lonroth, @pedroleaoc, @pmatulis, @timclicks, @tmihoc, @wallyworld
+
+-------------------------
+
+erik-lonroth | 2019-10-26 07:14:12 UTC | #2
+
+I'm learning more about credentials and curious how Juju manages remote credentials. This is an important topic for security reasons.
+
+It's a good place to explain how end-user credentials are managed in a remote location, for example on the public jaas-infrastructure but more generally also in a controller context.
+
+It would be good also to provide information on how to **remove a remote credential**, since that is missing (does it exist?) from the documentation here: https://jaas.ai/docs/credentials#heading--updating-remote-credentials
+
+@timClicks @rick_h
+
+-------------------------
+
+pmatulis | 2019-10-26 13:42:35 UTC | #3
+
+Here maybe?
+
+https://jaas.ai/docs/t/tutorial-managing-credentials/1289
+
+-------------------------
+
+ppasotti | 2022-01-21 16:48:38 UTC | #4
+
+I stumbled upon an error with `openstack-integrator` trying to fetch a 'project_name' field in the openstack credentials.
+After a lot of digging I found out that it's a synonym of a 'tenant-name' field that can be passed through the yaml spec (not sure about the interactive prompt).
+
+To help towards documenting this, I propose adding "tenant-name: frodo" to the yaml example above, although I'm not sure whether the idea is 'document all fields' or 'document the necessary fields only', in which case I rest my... case.
+
+-------------------------
+
+pedroleaoc | 2022-04-07 08:33:17 UTC | #5
+
+
+
+-------------------------
+
+pedroleaoc | 2022-10-14 11:31:35 UTC | #6
+
+
+
+-------------------------
+
+tmihoc | 2024-02-20 11:56:17 UTC | #7
+
+We should document the full credentials.yaml schema in https://discourse.charmhub.io/t/credential/6006#heading--file-credentials-yaml , then the specifics for OpenStack in https://discourse.charmhub.io/t/openstack-and-juju/1097 .
+
+-------------------------
+
diff --git a/tmp/t/11125.md b/tmp/t/11125.md
new file mode 100644
index 000000000..1934b3b2f
--- /dev/null
+++ b/tmp/t/11125.md
@@ -0,0 +1,126 @@
+tmihoc | 2024-09-10 08:09:46 UTC | #1
+
+Charms should have tests to verify that they are functioning correctly. This document describes some of the various types of testing you may want to consider -- their meaning, recommended coverage, and recommended tooling in the context of a charm.
+
+
+
+## Unit testing
+
+> See also: [How to write a unit test for a charm with Harness](/t/4461), [How to write a unit test for a charm with Scenario](/t/10585)
+
+A **unit test** is a test that targets an individual unit of code (function, method, class, etc.) independently. In the context of a charm, it refers to testing charm code against mock Juju APIs and mocked-out workloads as a way to validate isolated behaviour without external interactions.
+
+Unit tests are intended to be isolating and fast to complete. These are the tests you would run every time before committing code changes.
+ +**Coverage.** Unit testing a charm should cover: + +- how relation data is modified as a result of an event +- what pebble services are running as a result of an event +- which configuration files are written and their contents, as a result of an event + +**Tools.** Unit testing a charm can be done using: + +- [`pytest`](https://pytest.org/) and/or [`unittest`](https://docs.python.org/3/library/unittest.html) and +- [`ops.testing.Harness`](https://operator-framework.readthedocs.io/en/latest/#module-ops.testing) and/or [`ops-scenario`](/t/10583) + + + + + +**Examples.** + +- [https://github.com/canonical/prometheus-k8s-operator/blob/main/tests/unit/test_charm.py](https://github.com/canonical/prometheus-k8s-operator/blob/main/tests/unit/test_charm.py) + +## Interface testing + +In the context of a charm, interface tests help validate charm library behavior without individual charm code against mock Juju APIs. For more information, see [Interface tests](https://juju.is/docs/sdk/interface-tests). + + + + +## Integration testing +> See also: [How to write integration tests for a charm](/t/12734) + +An **integration test** is a test that targets multiple software components in interaction. In the context of a charm, it checks that the charm operates as expected when Juju-deployed by a user in a test model in a real controller. + +Integration tests should be focused on a single charm. Sometimes an integration test requires multiple charms to be deployed for adequate testing, but ideally integration tests should not become end-to-end tests. + +Integration tests typically take significantly longer to run than unit tests. + +**Coverage.** + +* Charm actions +* Charm integrations +* Charm configurations +* That the workload is up and running, and responsive +* Upgrade sequence + * Regression test: upgrade stable/candidate/beta/edge from charmhub with the locally-built charm. + + +[note type=caution] +When writing an integration test, it is not sufficient to simply check that Juju reports that running the action was successful; rather, additional checks need to be executed to ensure that whatever the action was intended to achieve worked. +[/note] + +**Tools.** + +- [`pytest`](https://pytest.org/) and/or [`unittest`](https://docs.python.org/3/library/unittest.html) and +- [pytest-operator](https://github.com/charmed-kubernetes/pytest-operator) and/or [`zaza`](https://github.com/openstack-charmers/zaza) + + +**Examples.** + +- [https://github.com/canonical/prometheus-k8s-operator/blob/main/tests/integration/test_charm.py](https://github.com/canonical/prometheus-k8s-operator/blob/main/tests/integration/test_charm.py) + + + + + + +------------------------- + +tmihoc | 2023-07-12 10:41:34 UTC | #2 + + + +------------------------- + diff --git a/tmp/t/11144.md b/tmp/t/11144.md new file mode 100644 index 000000000..0207f78d1 --- /dev/null +++ b/tmp/t/11144.md @@ -0,0 +1,52 @@ +system | 2024-09-16 15:55:13 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + + +## Summary +Add a new secret. + +## Usage +```juju add-secret [options] [key[#base64|#file]=value...]``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `--file` | | a YAML file containing secret key values | +| `--info` | | the secret description | +| `-m`, `--model` | | Model to operate in. 
Accepts [<controller name>:]<model name>|<model UUID> | + +## Examples + + juju add-secret my-apitoken token=34ae35facd4 + juju add-secret my-secret key#base64=AA== + juju add-secret my-secret key#file=/path/to/file another-key=s3cret + juju add-secret db-password \ + --info "my database password" \ + data#base64=s3cret== + juju add-secret db-password \ + --info "my database password" \ + --file=/path/to/file + + +## Details + +Add a secret with a list of key values. + +If a key has the '#base64' suffix, the value is already in base64 format and no +encoding will be performed, otherwise the value will be base64 encoded +prior to being stored. + +If a key has the '#file' suffix, the value is read from the corresponding file. + +A secret is owned by the model, meaning only the model admin +can manage it, ie grant/revoke access, update, remove etc. + + +--- + +------------------------- + diff --git a/tmp/t/11148.md b/tmp/t/11148.md new file mode 100644 index 000000000..b4977b443 --- /dev/null +++ b/tmp/t/11148.md @@ -0,0 +1,45 @@ +tmihoc | 2024-02-05 15:41:37 UTC | #1 + + + +> See first: [Juju | Status](https://juju.is/docs/juju/status) +> +> See also: [How to set a charm's status](/t/11771) + +In charm development, status refers to the workload status of the application or individual units of the application as it responds to events. Each status consists of a status value and a message. Juju will show the application and unit statuses in the `juju status` output. + +**Contents:** + +- [Possible statuses](#heading--possible-statuses) + +

+<a href="#heading--possible-statuses"><h2 id="heading--possible-statuses">Possible statuses</h2></a>

+ +The possible status values are listed below, along with a link to their [`ops.StatusBase`](https://ops.readthedocs.io/en/latest/#ops.StatusBase) subclass for use in `ops`. They are listed in order from highest to lowest priority, where in case of multiple statuses the higher priority status is what gets surfaced to the user: + +* error: the unit is in error, likely from a hook failure (this status is set by Juju, not by the charm) +* [`blocked`](https://ops.readthedocs.io/en/latest/#ops.BlockedStatus): the unit requires manual intervention from the Juju user, as specified by the charm author (see status notes or the charm's docs) +* [`maintenance`](https://ops.readthedocs.io/en/latest/#ops.MaintenanceStatus): the unit is performing maintenance tasks to get up and running +* [`waiting`](https://ops.readthedocs.io/en/latest/#ops.WaitingStatus): the unit is waiting on an application it's integrated with +* [`active`](https://ops.readthedocs.io/en/latest/#ops.ActiveStatus): the unit is ready and offering the services it has been designed to offer +* [`unknown`](https://ops.readthedocs.io/en/latest/#ops.UnknownStatus): the initial, unknown status when the unit has not set its status yet + +------------------------- + +bartz | 2024-02-05 13:16:42 UTC | #2 + +@tmihoc Can the `blocked` status be explained a bit more? It seems its purpose is not well understood (see also https://discourse.charmhub.io/t/its-probably-ok-for-a-unit-to-go-into-error-state/13022/14). What is manual intervention? This can be anything (like ssh'ing into the unit and fixing something, or just running basic juju commands like relate or config) and can also be applied to an error state. I would expect the charm to go into `BlockedStatus` if we know (and can indicate in the message) exactly what the admin can do using juju’s cli (like relate, config). cc @jameinel + +------------------------- + +tmihoc | 2024-02-05 14:35:48 UTC | #3 + +@bartz My understanding is that the necessary manual intervention differs from charm to charm and, as such, ought to be clarified by the charm authors (e.g., you shouldn't just see the charm go into `blocked` -- there should also be a prompt telling you what you need to do next). I will double-check this and, if corroborated, update the doc to say that more explicitly. + +------------------------- + +jameinel | 2024-02-07 17:02:28 UTC | #4 + +Certainly the intent is that Blocked is meant to be "something isn't right, here is the information I can give you". Usually that is "I need to be related to a database" or "my configuration has no possible resolution because you have conflicting values". + +------------------------- + diff --git a/tmp/t/11181.md b/tmp/t/11181.md new file mode 100644 index 000000000..bd73d9808 --- /dev/null +++ b/tmp/t/11181.md @@ -0,0 +1,59 @@ +barrettj12 | 2024-09-16 15:56:04 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [wait-for model](#wait-for model), [wait-for machine](#wait-for machine), [wait-for unit](#wait-for unit) + +## Summary +Wait for an application to reach a specified state. + +## Usage +```juju wait-for application [options] []``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `-m`, `--model` | | Model to operate in. 
Accepts [<controller name>:]<model name>|<model UUID> | +| `--query` | life=="alive" && status=="active" | query the goal state | +| `--summary` | true | output a summary of the application query on exit | +| `--timeout` | 10m0s | how long to wait, before timing out | + +## Examples + +Waits for 4 units to be present. + + juju wait-for application ubuntu --query='len(units) == 4' + +Waits for all the application units to start with ubuntu and to be created +and available. + + juju wait-for application ubuntu --query='forEach(units, unit => unit.life=="alive" && unit.status=="available" && startsWith(unit.name, "ubuntu"))' + + +## Details + +The wait-for application command waits for the application to reach a goal +state. The goal state can be defined programmatically using the query DSL +(domain specific language). The default query for an application just waits +for the application to be created and active. + +The wait-for command is an optimized alternative to the status command for +determining programmatically if a goal state has been reached. The wait-for +command streams delta changes from the underlying database, unlike the status +command which performs a full query of the database. + +The application query DSL can be used to programmatically define the goal state +for machines and units within the scope of the application. This can +be achieved by using lambda expressions to iterate over the machines and units +associated with the application. Multiple expressions can be combined to define +a complex goal state. + + +--- + +------------------------- + diff --git a/tmp/t/11182.md b/tmp/t/11182.md new file mode 100644 index 000000000..4c38f22af --- /dev/null +++ b/tmp/t/11182.md @@ -0,0 +1,62 @@ +barrettj12 | 2024-09-16 15:57:47 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [wait-for application](#wait-for application), [wait-for machine](#wait-for machine), [wait-for unit](#wait-for unit) + +## Summary +Wait for a model to reach a specified state. + +## Usage +```juju wait-for model [options] []``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `--query` | life=="alive" && status=="available" | query the goal state | +| `--summary` | true | output a summary of the application query on exit | +| `--timeout` | 10m0s | how long to wait, before timing out | + +## Examples + +Waits for all the model units to start with ubuntu. + + juju wait-for model default --query='forEach(units, unit => startsWith(unit.name, "ubuntu"))' + +Waits for all the model applications to be active. + + juju wait-for model default --query='forEach(applications, app => app.status == "active")' + +Waits for the model to be created and available and for all the model +applications to be active. + + juju wait-for model default --query='life=="alive" && status=="available" && forEach(applications, app => app.status == "active")' + + +## Details + +The wait-for model command waits for the model to reach a goal state. The goal +state can be defined programmatically using the query DSL (domain specific +language). The default query for a model just waits for the model to be +created and available. + +The wait-for command is an optimized alternative to the status command for +determining programmatically if a goal state has been reached. 
The wait-for +command streams delta changes from the underlying database, unlike the status +command which performs a full query of the database. + +The model query DSL can be used to programmatically define the goal state +for applications, machines and units within the scope of the model. This can +be achieved by using lambda expressions to iterate over the applications, +machines and units within the model. Multiple expressions can be combined to +define a complex goal state. + + +--- + +------------------------- + diff --git a/tmp/t/11183.md b/tmp/t/11183.md new file mode 100644 index 000000000..6b2e5f4ed --- /dev/null +++ b/tmp/t/11183.md @@ -0,0 +1,50 @@ +barrettj12 | 2024-09-16 15:52:03 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [wait-for model](#wait-for model), [wait-for application](#wait-for application), [wait-for unit](#wait-for unit) + +## Summary +Wait for a machine to reach a specified state. + +## Usage +```juju wait-for machine [options] []``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `-m`, `--model` | | Model to operate in. Accepts [<controller name>:]<model name>|<model UUID> | +| `--query` | life=="alive" && status=="started" | query the goal state | +| `--summary` | true | output a summary of the application query on exit | +| `--timeout` | 10m0s | how long to wait, before timing out | + +## Examples + +Waits for a machine to be created and started. + + juju wait-for machine 0 --query='life=="alive" && status=="started"' + + +## Details + +The wait-for machine command waits for a machine to reach a goal state. +The goal state can be defined programmatically using the query DSL +(domain specific language). The default query for a machine just waits for the +machine to be created and started. + +The wait-for command is an optimized alternative to the status command for +determining programmatically if a goal state has been reached. The wait-for +command streams delta changes from the underlying database, unlike the status +command which performs a full query of the database. + +Multiple expressions can be combined to define a complex goal state. + + +--- + +------------------------- + diff --git a/tmp/t/11184.md b/tmp/t/11184.md new file mode 100644 index 000000000..699e7aacc --- /dev/null +++ b/tmp/t/11184.md @@ -0,0 +1,57 @@ +barrettj12 | 2024-09-16 15:57:01 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + +> See also: [wait-for model](#wait-for model), [wait-for application](#wait-for application), [wait-for machine](#wait-for machine) + +## Summary +Wait for a unit to reach a specified state. + +## Usage +```juju wait-for unit [options] []``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `-m`, `--model` | | Model to operate in. Accepts [<controller name>:]<model name>|<model UUID> | +| `--query` | life=="alive" && workload-status=="active" | query the goal state | +| `--summary` | true | output a summary of the application query on exit | +| `--timeout` | 10m0s | how long to wait, before timing out | + +## Examples + +Waits for a units to be machines to be length of 1. 
+ + juju wait-for unit ubuntu/0 --query='len(machines) == 1' + +Waits for the unit to be created and active. + + juju wait-for unit ubuntu/0 --query='life=="alive" && workload-status=="active"' + + +## Details + +The wait-for unit command waits for the unit to reach a goal state. The goal +state can be defined programmatically using the query DSL (domain specific +language). The default query for a unit just waits for the unit to be created +and active. + +The wait-for command is an optimized alternative to the status command for +determining programmatically if a goal state has been reached. The wait-for +command streams delta changes from the underlying database, unlike the status +command which performs a full query of the database. + +The unit query DSL can be used to programmatically define the goal state +for machine within the scope of the unit. This can be achieved by using lambda +expressions to iterate over the machines associated with the unit. Multiple +expressions can be combined to define a complex goal state. + + +--- + +------------------------- + diff --git a/tmp/t/112.md b/tmp/t/112.md new file mode 100644 index 000000000..68ec7ac51 --- /dev/null +++ b/tmp/t/112.md @@ -0,0 +1,132 @@ +thumper | 2022-08-12 14:15:04 UTC | #1 + +> See also: [Agent introspection: juju_machine_lock](/t/116) + +A new log file was introduced in 2.3.9 and 2.4.2. The purpose of this log file is to give more visibility to who has been holding the machine lock. + +The machine lock is used to serialize a number of activities of the agents on the machines started by Juju. + +The machine agent will acquire the lock when it needs to install software to create containers, and also in some other instances. + +The unit agents acquire the machine lock whenever they are going to execute hooks or run actions. Sometimes when there are multiple units on a given machine it is not always clear as to why something isn't happening as soon as you'd normally expect. This log file is to help give you insight into the actions of the agents. 
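+
+If you want to look at this log yourself, it lives alongside the other agent logs on each machine. A minimal sketch (the path below is the usual location on Juju-managed machines, and machine `0` is only an example):
+
+```text
+juju ssh 0 'tail -n 20 /var/log/juju/machine-lock.log'
+```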
+ +This sample of output was taken from machine 0 in a hadoop-kafka deployment: + +```text +2018-08-01 23:08:28 === agent unit-namenode-0 started === +2018-08-01 23:08:30 unit-namenode-0: meterstatus (meter-status-changed), waited 0s, held 0s +2018-08-01 23:15:50 unit-namenode-0: uniter (run install hook), waited 0s, held 7m20s +2018-08-01 23:08:29 === agent unit-resourcemanager-0 started === +2018-08-01 23:16:12 unit-resourcemanager-0: uniter (run install hook), waited 7m19s, held 22s +2018-08-01 23:16:14 unit-namenode-0: uniter (run leader-elected hook), waited 22s, held 2s +2018-08-01 23:16:16 unit-resourcemanager-0: uniter (run leader-elected hook), waited 2s, held 2s +2018-08-01 23:16:17 unit-namenode-0: uniter (run config-changed hook), waited 2s, held 2s +2018-08-01 23:16:19 unit-resourcemanager-0: uniter (run config-changed hook), waited 2s, held 2s +2018-08-01 23:16:21 unit-namenode-0: uniter (run start hook), waited 2s, held 2s +2018-08-01 23:16:23 unit-resourcemanager-0: uniter (run start hook), waited 2s, held 2s +2018-08-01 23:16:26 unit-namenode-0: uniter (run relation-joined (2; slave/0) hook), waited 2s, held 3s +2018-08-01 23:16:28 unit-resourcemanager-0: uniter (run relation-joined (1; namenode/0) hook), waited 3s, held 2s +2018-08-01 23:16:22 === agent unit-rsyslog-forwarder-ha-7 started === +2018-08-01 23:16:38 unit-rsyslog-forwarder-ha-7: uniter (run install hook), waited 4s, held 10s +2018-08-01 23:16:22 === agent unit-ganglia-node-7 started === +2018-08-01 23:16:43 unit-ganglia-node-7: uniter (run install hook), waited 13s, held 5s +2018-08-01 23:16:24 === agent unit-ganglia-node-8 started === +2018-08-01 23:16:45 unit-ganglia-node-8: uniter (run install hook), waited 17s, held 2s +2018-08-01 23:16:47 unit-resourcemanager-0: uniter (run relation-changed (1; namenode/0) hook), waited 17s, held 2s +2018-08-01 23:16:50 unit-namenode-0: uniter (run relation-joined (1; resourcemanager/0) hook), waited 21s, held 3s +2018-08-01 23:16:52 unit-resourcemanager-0: uniter (run relation-joined (3; slave/0) hook), waited 4s, held 2s +2018-08-01 23:16:24 === agent unit-rsyslog-forwarder-ha-8 started === +2018-08-01 23:16:54 unit-rsyslog-forwarder-ha-8: uniter (run install hook), waited 27s, held 2s +2018-08-01 23:16:54 unit-rsyslog-forwarder-ha-7: uniter (run leader-settings-changed hook), waited 17s, held 0s +2018-08-01 23:16:55 unit-ganglia-node-7: uniter (run leader-settings-changed hook), waited 12s, held 0s +2018-08-01 23:16:55 unit-ganglia-node-8: uniter (run leader-settings-changed hook), waited 10s, held 0s +2018-08-01 23:18:20 unit-resourcemanager-0: uniter (run relation-changed (1; namenode/0) hook), waited 3s, held 1m25s +2018-08-01 23:18:23 unit-namenode-0: uniter (run relation-changed (1; resourcemanager/0) hook), waited 1m30s, held 3s +2018-08-01 23:18:25 unit-ganglia-node-7: uniter (run config-changed hook), waited 1m29s, held 2s +2018-08-01 23:18:27 unit-ganglia-node-8: uniter (run config-changed hook), waited 1m30s, held 2s +2018-08-01 23:18:29 unit-ganglia-node-7: uniter (run start hook), waited 2s, held 1s +2018-08-01 23:18:29 unit-rsyslog-forwarder-ha-7: uniter (run config-changed hook), waited 1m35s, held 0s +2018-08-01 23:18:29 unit-rsyslog-forwarder-ha-8: uniter (run leader-settings-changed hook), waited 1m35s, held 0s +2018-08-01 23:18:30 unit-rsyslog-forwarder-ha-8: uniter (run config-changed hook), waited 0s, held 0s +2018-08-01 23:18:32 unit-resourcemanager-0: uniter (run relation-changed (3; slave/0) hook), waited 10s, held 2s +2018-08-01 23:18:34 
unit-ganglia-node-8: uniter (run start hook), waited 5s, held 1s +2018-08-01 23:18:38 unit-namenode-0: uniter (run relation-joined (4; plugin/0) hook), waited 10s, held 4s +2018-08-01 23:18:39 unit-ganglia-node-7: uniter (run relation-joined (11; namenode/0) hook), waited 9s, held 1s +2018-08-01 23:18:39 unit-rsyslog-forwarder-ha-7: uniter (run start hook), waited 10s, held 0s +2018-08-01 23:18:39 unit-rsyslog-forwarder-ha-8: uniter (run start hook), waited 10s, held 0s +2018-08-01 23:18:43 unit-resourcemanager-0: uniter (run relation-joined (5; plugin/0) hook), waited 8s, held 3s +2018-08-01 23:18:45 unit-ganglia-node-8: uniter (run relation-joined (12; resourcemanager/0) hook), waited 10s, held 2s +2018-08-01 23:18:49 unit-namenode-0: uniter (run relation-changed (2; slave/0) hook), waited 8s, held 4s +2018-08-01 23:18:49 unit-ganglia-node-7: uniter (run relation-changed (11; namenode/0) hook), waited 10s, held 0s +2018-08-01 23:18:49 unit-rsyslog-forwarder-ha-7: uniter (run relation-joined (22; rsyslog/0) hook), waited 10s, held 0s +2018-08-01 23:18:50 unit-rsyslog-forwarder-ha-8: uniter (run relation-joined (22; rsyslog/0) hook), waited 10s, held 0s +2018-08-01 23:18:50 unit-ganglia-node-8: uniter (run relation-changed (12; resourcemanager/0) hook), waited 5s, held 0s +2018-08-01 23:18:50 unit-rsyslog-forwarder-ha-8: uniter (run relation-changed (22; rsyslog/0) hook), waited 0s, held 0s +2018-08-01 23:18:52 unit-ganglia-node-8: uniter (run relation-joined (16; ganglia/0) hook), waited 0s, held 1s +2018-08-01 23:18:53 unit-ganglia-node-7: uniter (run relation-joined (16; ganglia/0) hook), waited 3s, held 1s +2018-08-01 23:18:57 unit-resourcemanager-0: uniter (run relation-joined (3; slave/1) hook), waited 10s, held 4s +2018-08-01 23:18:57 unit-rsyslog-forwarder-ha-7: uniter (run relation-changed (22; rsyslog/0) hook), waited 7s, held 0s +2018-08-01 23:19:00 unit-namenode-0: uniter (run relation-changed (4; plugin/0) hook), waited 8s, held 4s +2018-08-01 23:19:04 unit-resourcemanager-0: uniter (run relation-changed (3; slave/1) hook), waited 4s, held 3s +2018-08-01 23:19:04 unit-rsyslog-forwarder-ha-7: uniter (run relation-joined (17; namenode/0) hook), waited 7s, held 0s +2018-08-01 23:19:04 unit-rsyslog-forwarder-ha-8: uniter (run relation-joined (18; resourcemanager/0) hook), waited 14s, held 0s +2018-08-01 23:19:09 unit-namenode-0: uniter (run relation-joined (4; plugin/1) hook), waited 4s, held 4s +2018-08-01 23:19:10 unit-ganglia-node-8: uniter (run relation-changed (16; ganglia/0) hook), waited 17s, held 1s +2018-08-01 23:19:10 unit-rsyslog-forwarder-ha-8: uniter (run relation-changed (18; resourcemanager/0) hook), waited 6s, held 0s +2018-08-01 23:19:12 unit-ganglia-node-7: uniter (run relation-changed (16; ganglia/0) hook), waited 18s, held 1s +2018-08-01 23:19:12 unit-rsyslog-forwarder-ha-7: uniter (run relation-changed (17; namenode/0) hook), waited 8s, held 0s +2018-08-01 23:19:16 unit-resourcemanager-0: uniter (run relation-joined (3; slave/2) hook), waited 9s, held 4s +2018-08-01 23:19:21 unit-namenode-0: uniter (run relation-joined (2; slave/1) hook), waited 8s, held 4s +2018-08-01 23:19:25 unit-resourcemanager-0: uniter (run relation-changed (3; slave/2) hook), waited 5s, held 4s +2018-08-01 23:19:29 unit-namenode-0: uniter (run relation-changed (2; slave/1) hook), waited 4s, held 4s +2018-08-01 23:19:33 unit-resourcemanager-0: uniter (run relation-changed (5; plugin/0) hook), waited 5s, held 4s +2018-08-01 23:19:38 unit-namenode-0: uniter (run relation-joined (2; 
slave/2) hook), waited 4s, held 5s +2018-08-01 23:19:42 unit-resourcemanager-0: uniter (run relation-changed (1; namenode/0) hook), waited 5s, held 4s +2018-08-01 23:19:47 unit-namenode-0: uniter (run relation-changed (2; slave/2) hook), waited 4s, held 5s +2018-08-01 23:19:51 unit-resourcemanager-0: uniter (run relation-joined (5; plugin/1) hook), waited 5s, held 4s +2018-08-01 23:19:56 unit-namenode-0: uniter (run relation-changed (4; plugin/1) hook), waited 5s, held 5s +2018-08-01 23:20:01 unit-resourcemanager-0: uniter (run relation-changed (1; namenode/0) hook), waited 5s, held 4s +2018-08-01 23:20:05 unit-resourcemanager-0: uniter (run relation-changed (5; plugin/1) hook), waited 0s, held 4s +2018-08-01 23:20:05 unit-namenode-0: meterstatus (meter-status-changed), waited 4s, held 0s +2018-08-01 23:20:05 unit-resourcemanager-0: meterstatus (meter-status-changed), waited 4s, held 0s +2018-08-01 23:20:32 unit-rsyslog-forwarder-ha-7: uniter (run update-status hook), waited 0s, held 0s +2018-08-01 23:20:52 unit-ganglia-node-8: uniter (run update-status hook), waited 0s, held 2s +2018-08-01 23:20:52 unit-rsyslog-forwarder-ha-8: uniter (run update-status hook), waited 2s, held 0s +2018-08-01 23:21:19 unit-ganglia-node-7: uniter (run update-status hook), waited 0s, held 2s +2018-08-01 23:22:28 unit-namenode-0: uniter (run update-status hook), waited 0s, held 8s +2018-08-01 23:22:32 unit-resourcemanager-0: uniter (run update-status hook), waited 7s, held 4s +``` + +There are a number of points of interest here to point out. + +The times that the agents are started is recorded and written out to the file, but they are not actually written out to the log file until the agent writes its first entry. Each of the entries is written just before the release of the machine lock. You can see below here that the **`unit-resourcemanager-0`** agent started just one second after the **`unit-namenode-0`** agent, but the output of the line doesn't appear in time order. This is due to there being multiple processes wanting to write to a single file, so the file is only written to while the machine lock is held, and we don't want to stop an agent starting by waiting to acquire the lock just to write out that the agent has started. + +```text +2018-08-01 23:08:28 === agent unit-namenode-0 started === +2018-08-01 23:08:30 unit-namenode-0: meterstatus (meter-status-changed), waited 0s, held 0s +2018-08-01 23:15:50 unit-namenode-0: uniter (run install hook), waited 0s, held 7m20s +2018-08-01 23:08:29 === agent unit-resourcemanager-0 started === +2018-08-01 23:16:12 unit-resourcemanager-0: uniter (run install hook), waited 7m19s, held 22s +``` + +Additionally normal line includes: +* a timestamp in UTC +* the agent name +* the worker inside that agent, and what it is acquiring the hook for +* how long the worker waited for the lock to be acquired +* how long the lock was held for + +------------------------- + +pedroleaoc | 2022-04-07 09:25:44 UTC | #2 + + + +------------------------- + +pedroleaoc | 2022-10-14 11:32:41 UTC | #3 + + + +------------------------- + diff --git a/tmp/t/11285.md b/tmp/t/11285.md new file mode 100644 index 000000000..9aa71894b --- /dev/null +++ b/tmp/t/11285.md @@ -0,0 +1,40 @@ +tmihoc | 2024-04-26 13:56:06 UTC | #1 + +In Juju, **deploying** refers to the process where Juju uses a [charm](/t/5457) (from Charmhub or a local path) to install an [application](/t/5471) on a resource from a [cloud](/t/5454). 
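+
+For example, both of the following commands trigger that process (a minimal sketch; the charm name and the local file path are only illustrative):
+
+```text
+# Deploy a charm from Charmhub:
+juju deploy postgresql
+
+# Deploy a charm from a local path:
+juju deploy ./my-charm.charm
+```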
+ + +## Deploying on a Kubernetes cloud + +### The process + +![JujuOnKubernetesDeployProcess|690x311](upload://4MzLfQku8H9gB3GTlGww4Jqud3d.png) + +### The result + +Note: This diagram assumes a typical scenario with a single workload container (depending on the charm, there may be more and there may be none). + +![JujuOnKubernetesDeployResult|690x704](upload://6oWYcP95EAD5gW99GhN96cVXapH.jpeg) + + +## Deploying on a machine cloud + +### The process + +![JujuOnMachinesDeployProcess|689x700](upload://bgOc4jpj7YoQ6Wfp4zfaQM8QELk.png) + +### The result + +![JujuOnMachinesDeployResult|622x1000](upload://qPaCb6Xlu0zZoGvlaXukncOVd92.jpeg) + +
*Deploying an application on a machine cloud: The result. This diagram assumes a typical scenario where the unit is deployed on a new machine of its own. (Note: The machine, model, unit, and controller agent are actually all part of the same [`jujud`](/t/7319) process and refer in fact to trees of workers with machine, model, unit and, respectively, controller responsibility.)* + +[note type=information] +**If you're curious about deployments to a *system container* on a VM:** + +On most machine clouds, Juju makes it possible to deploy to a system container *inside* the machine rather to the machine directly. The result doesn't change much: In terms of the diagram above, the only difference would be another box in between the "Regular Model Machine" and its contents and another machine agent for this box, as Juju treats system containers as regular machines. + +> See more: [Machine > Machines and system (LXD) containers](/t/5459#heading--machines-and-system-lxd-containers) +[/note] + +------------------------- + diff --git a/tmp/t/11290.md b/tmp/t/11290.md new file mode 100644 index 000000000..cbdf20528 --- /dev/null +++ b/tmp/t/11290.md @@ -0,0 +1,35 @@ +system | 2024-09-16 15:55:58 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + + +## Summary +Grant access to a secret. + +## Usage +```juju grant-secret [options] | [,...]``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `-m`, `--model` | | Model to operate in. Accepts [<controller name>:]<model name>|<model UUID> | + +## Examples + + juju grant-secret my-secret ubuntu-k8s + juju grant-secret 9m4e2mr0ui3e8a215n4g ubuntu-k8s,prometheus-k8s + + +## Details + +Grant applications access to view the value of a specified secret. + + +--- + +------------------------- + diff --git a/tmp/t/11291.md b/tmp/t/11291.md new file mode 100644 index 000000000..3c498a018 --- /dev/null +++ b/tmp/t/11291.md @@ -0,0 +1,35 @@ +system | 2024-09-16 15:55:03 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + + +## Summary +Revoke access to a secret. + +## Usage +```juju revoke-secret [options] | [,...]``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-B`, `--no-browser-login` | false | Do not use web browser for authentication | +| `-m`, `--model` | | Model to operate in. Accepts [<controller name>:]<model name>|<model UUID> | + +## Examples + + juju revoke-secret my-secret ubuntu-k8s + juju revoke-secret 9m4e2mr0ui3e8a215n4g ubuntu-k8s,prometheus-k8s + + +## Details + +Revoke applications' access to view the value of a specified secret. + + +--- + +------------------------- + diff --git a/tmp/t/11298.md b/tmp/t/11298.md new file mode 100644 index 000000000..841b04d56 --- /dev/null +++ b/tmp/t/11298.md @@ -0,0 +1,6 @@ +tmihoc | 2024-10-29 13:35:50 UTC | #1 + +In a Juju [deployment](/t/11285), a **worker** is a process that a Juju [agent](/t/5466) runs in the background on a Juju entity ([controller](/t/5455), [model](/t/5456), [machine](/5459/), [unit](/t/5460), etc.) and which has responsibilities related to that entity. 
+ +------------------------- + diff --git a/tmp/t/11312.md b/tmp/t/11312.md new file mode 100644 index 000000000..f0c2af3e7 --- /dev/null +++ b/tmp/t/11312.md @@ -0,0 +1,138 @@ +tmihoc | 2024-04-05 12:00:39 UTC | #1 + +> See also: [SDK | Resource](https://juju.is/docs/sdk/about-resources) +> +> See also: [How to manage resources](/t/11313) + +In Juju, a **charm resource** is additional content that a [charm](/t/5457) can make use of, or may require, to run. + +Resources are used where a charm author needs to include large blobs (perhaps a database, media file, or otherwise) that may not need to be updated with the same cadence as the charm or workload itself. By keeping resources separate, they can control the lifecycle of these elements more carefully, and in some situations avoid the need for repeatedly downloading large files from Charmhub during routine upgrades/maintenance. + + +A resource can have one of two basic types -- `file` and `oci-image`. These can be specified as follows: + +1. If the resource is type 'file', you can specify it by providing + + a. the resource revision number or + + b. a path to a local file. + +2. If the resource is type 'oci-image', you can specify it by providing + + a. the resource revision number, + + b. a path to a local file = private OCI image, + + c. a link to a public OCI image. + +If you choose to provide a path to a local file, the file can be a JSON or a YAML file with an image reference and optionally a username and a password (i.e., an OCI image resource). + +---- +[details=Expand to view an example JSON file] +```txt +{ + "ImageName": "my.private.repo.com/a/b:latest", + "username": "harry", + "password": "supersecretpassword" +} +``` +[/details] +-------- + +[details=Expand to view an example YAML file] +``` +registrypath: my.private.repo.com/a/b:latest +username: harry +password: supersecretpassword +[/details] +------------- + +------------------------- + +samuel_allan | 2024-02-15 06:23:18 UTC | #2 + +[quote="tmihoc, post:1, topic:11312"] +--- +Expand to view an example JSON file + +``` +{ + "ImageName": "my.private.repo.com/a/b:latest", + "username": "harry", + "password": "supersecretpassword" +} +``` + +--- +Expand to view an example YAML file + +``` +registrypath: my.private.repo.com/a/b:latest +username: harry +password: supersecretpassword +``` + +--- +[/quote] + +What is the current spec for the image? `ImageName` or `registrypath`? + +------------------------- + +tmihoc | 2024-02-15 06:45:21 UTC | #3 + +@samuel_allan I've asked @wallyworld and he'll investigate. One of us will be in touch. + +------------------------- + +wallyworld | 2024-02-16 08:00:21 UTC | #4 + +To confirm the syntax for the different options... + +The simplest is when using a public oci image - just specify the image path + +`juju deploy some-charm --resource myimge=docker.io/myimage:latest` + +docker.io is the default repo, so you can also just do + +`juju deploy some-charm --resource myimge=myimage:latest` + +If you need to provide a credential because it's a private image, that's when you would want to supply a YAML or JSON file with the necessary details. + +The deploy command becomes + +`juju deploy some-charm --resource myimage=/path/to/file` + +Example YAML file + +``` +registrypath: docker.io/myorg/myimage +username: docker-registry +password: hunter2 +``` + +Note: if you include credential information, you'll need to specify the full image path including domain. 
+ +Unfortunately, there seems to be a typo in the JSON tag, so if you're using a JSON file the image path will need to use the key `ImageName`. + +There's also other more advanced auth schemes that can be used, eg token auth, you can specify values for +- `email` +- `identitytoken` +- `registrytoken` + +Other registry / auth specific keys include (see your registry's documentation for more detail on what's needed): +- `serveraddress` +- `region` +- `repository` + +eg +``` + "serveraddress": "66668888.dkr.ecr.eu-west-1.amazonaws.com", + "username": "aws_access_key_id", + "repository": "66668888.dkr.ecr.eu-west-1.amazonaws.com", + "password": "aws_secret_access_key", + "region": "ap-southeast-2" +``` + +------------------------- + diff --git a/tmp/t/11313.md b/tmp/t/11313.md new file mode 100644 index 000000000..21cfc616a --- /dev/null +++ b/tmp/t/11313.md @@ -0,0 +1,208 @@ +tmihoc | 2024-06-26 16:55:09 UTC | #1 + +> See also: [Resource (charm)](/t/11312) + +When you deploy / update an application from a charm, that automatically deploys / updates any charm resources, using the defaults specified by the charm author. However, you can also specify resources manually (e.g., to try a resource released only to `edge` or to specify a non-Charmhub resource). This document shows you how. + +**Contents:** + +- [Find out the resources available for a charm](#heading--find-out-the-resources-available-for-a-charm) +- [Specify the resources to be deployed with a charm](#heading--specify-the-resources-to-be-deployed-with-a-charm) +- [View the resources deployed with a charm](#heading--view-the-resources-deployed-with-a-charm) + + +

+<a href="#heading--find-out-the-resources-available-for-a-charm"><h2 id="heading--find-out-the-resources-available-for-a-charm">Find out the resources available for a charm</h2></a>

+ +[tabs] +[tab version="juju"] + +To find out what resources are available for a charm on Charmhub, run the `charm-resources` command followed by the name of the charm: + +```text +juju charm-resources +``` +--- +[details=Expand to view a sample output for the 'postgresql-k8s' charm] +```text +$ juju charm-resources postgresql-k8s +Resource Revision +postgresql-image 68 +``` +[/details] +--- + +The command has flags that allow you to specify a charm channel, an output format, an output file, etc. + +> See more: [`juju charm-resources`](/t/10099) + +Alternatively, you can also consider a resource available somewhere else online (e.g., a link to an OCI image) or in your local filesystem. + +[/tab] + +[tab version="terraform juju"] + +The `terraform juju` client does not support this. Please use the `juju` client. + +[/tab] + +[tab version="python libjuju"] +To find out what resources are available for a charm on Charmhub, on a connected Model object, select the `charmhub` object associated with the model, and use the `list_resources()` method, passing the name of the charm as an argument. For example: + +```python +await model.charmhub.list_resources('postgresql-k8s') +``` + +> See more: [`charmhub (property)`](https://pythonlibjuju.readthedocs.io/en/latest/api/juju.model.html#juju.model.Model.charmhub), [Model (module)](https://pythonlibjuju.readthedocs.io/en/latest/narrative/model.html) + +[/tab] +[/tabs] + +
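+
+For instance, to combine some of the flags mentioned in the `juju` tab above (a sketch; the channel and the output format are only illustrative, and `juju charm-resources --help` lists the full set of flags):
+
+```text
+juju charm-resources postgresql-k8s --channel 14/stable --format json
+```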

+<a href="#heading--specify-the-resources-to-be-deployed-with-a-charm"><h2 id="heading--specify-the-resources-to-be-deployed-with-a-charm">Specify the resources to be deployed with a charm</h2></a>

+
+[tabs]
+[tab version="juju"]
+
+
+How you specify a resource to deploy with a charm depends on whether you want to do this during deployment or, as an update, post-deployment.
+
+
+- To specify a resource during deployment, run the `deploy` command with the `--resources` flag followed by a key-value pair consisting of the resource name and the resource:
+
+```text
+juju deploy <charm name> --resources <resource name>=<resource>
+```
+
+> See more: [`juju deploy`](/t/10074)
+
+- To specify a resource after deployment, run the `attach-resource` command followed by the name of the deployed charm (= [application](/t/5471)) and a key-value pair consisting of the resource name and the resource revision number or the local path to the resource file:
+
+```text
+juju attach-resource <application name> <resource name>=<resource>
+```
+
+Regardless of the case, the resource name is always as defined by the charm author (see the Resources tab of the charm homepage on Charmhub or the `resources` map in the `metadata.yaml` file of the charm) and the resource is the resource revision number, a path to a local file, or a link to a public OCI image (only for OCI-image type resources).
+
+----
+[details=Expand to view an example where the resource is specified post-deployment by revision number]
+```text
+juju attach-resource juju-qa-test foo-file=3
+```
+[/details]
+----
+
+- To update a resource's revision, run the `refresh` command with the `--resource` flag followed by a key=value pair denoting the name of the resource and its revision number or the local path to the resource file (see the sketch after these tabs).
+
+> See more: [`juju deploy ... --resources`](/t/10074), [`juju attach-resource`](/t/10124), [`juju refresh ... --resources`](/t/10189)
+
+[/tab]
+
+[tab version="terraform juju"]
+
+To specify the resource(s) to be deployed with your charm, in your Terraform plan, in the definition of the resource for the application, specify a `resources` block with key-value pairs listing resource names and their revision number. For example:
+
+```terraform
+resource "juju_application" "application_one" {
+  name  = "my-application"
+  model = juju_model.testmodel.name
+
+  charm {
+    name    = "juju-qa-test"
+    channel = "2.0/edge"
+  }
+  resources = {
+    "foo-file" = 4
+  }
+}
+```
+
+
+[note type=information]
+About `charm > revision` and `resources` and their counterparts in the `juju` client:
+- If you specify only `charm > revision`: This is equivalent to `juju deploy --revision` or `juju refresh --revision` -- that is, the resource revision is automatically the latest.
+- If you specify only `resources`: This is equivalent to `juju attach-resource` -- that is, the resource revision is whatever you've specified.
+
+**Note:** While `juju refresh --resource` allows you to update a resource even if no update is available for the charm, this is not possible with `terraform juju`.
+[/note]
+
+> See more: [`juju_application > resources`](https://registry.terraform.io/providers/juju/juju/latest/docs/resources/application#resources)
+
+[/tab]
+
+[tab version="python libjuju"]
+To specify a resource during deployment, on a connected Model object, use the `deploy` method, passing the resources as a parameter. For example:
+
+```python
+resources = {"file-res": "test.file"}
+app = await model.deploy(charm_path, resources=resources)
+```
+
+To update a resource after deployment by uploading a file from local disk, on an Application object, use the `attach_resource()` method, passing the resource name, file name, and the file object as parameters.
+ +```python +with open(str(charm_path / 'test.file')) as f: + app.attach_resource('file-res', 'test.file', f) +``` + + +> See more: [`deploy()`](https://pythonlibjuju.readthedocs.io/en/latest/api/juju.model.html#juju.model.Model.deploy), [`attach_resource()`](https://pythonlibjuju.readthedocs.io/en/latest/api/juju.application.html#juju.application.Application.attach_resource), [Model (module)](https://pythonlibjuju.readthedocs.io/en/latest/narrative/model.html) + +[/tab] +[/tabs] + + +
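+
+As an illustration of the `juju refresh --resource` form described in the `juju` tab above, here is a minimal sketch reusing the `juju-qa-test` charm and its `foo-file` resource from the earlier examples (`./foo.txt` is a hypothetical local file):
+
+```text
+# Switch the deployed application to revision 4 of the resource:
+juju refresh juju-qa-test --resource foo-file=4
+
+# Or upload a local file as the new resource content:
+juju refresh juju-qa-test --resource foo-file=./foo.txt
+```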

+<a href="#heading--view-the-resources-deployed-with-a-charm"><h2 id="heading--view-the-resources-deployed-with-a-charm">View the resources deployed with a charm</h2></a>

+ +[tabs] +[tab version="juju"] + + +To view the resources that have been deployed with a charm, run the `resources` command followed by the name of the corresponding application / ID of one of the application's units. + +```text +juju resources / +``` + +> See more: [`juju resources`](/t/10218) + +[/tab] + +[tab version="terraform juju"] +The `terraform juju` client does not support this. Please use the `juju` client. +[/tab] + +[tab version="python libjuju"] +To view the resources that have been deployed with a charm, on an Application object, use the `get_resources()` method. For example: + +```python +await my_app.get_resources() +``` + + +> See more: [`get_resources()`](https://pythonlibjuju.readthedocs.io/en/latest/api/juju.application.html#juju.application.Application.get_resources), [Model (module)](https://pythonlibjuju.readthedocs.io/en/latest/narrative/model.html) +[/tab] +[/tabs] + +
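+
+For a concrete illustration of the `juju resources` command shown in the `juju` tab above (a sketch; the application and unit names are only examples):
+
+```text
+# Resources deployed with an application:
+juju resources postgresql-k8s
+
+# Resources deployed with a specific unit:
+juju resources postgresql-k8s/0
+```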
+ +> **Contributors:** @cderici, @hmlanigan, @tmihoc + +------------------------- + +guimalufb | 2023-08-21 12:23:24 UTC | #2 + +[quote="tmihoc, post:1, topic:11313"] +`juju attach-resource =` +[/quote] + +`charm name` placeholder is missing, it should be + +`juju attach-resource =` + +------------------------- + +tmihoc | 2023-08-21 12:49:20 UTC | #3 + +Fixed, thanks! + +------------------------- + diff --git a/tmp/t/11351.md b/tmp/t/11351.md new file mode 100644 index 000000000..81a94e478 --- /dev/null +++ b/tmp/t/11351.md @@ -0,0 +1,563 @@ +tmihoc | 2024-07-10 09:21:35 UTC | #1 + +This document shows various ways in which you may interact with a charm or a bundle. + +**Contents:** + +- [Query Charmhub for available charms / bundles](#heading--query-charmhub-for-available-charms---bundles) +- [View details about a Charmhub charm / bundle](#heading--view-details-about-a-charmhub-charm---bundle) +- [Find out the resources available for a charm](#heading--find-out-the-resources-available-for-a-charm) +- [Download a Charmhub charm](#heading--download-a-charmhub-charm) +- [Deploy a charm / bundle](#heading--deploy-a-charm---bundle) +- [Update a charm](#heading--update-a-charm) +- [Remove a charm / bundle](#heading--remove-a-charm---bundle) + +

+<a href="#heading--query-charmhub-for-available-charms---bundles"><h2 id="heading--query-charmhub-for-available-charms---bundles">Query Charmhub for available charms / bundles</h2></a>

+ +[tabs] +[tab version="juju"] + +To query Charmhub for the charms / bundles that deliver a given application, run the `find` command followed by a suitable keyword. For example, to find out the charms and/or bundles that deliver WordPress: + +```text +juju find wordpress +``` + + +> See more: [`juju find`](/t/10187) + +[/tab] + +[tab version="terraform juju"] + +[/tab] + +[tab version="python libjuju"] + +To query Charmhub for the charms / bundles on python-libjuju, you can use the `find` method on the `CharmHub` object that's built-in on each `Model` object: + +```python +await model.charmhub.find('wordpress') +``` + +[/tab] +[/tabs] + + +

+<a href="#heading--view-details-about-a-charmhub-charm---bundle"><h2 id="heading--view-details-about-a-charmhub-charm---bundle">View details about a Charmhub charm / bundle</h2></a>

+ + +[tabs] +[tab version="juju"] + +To view details about a particular Charmhub charm / bundle, run the `info` command followed by the name of the charm / bundle. For example: + +```text +juju info postgresql +``` +> See more: [`juju info`](/t/10103) + +[note type=caution] +For comprehensive information about the charm, including charm documentation, it is always best to see the charm's page on Charmhub. +[/note] + +[/tab] + +[tab version="terraform juju"] + +[/tab] + +[tab version="python libjuju"] + +To view details about a particular Charmhub charm / bundle on python-libjuju, you can use the `info` method on the `CharmHub` object that's built-in on each `Model` object: + +```python +await model.charmhub.info('wordpress') +``` + +[/tab] +[/tabs] + +

+<a href="#heading--find-out-the-resources-available-for-a-charm"><h2 id="heading--find-out-the-resources-available-for-a-charm">Find out the resources available for a charm</h2></a>

+ +> See: [How to manage charm resources > Find out the resources available for a charm](/t/11313#heading--find-out-the-resources-available-for-a-charm) + + +

+<a href="#heading--download-a-charmhub-charm"><h2 id="heading--download-a-charmhub-charm">Download a Charmhub charm</h2></a>

+ + +[tabs] +[tab version="juju"] + +[note type=information] +This is relevant for air-gapped deployments. +[/note] + +To download a Charmhub charm, run the `download` command followed by the name of the charm. For example: + +```text +juju download postgresql +``` + +> See more: [`juju download`](/t/10134) + +[/tab] + +[tab version="terraform juju"] + +[/tab] + +[tab version="python libjuju"] + +Python-libjuju doesn't support this, please use the `juju` client to download a Charmhub charm. + +[/tab] +[/tabs] + + +
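+
+In an air-gapped workflow you would typically download the charm on a connected machine and then deploy the resulting local file elsewhere. A minimal sketch (the channel and file name are only illustrative; check `juju download --help` for the exact flags in your Juju version):
+
+```text
+juju download postgresql --channel 14/stable --filepath ./postgresql.charm
+juju deploy ./postgresql.charm
+```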

+<a href="#heading--deploy-a-charm---bundle"><h2 id="heading--deploy-a-charm---bundle">Deploy a charm / bundle</h2></a>

+ +[tabs] +[tab version="juju"] + +To deploy a charm / bundle from [Charmhub](https://charmhub.io/) / your local filesystem, use the `deploy` command followed by the name of the charm / bundle / the path to the local `.charm` / `.yaml` file: + + +``` text +juju deploy | +``` + +----------------- + +[details=Example: Deploy a Charmhub charm] +```text +juju deploy mysql +``` +[/details] + +----------- + +[details=Example: Deploy a Charmhub bundle] + +[note type=positive] +To get a summary of the deployment steps (without actually deploying), add the `--dry-run` flag. Note: This flag is only supported for bundles, not charms. +[/note] + +```text +juju deploy kubeflow +``` +[/details] + +----------- +[details=Example: Deploy a local charm] + +```text +juju deploy ./mini_ubuntu-20.04-amd64.charm +``` +[/details] + +------- + +[details=Example: Deploy a local charm with a resource] + +If your charm's `metadata.yaml` specifies a [resource](/t/11312), you must also explicitly pass the resource. For example: + +```text +juju deploy ./demo-api-charm_ubuntu-22.04-amd64.charm --resource \ + demo-server-image=ghcr.io/beliaev-maksim/api_demo_server:0.0.9 +``` + +[/details] + +---- + +[details=Example: Deploy a local bundle] +```text +juju deploy ./mediawiki-model-bundle.yaml +``` +[/details] + +----- +[details=Example: Deploy a local bundle as an overlay] + +To deploy a local bundle as an overlay, run the `deploy` command with the `--overlay` flag followed by the path to the overlay. To add an overlay to a model later, export the contents of the model to a bundle and deploy that bundle with the overlay. + +> See more: [How to compare and export the contents of a model to a bundle](/t/1155#heading--compare-and-export-the-contents-of-a-model-to-a-bundle) + +[details=Generic example] + +Suppose you want to deploy `mediawiki` and also apply an overlay bundle called `custom-wikimedia.yaml`. Run the `deploy` command followed by `mediawiki` and the `--overlay` flag followed by the local path to your overlay bundle `yaml`: + +``` text +juju deploy mediawiki \ + --overlay ./custom-mediawiki.yaml +``` + +Suppose now that have a model where you've already deployed `mediawiki`. You've also made some other changes in your model. Finally, you'd like to apply an overlay bundle `custom-mediawiki.yaml`. In that case: + +1. Export the contents of your model to a bundle (below, `mediawiki-bundle.yaml`): + +```text +juju export-bundle --filename mediawiki-model-bundle.yaml +``` + +2. Deploy the new bundle and during deploy apply the overlay: + +```text +juju deploy ./mediawiki-model-bundle.yaml \ + --overlay ./custom-mediawiki.yaml +``` + +[/details] + +[details=OpenStack example] + +Suppose you want to deploy an OpenStack cloud. This is done by deploying a base bundle defining the cloud with an overlay bundle, to make the bundle deployable within the local environment, and -- optionally -- any other number of bundles, to override / add parameters in / to the existing bundle, e.g., storage or constraints. Run the `deploy` command followed by the base bundle and then repeat the `--overlay` flag followed by the path to the overlay for as many overlays as you want. 
For example, below we deploy an OpenStack Yoga cloud running on Focal nodes (our base bundle), ensure it can run in a MAAS environment (the first, mandatory, overlay) and that it has Shared filesystem services (the second overlay):
+
+```text
+juju deploy ./bundle-focal-yoga.yaml \
+    --overlay ./overlay-focal-yoga-mymaas.yaml \
+    --overlay ./overlay-focal-yoga-mymaas-shared-filesystem.yaml
+```
+
+Suppose now that you have a model where you've already deployed all of the above. You've maybe also made some other changes in your model. And you'd like to add manual zone Swift services by applying another overlay.
+
+1. Export the contents of your model to a bundle (below, `exported-bundle-focal-yoga-2022-06-07.yaml`):
+
+```text
+juju export-bundle --filename exported-bundle-focal-yoga-2022-06-07.yaml
+```
+
+2. Deploy the new bundle and during deploy apply the overlay:
+
+```text
+juju deploy ./exported-bundle-focal-yoga-2022-06-07.yaml \
+    --overlay ./overlay-focal-yoga-mymaas-manual-swift.yaml
+```
+[/details]
+[/details]
+----
+[details=Example: Deploy a bundle to existing machines]
+
+To have a bundle use a model's existing machines, as opposed to creating new machines, the `--map-machines=existing` option is used. In addition, to specify particular machines for the mapping, comma-separated values of the form 'bundle-id=existing-id' can be passed where the bundle-id and the existing-id refer to top level machine IDs.
+
+For example, consider a bundle whose YAML file is configured with machines 1, 2, 3, and 4, and a model containing machines 1, 2, 3, 4, and 5. The following deployment would use existing machines 1 and 2 for bundle machines 1 and 2 but use existing machine 4 for bundle machine 3 and existing machine 5 for bundle machine 4:
+
+```text
+juju deploy some-bundle --map-machines=existing,3=4,4=5
+```
+[/details]
+----
+
+
+Depending on the cloud substrate that your controller is running on, the above command will allocate a machine (physical, virtual, LXD container) or a Kubernetes pod and then proceed to deploy the contents of the charm / bundle.
+
+[note type="positive"]
+Depending on your use case, you may alternatively opt to provision a set of machines in advance via the `juju add-machine` command.
+
+In this case, when running the above `juju deploy` command, Juju will detect that the model contains machines with no applications assigned to them and automatically deploy the application to one of those machines instead of spinning up a new machine.
+[/note]
+
+The command also allows you to add another argument to specify a custom name (alias) for your deployed application (charms only). You can also take advantage of the rich set of flags to specify a charm channel or revision, a machine base, a machine constraint (e.g., availability zone), the number of application units you want (clusterised), a space binding, a placement directive (e.g., to deploy to a LXD container), a specific storage instance, a specific machine, etc., and even to trust the application with the current credential -- in case the application requires access to the backing cloud in order to fulfil its purpose (e.g., storage-related tasks).
+
+[note type=information status="Troubleshooting"]
+
+When deploying, if Juju fails to provision a subset of machines for some reason (e.g. machine quota limits on the cloud provider) the command [`juju retry-provisioning`](/t/10209) can be used to retry the provisioning of specific machine numbers.
+ +[/note] + +---- + +[details=Expand to view examples of using a placement directive to deploy to specific targets] +> See also: [Placement directive](/t/6187) + +```text +# Deploy to a new lxd-type container on new machine: +juju deploy mariadb --to lxd + +# Deploy to a new container on existing machine 25: +juju deploy mongodb --to lxd:25 + +# Deploy to existing lxd-type container 3 on existing machine 24: +juju deploy nginx --to 24/lxd/3 + +# Deploy to zone us-east-1a on AWS: +juju deploy mysql --to zone=us-east-1a + +# Dploy to a specific machine on MAAS: +juju deploy mediawiki --to node1.maas + +# Deploy to a specific machine on LXD: +juju deploy mariadb --to node1.lxd + +For a Kubernetes-backed cloud, a Kubernetes node can be targeted based on matching labels. The label can be either built-in or one that is user-defined and added to the node. For example: + +# Deploy to a specific Kubernetes node (using either a built-in or a user-defined label): +juju deploy mariadb-k8s --to kubernetes.io/hostname=somehost + +``` + +[/details] + +----- + +> See more: [`juju deploy`](/t/10074) + +[/tab] + +[tab version="terraform juju"] + +[note type=information] +The Terraform Provider for Juju does not support deploying a local charm. +[/note] + +To deploy a Charmhub charm, in your Terraform plan add a `juju_application` resource, specifying the target model and the charm: + +```terraform +resource "juju_application" "this" { + model = juju_model.development.name + + charm { + name = "hello-kubecon" + } +} +``` + +> See more: [`juju_application` (resource)](https://registry.terraform.io/providers/juju/juju/latest/docs/resources/application#schema) + + +[/tab] + +[tab version="python libjuju"] + +To deploy a Charmhub charm / bundle using python-libjuju, you can use the `deploy` method on the `Model` object: + + +```python +m = model.Model() +await m.connect() + +# deploy a charm +await m.deploy('mysql') + +# deploy a bundle +await m.deploy('kubeflow') + +# deploy a local charm +await m.deploy('./mini_ubuntu-20.04-amd64.charm') + +# deploy a local charm with a resource +await m.deploy('./demo-api-charm_ubuntu-22.04-amd64.charm', resources={'demo-server-image=ghcr.io/beliaev-maksim/api_demo_server':'0.0.9'}) + +# deploy a local bundle +await m.deploy('./mediawiki-model-bundle.yaml') + +# deploy a bundle with an overlay +await m.deploy('mediawiki', overlays=['./custom-mediawiki.yaml']) + +# generic openstack example +await m.deploy('./bundle-focal-yoga.yaml', overlays=['./overlay-focal-yoga-mymaas.yaml', './overlay-focal-yoga-mymaas-shared-filesystem.yaml']) +``` + +> See more: [`Model.deploy()`](https://pythonlibjuju.readthedocs.io/en/latest/api/juju.model.html#juju.model.Model.deploy) + +[/tab] +[/tabs] + + + +

Update a charm

+ +[tabs] +[tab version="juju"] + +Updating a charm to the latest revision always involves the `refresh` command, but the exact way to use it differs a little bit depending on whether you are dealing with a Charmhub charm or rather a local charm. + + + + +- [Update a Charmhub charm](#heading--update-a-charmhub-charm) +- [Update a local charm](#heading--update-a-local-charm) + + + +

Update a Charmhub charm

+ +[note type=information] +Because of the way charm channels work, 'updating' doesn't have to mean 'upgrading' -- you can switch to any charm revision, no matter if it's newer or older. The instructions below reflect this. + +However, as newer versions typically contain improvements, Juju will notify you if a new version exists: Juju polls Charmhub once a day to check for updates and, if an update is found, the poll will cause `juju status` to indicate that a newer charm version is available. +[/note] + + +1. **If you don't know your current channel:** Run `status` and check the App > Channel column. + +1. **If you don't know which channel you want to update to / would like to find out all the available channels:** Run `info` followed by the charm name. + +1. Run `refresh` followed by the charm name and the desired new `channel`. + +---- + +[details=Expand to view an example featuring the machine charm for PostgreSQL] +```text +# Find out the current channel (see App > Channel): +$ juju status +Model Controller Cloud/Region Version SLA Timestamp +welcome-lxd lxd localhost/localhost 3.1.6 unsupported 14:58:37+01:00 + +App Version Status Scale Charm Channel Rev Exposed Message +postgresql waiting 0/1 postgresql 14/stable 351 no agent initialising + +Unit Workload Agent Machine Public address Ports Message +postgresql/0* waiting allocating 2 10.122.219.3 agent initialising + +Machine State Address Inst id Base AZ Message +2 started 10.122.219.3 juju-f25b73-2 ubuntu@22.04 Running + +# Find out all the available channels: +$ juju info postgresql +name: postgresql +publisher: Canonical Data Platform +summary: Charmed PostgreSQL VM operator +description: | + Charm to operate the PostgreSQL database on machines. +store-url: https://charmhub.io/postgresql +charm-id: ChgcZB3RhaDOnhkAv9cgRg52LhjBbDt8 +supports: ubuntu@22.04 +tags: databases +subordinate: false +relations: + provides: + cos-agent: cos_agent + database: postgresql_client + db: pgsql + db-admin: pgsql + requires: + certificates: tls-certificates + s3-parameters: s3 +channels: | + 14/stable: 351 2024-01-03 (351) 29MB amd64 ubuntu@22.04 + 14/candidate: 363 2024-01-31 (363) 33MB amd64 ubuntu@22.04 + 14/beta: 363 2024-01-31 (363) 33MB amd64 ubuntu@22.04 + 14/edge: 365 2024-02-02 (365) 33MB amd64 ubuntu@22.04 + latest/stable: initial-reactive-278-ge3f064a 2023-11-09 (345) 7MB amd64 ubuntu@16.04, ubuntu@18.04, ubuntu@20.04, ubuntu@22.04 + latest/candidate: ↑ + latest/beta: ↑ + latest/edge: ↑ + +# Update the charm to revision `365` by switching to the `14/edge` channel: +$ juju refresh postgresql --channel 14/edge +Added charm-hub charm "postgresql", revision 365 in channel 14/edge, to the model +no change to endpoints in space "alpha": certificates, cos-agent, database, database-peers, db, db-admin, restart, s3-parameters, upgrade + +# Verify that the charm has been updated (see App > Channel): + +$ juju status +Model Controller Cloud/Region Version SLA Timestamp +welcome-lxd lxd localhost/localhost 3.1.6 unsupported 15:05:16+01:00 + +App Version Status Scale Charm Channel Rev Exposed Message +postgresql 14.9 active 1 postgresql 14/edge 365 no + +Unit Workload Agent Machine Public address Ports Message +postgresql/0* active executing 2 10.122.219.3 5432/tcp (config-changed) + +Machine State Address Inst id Base AZ Message +2 started 10.122.219.3 juju-f25b73-2 ubuntu@22.04 Running + +``` + +[/details] + +---- + +> See more: [`juju status`](/t/10173), [`juju info`](/t/10103), [`juju refresh`](/t/10189) + + +

Update a local charm

+ +To upgrade a local charm, run the `refresh` command followed by the name of the charm and the local path to the charm: + + +```shell +juju refresh juju-test --path ./path/to/juju-test +``` + +The command offers many other options, for example, the possibility to replace a charm completely with another charm by using the `--switch` option followed by a different path (a process known as 'crossgrading'). (Note: `--path` and `--switch` are mutually exclusive. Use `--switch` if you want to replace your existing charm with a completely new charm.) + +> See more: [`juju refresh`](/t/10189) + +[/tab] + +[tab version="terraform juju"] + +To update a charm, in the application's resource definition, in the charm attribute, use a sub-attribute specifying a different revision or channel. For example: + +```terraform +resource "juju_application" "this" { + model = juju_model.development.name + + charm { + name = "hello-kubecon" + revision = 19 + } + +} +``` + +> See more: [`juju_application` > `charm` > nested schema ](https://registry.terraform.io/providers/juju/juju/latest/docs/resources/application#nested-schema-for-charm) + +[/tab] + +[tab version="python libjuju"] + +To update a charm on python-libjuju, you can use the `upgrade_charm` (aliased as `refresh`) method on the `Application` object: + +```python +# upgrade to latest revision on the channel +await my_app.upgrade_charm() + +# upgrade to the latest revision on a given channel +await my_app.upgrade_charm(channel='latest/edge') + +# upgrade to a particular revision +await my_app.upgrade_charm(revision=3) + +# upgrade with a local charm +await my_app.upgrade_charm(path='./path/to/juju-test') + +# replace a charm completely with another charm +await my_app.upgrade_charm(switch='./path/to/juju-test') + +# Note that the path and switch parameters are mutually exclusive. +``` + +> See more: [`Application.upgrade_charm()`](https://pythonlibjuju.readthedocs.io/en/latest/api/juju.application.html#juju.application.Application.upgrade_charm) + +[/tab] +[/tabs] + +
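+For instance, a hedged sketch of such a crossgrade, reusing the `juju-test` application from the example above (the target charm path is illustrative):
+
+```text
+# Replace the charm behind the 'juju-test' application with a different local charm:
+juju refresh juju-test --switch ./path/to/another-charm
+```
+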

Remove a charm / bundle

+ +As a charm / bundle is just the *means* by which (an) application(s) are deployed, there is no way to remove the *charm* / *bundle*. What you *can* do, however, is remove the *application* / *model*. + +> See more: [How to remove an application](/t/5476#heading--remove-an-application), [How to destroy a model](/t/1155#heading--destroy-a-model) + + + + + +
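+For example, assuming an application named `mysql` and a model named `mymodel` (both names are illustrative), the corresponding commands would look like this:
+
+```text
+# Remove the application that the charm deployed:
+juju remove-application mysql
+
+# Or remove the whole model that a bundle was deployed into:
+juju destroy-model mymodel
+```
+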
+ +> **Contributors:** @hmlanigan, @tmihoc, @aflynn + +------------------------- + diff --git a/tmp/t/11413.md b/tmp/t/11413.md new file mode 100644 index 000000000..1e0639fde --- /dev/null +++ b/tmp/t/11413.md @@ -0,0 +1,55 @@ +system | 2024-09-16 15:56:08 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + + +## Summary +Update an existing secret. + +## Usage +```juju update-secret [options] | [key[#base64|#file]=value...]``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `--auto-prune` | nil | used to allow Juju to automatically remove revisions which are no longer being tracked by any observers | +| `--file` | | a YAML file containing secret key values | +| `--info` | | the secret description | +| `-m`, `--model` | | Model to operate in. Accepts [<controller name>:]<model name>|<model UUID> | +| `--name` | | the new secret name | + +## Examples + + juju update-secret secret:9m4e2mr0ui3e8a215n4g token=34ae35facd4 + juju update-secret secret:9m4e2mr0ui3e8a215n4g key#base64 AA== + juju update-secret secret:9m4e2mr0ui3e8a215n4g token=34ae35facd4 --auto-prune + juju update-secret secret:9m4e2mr0ui3e8a215n4g --name db-password \ + --info "my database password" \ + data#base64 s3cret== + juju update-secret db-pass --name db-password \ + --info "my database password" + juju update-secret secret:9m4e2mr0ui3e8a215n4g --name db-password \ + --info "my database password" \ + --file=/path/to/file + + +## Details + +Update a secret with a list of key values, or info. +If a value has the '#base64' suffix, it is already in base64 format and no +encoding will be performed, otherwise the value will be base64 encoded +prior to being stored. +The --auto-prune option is used to allow Juju to automatically remove revisions +which are no longer being tracked by any observers (see Rotation and Expiry). +This is configured per revision. This feature is opt-in because Juju +automatically removing secret content might result in data loss. + + + +--- + +------------------------- + diff --git a/tmp/t/11414.md b/tmp/t/11414.md new file mode 100644 index 000000000..b611e40de --- /dev/null +++ b/tmp/t/11414.md @@ -0,0 +1,36 @@ +system | 2024-09-16 15:55:32 UTC | #1 + +[note type=caution] +The information in this doc is based on Juju version 3.5.5, +and may not accurately reflect other versions of Juju. +[/note] + + +## Summary +Remove a existing secret. + +## Usage +```juju remove-secret [options] |``` + +### Options +| Flag | Default | Usage | +| --- | --- | --- | +| `-m`, `--model` | | Model to operate in. Accepts [<controller name>:]<model name>|<model UUID> | +| `--revision` | 0 | remove the specified revision | + +## Examples + + juju remove-secret my-secret + juju remove-secret secret:9m4e2mr0ui3e8a215n4g + juju remove-secret secret:9m4e2mr0ui3e8a215n4g --revision 4 + + +## Details + +Remove all the revisions of a secret with the specified URI or remove the provided revision only. + + +--- + +------------------------- + diff --git a/tmp/t/1145.md b/tmp/t/1145.md new file mode 100644 index 000000000..c5994409d --- /dev/null +++ b/tmp/t/1145.md @@ -0,0 +1,36 @@ +system | 2022-10-07 11:53:20 UTC | #1 + +> See also: [How to manage plugins](/t/2203) + +A `juju` plugin is an external command that works with `juju` but which is not part of the `juju` core code. + + + +At a more technical level, a `juju` plugin is any executable file in your `$PATH` that begins with `juju-`. 
Although you can run these independently of the Juju command line (`juju-<plugin>`), Juju will also wrap these commands so they can be run within Juju (`juju <plugin>`).
+
+
+
+> See more:
+> - [List of known Juju plugins](/t/5202)
+> - [Plugin flags](/t/7171)
+
+
+
+-------------------------
+
+pedroleaoc | 2022-04-07 08:35:30 UTC | #2
+
+
+
+-------------------------
+
+pedroleaoc | 2022-10-14 11:31:48 UTC | #3
+
+
+
+-------------------------
+
diff --git a/tmp/t/1150.md b/tmp/t/1150.md
new file mode 100644
index 000000000..557c7c666
--- /dev/null
+++ b/tmp/t/1150.md
@@ -0,0 +1,820 @@
+system | 2024-08-09 13:45:18 UTC | #1
+
+> See also: [Offer](/t/13132)
+
+This document shows how to manage offers.
+
+
+
+
+**Contents:**
+
+- [Create an offer](#heading--create-an-offer)
+- [View an offer’s details](#heading--view-an-offers-details)
+- [Control access to an offer](#heading--control-access-to-an-offer)
+- [Find an offer to use](#heading--find-an-offer-to-use)
+- [Integrate with an offer](#heading--integrate-with-an-offer)
+- [Allow traffic from an integrated offer](#heading--allow-traffic-from-an-integrated-offer)
+- [Inspect integrations with an offer](#heading--inspect-integrations-with-an-offer)
+- [Suspend, resume, or remove an integration with an offer](#heading--suspend-resume-or-remove-an-integration-with-an-offer)
+- [Remove an offer](#heading--remove-an-offer)
+

Create an offer

+> Who: User with [offer `admin`](/t/6864#heading--offer-admin) access.
+
+[tabs]
+[tab version="juju"]
+
+An offer stems from an application endpoint. This is how an offer is created:
+
+`juju offer <application name>:<application endpoint>`
+
+By default, an offer is named after its underlying application but you may also choose to give it a different name:
+
+`juju offer <application name>:<application endpoint> <offer name>`
+
+Example:
+```plain
+juju deploy mysql
+juju offer mysql:database hosted-mysql
+```
+
+To view the available application endpoints use `juju show-application` and check the list below `endpoint-bindings`. Example:
+```plain
+juju show-application mysql
+mysql:
+ charm: mysql
+ ...
+ endpoint-bindings:
+ "": alpha
+ certificates: alpha
+ cos-agent: alpha
+ database: alpha
+ ...
+```
+
+To offer both the `certificates` and `database` endpoints:
+```plain
+juju deploy mysql
+juju offer mysql:database,certificates hosted-mysql
+```
+
+Although an offer may have multiple (offer) endpoints it is always expressed as a single URL:
+
+`<user>/<model>.<offer name>`
+
+If the above mysql offer were made in the `default` model by user `admin`, the URL would be:
+
+`admin/default.hosted-mysql`
+
+> See more: [`juju offer`](/t/10080)
+
+[/tab]
+[tab version="terraform juju"]
+
+To create an offer, in your Terraform plan, create a resource of the `juju_offer` type, specifying the offering model and the name of the application and application endpoint from which the offer is created:
+
+```terraform
+
+resource "juju_offer" "percona-cluster" {
+ model = juju_model.development.name
+ application_name = juju_application.percona-cluster.name
+ endpoint = "server"
+}
+
+```
+> See more: [`juju_offer` (resource)](https://registry.terraform.io/providers/juju/juju/latest/docs/resources/offer)
+
+[/tab]
+[tab version="python libjuju"]
+
+To create an offer, use the `create_offer()` method on a connected Model object.
+
+```python
+# Assume a deployed mysql application
+await my_model.deploy('mysql')
+# Expose the database endpoint of the mysql application
+await my_model.create_offer('mysql:database', offer_name='hosted-mysql')
+```
+
+> See more: [`create_offer()`](https://pythonlibjuju.readthedocs.io/en/latest/api/juju.model.html#juju.model.Model.create_offer)
+[/tab]
+[/tabs]
+

View an offer’s details

+> Who: User with [offer `read`+](/t/6864#heading--offer-read) access. + +[tabs] +[tab version="juju"] + +The `show-offer` command gives details about a given offer. + +```plain +juju show-offer +``` + +Example: +```plain +juju show-offer hosted-mysql +Store URL Access Description Endpoint Interface Role +foo admin/default.hosted-mysql admin MySQL is a widely used, open-source certificates tls-certificates requirer + relational database management system database mysql_client provider + (RDBMS). MySQL InnoDB cluster provides a + complete high availability solution for MySQL + via Group Replic... +``` + +For more details, including which users can access the offer, use the `yaml` format. + +Example: +```plain +juju show-offer hosted-mysql --format yaml +serverstack:admin/default.hosted-mysql: + description: | + MySQL is a widely used, open-source relational database management system + (RDBMS). MySQL InnoDB cluster provides a complete high availability solution + for MySQL via Group Replication. + + This charm supports MySQL 8.0 in bare-metal/virtual-machines. + access: admin + endpoints: + certificates: + interface: tls-certificates + role: requirer + database: + interface: mysql_client + role: provider + users: + admin: + display-name: admin + access: admin + everyone@external: + access: read +``` + +A non-admin user with read/consume access can also view an offer's details, but they won't see the information for users with access. + +> See more: [`juju show-offer`](/t/10168) + +[/tab] + +[tab version="terraform juju"] +The `terraform juju` client does not support this. Please use the `juju` client. +[/tab] + +[tab version="python libjuju"] +The `python-libjuju` client does not currently support this. Please use the `juju` client. +[/tab] + +[/tabs] + + +

Control access to an offer

+> Who: User with [offer `admin`](/t/6864#heading--offer-admin) access. + +[tabs] + +[tab version="juju"] + +Offers can have one of three access levels: + +- read (a user can see the offer when searching) +- consume (a user can relate an application to the offer) +- admin (a user can manage the offer) + +These are applied similarly to how standard model access is applied, via the `juju grant` and `juju revoke` commands: + +```plain +juju grant +``` + +```plain +juju revoke +``` + +Revoking a user's consume access will result in all relations for that user to that offer to be suspended. If the consume access is granted anew, each relation will need to be individually resumed. Suspending and resuming relations are explained in more detail later. + +To grant bob consume access to an offer: + +`juju grant bob consume admin/default.hosted-mysql` + +To revoke bob's consume access (he will be left with read access): + +`juju revoke bob consume admin/default.hosted-mysql` + +To revoke all of bob's access: + +`juju revoke bob read admin/default.hosted-mysql` + +> See more: [`juju grant`](/t/10196), [`juju revoke`](/t/10077) + +[/tab] + +[tab version="terraform juju"] +The `terraform juju` client does not support this. Please use the `juju` client. +[/tab] + +[tab version="python libjuju"] +The access levels for offers can be applied in the same way the model or controller access for a given user. Use the `grant()` and `revoke()` methods on a User object to grant or revoke access to an offer. + +```python +# Grant Bob consume access to an offer +await user_bob.grant('consume', offer_name='admin/default.hosted-mysql') + +# Revoke Bob's consume access (he will be left with read access) +await user_bob.revoke('consume', offer_name='admin/default.hosted-mysql') +``` + + +> See more: [`User (object)`](), [`grant()`](), [`revoke()`]() +[/tab] + +[/tabs] + +

Find an offer to use

+> Who: User with [offer `read`+](/t/6864#heading--offer-read) access + +[tabs] + +[tab version="juju"] + +Offers can be searched based on various criteria: + +* URL (or part thereof) +* offer name +* model name +* interface + +The results will show information about the offer, including the level of access the user making the query has on each offer. + +To find all offers on a specified controller: +```plain +$ juju find-offers foo: +Store URL Access Interfaces +foo admin/default.hosted-mysql admin mysql:database +foo admin/default.postgresql admin pgsql:db +``` +As with the `show-offer` command, the `yaml` output will show extra information, including users who can access the offer (if an admin makes the query). +```plain +juju find-offers --offer hosted-mysql --format yaml +foo:admin/default.hosted-mysql: + access: admin + endpoints: + certificates: + interface: tls-certificates + role: requirer + database: + interface: mysql_client + role: provider + users: + admin: + display-name: admin + access: admin + bob: + access: read + everyone@external: + access: read +``` + +To find offers in a specified model: +```plain +juju find-offers admin/another-model +juju find-offers foo:admin/another-model +``` + +To find offers with a specified interface on the current controller: +```plain +juju find-offers --interface mysql_client +juju find-offers --interface tls-certificates +``` + +To find offers with a specified interface on a specific controller: +```plain +juju find-offers --interface mysql_client foo: +``` + +To find offers with "sql" in the name: +```plain +$ juju find-offers --offer sql foo: +``` + +> See more: [`juju find-offers`](/t/10097) + +[/tab] + +[tab version="terraform juju"] +The `terraform juju` client does not support this. Please use the `juju` client. + +[/tab] + +[tab version="python libjuju"] +The `python-libjuju` client does not support this. Please use the `juju` client. +[/tab] + +[/tabs] + +

Integrate with an offer

+> Who: User with [offer `consume`+](/t/6864#heading--offer-consume) access. + +[tabs] + +[tab version="juju"] + +[note type=information] + +Before Juju `3.0`, `juju integrate` was `juju relate`. + +[/note] + +If a user has consume access to an offer, they can deploy an application in their model and establish an integration with the offer by way of its URL. + +```plain +juju integrate [:] [:] +``` + +Specifying the endpoint for the application and the offer is analogous to normal integrations. They can be added but are often unnecessary: + +```plain +juju integrate +``` + +When you integrate with an offer, a proxy application is made in the consuming model, named after the offer. + +An offer can be consumed without integration. This workflow sets up the proxy application in the consuming model and creates a user-defined alias for the offer. This latter is what's used to subsequently relate to. Having an offer alias can avoid a namespace conflict with a pre-existing application. + +```plain +juju consume +juju integrate +``` + +Offers which have been consumed show up in `juju status` in the SAAS section. The integrations (relations) block in status shows any relevant status information about the integrations to the offer in the Message field. This includes any error information due to rejected ingress, or if the relation is suspended etc. + +To remove a consumed offer: + +```plain +juju remove-saas +``` +> See more: [`juju integrate`](/t/10207), [`juju consume`](/t/10213), [`juju remove-saas`](/t/10087) + +[/tab] + +[tab version="terraform juju"] +To integrate with an offer, in your Terraform plan create a `juju_integration` resource as usual by specifying two application blocks and a `lifecycle > replace_triggered_by` block, but for the application representing the offer specify the `offer_url`, and in the `lifecycle` block list triggers only for the regular application (not the offer). For example: + +```terraform +resource "juju_integration" "wordpress-db" { + model = juju_model.development-destination.name + + application { + name = juju_application.wordpress.name + endpoint = "db" + } + + application { + offer_url = juju_offer.this.url + } + +lifecycle { + replace_triggered_by = [ + juju_application.wordpress.name, + juju_application.wordpress.model, + juju_application.wordpress.constraints, + juju_application.wordpress.placement, + juju_application.wordpress.charm.name, + ] + } + +} + +``` + +> See more: [`juju_integration` (resource)](https://registry.terraform.io/providers/juju/juju/latest/docs/resources/integration) + +[/tab] + +[tab version="python libjuju"] +To integrate with an offer, use the `Model.integrate()` method on a connected model, with a consumed offer url. + +```python +# Integrate via offer url +await my_model.integrate('mediawiki:db', 'admin/default.hosted-mysql') + +# Integrate via an offer alias created when consumed +await my_model.consume('admin/prod.hosted_mysql', application_alias="mysql-alias") +await my_model.integrate('mediawiki:db', 'mysql-alias') + +# Remove a consumed offer: +await my_model.remove_saas('mysql-alias') +``` +> See more: [`Model.integrate()`](https://pythonlibjuju.readthedocs.io/en/latest/api/juju.model.html#juju.model.Model.integrate), [`Model.consume()`](https://pythonlibjuju.readthedocs.io/en/latest/api/juju.model.html#juju.model.Model.consume), [`Model.remove_saas()`](https://pythonlibjuju.readthedocs.io/en/latest/api/juju.model.html#juju.model.Model.remove_saas) +[/tab] + +[/tabs] + +
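+To make the workflow concrete, here is a hedged sketch that reuses the `hosted-mysql` offer from the earlier examples (application, endpoint, and alias names are illustrative):
+
+```text
+# Integrate directly via the offer URL:
+juju integrate mediawiki:db admin/default.hosted-mysql
+
+# Or consume the offer under an alias first, then integrate with the alias:
+juju consume admin/default.hosted-mysql mysql-alias
+juju integrate mediawiki:db mysql-alias
+```
+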

Allow traffic from an integrated offer

+> Who: User with [offer `admin`](/t/6864#heading--offer-admin) access. + +[tabs] + +[tab version="juju"] + +When the consuming model is behind a NAT firewall its traffic will typically exit (egress) that firewall with a modified address/network. In this case, the `--via` option can be used with the `juju integrate` command to request the firewall on the offering side to allow this traffic. This option specifies the NATed address (or network) in CIDR notation: + +```plain +juju integrate --via +``` + +Example: +`juju integrate mediawiki:db ian:admin/default.mysql --via 69.32.56.0/8` + +The `--via` value is a comma separated list of subnets in CIDR notation. This includes the /32 case where a single NATed IP address is used for egress. + +It's also possible to set up egress subnets as a model config value so that all cross model integrations use those subnets without needing to use the `--via` option. + +```plain +juju model-config egress-subnets= +``` + +Example: +`juju model-config egress-subnets=69.32.56.0/8` + +To be clear, the above command is applied to the **consuming** model. + +To allow control over what ingress can be applied to the offering model, an administrator can set up allowed ingress subnets by creating a firewall rule. + +```plain +juju set-firewall-rule juju-application-offer --whitelist +``` + +Where 'juju-application-offer' is a well-known string that denotes the firewall rule to apply to any offer in the current model. If a consumer attempts to create a relation with requested ingress outside the bounds of the whitelist subnets, the relation will fail and be marked as in error. + +The above command is applied to the **offering** model. + +Example: +`juju set-firewall-rule juju-application-offer --whitelist 103.37.0.0/16` + +[note type="caution"] +The `juju set-firewall-rule` command only affects subsequently created relations, not existing ones. Only new relations will be rejected if the changed firewall rules preclude the requested ingress. +[/note] + +To see what firewall rules have currently been defined, use the list firewall-rules command. + +Example: +```plain +juju firewall-rules +Service Whitelist subnets +juju-application-offer 103.37.0.0/16 +``` + +[note type="caution"] +Beyond a certain number of firewall rules, which have been dynamically created to allow access from individual integrations, Juju will revert to using the whitelist subnets as the access rules. The number of rules at which this cutover applies is cloud specific. +[/note] + + +> See more: [`juju set-firewall-rule`](/t/10151), [`juju firewall-rules`](/t/10061) + +[/tab] + +[tab version="terraform juju"] +To allow traffic from an integrated offer, in your Terraform plan, in the resource definition where you define the integration with an offer, use the `via` attribute to specify the list of CIDRs for outbound traffic. For example: + + + +```terraform +resource "juju_integration" "this" { +... + via = "10.0.0.0/24,10.0.1.0/24" + +# the rest of your integration definition + +} + +``` + +> See more: [`juju_integration` > `via`](https://registry.terraform.io/providers/juju/juju/latest/docs/resources/integration#via) + +[/tab] + +[tab version="python libjuju"] +The `python-libjuju` client does not support this. Please use the `juju` client. +[/tab] + +[/tabs] + +

Inspect integrations with an offer

+> Who: User with [offer `admin`](/t/6864#heading--offer-admin) access.
+
+> See also: [`offers`](/t/command-offers/1773)
+
+[tabs]
+
+[tab version="juju"]
+
+The `offers` command is used to see all connections to one or more offers.
+
+```plain
+juju offers [--format (tabular|summary|yaml|json)] [<offer name>]
+```
+
+If `offer name` is not provided, all offers are included in the result.
+
+The default `tabular` output shows each user connected (relating to) the offer, the
+relation id of the relation, and ingress subnets in use with that connection. The `summary` output shows one row per offer, with a count of active/total relations. Use the `yaml` output to see extra detail such as the UUID of the consuming model.
+
+The output can be filtered by:
+ - interface: the interface name of the endpoint
+ - application: the name of the offered application
+ - connected user: the name of a user who has an integration to the offer
+ - allowed consumer: the name of a user allowed to consume the offer
+ - active only: only show offers which are in use (are related to)
+
+See `juju help offers` for more detail.
+
+Example:
+```plain
+juju offers mysql
+Offer User Relation id Status Endpoint Interface Role Ingress subnets
+mysql admin 2 joined db mysql provider 69.193.151.51/32
+
+juju offers --format summary
+Offer Application Charm Connected Store URL Endpoint Interface Role
+hosted_mysql mysql ch:mysql-57 1/1 myctrl admin/prod.hosted_mysql db mysql provider
+
+```
+
+All offers for a given application:
+`juju offers --application mysql`
+
+All offers for a given interface:
+`juju offers --interface mysql`
+
+All offers for a given user who has related to the offer:
+`juju offers --connected-user fred`
+
+All offers for a given user who can consume the offer:
+`juju offers --format summary --allowed-consumer mary`
+
+The above command is best run with `--format summary` as the intent is to see, for a given user, what offers they might relate to, regardless of whether there are existing integrations (which is what the tabular view shows).
+
+> See more: [`juju offers`](/t/10051)
+
+[/tab]
+
+[tab version="terraform juju"]
+The `terraform juju` client does not support this. Please use the `juju` client.
+[/tab]
+
+[tab version="python libjuju"]
+To see all connections to one or more offers, use the `list_offers()` method on a connected Model object.
+
+```python
+await my_model.list_offers()
+```
+
+> See more: [`list_offers()`](https://pythonlibjuju.readthedocs.io/en/latest/api/juju.model.html#juju.model.Model.list_offers)
+[/tab]
+
+[/tabs]
+

Suspend, resume, or remove an integration with an offer

+> Who: User with [offer `admin`](/t/6864#heading--offer-admin) access. + +[tabs] + +[tab version=juju] + +Before you can suspend, resume, or remove an integration (relation), you need to know the integration (relation) ID. (That is because, once you've made an offer, there could potentially be many instances of the same application integrating with that offer, so the only way to identify uniquely is via the relation ID.) + +Given two related apps (app1: endpoint, app2), the integration (relation) ID can be found as follows: + + + + +```plain +juju exec --unit $UNIT_FOR_APP1 -- relation-ids endpoint +``` + +The output, `:`, gives you the relation id. + +Once you have the integration (relation) id: + +To suspend an integration (relation), do: + + +```plain +juju suspend-relation +``` + +[note type="information"] + +Suspended integrations (relations) will run the relation departed / broken hooks on either end, and any firewall ingress will be closed. + +[/note] + + +And, to resume an integration (relation), do: + +```plain +juju resume-relation +``` + +Finally, to remove an integration (relation) entirely: + +```plain +juju remove-relation +``` + +[note type="information"] +Removing an integration on the offering side will trigger a removal on the consuming side. An integration can also be removed from the consuming side, as well as the application proxy, resulting in all integrations being removed. +[/note] + +[note type="positive"] +In all cases, more than one integration id can be specified, separated by spaces. +[/note] + +Examples: +```plain +juju suspend-relation 2 --message "reason for suspension" +juju suspend-relation 3 4 5 --message "reason for suspension" +juju resume-relation 2 +``` + +> See more: [`juju suspend-relation`](/t/10179), [`juju resume-relation`](/t/10123), [`juju remove-relation`](/t/10110) + +[/tab] + +[tab version="terraform juju"] +The `terraform juju` client does not support this. Please use the `juju` client. + +[/tab] + +[tab version="python libjuju"] +The `python-libjuju` client does not support suspending, and resuming integrations. However, to remove an integration, you can use the `remove_relation()` method on an Application object. + +```python +await my_controller.remove_integration('mediawiki', 'mysql:db') +``` + +> See more: [`remove_relation()`](https://pythonlibjuju.readthedocs.io/en/latest/api/juju.application.html#juju.application.Application.remove_relation) +[/tab] + +[/tabs] + +
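+As a worked sketch, assume a model where `mediawiki` is related to an offer through its `db` endpoint (the names and the relation ID shown are illustrative):
+
+```text
+# Find the relation ID for mediawiki's 'db' endpoint:
+juju exec --unit mediawiki/0 -- relation-ids db
+# => db:2
+
+# Suspend, then later resume, that relation:
+juju suspend-relation 2 --message "maintenance window"
+juju resume-relation 2
+```
+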

Remove an offer

+> Who: User with [offer `admin`](/t/6864#heading--offer-admin) access.
+
+[tabs]
+
+[tab version="juju"]
+
+
+An offer can be removed providing it hasn't been used in any integration. To override this behaviour, the `--force` option is required, in which case the integration is also removed. This is how an offer is removed:
+
+`juju remove-offer [--force] <offer url>`
+
+Note that, if the offer resides in the current model, then the shorter offer name can be used instead of the longer URL.
+
+Similarly, if an application is being offered, it cannot be deleted until all its offers are removed.
+
+
+> See more: [`juju remove-offer`](/t/10235)
+
+[/tab]
+
+[tab version="terraform juju"]
+
+To remove an offer, in your Terraform plan, remove its resource definition.
+
+> See more: [`juju_offer`](https://registry.terraform.io/providers/juju/juju/latest/docs/resources/offer)
+
+[/tab]
+
+[tab version="python libjuju"]
+To remove an offer, use the `remove_offer()` method on a connected Model. If the offer is used in an integration, then the `force=True` parameter is required to remove the offer, in which case the integration is also removed.
+
+```python
+await my_model.remove_offer('admin/mymodel.ubuntu', force=True)
+```
+
+> See more: [`remove_offer()`](https://pythonlibjuju.readthedocs.io/en/latest/api/juju.model.html#juju.model.Model.remove_offer)
+
+[/tab]
+
+[/tabs]
+
+
+
+---------------
+**Further reading**
+
+For more on cross-model relations, see the following scenarios:
+
+- [Scenario #1](/t/cmr-scenario-1/1148)
+ A MediaWiki deployment, based within the **same** controller, used by the **admin** user, but consumed by **multiple** models.
+- [Scenario #2](/t/cmr-scenario-2/1149)
+ A MediaWiki deployment, based within **multiple** controllers, used by a **non-admin** user, and consumed by a **single** model.
+
+-------
+
+ +> **Contributors:** @anvial, @cderici, @hmlanigan, @manadart, @simonrichardson, @tmihoc + +------------------------- + +axino | 2020-04-29 13:18:49 UTC | #2 + +Why is this documentation page not listed on the left column on https://juju.is/docs/ ? @timClicks + +------------------------- + +timClicks | 2020-04-29 15:22:10 UTC | #3 + +Good question, but I don't have a good answer. I've added it now in the Deployment section. + +------------------------- + +bcarbone | 2021-05-05 12:52:57 UTC | #4 + +seems the content of the page refers to [ Advanced application deployment](https://juju.is/docs/olm/advanced-application-deployment) and no longer contains documentation about cross model relations. + +------------------------- + +pedroleaoc | 2021-05-07 09:44:11 UTC | #5 + +Fixed now. Thanks for flagging this. + +------------------------- + +pedroleaoc | 2021-06-08 18:06:24 UTC | #6 + + + +------------------------- + +pedroleaoc | 2022-10-14 11:31:58 UTC | #7 + + + +------------------------- + +emcp | 2023-03-08 16:05:55 UTC | #8 + +I just started playing with a bind9 operator exposed in a cross-model relational fashion.. the exposing works great but when I went to consume or use the service in another model I got to the `juju integrate` and its saying this in the output + +``` +$ juju integrate bind9:dns-entry admin/other_model +ERROR juju: "integrate" is not a juju command. See "juju --help". + +Did you mean: + migrate +``` + +I am on juju `2.9.42` .. is this a `v3+` only feature? + +EDIT: it appears I just use `juju relate ` instead .. should these docs be updated or .. is this new nomenclature coming later? + +------------------------- + +erik-lonroth | 2023-03-09 12:13:33 UTC | #9 + +I think integrate is a 3.0 thing.... @tmihoc ? + +------------------------- + +tmihoc | 2023-03-09 12:54:21 UTC | #10 + +That's right. I should clarify this better in the docs. I'll do it right away. Update: Done. + +------------------------- + diff --git a/tmp/t/1155.md b/tmp/t/1155.md new file mode 100644 index 000000000..fbc170a2e --- /dev/null +++ b/tmp/t/1155.md @@ -0,0 +1,1271 @@ +system | 2024-07-17 10:12:40 UTC | #1 + +> See also: [Model](/t/5456) + +**Contents:** + +- [Add a model](#heading--add-a-model) +- [View all the models available on a controller](#heading--view-all-the-models-available-on-a-controller) +- [Switch to a different model](#heading--switch-to-a-different-model) +- [View the status of a model](#heading--view-the-status-of-a-model) +- [View details about a model](#heading--view-details-about-a-model) +- [Configure a model](#heading--configure-a-model) +- [Manage constraints for a model](#heading--manage-constraints-for-a-model) +- [Restrict commands on a model](#heading--restrict-commands-on-a-model) +- [Compare and export the contents of a model to a bundle](#heading--compare-and-export-the-contents-of-a-model-to-a-bundle) +- [Upgrade a model](#heading--upgrade-a-model) +- [Migrate a workload model to another controller](#heading--migrate-a-workload-model-to-another-controller) +- [Destroy a model](#heading--destroy-a-model) + + +

Add a model

+ +[note type=caution] +**If you have multiple credentials:** Be careful which one you use for the new model. Any machines subsequently on the model will be associated with this credential. As such, make sure you're not spending resources for the wrong cloud account! +[/note] + + +[tabs] +[tab version="juju"] + +To add a model to the current controller using the default credential and switch to this model, run the `add-model` command followed by the name of the model. For example: + +```text +juju add-model mymodel +``` + +You can also pass various options to choose a different controller or credential, specify a configuration, designate a different model `owner`, *not* switch to the newly create model, add it to a particular cloud (for multi-cloud controllers), etc. + +> See more: [`juju add-model`](/t/10145) + +[/tab] + +[tab version="terraform juju"] +To add a model on the controller specified in the `juju` provider definition, in your Terraform plan create a resource of the `juju_model` type, specifying, at the very least, a name. For example: + +```text +resource "juju_model" "testmodel" { + name = "machinetest" +} + +``` + +In the case of a multi-cloud controller, you can specify which cloud you want the model to be associated with by defining a `cloud` block. To specify a model configuration, include a `config` block. + + +> See more: [`juju_model` (resource)](https://registry.terraform.io/providers/juju/juju/latest/docs/resources/model) +[/tab] + +[tab version="python libjuju"] + +To add a model, on a connected controller, call the `add_model` function. For example, below we're adding a model called `test-model` on the `controller`: + +```python +await controller.add_model("test-model") +``` + +> See more: [`Controller.add_model()`](https://pythonlibjuju.readthedocs.io/en/latest/api/juju.controller.html#juju.controller.Controller.add_model), [`juju_model` (module)](https://pythonlibjuju.readthedocs.io/en/latest/api/juju.model.html), [`juju_controller` (module)](https://pythonlibjuju.readthedocs.io/en/latest/narrative/controller.html) + + +[/tab] +[/tabs] + +
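+For example, a hedged sketch that combines a few of those options -- adding a model on a specific cloud region with an initial configuration (cloud, region, and values are illustrative):
+
+```text
+juju add-model mymodel aws/us-east-1 --config logging-config="<root>=WARNING"
+```
+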

View all the models available on a controller

+ +[tabs] +[tab version="juju"] + +To get a list of all the models in the current controller, use the `models` command: + +```text +juju models +``` + +The current model will be denoted with an asterisk. + +[details=Expand to see a sample output] + +```text +Controller: localhost-localhost + +Model Cloud/Region Type Status Machines Units Access Last connection +controller localhost/localhost lxd available 1 1 admin 1 minute ago +prod* localhost/localhost lxd available 0 - admin never connected +test localhost/localhost lxd available 0 - admin 2 minutes ago +``` +[/details] + +By passing various options you can filter by controller, get a time stamp, output to a specific format, etc. + +> See more: [`juju models`](/t/10090) + +[/tab] + +[tab version="terraform juju"] +The `terraform juju` client does not support this. Please use the `juju` client. + +[/tab] + +[tab version="python libjuju"] + +To view all the models available on a controller, call the `Controller.list_models()` function: + +```python +await controller.list_models() +``` +> See more: [`Controller.list_models()`](https://pythonlibjuju.readthedocs.io/en/latest/api/juju.controller.html#juju.controller.Controller.list_models), + + +[/tab] +[/tabs] + + +

Switch to a different model

+
+[tabs]
+[tab version="juju"]
+
+**Identify the current model.** To identify the current model, run the `switch` command with no arguments:
+
+```text
+juju switch
+```
+
+This will show the current controller, user, and model in a `<controller>:<user>/<model>` format.
+
+[details=Expand to see a sample output]
+```text
+localhost-localhost:admin/test
+```
+[/details]
+
+[note type=information]
+You can also identify the current model by running `juju models` -- your current model is the model with an asterisk!
+[/note]
+
+
+**Switch to a different model.** To change from the current model to a different model, use the `switch` command followed by the target model name in a `<controller>:<user>/<model>` format:
+
+```text
+juju switch <controller>:<user>/<model>
+```
+
+The command also allows you to specify the target controller in an abbreviated form by omitting one or more of the components (see the examples further below).
+
+
+
+> See more: [`juju switch`](/t/10102)
+
+[note type=caution]
+For important operations we recommend you specify the model in the unambiguous form shown above.
+[/note]
+
+[/tab]
+
+[tab version="terraform juju"]
+The `terraform juju` client does not support this. Please use the `juju` client.
+
+[/tab]
+
+[tab version="python libjuju"]
+
+In `python-libjuju`, switching to a different model means simply connecting to the model you want to work with, which is done by calling `connect` on the [Model](https://pythonlibjuju.readthedocs.io/en/latest/narrative/model.html) object:
+
+```python
+from juju.model import Model
+
+model = Model()
+await model.connect() # will connect to the "current" model
+
+await model.connect(model_name="test-model") # will connect to the model named "test-model"
+```
+
+Note that if the `model` object is already connected to a model, then that connection will be closed before making the new connection.
+
+> See more: [`Model.connect()`](https://pythonlibjuju.readthedocs.io/en/latest/api/juju.model.html#juju.model.Model.connect)
+
+
+[/tab]
+[/tabs]
+
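+For instance, assuming a controller named `lxd` with a model named `test` owned by `admin` (all names illustrative), the following abbreviated forms are accepted by the `juju` client:
+
+```text
+juju switch lxd:admin/test   # controller, user, and model all specified
+juju switch lxd:test         # controller and model
+juju switch test             # just a model on the current controller
+juju switch lxd              # just a controller
+```
+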

View the status of a model

+ +[tabs] +[tab version="juju"] + +To see the status of a model and everything inside of it, run the `status` command: + +```text +juju status +``` + +[details=Expand to see a sample output] +```text +Model Controller Cloud/Region Version SLA Timestamp +test localhost-localhost localhost/localhost 3.1.0 unsupported 16:07:52+01:00 + +Model "admin/test" is empty. +``` +[/details] + + +By passing various options you can also specify a model, see the output in color formatting or with additional sections for relations or storage, watch the status for a given duration, etc. + +> See more: [`juju status`](/t/10173) + +[/tab] + +[tab version="terraform juju"] +The `terraform juju` client does not support this. Please use the `juju` client. + +[/tab] + +[tab version="python libjuju"] + +[/tab] +[/tabs] + +
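+For example, a hedged sketch combining a few of those flags with the `juju` client (the model name is illustrative):
+
+```text
+# Show relations and storage for a specific model, in colour, refreshing every 5 seconds:
+juju status -m mymodel --relations --storage --color --watch 5s
+```
+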

View details about a model

+ +[tabs] +[tab version="juju"] + +To view detailed information about a specific model, use the `show-model` command followed by the model name. For example: + +```text +juju show-model test +```` + +[details=Expand to see a sample output for an empty model called 'test'] + +```text +test: + name: admin/test + short-name: test + model-uuid: 3850c8cc-0cd0-4d53-8a6d-591b63024141 + model-type: iaas + controller-uuid: f06afa86-3461-42bb-86ed-6c2f5d7b0ac7 + controller-name: localhost-localhost + is-controller: false + owner: admin + cloud: localhost + region: localhost + type: lxd + life: alive + status: + current: available + since: 5 hours ago + users: + admin: + display-name: admin + access: admin + last-connection: 2 minutes ago + sla: unsupported + agent-version: 3.1.0 + credential: + name: localhost + owner: admin + cloud: localhost + validity-check: valid + supported-features: + - name: juju + description: the version of Juju used by the model + version: 3.1.0 +``` +[/details] + +By passing options you can also specify a format, an output file, etc. + +> See more: [`juju show-model`](/t/10191) + +[/tab] + +[tab version="terraform juju"] +The `terraform juju` client does not support this. Please use the `juju` client. + +[/tab] + +[tab version="python libjuju"] + +[/tab] +[/tabs] + +

Configure a model

+> See also: [Model configuration](/t/6659#heading--model-configuration), [List of model configuration keys](/t/7068) +> +> See related: [How to configure a controller](/t/1111#heading--configure-a-controller) + +[tabs] +[tab version="juju"] + +The procedure for how to configure a model differs slightly depending on whether you are interested in the configuration of a specific model or rather of all the models on a controller. + +- [A specific model](#heading--a-specific-model) +- [All the models on a controller](#heading--all-the-models-on-a-controller) + +

A specific model

+
+
+**Set values.** You can set the configuration for a model both while you are creating the model and later.
+
+- To set it for the `controller` model during controller creation, use the `bootstrap` command with the `--config` option followed by the desired configuration, for example:
+
+``` text
+juju bootstrap --config image-stream=daily localhost lxd-daily
+```
+
+
+- To set it for any other (workload) model while creating it, use the `add-model` command with the `--config` flag followed by the desired configuration:
+
+```text
+juju add-model mymodel --config image-stream=daily
+```
+
+- To set it for any model -- whether `controller` or otherwise -- after the model has already been created, use the `model-config` command followed by the desired configuration. For example, below we set the default space binding for all the applications on the model to 'myspace':
+
+``` text
+juju model-config default-space=myspace
+```
+
+[note type=caution]
+Juju does not currently check that the provided key is a valid setting, so make sure you spell it correctly.
+[/note]
+
+In all cases, the configuration can be passed in the form of a space-separated list of key-value pairs or in the form of a YAML configuration file, and you can also use it to overwrite (e.g., with a null value) or to reset existing values, among other things.
+
+
+[note type=information]
+If you're trying to pass multiple configurations using the `--config` flag, make sure to repeat the flag for every configuration.
+[/note]
+
+> See more: [`juju bootstrap --config ...`](/t/10132), [`juju add-model ... --config`](/t/10145), [`juju model-config`](/t/10096)
+
+**Get values.** You can get the configuration of a model at any time by running the `model-config` command without any argument, as below:
+
+``` text
+juju model-config
+```
+
+By using various flags of this command you can also target a specific model or key, choose a different output format, etc.
+
+> See more: [`juju model-config`](/t/10096)
+
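+For instance, a short sketch of targeting and resetting a single key (the key and model names are illustrative):
+
+``` text
+# Read one key on a specific model:
+juju model-config -m mymodel default-space
+
+# Reset a key back to its default value:
+juju model-config --reset default-space
+```
+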

All the models on a controller

+
+**Set values.** You can set the default configuration values for all the models on a controller either during controller creation or after.
+
+- To set model configuration defaults during controller creation, use the `bootstrap` command with the `--model-defaults` flag followed by the desired configuration(s), for example, as below. This will affect the `controller` model and any (workload) models subsequently added to this controller.
+
+[note type=information]
+For the `controller` model you can override `--model-defaults` through `--config`. See more: [How to configure a controller](/t/1111#heading--configure-a-controller).
+[/note]
+
+```text
+juju bootstrap microk8s uk8s \
+ --model-defaults logging-config="<root>=WARNING; unit=DEBUG" \
+ --model-defaults update-status-hook-interval="60m"
+```
+
+By passing various flags you can also target a specific cloud or cloud region, pass the configuration(s) in the form of a YAML file, reset keys, etc.
+
+> See more: [`juju bootstrap --model-defaults ...`](/t/10132)
+
+- To set model configuration defaults *after* controller creation, use the `model-defaults` command followed by the desired configuration. This will affect any models created from that point onwards.
+
+```text
+juju model-defaults ftp-proxy=10.0.0.1:8000
+```
+
+[note type=information]
+These defaults can be overridden, on a per-model basis, during the invocation of the `add-model` command (option `--config`) as well as by resetting specific options to their original defaults through the use of the `model-config` command (option `--reset`).
+[/note]
+
+> See more: [`juju model-defaults`](/t/10057)
+
+
+**Get values.** At any point, you can get the default configuration values for all the models on a controller by running the `model-defaults` command, as below:
+
+```text
+juju model-defaults
+```
+
+Just as before, by using various flags you can filter by a specific cloud or cloud region, or see the value for a specific key, etc.
+
+> See more: [`juju model-defaults`](/t/10057)
+
+[/tab]
+
+[tab version="terraform juju"]
+
+With the `terraform juju` client you can only set configuration values, only for a specific model, and only a workload model; for anything else, please use the `juju` client.
+
+To configure a specific workload model, in your Terraform plan, in the model's resource definition, specify a `config` block, listing all the key=value pairs you want to set. For example:
+
+```text
+resource "juju_model" "this" {
+ name = "development"
+
+ cloud {
+ name = "aws"
+ region = "eu-west-1"
+ }
+
+ config = {
+ logging-config = "<root>=INFO"
+ development = true
+ no-proxy = "jujucharms.com"
+ update-status-hook-interval = "5m"
+ }
+}
+```
+
+> See more: [`juju_model` (resource)](https://registry.terraform.io/providers/juju/juju/latest/docs/resources/model)
+
+[/tab]
+
+[tab version="python libjuju"]
+
+[/tab]
+[/tabs]
+
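+For example, a hedged sketch of filtering and resetting model defaults with the `juju` client (the cloud, region, and keys are illustrative):
+
+```text
+# Show the default for a single key, scoped to a cloud region:
+juju model-defaults aws/us-east-1 logging-config
+
+# Reset a default back to Juju's built-in value:
+juju model-defaults --reset ftp-proxy
+```
+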

Manage constraints for a model

+> See also: [Constraint](/t/6184) + +[tabs] +[tab version="juju"] + +**Set values.** You can set constraints for the `controller` model during controller creation or to regular models at any other point. + + [note type=caution] +**To set constraints for just the `controller` application in the `controller` model *only*:** Use the `bootstrap` command with the `--bootstrap-constraints` flag. See more: [How to manage constraints for a controller](/t/1111#heading--manage-constraints-for-a-controller). +[/note] + + +- To apply a constraint to the entire `controller` model during controller creation, run the `bootstrap` command with the `--constraints` option. Below we use it to ensure that every machine has 4GiB memory. + +```text +juju bootstrap --constraints mem=4G aws +``` + +> See more: [`juju bootstrap --constraints`](/t/10132) + + +- To set constraints for a regular model, run the `set-model-constraints` command followed by the desired key-value pair, as in the example below. This will affect all new resources provisioned for the model. + + +``` text +juju set-model-constraints mem=4G +``` + +[note type=positive] +**Pro tip:** To reset a constraint key to its default value, run the command with the value part empty (e.g., `juju set-model-constraints mem= `). +[/note] + +> See more: [`juju set-model-constraints`](/t/1813) + +**Get values.** To get constraint values for the current model, run the `model-constraints` command, as below: + +``` text +juju model-constraints +``` + +By using various flags, you can specify a model (e.g., `-m controller`, to view constraints for the controller model), an output file, etc. + +> See more: [`juju model-constraints`](/t/10137) + +[/tab] + +[tab version="terraform juju"] +With the `terraform juju` provider you can only set constraints -- to view them, please use the `juju` client. + +To set constraints for a model, in your Terraform, in the model's resource definition, specify the `constraints` attribute (value is a quotes-enclosed space-separated list of key=value pairs). For example: + +```text +resource "juju_model" "this" { + name = "development" + + cloud { + name = "aws" + region = "eu-west-1" + } + + constraints = "cores=4 mem=16G" +} +``` + +> See more: [`juju_model` (resource)](https://registry.terraform.io/providers/juju/juju/latest/docs/resources/model) + +[/tab] + +[tab version="python libjuju"] + +[/tab] +[/tabs] + +
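+For instance, a short sketch based on the flags mentioned above:
+
+``` text
+# View constraints for the controller model:
+juju model-constraints -m controller
+
+# Reset a constraint key to its default by passing an empty value:
+juju set-model-constraints mem=
+```
+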

Restrict commands on a model

+
+[tabs]
+[tab version="juju"]
+
+**Disable commands.** To disable commands for the current model, run the `disable-command` command followed by the name of the command group that you want to restrict and, optionally, a message. For example, the code below disables the ability to destroy the model and its controller:
+
+```text
+juju disable-command destroy-model "Check with SA before destruction."
+```
+
+
+
+> See more: [`juju disable-command`](/t/10205)
+
+**View a list of the disabled commands.** To see which command groups have been disabled for a model, run the `disabled-commands` command:
+
+```text
+juju disabled-commands
+```
+
+> See more: [`juju disabled-commands`](/t/10220)
+
+
+**Enable commands.** To lift command restrictions, run `enable-command` followed by the command group that you want to enable. For example, the code below re-allows people to destroy the model and its controller:
+
+```text
+juju enable-command destroy-model
+```
+
+> See more: [`juju enable-command`](/t/10111)
+
+[/tab]
+
+[tab version="terraform juju"]
+The `terraform juju` client does not support this. Please use the `juju` client.
+[/tab]
+
+[tab version="python libjuju"]
+
+[/tab]
+[/tabs]
+

Compare and export the contents of a model to a bundle

+ +[tabs] +[tab version="juju"] + +**Compare.** To compare the contents of the current model with a bundle and report any differences, run the `diff-bundle` command: + +```text +juju diff-bundle +``` + +------ +[details=Expand to see an example] + + + +Consider, for example, a model for which the `status` command yields the output below: + +```text +Model Controller Cloud/Region Version SLA Timestamp +docs lxd localhost/localhost 2.5.0 unsupported 05:22:22Z + +App Version Status Scale Charm Store Rev OS Notes +haproxy unknown 1 haproxy jujucharms 46 ubuntu +mariadb 10.1.37 active 1 mariadb jujucharms 7 ubuntu +mediawiki 1.19.14 active 1 mediawiki jujucharms 19 ubuntu + +Unit Workload Agent Machine Public address Ports Message +haproxy/0* unknown idle 2 10.86.33.28 80/tcp +mariadb/0* active idle 1 10.86.33.192 ready +mediawiki/0* active idle 0 10.86.33.19 80/tcp Ready + +Machine State DNS Inst id Series AZ Message +0 started 10.86.33.19 juju-dbf96b-0 trusty Running +1 started 10.86.33.192 juju-dbf96b-1 trusty Running +2 started 10.86.33.28 juju-dbf96b-2 bionic Running + +Relation provider Requirer Interface Type Message +haproxy:peer haproxy:peer haproxy-peer peer +mariadb:cluster mariadb:cluster mysql-ha peer +mariadb:db mediawiki:db mysql regular +mediawiki:website haproxy:reverseproxy http regular +``` + +Now say we have a bundle file `bundle.yaml` with these contents: + +```text +applications: + mediawiki: + charm: "mediawiki" + num_units: 1 + options: + name: Central library + mysql: + charm: "mysql" + num_units: 1 + options: + "binlog-format": MIXED + "block-size": 5 + "dataset-size": "512M" + flavor: distro + "ha-bindiface": eth0 + "ha-mcastport": 5411 + "max-connections": -1 + "preferred-storage-engine": InnoDB + "query-cache-size": -1 + "query-cache-type": "OFF" + "rbd-name": mysql1 + "tuning-level": safest + vip_cidr: 24 + vip_iface: eth0 +relations: + - - "mediawiki:db" + - "mysql:db" +``` + +Comparison of the currently active model with the bundle can be achieved in this way: + +```text +juju diff-bundle bundle.yaml +``` + +This produces an output of: + +```text +applications: + haproxy: + missing: bundle + mariadb: + missing: bundle + mediawiki: + charm: + bundle: mediawiki-5 + model: mediawiki-19 + series: + bundle: "" + model: trusty + options: + name: + bundle: Central library + model: null + mysql: + missing: model +machines: + "0": + missing: bundle + "1": + missing: bundle + "2": + missing: bundle +relations: + bundle-additions: + - - mediawiki:db + - mysql:db + model-additions: + - - haproxy:reverseproxy + - mediawiki:website + - - mariadb:db + - mediawiki:db +``` + +This informs us of the differences in terms of applications, machines, and relations. For instance, compared to the model, the bundle is missing applications `haproxy` and `mariadb`, whereas the model is missing `mysql`. Both model and bundle utilise the 'mediawiki' application but they differ in terms of configuration. There are also differences being reported in the `machines` and `relations` sections. + +Let's now focus on the `machines` section and explore some other features of the `diff-bundle` command. + +We can extend the bundle by including a bundle overlay. 
Consider an overlay bundle file `changes.yaml` with these machine related contents: + +```text +applications: + mediawiki: + to: 2 + mysql: + to: 3 +machines: + "2": + series: trusty + constraints: arch=amd64 cores=1 + "3": + series: trusty + constraints: arch=amd64 cores=1 +``` + +Here, by means of the `--overlay` option, we can add this extra information to the comparison, effectively inflating the configuration of the bundle: + +```text +juju diff-bundle bundle.yaml --overlay changes.yaml +``` + +This changes the `machines` section of the output to: + +```text +machines: + "0": + missing: bundle + "1": + missing: bundle + "2": + series: + bundle: trusty + model: bionic + "3": + missing: model +``` + +The initial comparison displayed a lack of all three machines in the bundle. By adding machines `2` and `3` in the overlay, the output now shows machines `0` and `1` as missing in the bundle, machine `2` differs in configuration, and machine `3` is missing in the model. + +As with the `deploy` command, there is the ability to map machines in the bundle to those in the model. Below, the addition of `--map-machines=2=0,3=1` makes, for the sake of the comparison, bundle machines `2` and `3` become model machines `0` and `1`, respectively: + +```text +juju diff-bundle bundle.yaml --overlay changes.yaml --map-machines=2=0,3=1 +``` + +The `machines` section now becomes: + +```text +machines: + "2": + missing: bundle +``` + +The bundle shows as only missing machine `2` now, which makes sense. + +The target bundle can also reside on Charmhub. In that case you would simply reference the bundle name, such as `wiki-simple`: + +```text +juju diff-bundle wiki-simple +``` + +[/details] + +------- + +> See more: [`juju diff-bundle`](/t/10142) + + +**Export.** To export the contents of the current model to a bundle file (a file of the form `.yaml`), run the `export-bundle` command with the `--filename` flag followed by the file path. For example: + + +```text +juju export-bundle --filename mybundle.yaml +``` + +The command also has flags that allow you to select a different model, include charm configuration default values in the exported bundle, etc. + +---- +[details=Example] + +Suppose you have a model that looks like this: + +```text +$ juju status +Model Controller Cloud/Region Version SLA Timestamp +welcome-k8s microk8s microk8s/localhost 3.1.6 unsupported 09:09:56+01:00 + +App Version Status Scale Charm Channel Rev Address Exposed Message +example-k8s active 1 example-k8s 1 10.152.183.43 no +microsample-vm active 1 microsample-vm 0 10.152.183.230 no + +Unit Workload Agent Address Ports Message +example-k8s/0* active idle 10.1.64.174 +microsample-vm/0* active idle 10.1.64.169 +``` + +Running `juju export-bundle` will print this: + +```text +$ juju export-bundle +bundle: kubernetes +applications: + example-k8s: + charm: local:example-k8s-1 + scale: 1 + constraints: arch=amd64 + microsample-vm: + charm: local:microsample-vm-0 + scale: 1 + constraints: arch=amd64 +``` + +[/details] + +--- + +> See more: [`juju export-bundle`](/t/10046), [SDK | How to manage bundles](https://juju.is/docs/sdk/manage-bundles) + +[/tab] + +[tab version="terraform juju"] + +The `terraform juju` client does not support this. Please use the `juju` client. + +[/tab] + +[tab version="python libjuju"] + +[/tab] +[/tabs] + + +

Upgrade a model

+> See more: [Upgrading things](/t/1199)
+
+[tabs]
+[tab version="juju"]
+
+A model upgrade affects the version of Juju (Juju machine and unit agents) on all the Juju machines in the model.
+
+First, prepare for the upgrade:
+
+- Ensure the controller has already been upgraded. See more: [How to upgrade a controller](/t/1111#heading--upgrade-a-controller)
+- Ensure the models that are to be upgraded are in good working order (`juju status`).
+
+Then, perform the upgrade. How you upgrade a model depends on whether you'd be crossing patch versions (e.g., `v.2.9.25` -> `v.2.9.26`) or rather minor (e.g., `v.2.7` -> `v.2.8`) or major versions (`v.2` -> `v.3`).
+
+- To upgrade the current model across patch versions, use the `upgrade-model` command:
+
+```text
+juju upgrade-model
+```
+
+By using various flags, you can specify an agent stream, agent version, etc., or you can even perform a dry run, to simulate what would happen if you upgraded.
+
+[note type=information]
+This procedure can also be used to upgrade a controller model.
+[/note]
+
+> See more: [`juju upgrade-model`](/t/10073)
+
+- To upgrade a model's minor or major version, use model migration. First, bootstrap a controller of your target version, migrate your model to that controller, and then do `upgrade-model` on the new controller.
+
+[note type=information]
+This procedure cannot be used to upgrade a controller model.
+[/note]
+
+> See more: [How to migrate a workload model to another controller](#heading--migrate-a-workload-model-to-another-controller)
+
+
+
+When you're done, verify that the upgrade has been successful by running the `status` command. If the output looks wrong, you will have to do some investigation.
+
+[note type=information status="Troubleshooting"]
+
+[details="Error: some agents have not upgraded to the current model version "]
+
+When the running agent software is more than 1 patch point behind the targeted upgrade version, the upgrade process will abort.
+
+One very common reason for "agent version skew" is that during a previous upgrade the agent could not be contacted and, therefore, was not upgraded along with the rest of the agents.
+
+To overcome this situation you may force the upgrade by ignoring the agent version check:
+
+``` text
+juju upgrade-model --ignore-agent-versions
+```
+[/details]
+
+[details="Unit agent has not restarted after upgrade"]
+It may occur that an agent does not restart upon upgrade. One thing that may help is the inspection and modification of its `agent.conf` file. Comparing it with a copy of the file from before the upgrade can be very useful.
+
+Installing a different or modified configuration file will require a restart of the daemon. For example, for a machine with an ID of ‘2’:
+
+```
+juju ssh 2 'ls -lh /etc/systemd/system/juju*'
+```
+
+This will return something similar to:
+
+```
+-rwxr-xr-x 1 root root 326 Jun 29 19:02 /etc/systemd/system/jujud-machine-2-exec-start.sh
+-rw-r--r-- 1 root root 284 Jun 29 19:02 /etc/systemd/system/jujud-machine-2.service
+```
+
+Therefore, if the agent for machine ‘2’ is not coming up you can connect to the machine in this way:
+
+```
+juju ssh 2
+```
+
+Then modify or restore the agent file (`/var/lib/juju/agents/machine-2/agent.conf`), and while still connected to the machine, restart the agent:
+
+```
+sudo systemctl restart jujud-machine-2
+```
+[/details]
+[/note]
+
+
+[/tab]
+
+[tab version="terraform juju"]
+The `terraform juju` client does not support this. Please use the `juju` client.
+[/tab] + +[tab version="python libjuju"] + +[/tab] +[/tabs] + +
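+
+For illustration, with the `juju` client a patch upgrade of the current model might look like the following (the target agent version is hypothetical); the `--dry-run` pass reports what would happen without changing anything:
+
+```text
+juju upgrade-model --dry-run
+juju upgrade-model --agent-version 2.9.26
+```
+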

Migrate a workload model to another controller

+
+Model migration is the movement of a model from one controller to another. The same configuration of machines, units, and their relations will be replicated on the destination controller, while your applications continue uninterrupted. Migration is used to upgrade models across minor or major versions. Migration is also useful for load balancing: If a controller hosting multiple models reaches capacity, you can move the busiest models to a new controller, reducing load without affecting your applications.
+
+[note type=information]
+A controller model cannot be migrated.
+[/note]
+
+[tabs]
+[tab version="juju"]
+
+**Prepare for migration.**
+
+- Verify that the source and destination controllers are both known to the Juju client (i.e., they show up in the `juju controllers` output) and located in the same cloud environment.
+- Verify that the version of Juju running on the destination controller is the same or newer than the version on the source controller.
+- Verify that the destination controller does not have any model with the same name as the name of the model you want to migrate to it.
+- Back up the source controller.
+- **If the destination controller is on a different region or VPC:** Ensure that the destination controller has direct connectivity to the source controller.
+- **If the model is large:** Configure the destination controller to throttle the reconnection rate for the agents running for each machine and unit in the model and increase the migration agent wait time. For example:
+
+```text
+juju controller-config agent-ratelimit-rate=50ms
+juju controller-config agent-ratelimit-max=100
+juju controller-config migration-agent-wait-time=30m
+```
+
+> See more: List of controller configuration keys > [`agent-ratelimit-rate`](/t/7059#heading--agent-ratelimit-rate), [`agent-ratelimit-max`](/t/7059#heading--agent-ratelimit-max), [`migration-agent-wait-time`](/t/7059#heading--migration-agent-wait-time)
+
+
+
+- **If the model has multiple users:** Ensure that all the users have been set up on the destination controller. The operation will be aborted, and an advisory message displayed, if this is not the case.
+- **If the model contains secrets:** Set up the target controller to use the same secret backend as the source controller. For example, for a backend called `myvault`, as below. This will ensure that any secrets are correctly migrated with the model.
+
+
+```text
+$ juju switch sourcecontroller
+$ juju show-secret-backend myvault
+myvault:
+  backend: vault
+  config:
+    endpoint: http://10.0.0.77:8200
+  secrets: 0
+  status: active
+  id: 63c8ad37c906eb278540e942
+
+$ juju switch targetcontroller
+$ juju add-secret-backend --config /path/to/backendcfg.yaml --import-id 63c8ad37c906eb278540e942
+```
+
+**Migrate the model.** To migrate a model on the current controller to a destination controller, use the `migrate` command followed by the name of the model and the name of the destination controller, as below:
+
+```text
+juju migrate
+```
+
+You can monitor progress from the output of the `status` command run against the source model. You may want to use a command such as `watch` to automatically refresh the status output, rather than manually running status each time:
+
+```text
+watch --color -n 1 juju status --color
+```
+
+In the output, a 'Notes' column is appended to the model overview line at the top of the output. The migration will step through various states, from 'starting' to 'successful'.
+
+The 'status' section in the output from the `show-model` command also includes details on the current or most recently run migration. It adds extra information too, such as the migration start time, and is a good place to start if you need to determine why a migration has failed.
+
+This section will look similar to the following after starting a migration:
+
+```text
+status:
+  current: available
+  since: 23 hours ago
+  migration: uploading model binaries into destination controller
+  migration-start: 21 seconds ago
+```
+
+Migration time depends on the complexity of the model, the resources it uses, and the capabilities of the backing cloud.
+
+If failure occurs during the migration process, the model, in its original state, will be reverted to the original controller.
+
+When the migration has completed successfully, the model will no longer reside on the source controller. It, and its applications, machines and units, will be running on the destination controller.
+
+Inspect the migrated model with the `status` command:
+
+```text
+juju status -m :
+```
+
+
+
+
+
+[note type=information status="Troubleshooting"]
+
+[details="Error: migration: 'aborted, removing model from target controller: model data transfer failed, failed to import model into target controller: granting admin permission to the owner: user "" is permanently deleted'"]
+
+This error occurs when the model owner does not exist on the target controller. The solution is to create a user with that name on the target controller.
+
+**Note:** The underlying cause is that a model is tightly coupled with the user who has created it. Starting with Juju 4, it will be possible to identify models independently of the user.
+
+[/details]
+
+[details="Error: migration: 'aborted, removing model from target controller: model data transfer failed, failed to import model into target controller: credential "" not found (not found)'"]
+
+This error occurs when the model owner does not own the credential associated with the model. The solution is to change the credential to a credential the user owns (via `juju set-credential`).
+
+[/details]
+
+[details="Error: migration: 'aborted, removing model from target controller: machine sanity check failed, 1 error found'"]
+
+This error occurs when the machines known by Juju differ from the ones the underlying cloud reports (e.g., a LXD cloud still sees a container that has been removed from Juju). The solution is to check the cloud and resolve the difference (i.e., continuing with the previous example, to delete the container from the LXD cloud as well).
+
+
+[/details]
+
+[/note]
+
+> See more: [`juju migrate`](/t/10121)
+
+[/tab]
+
+[tab version="terraform juju"]
+To migrate a model to another controller, use the `juju` client to perform the migration, then, in your Terraform plan, reconfigure the `juju` provider to point to the destination controller (we recommend the method where you configure the provider using static credentials). You can verify your configuration changes by running `terraform plan` and noticing no change: Terraform merely compares the plan to what it finds in your deployment -- if model migration with `juju` has been successful, it should detect no change.
+
+
+> See more: [How to use the client](/t/1083#heading--use-the-client)
+[/tab]
+
+[tab version="python libjuju"]
+
+[/tab]
+[/tabs]
+
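+
+Putting the above together, a minimal migration with the `juju` client might look like this (the model and controller names are hypothetical):
+
+```text
+juju migrate mymodel target-controller
+juju status -m target-controller:mymodel
+```
+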

Destroy a model

+> See also: [Removing things](/t/1063)
+
+[tabs]
+[tab version="juju"]
+
+To remove a model, along with any associated machines and applications, use the `destroy-model` command followed by the name of the model:
+
+```text
+juju destroy-model
+```
+
+The command has a variety of flags that you can use to skip the confirmation, to rush through the destruction without waiting for each step to complete, to release or destroy any persistent storage on the model, etc., or even to force destroy the model, ignoring any errors (not recommended as it might leave behind unresolved issues).
+
+> See more: [`juju destroy-model`](/t/10190)
+
+[/tab]
+
+[tab version="terraform juju"]
+To destroy a model, remove its resource definition from your Terraform plan.
+
+> See more: [`juju_model` (resource)](https://registry.terraform.io/providers/juju/juju/latest/docs/resources/model)
+
+[/tab]
+
+[tab version="python libjuju"]
+
+To destroy a model, with a connected controller object, call the `Controller.destroy_model()` function. For example:
+
+```python
+await controller.destroy_model("test-model")
+```
+
+> See more: [`Controller.destroy_model()`](https://pythonlibjuju.readthedocs.io/en/latest/api/juju.controller.html#juju.controller.Controller.destroy_model)
+
+[/tab]
+[/tabs]
+
+
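+
+For example, with the `juju` client, a model named `test-model` (hypothetical) can be removed together with any persistent storage it still holds:
+
+```text
+juju destroy-model test-model --destroy-storage
+```
+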
+ +**Contributors:** @aflynn, @awnns, @barrettj12, @cderici, @hmlanigan, @pedroleaoc, @pmatulis, @serdarvural80, @timclicks, @tmihoc + +------------------------- + +brettmilford | 2019-10-14 22:58:17 UTC | #2 + +The paragraph before "Model management tasks" appears to be incomplete. + +------------------------- + +timClicks | 2019-10-14 23:08:55 UTC | #3 + +Thanks for reporting this. I'll fix it right away. + +[edit: I've made some changes to the text to flesh out three use cases] + +------------------------- + +pedroleaoc | 2022-04-07 09:26:03 UTC | #4 + + + +------------------------- + +pedroleaoc | 2022-10-14 11:32:44 UTC | #5 + + + +------------------------- + +axino | 2023-04-26 06:50:53 UTC | #6 + +``` +juju migration-agent-wait-time=30m +``` +This is missing `controller-config` after `juju` + +------------------------- + +barrettj12 | 2023-04-27 03:58:56 UTC | #7 + +Fixed. For future reference, this post is a wiki, so you should be able to make these edits yourself :) + +------------------------- + +erik-lonroth | 2023-10-26 07:42:41 UTC | #8 + +What can be done to manage situations where users are not available in destination controller or that the destination controller has a different identity provider? + +``` +$ juju migrate my-example-model destination-controller +ERROR cannot initiate migration as external users have been granted access to the model +and the two controllers have different identity provider configurations. To resolve +this issue you can remove the following users from the current model: + - erik@external + - gustav@external +``` + +``` +$ juju whoami +Controller: jaas +Model: my-example-model +User: erik@external +``` + +In this above example, the users are suffixed with the "@external" thing coming from the candid/ubuntu-one identity provider, which wouldn't be available in my destionation-controller. + +In the source controller (jaas) I can't operate as "admin" either to remove the users altogether, so I'm clueless as how to make a migration in this scenario. + +**[UPDATE]** + +So, I discovered that I have to use the USSO (Ubuntu one) external identity provider to be able to migrate from JAAS. That implies that the remote controller needs to be bootstrapped to use the external identity provider that JAAS uses also: + +E.g. my new controller needs to be bootstrapped with: + +`juju bootstrap aws/eu-north-1 my-controller.example.com --config identity-url="https://api.jujucharms.com/identity"` + +This has a few if-and-but:s but generally would allow you to migrate and keep the users. + +Unfortunately - a already deployed controller will not be possible to retro-fit as far as I know. So, yeah... + +**[UPDATE]** + +An important note is that you might need to be "admin" user on the destination controller if you get this error when running *juju migrate* + +`ERROR connect to target controller: invalid request - expected local user (unauthorized access)` + +After all this, I managed to migrate. + +@alesstimec @emcp + +------------------------- + +samiwasenius | 2024-05-08 06:58:14 UTC | #9 + +Hi, + +``` +juju migrate +``` + +This does not work in k8s if you have units in terminated stage. Only solution so far is to re-deploy the whole application since you cannot remove individual units. + +``` +$ juju remove-unit jupyter-ui/23 +ERROR k8s models do not support removing named units. +Instead specify an application with --num-units. +``` + + +So, if you have several of these, it's lot of work. 
Otherwise the migration stops here: + +``` +ERROR source prechecks failed: unit jupyter-ui/23 is dying +15:26:51 DEBUG cmd supercommand.go:537 error stack: +source prechecks failed: unit jupyter-ui/23 is dying +``` +Cheers, +Sami + +------------------------- + diff --git a/tmp/t/1156.md b/tmp/t/1156.md new file mode 100644 index 000000000..f12c22709 --- /dev/null +++ b/tmp/t/1156.md @@ -0,0 +1,574 @@ +system | 2024-06-11 16:17:51 UTC | #1 + +> See also: [User](/t/6186) + +**Contents:** +- [Add a user](#heading--add-a-user) +- [View all the known users](#heading--view-all-the-known-users) +- [View details about a user](#heading--view-details-about-a-user) +- [View details about the current user](#heading--view-details-about-the-current-user) +- [Manage a user's access level](#heading--manage-a-users-access-level) +- [Manager a user's login details](#heading--manager-a-users-login-details) +- [Manage a user's login status](#heading--manage-a-users-login-status) +- [Manage a user's enabled status](#heading--manage-a-users-enabled-status) +- [Remove a user](#heading--remove-a-user) + + + +

Add a user

+ +[tabs] +[tab version="juju"] + +[note type=positive] +**If you're the controller creator:**
Juju has already set up a user for you. Your username is `admin` and your access level is that of controller `superuser`. Run `juju logout` to be prompted to set up a password. Use `juju change-user-password` to set the password. +[/note] + +To add a user to a controller, run the `add-user` command followed by the username you want to assign to this user. For example: + +```text +juju add-user alex +```` + +This will create a user with username 'alex' and a controller `login` access level. + +> See more: [User access levels](/t/6864) + +It will also print a line of code that you must give this user to run using their Juju client -- this will register the controller with their client and also prompt them to set up a password for the user. + +------- +[details=Example user setup] +Admin adding a new user 'alex' to the controller: + +```text +# Add a user named `alex`: +$ juju add-user alex +User "alex" added +Please send this command to alex: + juju register MFUTBGFsZXgwFRMTMTAuMTM2LjEzNi4xOToxNzA3MAQghBj6RLW5VgmCSWsAesRm5unETluNu1-FczN9oVfNGuYTFGxvY2FsaG9zdC1jb250cm9sbGVy + +"alex" has not been granted access to any models. You can use "juju grant" to grant access. +``` + +New user 'alex' accessing the controller: + +```text +$ juju register MFUTBGFsZXgwFRMTMTAuMTM2LjEzNi4xOToxNzA3MAQghBj6RLW5VgmCSWsAesRm5unETluNu1-FczN9oVfNGuYTFGxvY2FsaG9zdC1jb250cm9sbGVy +Enter a new password: ******** +Confirm password: ******** +Enter a name for this controller [localhost-controller]: localhost-controller +Initial password successfully set for alex. + +Welcome, alex. You are now logged into "localhost-controller". + +There are no models available. You can add models with +"juju add-model", or you can ask an administrator or owner +of a model to grant access to that model with "juju grant". + +``` +[/details] + +----- + +[note type="caution"] +Controller registration (and any other Juju operations that involves communication between a client and a controller) requires that the client be able to contact the controller over the network on TCP port 17070. In particular, if using a LXD-based cloud, network routes need to be in place (i.e. to contact the controller LXD container the client traffic must be routed through the LXD host). +[/note] + +> See more: [`juju add-user`](/t/10193),[How to register a private controller](/t/1111#heading--register-a-controller) + +[/tab] + +[tab version="terraform juju"] +To add a user to a controller, in your Terraform plan add a `juju_user` resource, specifying a label, a name, and a password. For example: + +```terraform +resource "juju_user" "alex" { + name = "alex" + password = "alexsupersecretpassword" + +} +``` + +> See more: [`juju_user` (resource)](https://registry.terraform.io/providers/juju/juju/latest/docs/resources/user) +[/tab] + +[tab version="python libjuju"] +To add a user to a controller, on a connected Controller object, use the `add_user()` method. + +```python +await my_controller.add_user("alex") +``` + +> See more: [`add_user()`](https://pythonlibjuju.readthedocs.io/en/latest/api/juju.controller.html#juju.controller.Controller.add_user) +[/tab] +[/tabs] + +
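+
+A typical follow-up, hinted at in the `add-user` output above, is to grant the new user access to a model and then check the result; the model name below is hypothetical:
+
+```text
+juju grant alex read mymodel
+juju show-user alex
+```
+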

View all the known users

+ +[tabs] +[tab version="juju"] + +To view a list of all the users known (i.e., allowed to log in) to the current controller, run the `users` command: + + +```text +juju users +``` + +The command also has flags that will allow you to specify a different controller, an output file, an output format, whether to print the full timestamp for connection times, etc. + +> See more: [`juju users`](/t/10175) + +[/tab] + +[tab version="terraform juju"] +The `terraform juju` client does not support this. Please use the `juju` client. +[/tab] + +[tab version="python libjuju"] +To view a list of all the users known (i.e., allowed to log in) to a controller, on a connected Controller object, use the `get_users()` method. + +```python +await my_controller.get_users() +``` + +> See more: [`get_users()`](https://pythonlibjuju.readthedocs.io/en/latest/api/juju.controller.html#juju.controller.Controller.get_users) +[/tab] +[/tabs] + +
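+
+For instance, with the `juju` client (controller name hypothetical), you can target a specific controller or include disabled users in the listing:
+
+```text
+juju users -c localhost-controller --format yaml
+juju users --all
+```
+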

View details about a user

+ +[tabs] +[tab version="juju"] + +To view details about a specific user, run the `show-user` command followed by the name of the user. For example: + +```text +juju show-user alice +``` + +This will display the user's username, display name (if available), access level, creation date, and last connection time, in a YAML format. + + +------------ +[details=Expand to see a sample output for user 'admin'] +```text +user-name: admin +display-name: admin +access: superuser +date-created: 8 minutes ago +last-connection: just now +``` +[/details] +----- + +> See more: [`juju show-user`](/t/1830) + +[/tab] + +[tab version="terraform juju"] +The `terraform juju` client does not support this. Please use the `juju` client. +[/tab] + +[tab version="python libjuju"] +To view details about a specific user, on a connected Controller, use the `get_user()` method to retrieve a User object that encapsulates everything about that user. Using that object, you can access all the details (via the object properties) for that user. + +```python +user_object = await my_controller.get_user("alice") +# Then we can access all the properties to view details +print(user_object.display_name) +print(user_object.access) +print(user_object.date_created) +print(user_object.last_connection) +``` + +> See more: [`get_user()`](https://pythonlibjuju.readthedocs.io/en/latest/api/juju.controller.html#juju.controller.Controller.get_user), [User (module)](https://pythonlibjuju.readthedocs.io/en/latest/api/juju.user.html#juju.user.User) +[/tab] +[/tabs] + +

View details about the current user

+ +[tabs] +[tab version="juju"] + +To see details about the current user, run the `whoami` command: + +```text +juju whoami +``` + +This will print the current controller, model, and user username. + + +------ +[details=Expand to see a sample output] +```text +Controller: microk8s-controller +Model: +User: admin +``` +[/details] +----- + +> See more: [`juju whoami`](/t/10148) + +[/tab] + +[tab version="terraform juju"] +The `terraform juju` client does not support this. Please use the `juju` client. +[/tab] + +[tab version="python libjuju"] +To see details about the current user, on a connected Controller, use the `get_current_user()` method to retrieve a User object that encapsulates everything about the current user. Using that object, you can access all the details (via the object properties) for that user. + +```python +user_object = await my_controller.get_current_user() +# Then we can access all the properties to view details +print(user_object.display_name) +print(user_object.access) +print(user_object.date_created) +print(user_object.last_connection) +``` + +> See more: [`get_current_user()`](https://pythonlibjuju.readthedocs.io/en/latest/api/juju.controller.html#juju.controller.Controller.get_current_user), [User (module)](https://pythonlibjuju.readthedocs.io/en/latest/api/juju.user.html#juju.user.User) +[/tab] +[/tabs] + +

Manage a user's access level

+> See also: [User access levels](/t/6864) + +[tabs] +[tab version="juju"] + +The procedure for how to control a user's access level depends on whether you want to grant access at the level of the controller, model, application, or application offer or rather at the level of a cloud. + +[note type=information] +This division doesn't currently align perfectly with the scope hierarchy, which is rather controller > cloud > model > application > offer (because the cloud scope is designed as a restriction on the controller scope for cases where multiple clouds are managed via the same controller). +[/note] + + +- [Manage access at the controller, model, application, or offer level](#heading--manage-access-at-the-controller-model-application-or-offer-level) +- [Manage access at the cloud level](#heading--manage-access-at-the-cloud-level) + +

Manage access at the controller, model, application, or offer level

+ +**Grant access.** To grant a user access at the controller, model, application, or offer level, run the `grant` command, specifying the user, applicable desired access level, and the target controller, model, application, or offer. For example: + +```text +juju grant jim write mymodel +``` + +The command also has a flag that allows you to specify a different controller to operate in. + +> See more: [`juju grant`](/t/10196) + +**Revoke access.** To revoke a user's access at the controller, model, application, or offer level, run the `revoke` command, specifying the user, access level to be revoked, and the controller, model, application, or offer to be revoked from. For example: + +```text +juju revoke joe read mymodel +``` + +The command also has a flag that allows you to specify a different controller to operate in. + +> See more: [`juju revoke`](/t/10077) + + +
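+
+The same pattern applies at the other scopes; for example (user names and offer URL hypothetical), controller access is granted without naming a target, and access to an application offer is granted against its offer URL:
+
+```text
+juju grant jim superuser
+juju grant joe consume admin/default.hosted-mysql
+```
+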

Manage access at the cloud level

+ +**Grant access.** To grant a user's access at the cloud level, run the `grant-cloud` command followed by the name of the user, the access level, and the name of the cloud. For example: + +```text +juju grant-cloud joe add-model fluffy +``` + +> See more: [`juju grant-cloud`](/t/10164) + +**Revoke access.** To revoke a user's access at the cloud level, run the `revoke-cloud` command followed by the name of the user, the access level to be revoked, and the name of the cloud. For example: + +```text +juju revoke-cloud joe add-model fluffy +``` + +> See more: [`juju revoke-cloud`](/t/10104) + +[/tab] + +[tab version="terraform juju"] +With the `terraform juju` client you can manage user access only at the model level; for anything else, please use the `juju` client. + +To grant a user access to a model, in your Terraform plan add a `juju_access_model` resource, specifying the model, the access level, and the user(s) to which you want to grant access. For example: + +```terraform +resource "juju_access_model" "this" { + model = juju_model.dev.name + access = "write" + users = [juju_user.dev.name, juju_user.qa.name] +} +``` + +> See more: [`juju_access_model`](https://registry.terraform.io/providers/juju/juju/latest/docs/resources/access_model) +[/tab] + +[tab version="python libjuju"] + +To manage a user's access to a controller, a model, or an offer, on a User object, use the `grant()` and `revoke()` methods to grant or revoke a certain access level to a user. + +```python +# grant a superuser access to the controller (that the user is on) +await user_object.grant('superuser') + +# grant user the access to see a model +await user_object.grant("read", model_name="test-model") + +# revoke ‘read’ (and ‘write’) access from user for application offer ‘fred/prod.hosted-mysql’: +await user_object.revoke("read", offer_url="fred/prod.hosted-mysql") +``` + +> See more: [`grant()`](https://pythonlibjuju.readthedocs.io/en/latest/api/juju.user.html#juju.user.User.grant), [`revoke()`](https://pythonlibjuju.readthedocs.io/en/latest/api/juju.user.html#juju.user.User.revoke), [User (module)](https://pythonlibjuju.readthedocs.io/en/latest/api/juju.user.html#juju.user.User) + +[/tab] +[/tabs] + +

Manage a user's login details

+ +[tabs] +[tab version="juju"] + +**Set a password.** The procedure for how to set a password depends on whether you are the controller creator or rather some other user. + +- To set a password as a controller creator user ('admin'), run the `change-user-password` command, optionally followed by your username, 'admin'. + +```text +juju change-user-password +``` + +This will prompt you to type, and then re-type, your desired password. + +> See more: [`juju change-user-password`](/t/10118) + + +- To set a password as a non-controller-creator user, follow the prompt you get when registering the controller via the `register` command. + +> See more: [How to register a controller](/t/1111#heading--register-a-controller) + +**Change a password.** To change the current user's password, run the `change-user-password` command: + +```text +juju change-user-password +``` + +This will prompt you to type, and then re-type, your desired password. + +The command also allows an optional username argument, and flags, allowing an admin to change / reset the password for another user. + +> See more: [`juju change-user-password`](/t/10118) + +[/tab] + +[tab version="terraform juju"] +To set or change a user's password, in your Terraform plan add, in the relevant `juju_user` resource definition, change the `password` attribute to the desired value. For example: + +```terraform +resource "juju_user" "alex" { + name = "alex" + password = "alexnewsupersecretpassword" + +} +``` + +> See more: [`juju_user`](https://registry.terraform.io/providers/juju/juju/latest/docs/resources/user#password) +[/tab] + +[tab version="python libjuju"] + +To set or change a user's password, on a User object, use the `set_password()` method. + +```python +await user_object.set_password('123') +``` + +> See more: [`set_password()`](https://pythonlibjuju.readthedocs.io/en/latest/api/juju.user.html#juju.user.User.set_password), [User (module)](https://pythonlibjuju.readthedocs.io/en/latest/api/juju.user.html#juju.user.User) + +[/tab] +[/tabs] + +

Manage a user's login status

+ +[tabs] +[tab version="juju"] + +**Log in.** + +[note type=information] +**If you're the controller creator:**
You've already been logged in as the `admin` user. To verify, run `juju whoami` or `juju show-user admin`; to set a password, run `juju change-user-password` to set a password; to log out, run `juju logout`. +[/note] + +[note type=information] +**If you've just registered an external controller with your client (via `juju register`):**
You're already logged in. Run `juju whoami` or `juju show-user ` to view your user details. +[/note] + +To log in as a user on the current controller, run the `login` command, using the `-u` flag to specify the user you want to log in as. For example: + +```text +juju login -u alice +``` + +This will prompt you to enter the password. + +The command also has flags that allow you to specify a controller, etc. + +> See more: [`juju login`](/t/10157) + +**Log out.** + +[note type=information] +**If you're the controller creator, and you haven't set a password yet:**
You will be prompted to set a password. Make sure to set it before logging out. +[/note] + +To log a user out of the current controller, run the `logout` command: + +```text +juju logout +``` + +> See more: [`juju logout`](/t/10183) + +[/tab] + +[tab version="terraform juju"] +The `terraform juju` client does not support this. Please use the `juju` client. +[/tab] + +[tab version="python libjuju"] +The `python-libjuju` client does not currently support this. Please use the `juju` client. +[/tab] +[/tabs] + +

Manage a user's enabled status

+
+[tabs]
+[tab version="juju"]
+
+To disable a user on the current controller, run the `disable-user` command followed by the name of the user. For example:
+
+```text
+juju disable-user mike
+```
+
+> See more: [`juju disable-user`](/t/10198)
+
+[note type=positive]
+**To view disabled users in the output of `juju users`:** Use the `--all` flag.
+[/note]
+
+To re-enable a disabled user on a controller, run the `enable-user` command followed by the name of the user. For example:
+
+```text
+juju enable-user mike
+```
+
+> See more: [`juju enable-user`](/t/10241)
+
+[/tab]
+
+[tab version="terraform juju"]
+The `terraform juju` client does not support this. Please use the `juju` client.
+[/tab]
+
+[tab version="python libjuju"]
+To enable or disable a user, on a User object, use the `enable()` and `disable()` methods.
+
+```python
+await user_object.enable()
+
+await user_object.disable()
+```
+
+You can also check if a user is enabled or disabled using the `enabled` and `disabled` properties on the User object.
+
+```python
+# re-enable a disabled user
+if user_object.disabled:
+    await user_object.enable()
+```
+
+> See more: [`enable()`](https://pythonlibjuju.readthedocs.io/en/latest/api/juju.user.html#juju.user.User.enable), [`disable()`](https://pythonlibjuju.readthedocs.io/en/latest/api/juju.user.html#juju.user.User.disable), [User (module)](https://pythonlibjuju.readthedocs.io/en/latest/api/juju.user.html#juju.user.User)
+[/tab]
+[/tabs]
+
+

Remove a user

+ +[tabs] +[tab version="juju"] + +To remove a user from the current controller, run the `remove-user` command followed by the name of the user. For example: + +```text +juju remove-user bob +``` + +This will prompt you to confirm, and then proceed to remove. + +The command also has flags that allow you to specify a different controller, skip the confirmation, etc. + +> See more: [`juju remove-user`](/t/10130) + +[/tab] + +[tab version="terraform juju"] +To remove a user, in your Terraform plan remove its resource definition. + +> See more: [`juju_user` (resource)](https://registry.terraform.io/providers/juju/juju/latest/docs/resources/user) + +[/tab] + +[tab version="python libjuju"] + +To remove a user, on a connected Controller object, use the `remove_user()` method. + +```python +await my_controller.remove_user("bob") +``` + +> See more: [`remove_user()`](https://pythonlibjuju.readthedocs.io/en/latest/api/juju.controller.html#juju.controller.Controller.remove_user), [User (module)](https://pythonlibjuju.readthedocs.io/en/latest/api/juju.user.html#juju.user.User) + +[/tab] +[/tabs] + +
+ +> **Contributors:** @cderici, @hmlanigan, @pedroleaoc, @pmatulis, @timclicks, @tmihoc + +------------------------- + +addyess | 2020-09-14 19:24:06 UTC | #2 + +I would love to request a permission level of `status` in addition to the current `read`, `write`, and `admin` permissions. A `status` level permission would be able to see the status of the cluster without being able to read the configuration of the state of the model. This would be beneficial in environments I'd wish a user to see the relations of the charms and the status of the applications without having to be able to see configuration data which could contain secrets/keys private to those applications. If the user can SEE the config, in many cases they can also access the underlying applications and change them. If there was a status only permission, juju could manage that API. I'm not familiar with the UAC on the juju api -- so i'd be happy with some kinda of admin managed permissions of groups/users and permissions to certain APIs. + +[Link to Bug](https://bugs.launchpad.net/juju/+bug/1895326) + +------------------------- + +aluria | 2021-02-17 07:38:05 UTC | #3 + +Hi, + +The last couple of links (Multi-user basic setup, Multi-user external setup) are broken. They point to [1] and [2]. + +1. https://juju.is/t/multi-user-basic-setup-tutorial/1195 +2. https://juju.is/t/multi-user-external-setup-tutorial/1196 + +------------------------- + +pedroleaoc | 2022-04-07 09:25:09 UTC | #4 + + + +------------------------- + +pedroleaoc | 2022-10-14 11:32:17 UTC | #5 + + + +------------------------- + diff --git a/tmp/t/1157.md b/tmp/t/1157.md new file mode 100644 index 000000000..51e7657bb --- /dev/null +++ b/tmp/t/1157.md @@ -0,0 +1,175 @@ +system | 2024-09-10 13:22:09 UTC | #1 + +> See also: [How to manage spaces](/t/6664) + + + +A Juju **(network) space** is a logical grouping of [subnets](/t/6234) that can communicate with one another. + +A space is used to help segment network traffic for the purpose of: +* Network performance +* Security +* Controlling the scope of regulatory compliance + + +**Contents:** + +- [Spaces as constraints and bindings](#heading--spaces-as-constraints-and-bindings) +- [Support for spaces in Juju providers](#heading--support-for-spaces-in-juju-providers) + - [Spaces inherited from the substrate](#heading--spaces-inherited-from-the-substrate) + - [MAAS](#heading--maas) + - [Subnets inherited from the substrate](#heading--subnets-inherited-from-the-substrate) + - [EC2](#heading--ec2) + - [OpenStack and Azure](#heading--openstack-and-azure) + - [LXD](#heading--lxd) + - [Subnets discovered progressively](#heading--subnets-discovered-progressively) + - [Manual](#heading--manual) + +

Spaces as constraints and bindings

+ +Spaces can be specified as [constraints](/t/6184)---to determine what subnets a machine is connected to---or as [bindings](/t/6488)---to determine the subnets used by application relations. + +A binding associates an [application endpoint](/t/5462) with a space. This restricts traffic for the endpoint to the subnets in the space. By default, endpoints are bound to the space specified in the `default-space` model configuration value. The name of the default space is "alpha". + +The concepts of space and application end-point binding can be depicted as follows: + +![space|690x431](upload://yffeZl5DxwcSoLr3i6DonE6W3s7.png) + +Constraints and bindings affect application deployment and machine provisioning as well as the subnets a machine can talk to. + + +Endpoint bindings can be specified during deployment with `juju deploy --bind` or changed after deployment using the `juju bind` command. + +
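+
+As an illustration (application, endpoint, and space names hypothetical), an application can be deployed with an endpoint bound to a space and its machines constrained to that space, and a binding can be changed afterwards:
+
+```text
+juju deploy myapp --bind "db=internal-space" --constraints spaces=internal-space
+juju bind myapp db=public-space
+```
+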

Support for spaces in Juju providers

+ + +Support for spaces by the different Juju providers falls into one of three cases. + + + +

Spaces inherited from the substrate

+ + +This is the case for MAAS, as described below. + +

MAAS

+ +The concept of spaces is native to MAAS and its API can be used to modify the space/subnet topology. As such, Juju does not permit editing of spaces in a MAAS model. MAAS spaces and subnets are read and loaded into Juju when a new model is created. + +If spaces or subnets are changed in MAAS, they can be reloaded into Juju via Juju's `reload-spaces` command. + +[note type=information] +The `reload-spaces` command does not currently pull in all information. This is being worked upon. See [LP #1747998](https://bugs.launchpad.net/juju/+bug/1747998). +[/note] + +For other providers, `reload-spaces` will fall back to refreshing the known subnets if subnet discovery is supported. One scenario for this usage would be adding a subnet to an AWS VPC that Juju is using, and then issuing the `reload-spaces` command so that the new subnet is available for association with a Juju space. + + +
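+
+For example, after the space/subnet topology has changed on the substrate, the model's view can be refreshed and then inspected:
+
+```text
+juju reload-spaces
+juju spaces
+juju subnets
+```
+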

Subnets inherited from the substrate

+ +This is the case for EC2, OpenStack and Azure, and LXD. Inherited subnets are then grouped into spaces at the discretion of the Juju administrator. + +

EC2

+ +Machines on Amazon EC2 are provisioned with a single network device. At this time, specifying multiple space constraints and/or bindings will result in selection of a *single intersecting* space in order to provision the machine. + +

OpenStack and Azure

+ +The OpenStack and Azure providers support multiple network devices. Supplying multiple space constraints and/or bindings will provision machines with NICs in subnets representing the *union* of specified spaces. + +

LXD

+ +LXD automatically detects any subnets belonging to bridge networks that it has access to. It is up to the Juju user to define spaces using these subnets. + +
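+
+For instance (space name and CIDR hypothetical), a detected subnet can be grouped into a new space with:
+
+```text
+juju add-space internal 10.0.8.0/24
+```
+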

Subnets discovered progressively

+ +This is the case for the Manual provider, as described below. + +

Manual

+ +For the Manual provider, space support differs somewhat from other providers. The `reload-spaces` command does not discover subnets. Instead, each time a manual machine is provisioned, its discovered network devices are used to update Juju's known subnet list. + +Accordingly, the machines to be used in a manual provider must be provisioned by Juju before their subnets can be grouped into spaces. When provisioning a machine results in discovery of a new subnet, that subnet will reside in the _alpha_ space. + + + + + + + + + + + + + + +------------------------- + +manadart | 2020-07-31 11:58:25 UTC | #4 + +I've just given this page a refresh, but I couldn't find a concise definition of an application "endpoint" in the docs, which is a deficiency. We should create such a doc and include a link in the bindings section. + +------------------------- + +pmatulis | 2020-07-31 13:55:47 UTC | #5 + +@manadart This page has it. + + https://discourse.charmhub.io/t/concepts-and-terms/1144 + +------------------------- + +serdarvural80 | 2022-01-27 12:19:48 UTC | #6 + +[quote="system, post:1, topic:1157"] +[application endpoint](/t/concepts-and-terms/1144#heading--endpoint). +[/quote] + +The link takes us to "Basic juju concepts" page. Would be great to link to a page where an "application end-point" is specifically defined. There's a mention of an application end-point here: https://juju.is/docs/olm/relations, though perhaps it could also be explicitly defined either there or on a separate page? + +------------------------- + +tmihoc | 2022-01-27 15:11:27 UTC | #7 + +We do have this doc on endpoints: https://juju.is/docs/olm/endpoints . I'll add a link to it at the top of this doc. + +------------------------- + +pedroleaoc | 2022-04-07 08:33:15 UTC | #8 + + + +------------------------- + +pedroleaoc | 2022-10-14 11:31:30 UTC | #9 + + + +------------------------- + diff --git a/tmp/t/1158.md b/tmp/t/1158.md new file mode 100644 index 000000000..1c1403f57 --- /dev/null +++ b/tmp/t/1158.md @@ -0,0 +1,78 @@ +system | 2024-10-29 14:41:09 UTC | #1 + +> See also: [How to manage charms or bundles](/t/11351) +> +> See more: [SDK | Bundle](https://juju.is/docs/sdk/bundle) + +In Juju, a **bundle** is a collection of [charms](/t/5457) which have been carefully combined and configured in order to automate a multi-charm solution. + +For example, a bundle may include the `wordpress` charm, the `mysql` charm, and the relation between them. + +The operations are transparent to Juju and so the deployment can continue to be managed by Juju as if everything was performed manually (what you see in `juju status` is applications, relations, etc.; that is, not the bundle entity, but its contents). + +Bundles can be of two kinds, **regular** and **overlay**. + +- An **overlay bundle** is a local bundle you pass to `juju deploy ` via `--overlay .yaml` if you want to customise an upstream charm / bundle (usually the latter, also known as a **base bundle**) for your own needs without modifying the existing charm / bundle directly. For example, you may wish to add extra applications, set custom machine constraints or modify the number of units being deployed. They are especially useful for keeping configuration local, while being able to make use of public bundles. It is also necessary in cases where certain bundle properties (e.g. offers, exposed endpoints) are deployment specific and can _only_ be provided by the bundle's user. +- A **regular bundle** is any bundle that is not an overlay. 
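+
+As a minimal sketch of the overlay mechanism described above (file, application, and option names hypothetical), an overlay that scales an application and tweaks one of its options could look like this:
+
+```text
+# my-overlay.yaml
+applications:
+  mediawiki:
+    num_units: 2
+    options:
+      name: My wiki
+```
+
+It would then be applied on top of a base bundle at deploy time:
+
+```text
+juju deploy ./bundle.yaml --overlay ./my-overlay.yaml
+```
+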
+ + +Whether regular or overlay, a bundle is fundamentally just a YAML file that contains all the applications, configurations, relations, etc., that you want your deployment to have. + +> See more: [File `.yaml`](/t/5679) + +------------------------- + +addyess | 2020-04-21 19:51:14 UTC | #2 + +i learned today about `include-file://` and `include-base64://`but i didn't exactly know under what circumstances i can use them in the bundle.yaml. + +Are they only allowed as a values of `config` key or can they be anywhere? Where's the reference manual for how these magic-key values are supposed to be used? + +this [blog](http://mitechie.com/blog/tag/bundle) did give me some hints to some of my questions + +------------------------- + +pmatulis | 2020-04-22 02:09:45 UTC | #3 + +[quote="addyess, post:2, topic:1158"] +Where’s the reference manual for how these magic-key values are supposed to be used? +[/quote] + +I don't know about a reference manual but these two methods can be used to include application configuration options ('config') or to contain binary data for a specific application configuration option (charm-specific) like 'cert'. There is a simple example on the [Charm bundles](/t/charm-bundles/1058) page. + +------------------------- + +pedroleaoc | 2022-04-07 09:25:34 UTC | #4 + + + +------------------------- + +pedroleaoc | 2022-10-14 11:32:36 UTC | #5 + + + +------------------------- + +crucible | 2024-08-29 20:37:01 UTC | #6 + +@tmihoc I know that bundles are being deprecated, but I think it could be useful to add a section (I could even do this) to this doc about overlays with some simple examples. I browsed some Canonical repos and noticed that ~90% of the overlay YAML files just make use of the `applications` and `relations` keys. I would follow a similar format to the [Juju config-yaml](https://juju.is/docs/sdk/config-yaml) doc. + +What are your thoughts? I am also happy to leave it as is. + +------------------------- + +tmihoc | 2024-08-30 06:52:31 UTC | #7 + +Hi @crucible , and thanks for raising this! We do already have something similar to [File `config.yaml`](https://juju.is/docs/sdk/config-yaml), namely, [File `.yaml`](https://juju.is/docs/sdk/bundle.yaml), but it's in the SDK docs; I've added a "See more" link to it at the bottom of the doc. (That file, at the very top, also has a link to examples from test files. The schema part itself could use a lot of work but, since bundles are being phased out, improving that content isn't a priority.) + +As a general note: Our current documentation architecture, which is split into Juju and Charm SDK, makes it hard sometimes to document concepts relevant to both. So far we've handled this by documenting shared concepts under Juju (they usually come from the Juju API anyway) and having the charm SDK docs presuppose Juju docs and linking to Juju docs via "See first" links. However, the approach is clunky (what happened here is an example of that) and potentially misleading: It suggests that the relation between what a charm+Juju user needs to know and what a charm author needs to know is subset-superset, whereas in reality it's partial overlap. I'm thinking of combining the Juju and the Charm SDK docs and having the how-to guides continue to reflect the charm author vs. user split but the reference combined, with all the shared entries being directly linked to from both, wherever relevant. Happy to chat further. 
+ +------------------------- + +crucible | 2024-08-30 15:33:10 UTC | #8 + +Thank you for clarifying the documentation architecture and I agree the low prio comment. I will link to the existing docs. + +------------------------- + diff --git a/tmp/t/116.md b/tmp/t/116.md new file mode 100644 index 000000000..9e9ee57dc --- /dev/null +++ b/tmp/t/116.md @@ -0,0 +1,82 @@ +thumper | 2024-08-22 13:13:16 UTC | #1 + +> See also: [Agent introspection](/t/117), [Logfile: /var/log/juju/machine-lock.log](/t/112) + +The `juju_machine_lock` introspection function was introduced in 2.3.9 and 2.4.2. + +This function actually calls into every agent on the machine to ask about the agent's view of the hook execution lock. Where the [machine-lock.log](/t/112) file shows the history of the machine lock, the introspection endpoint shows the current status of the lock, whether the agent holds the lock, or is waiting for the lock. + +During a deploy of `hadoop-kafka`, after the machine 0 has started, and is deploying the two units, we can see the following: + +``` +machine-0: + holder: none +unit-namenode-0: + holder: uniter (run install hook), holding 1m42s +unit-resourcemanager-0: + holder: none + waiting: + - uniter (run install hook), waiting 1m41s +``` +You can see that the `namenode/0` unit has the uniter worker holding the hook, and it is running the install hook, and at the time of executing the `juju_machine_lock` command it had been holding the lock for one minute and 42 seconds. + +You can additionally see that the `resourcemanager/0` unit is waiting to run its install hook. + +As the installation progresses, the subordinate units are deployed, and the output looks more like this: + +``` +machine-0: + holder: none +unit-ganglia-node-7: + holder: none + waiting: + - uniter (run install hook), waiting 1s +unit-ganglia-node-8: + holder: none +unit-namenode-0: + holder: uniter (run relation-joined (2; slave/0) hook), holding 1s +unit-resourcemanager-0: + holder: none + waiting: + - uniter (run relation-joined (1; namenode/0) hook), waiting 1s +unit-rsyslog-forwarder-ha-7: + holder: none + waiting: + - uniter (run install hook), waiting 1s +unit-rsyslog-forwarder-ha-8: + holder: none +``` + +When everything is idle, the output looks like this: + +``` +machine-0: + holder: none +unit-ganglia-node-7: + holder: none +unit-ganglia-node-8: + holder: none +unit-namenode-0: + holder: none +unit-resourcemanager-0: + holder: none +unit-rsyslog-forwarder-ha-7: + holder: none +unit-rsyslog-forwarder-ha-8: + holder: none +``` + +------------------------- + +pedroleaoc | 2022-04-07 09:25:43 UTC | #2 + + + +------------------------- + +pedroleaoc | 2022-10-14 11:32:37 UTC | #3 + + + +------------------------- + diff --git a/tmp/t/1162.md b/tmp/t/1162.md new file mode 100644 index 000000000..270b842f7 --- /dev/null +++ b/tmp/t/1162.md @@ -0,0 +1,184 @@ +system | 2022-04-27 06:10:43 UTC | #1 + +> See also: +> - [`juju` (the Juju CLI tool, the Juju client)](/t/5465) + + + + + +This document lists the environment variables that are available on the Juju client in order to change its default behaviour. 
+ +**Contents:** + +- [GOCOOKIES](#heading--gocookies) +- [JUJU_CONTROLLER](#heading--jujucontroller) +- [JUJU_DATA](#heading--jujudata) +- [JUJU_HOME (deprecated)](#heading--jujuhome-deprecated) +- [JUJU_REPOSITORY (deprecated)](#heading--jujurepository-deprecated) +- [JUJU_LOGGING_CONFIG](#heading--jujuloggingconfig) +- [JUJU_MODEL](#heading--jujumodel) +- [JUJU_DEV_FEATURE_FLAGS](#heading--jujudevfeatureflags) +- [JUJU_STARTUP_LOGGING_CONFIG](#heading--jujustartuploggingconfig) +- [JUJU_CLI_VERSION](#heading--jujucliversion) + + +

GOCOOKIES

+ + +The default location of the Go cookies file is `~/.go-cookies`. This variable can change that. + +Example: + +``` text +GOCOOKIES=/var/lib/landscape/juju-homes/1/.go-cookies +``` + +

JUJU_CONTROLLER

+ + +Used to specify the current Juju controller to use. This is overridden if the controller is specified on the command line using `-c CONTROLLER`. + +
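+
+For example (controller name hypothetical):
+
+```text
+export JUJU_CONTROLLER=localhost-controller
+juju models
+```
+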

JUJU_DATA

+ + +This sets the path where Juju will look for its configuration files. You do not need to set this - by default Juju follows XDG guidelines and on Linux systems it will use the path: + +``` text +~/.local/share/juju +``` + +

JUJU_HOME (deprecated)

+ +For versions of Juju prior to 2.0, this variable indicated the 'home' directory where Juju kept configuration and other data. + + JUJU_HOME=~/.juju + +

JUJU_REPOSITORY (deprecated)

+ +For versions prior to 2.0, this variable set a local charms directory that Juju would search when deploying an application. The equivalent `--repository=/path/to/charms` switch (with `juju deploy`) was also available. + +Both the environment variable and the switch are no longer functional in 2.x versions. + +

JUJU_LOGGING_CONFIG

+ + +This setting takes effect on an environment only at bootstrap time. In stable Juju releases, agents are started with logging set to WARNING, and units are set to INFO. Development releases are set to DEBUG globally. Post bootstrap, on a running environment you can change the logging options to be more or less verbose. For example: + + juju model-config logging-config="juju=DEBUG; unit=WARNING" + +
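+
+The environment variable itself takes the same syntax as `logging-config`; for example (values illustrative only):
+
+```text
+JUJU_LOGGING_CONFIG="juju=INFO; unit=DEBUG" juju bootstrap
+```
+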

JUJU_MODEL

+ + +Used to specify the current Juju model to use. This is overridden if the model is specified on the command line using `-m MODEL`. + +
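+
+For example (model name hypothetical):
+
+```text
+export JUJU_MODEL=mymodel
+juju status
+```
+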

JUJU_DEV_FEATURE_FLAGS

+ + +This setting takes effect on an environment only at bootstrap time. Unstable or pre-release features are enabled only when the feature flag is enabled prior to bootstrapping the environment. + + JUJU_DEV_FEATURE_FLAGS= juju bootstrap + +[note type="caution"] +Unforeseen and detrimental results can occur by enabling developmental features. Do not do so on production systems. +[/note] + +

JUJU_STARTUP_LOGGING_CONFIG

+ +This setting takes effect on an environment only at bootstrap time, and is used to set the verbosity of the bootstrap process. For example, to troubleshoot a failure bootstrapping during provider development, you can set the log level to TRACE. + + JUJU_STARTUP_LOGGING_CONFIG=TRACE juju bootstrap + +

JUJU_CLI_VERSION

+ +This allows you to change the behaviour of the command line interface (CLI) between major Juju releases and exists as a compatibility flag for those users wishing to enable the newer behaviour of the Juju CLI. As the CLI output and behaviour is stable between minor releases of Juju, setting JUJU_CLI_VERSION will enable developers and users to preview the newer behaviour of the CLI. + + export JUJU_CLI_VERSION=2 + juju status + + + +# Internal Use only + +These exist for developmental purposes only. + +

JUJU_DUMMY_DELAY

+ +

JUJU_NOTEST_MONGOJS

+ +------------------------- + +erik-lonroth | 2019-03-05 01:19:24 UTC | #2 + +[quote="system, post:1, topic:1162"] +JUJU_CHARM_DIR +[/quote] + +Isnt this depreceated ? JUJU_CHARM_DIR === > CHARM_DIR + +------------------------- + +schkovich | 2019-08-22 10:36:31 UTC | #3 + +[quote="system, post:1, topic:1162"] +LAYER_PATH=$JUJU_REPOSITORY/layers +[/quote] + +It's confusing `$JUJU_REPOSITORY` is marked as deprecated. Same applies to `$INTERFACE_PATH`. + +------------------------- + +pedroleaoc | 2022-04-07 09:26:02 UTC | #4 + + + +------------------------- + +swalladge | 2022-04-09 02:38:25 UTC | #5 + +I updated the Building section with the new names for layer and interface directories. The old names are deprecated. Note current output from `charm build` if the old names are used: + +```txt +build: DEPRECATED: INTERFACE_PATH environment variable; please use CHARM_INTERFACES_DIR instead +build: DEPRECATED: LAYER_PATH environment variable; please use CHARM_LAYERS_DIR instead +``` + +------------------------- + +pedroleaoc | 2022-10-14 11:30:58 UTC | #6 + + + +------------------------- + +emcp | 2023-04-12 20:20:37 UTC | #7 + +there's a doc page at https://juju.is/docs/sdk/hook-tool#heading--relation-get which references this post regarding `JUJU_REMOTE_UNIT` .. however I see no such text or extra information here about that.. + +> The environment variable [`JUJU_REMOTE_UNIT`](https://discourse.charmhub.io/t/juju-environment-variables/1162#heading--juju_remote_unit#heading--juju_remote_unit) stores the default remote unit. + +------------------------- + diff --git a/tmp/t/1163.md b/tmp/t/1163.md new file mode 100644 index 000000000..3e309705d --- /dev/null +++ b/tmp/t/1163.md @@ -0,0 +1,2282 @@ +system | 2024-08-18 21:46:39 UTC | #1 + +> See also: [Hook](/t/6464), [Ops](/t/5527) + + + +In Juju, a **hook tool (or 'hook command')** is a Bash script located in `/var/lib/juju/tools/unit--` that a charm uses to communicate with its Juju unit agent in response to a [hook](/t/6464). + +In the charm SDK, in [Ops](/t/5527), hook tools are accessed through Ops constructs, specifically, those constructs designed to be used in the definition of the event handlers associated with the Ops events that translate Juju [hooks](/t/6464). For example, when your charm calls `ops.Unit.is_leader`, in the background this calls `~/hooks/unit-name/leader-get`; its output is wrapped and returned as a Python `True/False` value. + + + +In Juju, you can use hook tools for troubleshooting. + +--- +[details=Example: Use `relation-get` to change relation data] + +```bash +# Get the relation ID + +$ juju show-unit synapse/0 + +... + - relation-id: 7 + endpoint: synapse-peers + related-endpoint: synapse-peers + application-data: + secret-id: secret://1234 + local-unit: + in-scope: true + + +# Check the output: +$ juju exec --unit synapse/0 "relation-get -r 7 --app secret-id synapse/0" +secret://1234 + +# Change the data: +juju exec --unit synapse/0 "relation-set -r 7 --app secret-id=something-else" + +# Check the output again to verify the change. +``` +[/details] +--- + + +## List of hook tools + +[note type=information] +This list replicates the output of `juju help hook-tool` and of `juju help-tool + + +### `action-fail` + +#### Usage + +```text +action-fail [""] +``` + +#### Summary + +Set action fail status with message. + +#### Details + +action-fail sets the fail state of the action with a given error message. Using +action-fail without a failure message will set a default message indicating a +problem with the action. 
+ + +#### Examples + +```bash +action-fail 'unable to contact remote service' +``` + + +### `action-get` + +#### Usage + +```text +action-get [options] [[.....]] +``` + +#### Summary +Get action parameters. + +#### Options + +``` +--format (= smart) + Specify output format (json|smart|yaml) +-o, --output (= "") + Specify an output file +``` + +#### Details + +action-get will print the value of the parameter at the given key, serialized +as YAML. If multiple keys are passed, action-get will recurse into the param +map as needed. + + + +#### Examples + + +```bash +TIMEOUT=$(action-get timeout) +``` + +### `action-log` + +#### Usage + +```text +action-log +``` + +#### Summary + +record a progress message for the current action + + +### `action-set` + + +#### Usage + +``` +action-set = [= ...] +``` + +#### Summary + +set action results + +#### Details + +action-set adds the given values to the results map of the Action. This map +is returned to the user after the completion of the Action. Keys must start +and end with lowercase alphanumeric, and contain only lowercase alphanumeric, +hyphens and periods. The following special keys are reserved for internal use: +"stdout", "stdout-encoding", "stderr", "stderr-encoding". + +Example usage: + +```text + action-set outfile.size=10G + action-set foo.bar=2 + action-set foo.baz.val=3 + action-set foo.bar.zab=4 + action-set foo.baz=1 +``` + + will yield: + +```text + outfile: + size: "10G" + foo: + bar: + zab: "4" + baz: "1" +``` + + +#### Examples + +```bash +action-set answer 42 +``` + + + +### `add-metric` + +> The `add-metric` hook tool may only be executed from the `collect-metrics` hook. + + +#### Usage + +```text +add-metric [options] key1=value1 [key2=value2 ...] +``` + +#### Summary + +Records a measurement which will be forwarded to the Juju controller. The same metric may not be collected twice in the same command. + +#### Options + +```text +-l, --labels (= "") + labels to be associated with metric values +``` + + +#### Examples + +```bash +add-metric metric1=value1 [metric2=value2 …] +``` + + + +### `application-version-set` + +#### Usage + +```text +application-version-set +``` + +#### Summary + +Specify which version of the application is deployed. This will be provided to users via `juju status`. + +#### Details + +application-version-set tells Juju which version of the application +software is running. This could be a package version number or some +other useful identifier, such as a Git hash, that indicates the +version of the deployed software. (It shouldn't be confused with the +charm revision.) The version set will be displayed in "juju status" +output for the application. + + +#### Examples + + +```bash +application-version-set 1.1.10 +``` + +### `close-port` + +#### Usage + +```text +close-port [options] [/] or -[/] or icmp +``` + +#### Summary + +Register a request to close a port or port range. + +#### Options + +```text +--endpoints (= "") + a comma-delimited list of application endpoints to target with this operation +--format (= "") + deprecated format flag +``` + +#### Details + +close-port registers a request to close the specified port or port range. + +By default, the specified port or port range will be closed for all defined +application endpoints. The --endpoints option can be used to constrain the +close request to a comma-delimited list of application endpoints. + + +`close-port` ensures a port, or port range, is not accessible from the public interface. 
+ + +#### Examples + +```bash +# Close single port +close-port 80 + +# Close a range of ports +close-port 9000-9999/udp + +# Disable ICMP +close-port icmp + +# Close a range of ports for a set of endpoints (since Juju 2.9) +close-port 80-90 --endpoints dmz,public +``` + + +### `config-get` + +#### Usage + +```text +config-get [options] [] +``` + +#### Summary + +Print application configuration. + +#### Options + +```text +-a, --all (= false) + print all keys +--format (= smart) + Specify output format (json|smart|yaml) +-o, --output (= "") + Specify an output file +``` + +#### Details + + + +`config-get` returns information about the application configuration (as defined by `config.yaml`). If called without arguments, it returns a dictionary containing all config settings that are either explicitly set, or which have a non-nil default value. If the `--all` flag is passed, it returns a dictionary containing all defined config settings including nil values (for those without defaults). If called with a single argument, it returns the value of that config key. Missing config keys are reported as nulls, and do not return an error. + + + +#### Examples + +```bash +INTERVAL=$(config-get interval) + +config-get --all +``` + +### `credential-get` + + +#### Usage + +```text +credential-get [options] +``` + +#### Summary + +Access cloud credentials. + +#### Options + +```text +--format (= smart) + Specify output format (json|smart|yaml) +-o, --output (= "") + Specify an output file +``` + +#### Details + +credential-get returns the cloud specification used by the unit's model. + + +### `goal-state` + +#### Usage + +```text +goal-state [options] +``` + +#### Summary + +Print the status of the charm's peers and related units. + +#### Options + +```text +--format (= yaml) + Specify output format (json|yaml) +-o, --output (= "") + Specify an output file +``` + +#### Details + +'goal-state' command will list the charm units and relations, specifying their status and their relations to other units in different charms. + + +`goal-state` queries information about charm deployment and returns it as structured data. + + + +`goal-state` provides: + +- the details of other peer units have been deployed and their status +- the details of remote units on the other end of each endpoint and their status + +The output will be a subset of that produced by the `juju status`. There will be output for sibling (peer) units and relation state per unit. + +The unit status values are the workload status of the (sibling) peer units. We also use a unit status value of dying when the unit's life becomes dying. Thus unit status is one of: + +`allocating` +`active` +`waiting` +`blocked` +`error` +`dying` + +The relation status values are determined per unit and depend on whether the unit has entered or left scope. The possible values are: + +- `joining` : a relation has been created, but no units are available. This occurs when the application on the other side of the relation is added to a model, but the machine hosting the first unit has not yet been provisioned. Calling `relation-set` will work correctly as that data will be passed through to the unit when it comes online, but `relation-get` will not provide any data. +- `joined` : the relation is active. A unit has entered scope and is accessible to this one. +- `broken` : unit has left, or is preparing to leave scope. Calling `relation-get` is not advised as the data will quickly out of date when the unit leaves. 
+- `suspended` : parent cross model relation is suspended +- `error`: an external error has been detected + +By reporting error state, the charm has a chance to determine that goal state may not be reached due to some external cause. As with status, we will report the time since the status changed to allow the charm to empirically guess that a peer may have become stuck if it has not yet reached active state. + + + +#### Examples + + +```bash +goal-state +``` + +### `is-leader` + +#### Usage + +```text +is-leader [options] +``` + +#### Summary + +Print application leadership status. + +#### Options + +```text +--format (= smart) + Specify output format (json|smart|yaml) +-o, --output (= "") + Specify an output file +``` + + +#### Details + +is-leader prints a boolean indicating whether the local unit is guaranteed to +be application leader for at least 30 seconds. If it fails, you should assume that +there is no such guarantee. + + +`is-leader` indicates whether the current unit is the application leader. + + +`is-leader`will write `"True"` to STDOUT and return 0 if the unit is currently leader and can be guaranteed to remain so for 30 seconds. + +Output can be expressed as `--format json` or `--format yaml` if desired. + + +#### Examples + + +```bash +LEADER=$(is-leader) +if [ "${LEADER}" == "True" ]; then + # Do something a leader would do +fi +``` + +### `juju-log` + +#### Usage + +```text +juju-log [options] +``` + +#### Summary + +Write a message to the juju log. + +#### Options + +```text +--debug (= false) + log at debug level +--format (= "") + deprecated format flag +-l, --log-level (= "INFO") + Send log message at the given level +``` + + +`juju-log` writes messages directly to the unit's log file. Valid levels are: INFO, WARN, ERROR, DEBUG + +#### Examples + + +```bash +juju-log -l 'WARN' Something has transpired +``` + + +### `juju-reboot` + + +#### Usage + +```text +juju-reboot [options] +``` + +#### Summary + +Reboot the host machine. + +#### Options + +```text +--now (= false) + reboot immediately, killing the invoking process +``` + +#### Details + +juju-reboot causes the host machine to reboot, after stopping all containers + hosted on the machine. + +An invocation without arguments will allow the current hook to complete, and +will only cause a reboot if the hook completes successfully. + +If the --now flag is passed, the current hook will terminate immediately, and +be restarted from scratch after reboot. This allows charm authors to write +hooks that need to reboot more than once in the course of installing software. + +The --now flag cannot terminate a debug-hooks session; hooks using --now should +be sure to terminate on unexpected errors, so as to guarantee expected behaviour +in all situations. + + +juju-reboot is not supported when running actions. + + +#### Examples + +```bash +# immediately reboot +juju-reboot --now + +# Reboot after current hook exits +juju-reboot +``` + +### `k8s-raw-get` + +#### Usage + +```text +k8s-raw-get +``` + +#### Summary + +Get k8s raw spec information. + +#### Details + +Gets configuration data used to set up k8s resources. + + +### `k8s-raw-set` + +#### Usage + +```text +k8s-raw-set [options] --file +``` + +#### Summary + +Set k8s raw spec information. + +#### Options + +```text +--file (= -) + file containing k8s raw spec +``` + +#### Details + +Sets configuration data in k8s raw format to use for k8s resources. +The spec applies to all units for the application. 
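+
+Since this section has no example, here is a minimal sketch: a hook applying a raw spec file shipped with the charm. The `k8s-raw.yaml` filename is illustrative; `JUJU_CHARM_DIR` is the charm directory exposed in the hook environment:
+
+```bash
+# Illustrative only: apply a raw Kubernetes spec file shipped in the charm directory.
+k8s-raw-set --file "$JUJU_CHARM_DIR/k8s-raw.yaml"
+```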
+ + +### `k8s-spec-get` + +#### Usage + +```text +k8s-spec-get +``` + +#### Summary + +Get k8s spec information. + +#### Details + +Gets configuration data used to set up k8s resources. + + +### `k8s-spec-set` + +#### Usage + +```text +k8s-spec-set [options] --file [--k8s-resources ] +``` + +#### Summary + +Set k8s spec information. + +#### Options + +```text +--file (= -) + file containing pod spec +--k8s-resources (= ) + file containing k8s specific resources not yet modelled by Juju +``` + +#### Details + +Sets configuration data to use for k8s resources. +The spec applies to all units for the application. + +### `leader-get` +> :warning: The functionality provided by leader data (`leader-get` and `leader-set`) is now being replaced by "application-level relation data". See [`relation-get`](#heading--relation-get) and [`relation-set`](#heading--relation-set). + +#### Usage + +```text +leader-get [options] [] +``` + +#### Summary + +Print application leadership settings. + +#### Options + +```text +--format (= smart) + Specify output format (json|smart|yaml) +-o, --output (= "") + Specify an output file +``` + +#### Details + +leader-get prints the value of a leadership setting specified by key. If no key +is given, or if the key is "-", all keys and values will be printed. + + +#### Examples: + +``` text +ADDRESSS=$(leader-get cluster-leader-address) +``` + + +### `leader-set` +> :warning: The functionality provided by leader data (`leader-get` and `leader-set`) is now being replaced by "application-level relation data". See [`relation-get`](#heading--relation-get) and [`relation-set`](#heading--relation-set). + + +#### Usage + +```text +leader-set = [...] +``` + +#### Summary + +Write application leadership settings. + +#### Details + +leader-set immediate writes the supplied key/value pairs to the controller, +which will then inform non-leader units of the change. It will fail if called +without arguments, or if called by a unit that is not currently application leader. + + +`leader-set` lets you distribute string key=value pairs to other units, but with the following differences: + +- there's only one leader-settings bucket per application (not one per unit) +- only the leader can write to the bucket +- only minions are informed of changes to the bucket +- changes are propagated instantly + +The instant propagation may be surprising, but it exists to satisfy the use case where shared data can be chosen by the leader at the very beginning of the install hook. + +It is strongly recommended that leader settings are always written as a self-consistent group `leader-set one=one two=two three=three`. + +#### Examples: + + +```bash +leader-set cluster-leader-address=10.0.0.123 +``` + +### `network-get` + +#### Usage + +```text +network-get [options] [--ingress-address] [--bind-address] [--egress-subnets] +``` + +#### Summary + +Get network config. + +#### Options + +```text +--bind-address (= false) + get the address for the binding on which the unit should listen +--egress-subnets (= false) + get the egress subnets for the binding +--format (= smart) + Specify output format (json|smart|yaml) +--ingress-address (= false) + get the ingress address for the binding +-o, --output (= "") + Specify an output file +--primary-address (= false) + (deprecated) get the primary address for the binding +-r, --relation (= ) + specify a relation by id +``` + +#### Details + +network-get returns the network config for a given binding name. 
By default +it returns the list of interfaces and associated addresses in the space for +the binding, as well as the ingress address for the binding. If defined, any +egress subnets are also returned. + +If one of the following flags are specified, just that value is returned. + +If more than one flag is specified, a map of values is returned. + +```text + --bind-address: the address the local unit should listen on to serve connections, as well + as the address that should be advertised to its peers. + --ingress-address: the address the local unit should advertise as being used for incoming connections. + --egress-subnets: subnets (in CIDR notation) from which traffic on this relation will originate. + +``` + + +`network-get` reports hostnames, IP addresses and CIDR blocks related to endpoint bindings. + + +By default it lists three pieces of address information: + +- binding address(es) +- ingress address(es) +- egress subnets + +See [Network primitives](/t/charm-network-primitives/1126) for in-depth coverage. + + + +### `open-port` +> **Requires Juju 3.1+ for Kubernetes charms** + +#### Usage + +```text +open-port [options] [/] or -[/] or icmp +``` + +#### Summary + +Register a request to open a port or port range. + +#### Options + +```text +--endpoints (= "") + a comma-delimited list of application endpoints to target with this operation +--format (= "") + deprecated format flag +``` + +#### Details + +`open-port` registers a port or range to open on the public-interface. + +By default, the specified port or port range will be opened for all defined +application endpoints. The --endpoints option can be used to constrain the +open request to a comma-delimited list of application endpoints. + +The behavior differs a little bit between machine charms and Kubernetes charms. + +**Machine charms.** On public clouds the port will only be open while the application is exposed. It accepts a single port or range of ports with an optional protocol, which may be `icmp`, `udp`, or `tcp`. `tcp` is the default. + +`open-port` will not have any effect if the application is not exposed, and may have a somewhat delayed effect even if it is. This operation is transactional, so changes will not be made unless the hook exits successfully. + +Prior to Juju 2.9, when charms requested a particular port range to be opened, Juju would automatically mark that port range as opened for **all** defined application endpoints. As of Juju 2.9, charms can constrain opened port ranges to a set of application endpoints by providing the `--endpoints` flag followed by a comma-delimited list of application endpoints. + +**Kubernetes charms.** The port will open directly regardless of whether the application is exposed or not. This connects to the fact that `juju expose` currently has no effect on sidecar charms. Additionally, it is currently not possible to designate a range of ports to open for Kubernetes charms; to open a range, you will have to run `open-port` multiple times. + + +#### Examples: + +Open port 80 to TCP traffic: + +```bash +open-port 80/tcp +``` +Open port 1234 to UDP traffic: + +```bash +open-port 1234/udp +``` + +Open a range of ports to UDP traffic: + +```bash +open-port 1000-2000/udp +``` + +Open a range of ports to TCP traffic for specific application endpoints (since Juju 2.9): + +```bash +open-port 1000-2000/tcp --endpoints dmz,monitoring +``` + +### `opened-ports` +> The opened-ports hook tool lists all the ports currently opened **by the running charm**. 
It does not, at the moment, include ports which may be opened by other charms co-hosted on the same machine [lp#1427770](https://bugs.launchpad.net/juju-core/+bug/1427770). + + +#### Usage + +```text +opened-ports [options] +``` + +#### Summary + +List all ports or port ranges opened by the unit. + +#### Options + +```text +--endpoints (= false) + display the list of target application endpoints for each port range +--format (= smart) + Specify output format (json|smart|yaml) +-o, --output (= "") + Specify an output file +``` + + +#### Details + +opened-ports lists all ports or port ranges opened by a unit. + +By default, the port range listing does not include information about the +application endpoints that each port range applies to. Each list entry is +formatted as / (e.g. "80/tcp") or -/ +(e.g. "8080-8088/udp"). + +If the --endpoints option is specified, each entry in the port list will be +augmented with a comma-delimited list of endpoints that the port range +applies to (e.g. "80/tcp (endpoint1, endpoint2)"). If a port range applies to +all endpoints, this will be indicated by the presence of a '*' character +(e.g. "80/tcp (*)"). + + + + +Opening ports is transactional (i.e. will take place on successfully exiting the current hook), and therefore `opened-ports` will not return any values for pending `open-port` operations run from within the same hook. + + +#### Examples: + + +``` text +opened-ports +``` + +Prior to Juju 2.9, when charms requested a particular port range to be opened, Juju would automatically mark that port range as opened for **all** defined application endpoints. As of Juju 2.9, charms can constrain opened port ranges to a set of application endpoints. To ensure backwards compatibility, `opened-ports` will, by default, display the unique set of opened port ranges for all endpoints. To list of opened port ranges grouped by application endpoint can be obtained by running `opened-ports --endpoints`. + +### `payload-register` + +#### Usage + +```text +payload-register [tags...] +``` + +#### Summary + +Register a charm payload with Juju. + +#### Details + +"payload-register" is used while a hook is running to let Juju know that a +payload has been started. The information used to start the payload must be +provided when "register" is run. + +The payload class must correspond to one of the payloads defined in +the charm's metadata.yaml. + + +An example fragment from `metadata.yaml`: + +``` yaml +payloads: + monitoring: + type: docker + kvm-guest: + type: kvm +``` + + +#### Examples: + +```bash +payload-register monitoring docker 0fcgaba +``` + + +### `payload-status-set` + +#### Usage + +```text +payload-status-set +``` + +#### Summary + +Update the status of a payload. + + +#### Details + +"payload-status-set" is used to update the current status of a registered payload. +The `` and `` provided must match a payload that has been previously +registered with juju using payload-register. The `` must be one of the +follow: `starting`, `started`, `stopping`, `stopped`. + +#### Examples: + +```bash +payload-status-set monitor abcd13asa32c starting +``` + + +### `payload-unregister` + +#### Usage + +```text +payload-unregister +``` + +#### Summary + +Stop tracking a payload. + +#### Details + +`payload-unregister` is used while a hook is running to let Juju know +that a payload has been manually stopped. The `` and `` provided +must match a payload that has been previously registered with juju using +`payload-register`. 
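+
+Taken together, the three payload tools cover a payload's whole lifecycle. A minimal sketch, assuming a hypothetical `monitoring` payload of type `docker` declared in `metadata.yaml`, with the container ID held in `$CONTAINER_ID`:
+
+```bash
+# Illustrative only: register a payload, report its state, then stop tracking it.
+payload-register monitoring docker "$CONTAINER_ID"
+payload-status-set monitoring "$CONTAINER_ID" started
+
+# ...later, when the workload container is being shut down:
+payload-status-set monitoring "$CONTAINER_ID" stopping
+payload-unregister monitoring "$CONTAINER_ID"
+```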
+ + +#### Examples: + +``` text +payload-unregister monitoring 0fcgaba +``` + + +### `pod-spec-get` + +#### Usage + +```text +pod-spec-get +``` + +#### Summary + +Get k8s spec information (deprecated). + +#### Details + +Gets configuration data used to set up k8s resources. + + +### `pod-spec-set` + +#### Usage + +```text +pod-spec-set [options] --file [--k8s-resources ] +``` + +#### Summary + +Set k8s spec information (deprecated). + +#### Options + +```text +--file (= -) + file containing pod spec +--k8s-resources (= ) + file containing k8s specific resources not yet modelled by Juju +``` + +#### Details + +Sets configuration data to use for k8s resources. +The spec applies to all units for the application. + + + +### `relation-get` + +#### Usage + +```text +relation-get [options] +``` + +#### Summary + +Get relation settings. + +#### Options + +```text +--app (= false) + Get the relation data for the overall application, not just a unit +--format (= smart) + Specify output format (json|smart|yaml) +-o, --output (= "") + Specify an output file +-r, --relation (= ) + Specify a relation by id +``` + +#### Details + + +relation-get prints the value of a unit's relation setting, specified by key. +If no key is given, or if the key is "-", all keys and values will be printed. + +A unit can see its own settings by calling "relation-get - MYUNIT", this will include +any changes that have been made with "relation-set". + +When reading remote relation data, a charm can call relation-get --app - to get +the data for the application data bag that is set by the remote applications +leader. + + + +Further details: + + +`relation-get` reads the settings of the local unit, or of any remote unit, in a given relation (set with `-r`, defaulting to the current relation identifier, as in `relation-set`). The first argument specifies the settings key, and the second the remote unit, which may be omitted if a default is available (that is, when running a relation hook other than [-relation-broken](/t/charm-hooks/1040#%5Bname%5D-relation-broken)). + +If the first argument is omitted, a dictionary of all current keys and values will be printed; all values are always plain strings without any interpretation. If you need to specify a remote unit but want to see all settings, use `-` for the first argument. + +The environment variable [`JUJU_REMOTE_UNIT`](/t/juju-environment-variables/1162#heading--juju_remote_unit) stores the default remote unit. + +You should never depend upon the presence of any given key in `relation-get` output. Processing that depends on specific values (other than `private-address`) should be restricted to [-relation-changed](/t/charm-hooks/1040#%5Bname%5D-relation-changed) hooks for the relevant unit, and the absence of a remote unit's value should never be treated as an [error](/t/dealing-with-errors-encountered-by-charm-hooks/1048) in the local unit. + +In practice, it is common and encouraged for [-relation-changed](/t/charm-hooks/1040#%5Bname%5D-relation-changed) hooks to exit early, without error, after inspecting `relation-get` output and determining the data is inadequate; and for [all other hooks](/t/charm-hooks/1040) to be resilient in the face of missing keys, such that -relation-changed hooks will be sufficient to complete all configuration that depends on remote unit settings. + +Key value pairs for remote units that have departed remain accessible for the lifetime of the relation. 
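+
+The "exit early" pattern described above might look like the following minimal sketch in a hypothetical `db-relation-changed` hook (the key names are illustrative):
+
+```bash
+# Illustrative only: bail out quietly until the remote unit has published its data.
+host=$(relation-get host)
+password=$(relation-get password)
+if [ -z "$host" ] || [ -z "$password" ]; then
+    juju-log "remote unit has not published host/password yet; waiting"
+    exit 0
+fi
+# ...continue configuring the workload with "$host" and "$password"...
+```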
+ + +#### Examples: + + +``` text +# Getting the settings of the default unit in the default relation is done with: + relation-get + username: jim + password: "12345" + +# To get a specific setting from the default remote unit in the default relation + relation-get username + jim + +# To get all settings from a particular remote unit in a particular relation you + relation-get -r database:7 - mongodb/5 + username: bob + password: 2db673e81ffa264c +``` + +### `relation-ids` + + +#### Usage relation-ids [options] + +#### Summary + +List all relation ids with the given relation name. + +#### Options + +```text +--format (= smart) + Specify output format (json|smart|yaml) +-o, --output (= "") + Specify an output file +``` + + + + +`relation-ids` outputs a list of the related **applications** with a relation name. Accepts a single argument (relation-name) which, in a relation hook, defaults to the name of the current relation. The output is useful as input to the `relation-list`, `relation-get`, and `relation-set` commands to read or write other relation values. + + +#### Examples: + +``` text +relation-ids database +``` + + +### `relation-list` + +#### Usage + +```text +relation-list [options] +``` + +#### Summary + +List relation units. + +#### Options + +```text +--app (= false) + List remote application instead of participating units +--format (= smart) + Specify output format (json|smart|yaml) +-o, --output (= "") + Specify an output file +-r, --relation (= ) + Specify a relation by id +``` + +#### Details + +`-r` must be specified when not in a relation hook. + + + + +`relation-list` outputs a list of all the related **units** for a relation identifier. If not running in a relation hook context, `-r` needs to be specified with a relation identifier similar to the`relation-get` and `relation-set` commands. + + +#### Examples: + +``` text +relation-list 9 +``` + + +### `relation-set` + + +#### Usage + +```text +relation-set [options] key=value [key=value ...] +``` + +#### Summary + +Set relation settings. + +#### Options + +```text +--app (= false) + pick whether you are setting "application" settings or "unit" settings +--file (= ) + file containing key-value pairs +--format (= "") + deprecated format flag +-r, --relation (= ) + specify a relation by id +``` + +#### Details + +"relation-set" writes the local unit's settings for some relation. +If no relation is specified then the current relation is used. The +setting values are not inspected and are stored as strings. Setting +an empty string causes the setting to be removed. Duplicate settings +are not allowed. + +If the unit is the leader, it can set the application settings using +"--app". These are visible to related applications via 'relation-get --app' +or by supplying the application name to 'relation-get' in place of +a unit name. + +The --file option should be used when one or more key-value pairs are +too long to fit within the command length limit of the shell or +operating system. The file will contain a YAML map containing the +settings. Settings in the file will be overridden by any duplicate +key-value arguments. A value of "-" for the filename means . + + +Further details: + + +`relation-set` writes the local unit's settings for some relation. If it's not running in a relation hook, `-r` needs to be specified. The `value` part of an argument is not inspected, and is stored directly as a string. Setting an empty string causes the setting to be removed. 
+ +`relation-set` is the tool for communicating information between units of related applications. By convention the charm that `provides` an interface is likely to set values, and a charm that `requires` that interface will read values; but there is nothing enforcing this. Whatever information you need to propagate for the remote charm to work must be propagated via relation-set, with the single exception of the `private-address` key, which is always set before the unit joins. + +For some charms you may wish to overwrite the `private-address` setting, for example if you're writing a charm that serves as a proxy for some external application. It is rarely a good idea to *remove* that key though, as most charms expect that value to exist unconditionally and may fail if it is not present. + +All values are set in a [transaction](https://en.wikipedia.org/wiki/Transaction_processing) at the point when the hook terminates successfully (i.e. the hook exit code is 0). At that point all changed values will be communicated to the rest of the system, causing -changed hooks to run in all related units. + +There is no way to write settings for any unit other than the local unit. However, any hook on the local unit can write settings for any relation which the local unit is participating in. + +#### Examples: + +``` text +relation-set port=80 tuning=default + +relation-set -r server:3 username=jim password=12345 +``` + + +### `resource-get` + + +#### Usage + +```text +resource-get +``` + +#### Summary + +Get the path to the locally cached resource file. + +#### Details + +"resource-get" is used while a hook is running to get the local path +to the file for the identified resource. This file is an fs-local copy, +unique to the unit for which the hook is running. It is downloaded from +the controller, if necessary. + +If "resource-get" for a resource has not been run before (for the unit) +then the resource is downloaded from the controller at the revision +associated with the unit's application. That file is stored in the unit's +local cache. If "resource-get" *has* been run before then each +subsequent run syncs the resource with the controller. This ensures +that the revision of the unit-local copy of the resource matches the +revision of the resource associated with the unit's application. + +Either way, the path provided by "resource-get" references the +up-to-date file for the resource. Note that the resource may get +updated on the controller for the application at any time, meaning the +cached copy *may* be out of date at any time after you call +"resource-get". Consequently, the command should be run at every +point where it is critical that the resource be up to date. + +The "upgrade-charm" hook is useful for keeping your charm's resources +on a unit up to date. Run "resource-get" there for each of your +charm's resources to do so. The hook fires whenever the the file for +one of the application's resources changes on the controller (in addition +to when the charm itself changes). That means it happens in response +to "juju upgrade-charm" as well as to "juju push-resource". + +Note that the "upgrade-charm" hook does not run when the unit is +started up. So be sure to run "resource-get" for your resources in the +"install" hook (or "config-changed", etc.). + +Note that "resource-get" only provides an FS path to the resource file. +It does not provide any information about the resource (e.g. revision). 
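+
+As a concrete illustration, an install or config-changed hook might fetch a resource as in the following minimal sketch (the resource name and install path are illustrative):
+
+```bash
+# Illustrative only: fetch the 'software' resource and unpack it.
+if RESOURCE_PATH=$(resource-get software); then
+    unzip -o "$RESOURCE_PATH" -d /opt/myapp
+else
+    # resource-get is expected to exit non-zero if the resource cannot be fetched.
+    status-set blocked "cannot fetch the 'software' resource"
+    exit 0
+fi
+```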
+ + +Further details: + +`resource-get` fetches a resource from the Juju controller or the Juju Charm store. The command returns a local path to the file for a named resource. + +If `resource-get` has not been run for the named resource previously, then the resource is downloaded from the controller at the revision associated with the unit's application. That file is stored in the unit's local cache. If `resource-get` *has* been run before then each subsequent run synchronizes the resource with the controller. This ensures that the revision of the unit-local copy of the resource matches the revision of the resource associated with the unit's application. + +The path provided by `resource-get` references the up-to-date file for the resource. Note that the resource may get updated on the controller for the application at any time, meaning the cached copy *may* be out of date at any time after `resource-get` is called. Consequently, the command should be run at every point where it is critical for the resource be up to date. + + +#### Examples: + +```bash +# resource-get software +/var/lib/juju/agents/unit-resources-example-0/resources/software/software.zip +``` + + +### `secret-add` + +#### Usage + +```text +secret-add [options] [key[#base64|#file]=value...] +``` + +#### Summary + +Add a new secret. + +#### Options + +```text +--description (= "") + the secret description +--expire (= "") + either a duration or time when the secret should expire +--file (= "") + a YAML file containing secret key values +--label (= "") + a label used to identify the secret in hooks +--owner (= "application") + the owner of the secret, either the application or unit +--rotate (= "") + the secret rotation policy +``` + +#### Details + +Add a secret with a list of key values. + +If a key has the '#base64' suffix, the value is already in base64 format and no +encoding will be performed, otherwise the value will be base64 encoded +prior to being stored. + +If a key has the '#file' suffix, the value is read from the corresponding file. + +By default, a secret is owned by the application, meaning only the unit +leader can manage it. Use "--owner unit" to create a secret owned by the +specific unit which created it. + +#### Examples: + +```text + +secret-add token=34ae35facd4 +secret-add key#base64=AA== +secret-add key#file=/path/to/file another-key=s3cret +secret-add --owner unit token=s3cret +secret-add --rotate monthly token=s3cret +secret-add --expire 24h token=s3cret +secret-add --expire 2025-01-01T06:06:06 token=s3cret +secret-add --label db-password \ + --description "my database password" \ + data#base64=s3cret== +secret-add --label db-password \ + --description "my database password" \ + --file=/path/to/file +``` + + +### `secret-get` + + +#### Usage + +```text +secret-get [options] [key[#base64]] +``` + +#### Summary + +Get the content of a secret. + +#### Options + +```text +--format (= yaml) + Specify output format (json|yaml) +--label (= "") + a label used to identify the secret in hooks +-o, --output (= "") + Specify an output file +--peek (= false) + get the latest revision just this time +--refresh (= false) + get the latest revision and also get this same revision for subsequent calls +``` + + +#### Details + +Get the content of a secret with a given secret ID. +The first time the value is fetched, the latest revision is used. +Subsequent calls will always return this same revision unless +--peek or --refresh are used. +Using --peek will fetch the latest revision just this time. 
+Using --refresh will fetch the latest revision and continue to +return the same revision next time unless --peek or --refresh is used. + +Either the ID or label can be used to identify the secret. + +#### Examples: + +```text +secret-get secret:9m4e2mr0ui3e8a215n4g +secret-get secret:9m4e2mr0ui3e8a215n4g token +secret-get secret:9m4e2mr0ui3e8a215n4g token#base64 +secret-get secret:9m4e2mr0ui3e8a215n4g --format json +secret-get secret:9m4e2mr0ui3e8a215n4g --peek +secret-get secret:9m4e2mr0ui3e8a215n4g --refresh +secret-get secret:9m4e2mr0ui3e8a215n4g --label db-password +``` + +### `secret-grant` + +#### Usage + +```text +secret-grant [options] +``` + +#### Summary + +Grant access to a secret. + +#### Options + +```text +-r, --relation (= ) + the relation with which to associate the grant +--unit (= "") + the unit to grant access +``` + +#### Details + +Grant access to view the value of a specified secret. +Access is granted in the context of a relation - unless revoked +earlier, once the relation is removed, so too is the access grant. + +By default, all units of the related application are granted access. +Optionally specify a unit name to limit access to just that unit. + +#### Examples: + +```text +secret-grant secret:9m4e2mr0ui3e8a215n4g -r 0 --unit mediawiki/6 +secret-grant secret:9m4e2mr0ui3e8a215n4g --relation db:2 +``` + + +### `secret-ids` + +#### Usage + + +```text +secret-ids [options] +``` + +#### Summary + +Print secret ids. + +#### Options + +```text +--format (= smart) + Specify output format (json|smart|yaml) +-o, --output (= "") + Specify an output file +``` + +#### Details + +Returns the secret ids for secrets owned by the application. + +#### Examples: + +```text +secret-ids +``` + + +### `secret-info-get` + +#### Usage + +```text +secret-info-get [options] +``` + +#### Summary + +Get a secret's metadata info. + +#### Options + +```text +--format (= yaml) + Specify output format (json|yaml) +--label (= "") + a label used to identify the secret +-o, --output (= "") + Specify an output file +``` + + +#### Details + +Get the metadata of a secret with a given secret ID. +Either the ID or label can be used to identify the secret. + +#### Examples: + +```text +secret-info-get --label db-password +secret-info-get --label db-password + +``` + + +### `secret-remove` + + +#### Usage + +```text +secret-remove [options] +``` + +#### Summary + +remove a existing secret + +#### Options + +``` +--revision (= 0) + remove the specified revision +``` + + +#### Details + +Remove a secret with the specified URI. + +#### Examples: + +```text +secret-remove secret:9m4e2mr0ui3e8a215n4g +``` + + +### `secret-revoke` + +#### Usage + +```text +secret-revoke [options] +``` + +#### Summary + +Revoke access to a secret. + +#### Options +```text +--app, --application (= "") + the application to revoke access +-r, --relation (= ) + the relation for which to revoke the grant +--unit (= "") + the unit to revoke access +``` + + +#### Details + +Revoke access to view the value of a specified secret. +Access may be revoked from an application (all units of +that application lose access), or from a specified unit. +If run in a relation hook, the related application's +access is revoked, unless a uni is specified, in which +case just that unit's access is revoked.' 
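+
+For orientation, the secret tools typically work together across hooks. A minimal owner-side sketch (the label, keys and relation handling are illustrative):
+
+```bash
+# Illustrative only: create a secret, grant a related application access, publish
+# the secret ID over the relation, and revoke access again later.
+SECRET_ID=$(secret-add --label db-password password=s3cret)
+
+# In a relation hook (JUJU_RELATION_ID is set by Juju for relation hooks);
+# writing application relation data requires leadership.
+secret-grant "$SECRET_ID" -r "$JUJU_RELATION_ID"
+relation-set --app secret-id="$SECRET_ID"
+
+# Later, when access is no longer needed:
+secret-revoke "$SECRET_ID" -r "$JUJU_RELATION_ID"
+```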
+ +#### Examples: + +```text +secret-revoke secret:9m4e2mr0ui3e8a215n4g +secret-revoke secret:9m4e2mr0ui3e8a215n4g --relation 1 +secret-revoke secret:9m4e2mr0ui3e8a215n4g --app mediawiki +secret-revoke secret:9m4e2mr0ui3e8a215n4g --unit mediawiki/6 +``` + +### `secret-set` + +#### Usage + +```text +secret-set [options] [key[#base64]=value...] +``` + +#### Summary + +Update an existing secret. + +#### Options + +```text +--description (= "") + the secret description +--expire (= "") + either a duration or time when the secret should expire +--file (= "") + a YAML file containing secret key values +--label (= "") + a label used to identify the secret in hooks +--owner (= "application") + the owner of the secret, either the application or unit +--rotate (= "") + the secret rotation policy +``` + + +#### Details + +Update a secret with a list of key values, or set new metadata. +If a value has the '#base64' suffix, it is already in base64 format and no +encoding will be performed, otherwise the value will be base64 encoded +prior to being stored. +To just update selected metadata like rotate policy, do not specify any secret value. + +#### Examples: + +```text +secret-set secret:9m4e2mr0ui3e8a215n4g token=34ae35facd4 +secret-set secret:9m4e2mr0ui3e8a215n4g key#base64 AA== +secret-set secret:9m4e2mr0ui3e8a215n4g --rotate monthly token=s3cret +secret-set secret:9m4e2mr0ui3e8a215n4g --expire 24h +secret-set secret:9m4e2mr0ui3e8a215n4g --expire 24h token=s3cret +secret-set secret:9m4e2mr0ui3e8a215n4g --expire 2025-01-01T06:06:06 token=s3cret +secret-set secret:9m4e2mr0ui3e8a215n4g --label db-password \ + --description "my database password" \ + data#base64 s3cret== +secret-set secret:9m4e2mr0ui3e8a215n4g --label db-password \ + --description "my database password" +secret-set secret:9m4e2mr0ui3e8a215n4g --label db-password \ + --description "my database password" \ + --file=/path/to/file +``` + + +### `state-delete` + + +#### Usage + +```text +state-delete +``` + +#### Summary + +Delete server-side-state key value pair. + +#### Details + +state-delete deletes the value of the server side state specified by key. + +See also: + + state-get + state-set + + +### `state-get` + +#### Usage + +```text +state-get [options] [] +``` + +#### Summary + +Print server-side-state value. + +#### Options + +```text +--format (= smart) + Specify output format (json|smart|yaml) +-o, --output (= "") + Specify an output file +--strict (= false) + Return an error if the requested key does not exist +``` + +#### Details + +state-get prints the value of the server side state specified by key. +If no key is given, or if the key is "-", all keys and values will be printed. + +See also: + + state-delete + state-set + + + +### `state-set` + + +#### Usage + +```text +state-set [options] key=value [key=value ...] +``` + +#### Summary + +Set server-side-state values. + +#### Options + +```text +--file (= ) + file containing key-value pairs +``` + +#### Details + +state-set sets the value of the server side state specified by key. + +The --file option should be used when one or more key-value pairs +are too long to fit within the command length limit of the shell +or operating system. The file will contain a YAML map containing +the settings as strings. Settings in the file will be overridden +by any duplicate key-value arguments. A value of "-" for the filename +means . + +The following fixed size limits apply: +- Length of stored keys cannot exceed 256 bytes. +- Length of stored values cannot exceed 65536 bytes. 
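+
+The state tools have no examples in this reference; a minimal round trip might look like this (the key and value are illustrative):
+
+```bash
+# Illustrative only: store a value, read it back, then delete it.
+state-set db-initialised=true
+state-get db-initialised           # prints "true"
+state-get --strict no-such-key     # errors, because the key does not exist
+state-delete db-initialised
+```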
+ +See also: + + state-delete + state-get + + +### `status-get` + +#### Usage + +```text +status-get [options] [--include-data] [--application] +``` + +#### Summary + +Print status information. + +#### Options + +```text +--application (= false) + print status for all units of this application if this unit is the leader +--format (= smart) + Specify output format (json|smart|yaml) +--include-data (= false) + print all status data +-o, --output (= "") + Specify an output file +``` + +#### Details + + +By default, only the status value is printed. +If the --include-data flag is passed, the associated data are printed also. + + +Further details: + +`status-get` allows charms to query the current workload status. + + +Without arguments, it just prints the status code e.g. 'maintenance'. With `--include-data` specified, it prints YAML which contains the status value plus any data associated with the status. + +Include the `--application` option to get the overall status for the application, rather than an individual unit. + + +#### Examples: + +Access the unit's status: + + +``` text +status-get + +status-get --include-data +``` + + +Access the application's status: + + +``` text +status-get --application +``` + + +### `status-set` + +#### Usage + +```text +status-set [options] [message] +``` + +#### Summary + +Set status information. + +#### Options + +```text +--application (= false) + set this status for the application to which the unit belongs if the unit is the leader +``` + + +#### Details + +Sets the workload status of the charm. Message is optional. +The "last updated" attribute of the status is set, even if the +status and message are the same as what's already set. + + + +Further details: + +`status-set` changes what is displayed in `juju status`. + + +`status-set` allows charms to describe their current status. This places the responsibility on the charm to know its status, and set it accordingly using the `status-set` hook tool. Changes made via `status-set` are applied without waiting for a hook execution to end and are not rolled back if a hook execution fails. + +The leader unit is responsible for setting the overall status of the application by using the `--application` option. + +This hook tool takes 2 arguments. The first is the status code and the second is a message to report to the user. + +Valid status codes are: + +- `maintenance` (the unit is not currently providing a service, but expects to be soon, e.g. when first installing) +- `blocked` (the unit cannot continue without user input) +- `waiting` (the unit itself is not in error and requires no intervention, but it is not currently in service as it depends on some external factor, e.g. an application to which it is related is not running) +- `active` (This unit believes it is correctly offering all the services it is primarily installed to provide) + +For more extensive explanations of these status codes, [please see the status reference page](/t/charm-unit-status-and-their-meanings/1168). + +The second argument is a user-facing message, which will be displayed to any users viewing the status, and will also be visible in the status history. This can contain any useful information. + +In the case of a `blocked` status though the **status message should tell the user explicitly how to unblock the unit** insofar as possible, as this is primary way of indicating any action to be taken (and may be surfaced by other tools using Juju, e.g. the Juju GUI). 
+ +A unit in the `active` state with should not generally expect anyone to look at its status message, and often it is better not to set one at all. In the event of a degradation of service, this is a good place to surface an explanation for the degradation (load, hardware failure or other issue). + +A unit in `error` state will have a message that is set by Juju and not the charm because the error state represents a crash in a charm hook - an unmanaged and uninterpretable situation. Juju will set the message to be a reflection of the hook which crashed. For example “Crashed installing the software” for an install hook crash, or “Crash establishing database link” for a crash in a relationship hook. + + + +#### Examples: + +Set the unit's status: + + +```bash +# Set the unit's workload status to "maintenance". +# This implies a short downtime that should self-resolve. +status-set maintenance "installing software" +status-set maintenance "formatting storage space, time left: 120s" + +# Set the unit's workload status to "waiting" +# The workload is awaiting something else in the model to become active +status-set waiting "waiting for database" + +# Set the unit workload's status to "active" +# The workload is installed and running. Any messages should be informational. +status-set active +status-set active "Storage 95% full" + +# Set the unit's workload status to "blocked" +# This implies human intervention is required to unblock the unit. +# Messages should describe what is needed to resolve the problem. +status-set blocked "Add a database relation" +status-set blocked "Storage full" +``` + + +Set the application's status: + +```bash +# From a unit, update its status +status-set maintenance "Upgrading to 4.1.1" + +# From the leader, update the application's status line +status-set --application maintenance "Application upgrade underway" +``` + +Non-leader units which attempt to use `--application` will receive an error: + +``` text +status-set --application maintenance "I'm not the leader." +error: this unit is not the leader +``` + + +### `storage-add` + +#### Usage + +```text +storage-add [=count] ... +``` + +#### Summary + +Add storage instances. + +#### Details + +Storage add adds storage instances to unit using provided storage directives. +A storage directive consists of a storage name as per charm specification +and optional storage COUNT. + +COUNT is a positive integer indicating how many instances +of the storage to create. If unspecified, COUNT defaults to 1. + +Further details: + +`storage-add` adds storage volumes to the unit. + + +`storage-add` takes the name of the storage volume (as defined in the charm metadata), and optionally the number of storage instances to add. By default, it will add a single storage instance of the name. + + +#### Examples: + + +``` text +storage-add database-storage=1 +``` + + + +### `storage-get` + +#### Usage + + +```text +storage-get [options] [] +``` + +#### Summary + +Print information for storage instance with specified id. + +#### Options + +```text +--format (= smart) + Specify output format (json|smart|yaml) +-o, --output (= "") + Specify an output file +-s (= ) + specify a storage instance by id +``` + +#### Details + +When no `` is supplied, all keys values are printed. + + +Further details: + +`storage-get` obtains information about storage being attached to, or detaching from, the unit. 
+ + +If the executing hook is a storage hook, information about the storage related to the hook will be reported; this may be overridden by specifying the name of the storage as reported by storage-list, and must be specified for non-storage hooks. + +`storage-get` can be used to identify the storage location during storage-attached and storage-detaching hooks. The exception to this is when the charm specifies a static location for singleton stores. + + +#### Examples: + + +```bash +# retrieve information by UUID +storage-get 21127934-8986-11e5-af63-feff819cdc9f + +# retrieve information by name +storage-get -s data/0 +``` + + + +### `storage-list` + +#### Usage + +```text +storage-list [options] [] +``` + +#### Summary + +List storage attached to the unit. + +#### Options + +```text +--format (= smart) + Specify output format (json|smart|yaml) +-o, --output (= "") + Specify an output file +``` + +#### Details + +storage-list will list the names of all storage instances +attached to the unit. These names can be passed to storage-get +via the "-s" flag to query the storage attributes. + +A storage name may be specified, in which case only storage +instances for that named storage will be returned. + + +Further details: + +`storage-list` list storages instances that are attached to the unit. + + +The storage instance identifiers returned from `storage-list` may be passed through to the `storage-get` command using the -s option. + + +### `unit-get` +> :warning: `unit-get` is deprecated in favour of `network-get` hook tool. See [Network primitives](/t/charm-network-primitives/1126) for details. + +#### Usage + +```text +unit-get [options] +``` + +#### Summary + +Print public-address or private-address. + +#### Options + +```text +--format (= smart) + Specify output format (json|smart|yaml) +-o, --output (= "") + Specify an output file +``` + +Further details: + +`unit-get` returns the IP address of the unit. + + +It accepts a single argument, which must be `private-address` or `public-address`. It is not affected by context. + +Note that if a unit has been deployed with `--bind space` then the address returned from `unit-get private-address` will get the address from this space, not the 'default' space. +[/details] + +#### Examples: + +``` text +unit-get public-address +``` + + + +
**Contributors:** @achilleasa, @amandahla, @benhoyt, @charlie4284, @dmitrii, @jameinel, @pmatulis, @ppasotti, @timclicks, @tmihoc
+ +------------------------- + +jameinel | 2020-04-22 10:40:49 UTC | #2 + +opened ports should certainly be under the Networking section. + +I don't know if there is a missing section to aggregate the relation-* functions like we do for Information/Actions/Metrics. Maybe Relations and Storage as sections. status-get status-set feel like they should be part of the Informational section. + +------------------------- + +timClicks | 2020-04-22 23:05:29 UTC | #3 + +Hi John, I repeatedly hit 502 errors when finishing this page. Looking to fix everything up now. + +------------------------- + +sssler-scania | 2020-09-06 19:40:07 UTC | #4 + +I'm trying to learn, digest and use relations for juju. + +I'm now stuck at trying to figure out what documentation I should use. I tried first this one and found an error: + +**relation_set** --> [https://discourse.charmhub.io/t/hook-tools/1163](https://discourse.charmhub.io/t/hook-tools/1163) + +I'm trying the python code and end up in an error: + +Code: +> hookenv.relation_set({'changed' : "BOOOOM"}) + +Error: +> 2020-09-06 19:00:59 DEBUG jujuc server.go:211 running hook tool "relation-set" +> 2020-09-06 19:00:59 DEBUG master-relation-changed Traceback (most recent call last): +> 2020-09-06 19:00:59 DEBUG master-relation-changed File "/var/lib/juju/agents/unit-worker-4/charm/hooks/master-relation-changed", line 29, in +> 2020-09-06 19:00:59 DEBUG master-relation-changed hooks.execute(sys.argv) +> 2020-09-06 19:00:59 DEBUG master-relation-changed File "/var/lib/juju/agents/unit-worker-4/charm/lib/charmhelpers/core/hookenv.py", line 945, in execute +> 2020-09-06 19:00:59 DEBUG master-relation-changed self._hooks[hook_name]() +> 2020-09-06 19:00:59 DEBUG master-relation-changed File "/var/lib/juju/agents/unit-worker-4/charm/hooks/master-relation-changed", line 24, in master_relation_changed +> 2020-09-06 19:00:59 DEBUG master-relation-changed hookenv.relation_set({'changed' : "BOOOOM"}) +> 2020-09-06 19:00:59 DEBUG master-relation-changed File "/var/lib/juju/agents/unit-worker-4/charm/lib/charmhelpers/core/hookenv.py", line 502, in relation_set +> 2020-09-06 19:00:59 DEBUG master-relation-changed relation_cmd_line + ["--file", settings_file.name]) +> 2020-09-06 19:00:59 DEBUG master-relation-changed File "/usr/lib/python3.6/subprocess.py", line 306, in check_call +> 2020-09-06 19:00:59 DEBUG master-relation-changed retcode = call(*popenargs, **kwargs) +> 2020-09-06 19:00:59 DEBUG master-relation-changed File "/usr/lib/python3.6/subprocess.py", line 287, in call +> 2020-09-06 19:00:59 DEBUG master-relation-changed with Popen(*popenargs, **kwargs) as p: +> 2020-09-06 19:00:59 DEBUG master-relation-changed File "/usr/lib/python3.6/subprocess.py", line 729, in __init__ +> 2020-09-06 19:00:59 DEBUG master-relation-changed restore_signals, start_new_session) +> 2020-09-06 19:00:59 DEBUG master-relation-changed File "/usr/lib/python3.6/subprocess.py", line 1295, in _execute_child +> 2020-09-06 19:00:59 DEBUG master-relation-changed restore_signals, start_new_session, preexec_fn) +> 2020-09-06 19:00:59 DEBUG master-relation-changed TypeError: expected str, bytes or os.PathLike object, not dict +> 2020-09-06 19:00:59 ERROR juju.worker.uniter.operation runhook.go:136 hook "master-relation-changed" (via explicit, bespoke hook script) failed: exit status 1 + +So, I'm not sure what I'm doing wrong. But the docs are not producing a working example. 
+ +I switched my attention therefore to this documentation for the same function: + +**relation_set** ---> [https://charm-helpers.readthedocs.io/en/latest/api/charmhelpers.core.hookenv.html#charmhelpers.core.hookenv.relation_set](https://charm-helpers.readthedocs.io/en/latest/api/charmhelpers.core.hookenv.html#charmhelpers.core.hookenv.relation_set) + +I see two documentation sources and they are not the same and I really cant see any example for the later one, which leaves me to a trial-and-error-reading-code-and-figuring-this-out-by-myself. + +As I think that learning about relations and interfaces for juju is really KEY - I would consider updating the document very valuable and I'd love to help out here to get this properly in place so that we can train beginner charmers. + +I'm working on a tutorial for this purpose, but the shape of the documentation is so bad at the moment its difficult to know what to reference for my links to documentation. So, yeah... where should I turn here? + +------------------------- + +jameinel | 2020-09-14 20:02:47 UTC | #5 + +[quote="sssler-scania, post:4, topic:1163"] +Code: + +> hookenv.relation_set({‘changed’ : “BOOOOM”}) +[/quote] + +You are not using the juju tool directly (relation-set) but the reactive python wrapper from charmhelpers: +https://github.com/juju/charm-helpers/blob/9b6222e1f22fecf883bc2ff98217277cdd268a43/charmhelpers/core/hookenv.py#L479 + +The python signature of that is: +``` +def relation_set(relation_id=None, relation_settings=None, **kwargs): +``` + +I do think the docs on the function are a bit sparse, and don't really tell you how to use it: +https://reactive-charm-helpers.readthedocs.io/en/latest/api/charmhelpers.core.hookenv.html#charmhelpers.core.hookenv.relation_set + +However, the expectation is that you would pass the relation data you want to set as part of either a dict to the "relation_settings" keyword, or just as kwargs. + +eg either: +```python +hookenv.relation_set(relation_settings={"changed": "BOOOM"}) +``` +or +```python +hookenv.relation_set(changed="BOOOM") +``` + +The error you are getting is because relation_set is interpreting the first argument as the relation_id and trying to pass a dict to a subprocess that expects a string. + +------------------------- + +jameinel | 2020-09-14 20:08:07 UTC | #6 + +If you do want more information about Reactive charming then you'd want to look around here: +https://charmsreactive.readthedocs.io/en/latest/ + +There is another python framework as well: +https://ops.readthedocs.io/en/latest/ + +And some associated getting started docs: +https://discourse.charmhub.io/t/first-steps-with-the-operator-framework/3006 +https://discourse.charmhub.io/t/developing-reusable-components-in-the-operator-framework/3304 + + +I definitely agree that relations are key to the expressive power of Juju charms. I'm sorry that it wasn't clearer how to interact with it correctly. + +------------------------- + +erik-lonroth | 2020-09-15 04:56:39 UTC | #7 + +Thanx @jameinel + +I'm trying to put together a tutorial and would like to be able to reference good and working docs. This is essential for that work, since it otherwise need to contain alot of workaround instructions warnings, corrections etc. + +The tutorial is about using hooks only to work with relations on a trivial level. Not using reactive or ops. 
+ +It proved to be much harder than I would have liked it to be, but it's a good experience and I think it will be a good one following up on the other tutorials using hooks-only charms I've wrote up. + +I would appreciate if some work could be offloaded to improving the docs for this purpose with more code examples and snippets since it really helps. + +------------------------- + +erik-lonroth | 2020-09-16 03:41:28 UTC | #8 + +I found this list of hook tools partially overlapping this document https://discourse.charmhub.io/t/command-hook-tools/1865 + +Here is a third also overlapping https://discourse.charmhub.io/t/the-hook-environment-hook-tools-and-how-hooks-are-run/1047 + +A fourth source for hook-tools https://old-docs.jujucharms.com/2.5/en/reference-hook-tools + + + +* The above pages contains more hook-tools not documented here +* This page is alot more useful. + +Can this be merged? + +------------------------- + +pedroleaoc | 2022-04-07 09:25:19 UTC | #9 + + + +------------------------- + +pedroleaoc | 2022-10-14 11:32:28 UTC | #10 + + + +------------------------- + diff --git a/tmp/t/11672.md b/tmp/t/11672.md new file mode 100644 index 000000000..7d3397235 --- /dev/null +++ b/tmp/t/11672.md @@ -0,0 +1,6 @@ +tmihoc | 2024-04-26 13:09:04 UTC | #1 + +In Juju, `containeragent` is a binary that implements [agent](/t/5466) functionality for the [units](/t/5460) in a Juju deployment on a Kubernetes cloud. + +------------------------- + diff --git a/tmp/t/1168.md b/tmp/t/1168.md new file mode 100644 index 000000000..dc64f2692 --- /dev/null +++ b/tmp/t/1168.md @@ -0,0 +1,239 @@ +system | 2024-09-10 13:26:49 UTC | #1 + +> See also: [SDK | Status](https://juju.is/docs/sdk/status) + +In Juju, **status** can describe the status of an application or a unit, where the former can be inferred from the latter and the latter consists of the workload and the Juju agent status. This document gives more information about all of these different kinds of status -- their values and their meanings. + +**Contents:** + +- [Types of status](#heading--types-of-status) +- [Status in the output of `juju status`](#heading--status-in-the-output-of-juju-status) + +

+<a href="#heading--types-of-status"><h2 id="heading--types-of-status"> Types of status </h2></a>

+ +- [Application status](#heading--application-status) +- [Unit status](#heading--unit-status) + +

+<a href="#heading--application-status"><h3 id="heading--application-status"> Application status </h3></a>

+ +As its name indicates, the application status reports on the status of each deployed application. + +The application status can be specified by the charm author. When not specified, it is the highest-priority status of the workload statuses of all of the application's units. So if all workloads are active, the application will also be active, but if even just one workload is blocked, the application will also be marked blocked. + +> See more: [Workload / charm status](#heading--workload---charm-status) + +The following figure provides an illustration of the status an application may be in at a given time, and lists the reasons for the transitions between different statuses: + +![application_status|690x335](upload://5lZIVGfFcY5RWICbeInswylm4nk.png) + +

<a href="#heading--unit-status"><h3 id="heading--unit-status"> Unit status </h3></a>

+ +The unit status is given by the status of its workload/charm and the status of its `juju` agent. + +[note type="information"] +A unit's status is usually expressed as `/`, e.g. , `active/idle` or `unknown/lost`. +[/note] + +- [Workload / charm status](#heading--workload---charm-status) +- [Agent status](#heading--agent-status) + + +

<a href="#heading--workload---charm-status"><h4 id="heading--workload---charm-status"> Workload / charm status </h4></a>

+ + + +The workload / charm status reports the status of the charm(ed service): + +[note type=caution] +Except for `error`, `terminated` and `unknown`, which are set by Juju, the workload status is generally speaking set by the charm. As such, its semantics is ultimately up to the charm author. The meanings listed below represent just the ideal case, if the charm author has followed the best practice guidelines. +[/note] + + + + +| Status | Meaning | +|--|--| +| `error`| The unit is in error, likely from a hook failure. | +| `blocked` | The charm is stuck. Human intervention is required. | +| `maintenance` | The charm is performing some (long-running) task such as installing a package or restarting a service. No human intervention is required.| +| `waiting` | The charm is waiting for another charm it's integrated with to be ready. No human intervention required. | +| `active` | The charm is alive and well. Everything's fine. | +| `unknown` | The charm status is unknown. It may be still setting up, or something might have gone wrong. | +| `terminated` | The workload is being destroyed, e.g. as a consequence of `juju destroy-model`. | + + +
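To make the table above concrete, here is a minimal sketch of how a charm might choose between the charm-settable statuses using the `ops` framework. The charm class, config key, and relation name are illustrative and not taken from this document:

```python
import ops


class DemoCharm(ops.CharmBase):
    """Illustrative charm showing the charm-settable workload statuses."""

    def __init__(self, framework: ops.Framework):
        super().__init__(framework)
        framework.observe(self.on.config_changed, self._on_config_changed)

    def _on_config_changed(self, event: ops.ConfigChangedEvent) -> None:
        if not self.model.config.get("external-hostname"):
            # Human intervention is required -> blocked.
            self.unit.status = ops.BlockedStatus("external-hostname config is required")
            return
        if self.model.get_relation("database") is None:
            # Waiting on a charm we are integrated with -> waiting.
            self.unit.status = ops.WaitingStatus("waiting for database relation")
            return
        # Long-running setup work -> maintenance, then active once done.
        self.unit.status = ops.MaintenanceStatus("configuring workload")
        # ... (re)configure and restart the workload here ...
        self.unit.status = ops.ActiveStatus()


if __name__ == "__main__":
    ops.main(DemoCharm)  # requires a recent ops release where ops.main is callable
```

As the note above says, `error`, `terminated`, and `unknown` are set by Juju itself, so charm code never assigns them.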

<a href="#heading--agent-status"><h4 id="heading--agent-status"> Agent status </h4></a>

+ + + + +The agent status reports the status of the `juju` agent running in the unit as it interacts with the `juju` controller: + +| Status | Meaning| +|--|--| +|`allocating` | The charm pod has not come up yet. | +| `idle` | The `juju` agent in the charm container is not doing anything at the moment, and waiting for events. | +| `executing` | The `juju` agent in the charm container is executing some task. | +| `error` | The `juju` agent in the charm container has run but has encountered an uncaught charm exception. | +| `lost` | The `juju` agent is unresponsive, or its pod/container has unexpectedly come down. | + +The agent status is determined and set by the `juju` agent, so it cannot be directly controlled by the charm or a human operator. + + +[note type="information] +Each newly deployed unit starts in `maintenance/allocating`, quickly going to `maintenance/executing` as the setup phase hooks are executed. If, by the time the install hook (if any) returns, the charm has set no workload status, the unit will go to `unknown/idle`. So, in principle, at the end of the install event handler it should be clear if all went well (in which case the user should set active) or not. +[/note] + + + + + +

<a href="#heading--status-in-the-output-of-juju-status"><h2 id="heading--status-in-the-output-of-juju-status"> Status in the output of `juju status` </h2></a>

+ +In the output of `juju status`, application status is given under `App[lication] > Status` and unit status -- consisting, as we said, of the workload / charm status and of the Juju agent status -- is given under `Unit > Workload, Agent`. + +----------- +[details=Expand to view a sample `juju status` output] +```text +Model Controller Cloud/Region Version SLA Timestamp +charm-model tutorial-controller microk8s/localhost 3.1.5 unsupported 14:23:55+02:00 + +App Version Status Scale Charm Channel Rev Address Exposed Message +demo-api-charm 1.0.0 active 1 demo-api-charm 0 10.152.183.175 no +postgresql-k8s 14.7 active 1 postgresql-k8s 14/stable 73 10.152.183.237 no Primary + +Unit Workload Agent Address Ports Message +demo-api-charm/0* active idle 10.1.157.72 +postgresql-k8s/0* active idle 10.1.157.74 Primary +``` +[/details] +-------------- + +> See more: [Command `juju status`](/t/1836) + +------------------------- + +lathiat | 2021-02-16 02:36:23 UTC | #2 + +A charm that does not implement update-status, including a reactive charm using the "default" update-status hook, will stay in the unknown state. This is a question I have fielded multiple times from users and it would be great to update the unknown documentation to indicate that this can be normal for some charms. + +------------------------- + +bthomas | 2022-03-09 13:53:26 UTC | #3 + +There is one similarity between the blocked status and waiting status that i think we should make explicit as this would help a charm writer develop a clear mental model on how his/her charm should behave *after* setting one of these two statuses. In *both these cases* the charm *should continue to monitor* the condition that lead to blocked/waiting status and reset the status to active if the condition no longer persists. Emphasizing this may be important because it is easy to develop the misunderstanding that the charm need not do anything further once it has set blocked status since the system administrator has now been informed, particularly so since there is no equivalent of `juju resolve` for blocked status. + +------------------------- + +pedroleaoc | 2022-04-07 08:32:24 UTC | #4 + + + +------------------------- + +sed-i | 2022-05-12 02:12:53 UTC | #5 + +[quote="system, post:1, topic:1168"] +|`unknown`|The charm status is unknown. It may be still setting up, or something might have gone wrong. +[/quote] + +It might be worthwhile noting here that if the charm didn't set a status, then Juju would mark it as Unknown. +https://github.com/canonical/operator/pull/727 + +--- + +[quote="system, post:1, topic:1168"] +The workload status is generally speaking set by the charm (via e.g. `self.unit.status = ActiveStatus('message')`). +[/quote] + +In the past I recall it was advised to avoid including a message in `ActiveStatus`. If this is still the case then the example should probably reflect that. + +--- + +[quote="system, post:1, topic:1168"] +Exceptions to this rule are `terminated` and `unknown` workload statuses: these are set by the juju agent and are not controlled by charm code. 
+[/quote] + +I think `UnknownStatus` can be set by the user: +https://github.com/canonical/operator/blob/448c8b17efe05d1d700599496cc6a19e318f5127/ops/model.py#L895 + +------------------------- + +pedroleaoc | 2022-10-14 11:30:46 UTC | #6 + + + +------------------------- + +ppasotti | 2022-11-22 12:07:02 UTC | #7 + +[quote="sed-i, post:5, topic:1168"] +I think `UnknownStatus` can be set by the user: [https://github.com/canonical/operator/blob/448c8b17efe05d1d700599496cc6a19e318f5127/ops/model.py#L895 ](https://github.com/canonical/operator/blob/448c8b17efe05d1d700599496cc6a19e318f5127/ops/model.py#L895) +[/quote] + +While not forbidden at the `ops` level, `juju`, on the other hand, will complain. +![image|690x114](upload://g3hNM1eAVEFreB15AzbQlBzOUwS.png) + +------------------------- + +crucible | 2024-07-31 21:46:20 UTC | #8 + +The following doc regarding [Workload / charm status](https://discourse.charmhub.io/t/status/1168?_gl=1*1o1sh73*_ga*MTQ3OTY2ODYwMi4xNzIxNDE5NjUz*_ga_5LTL1CNEJM*MTcyMjQ1NjU0Ny4yOC4xLjE3MjI0NTY2ODguMTkuMC4w#heading--workload---charm-status) has varying information and is redundant when compared to the +[docs/sdk/status](https://juju.is/docs/sdk/status) docs. + +While **docs/juju/status** provides more information, the **docs/sdk/status** is more helpful (even linking to the corresponding ops library status). + +> [Possible statuses](https://juju.is/docs/sdk/status#heading--possible-statuses) +> +> The possible status values are listed below, along with a link to their [`ops.StatusBase`](https://ops.readthedocs.io/en/latest/#ops.StatusBase) subclass for use in `ops`. They are listed in order from highest to lowest priority, where in case of multiple statuses the higher priority status is what gets surfaced to the user: +> +> * error: the unit is in error, likely from a hook failure (this status is set by Juju, not by the charm) +> * [`blocked`](https://ops.readthedocs.io/en/latest/#ops.BlockedStatus): the unit requires manual intervention from the Juju user, as specified by the charm author (see status notes or the charm’s docs) +> * [`maintenance`](https://ops.readthedocs.io/en/latest/#ops.MaintenanceStatus): the unit is performing maintenance tasks to get up and running +> * [`waiting`](https://ops.readthedocs.io/en/latest/#ops.WaitingStatus): the unit is waiting on an application it’s integrated with +> * [`active`](https://ops.readthedocs.io/en/latest/#ops.ActiveStatus): the unit is ready and offering the services it has been designed to offer +> * [`unknown`](https://ops.readthedocs.io/en/latest/#ops.UnknownStatus): the initial, unknown status when the unit has not set its status yet + +Specifically, + +> the unknown status can be achieved when the unit has not set its status yet + +Is not mentioned in [docs/juju/status # Workload / charm status](https://discourse.charmhub.io/t/status/1168?_gl=1*1o1sh73*_ga*MTQ3OTY2ODYwMi4xNzIxNDE5NjUz*_ga_5LTL1CNEJM*MTcyMjQ1NjU0Ny4yOC4xLjE3MjI0NTY2ODguMTkuMC4w#heading--workload---charm-status) and is more helpful than + +> The charm status is unknown. It may be still setting up, or something might have gone wrong. + +Consider consolidating the two to make use of 1 point of reference. + +Mention @tmihoc + +------------------------- + diff --git a/tmp/t/117.md b/tmp/t/117.md new file mode 100644 index 000000000..40a6df1a9 --- /dev/null +++ b/tmp/t/117.md @@ -0,0 +1,41 @@ +thumper | 2024-09-16 07:27:20 UTC | #1 + +Sometimes software doesn't do what you'd expect. 
Each of the [agents](11679) that Juju runs has an internal worker for responding to introspection requests. + +As the agents start up, a goroutine is started to listen on an abstract domain socket. The listener talks HTTP, and has a number of registered endpoints. The initial work was to expose the internal golang runtime debugging endpoints for getting access to the running goroutines, CPU profiles, and memory heap profiles. This was then extended to add additional endpoints for much more Juju specific information. + +The Juju machine agent writes out a file to `/etc/profile.d/juju-introspection.sh` that defines a number of functions to easily get information out of the agent. These function names changed in Juju 2.3.9 and 2.4.2 to use underscores instead of dashes. + +- `juju_agent` +- `juju_agent_call` +- `juju_application_agent_name` +- `juju_controller_agent_name` +- `juju_cpu_profile` +- [`juju_engine_report`](/t/146) +- [`juju_goroutines`](/t/118) +- [`juju_heap_profile`](/t/6640) +- `juju_machine_agent_name` +- [`juju_machine_lock`](/t/116) (since 2.3.9, 2.4.2) +- [`juju_metrics`](/t/6641) +- `juju_pubsub_report` (since 2.3) +- `juju_presence_report` (since 2.4) +- [`juju_start_unit`](/t/5667) (since 2.9) +- `juju_statepool_report` +- `juju_statetracker_report` +- [`juju_stop_unit`](/t/5668) (since 2.9) +- [`juju_unit_status`](/t/5666) (since 2.9) + +------------------------- + +pedroleaoc | 2022-04-07 08:33:11 UTC | #2 + + + +------------------------- + +pedroleaoc | 2022-10-14 11:31:25 UTC | #3 + + + +------------------------- + diff --git a/tmp/t/118.md b/tmp/t/118.md new file mode 100644 index 000000000..44c28da30 --- /dev/null +++ b/tmp/t/118.md @@ -0,0 +1,142 @@ +thumper | 2024-08-22 13:12:55 UTC | #1 + +> See also: [Agent introspection](/t/117) + +The `juju_goroutines` function allows the operator to quickly get a list of running goroutines from the agent. + +When called without any argument the goroutines for the machine agent are returned. + +The output of this is mostly just useful for Juju developers to help identify where things may be stuck. 
+ +```bash +$ juju_goroutines +Querying @jujud-machine-0 introspection socket: /debug/pprof/goroutine?debug=1 +goroutine profile: total 234 +19 @ 0x42f59a 0x42f64e 0x406c62 0x40691b 0x951ada 0x9c34ed 0x9c0177 0x45b211 +# 0x951ad9 gopkg.in/tomb%2ev1.(*Tomb).Wait+0x49 /home/tim/go/src/gopkg.in/tomb.v1/tomb.go:113 +# 0x9c34ec github.com/juju/juju/api/watcher.(*commonWatcher).Wait+0x2c /home/tim/go/src/github.com/juju/juju/api/watcher/watcher.go:138 +# 0x9c0176 github.com/juju/juju/worker/catacomb.(*Catacomb).add.func1+0x86 /home/tim/go/src/github.com/juju/juju/worker/catacomb/catacomb.go:175 + +19 @ 0x42f59a 0x42f64e 0x406c62 0x40691b 0x9599f9 0xa20249 0x9c6673 0x9c69e9 0x45b211 +# 0x9599f8 github.com/juju/juju/rpc.(*Conn).Call+0x128 /home/tim/go/src/github.com/juju/juju/rpc/client.go:148 +# 0xa20248 github.com/juju/juju/api.(*state).APICall+0x1c8 /home/tim/go/src/github.com/juju/juju/api/apiclient.go:917 +# 0x9c6672 github.com/juju/juju/api/watcher.makeWatcherAPICaller.func1+0x142 /home/tim/go/src/github.com/juju/juju/api/watcher/watcher.go:54 +# 0x9c69e8 github.com/juju/juju/api/watcher.(*commonWatcher).commonLoop.func2+0xe8 /home/tim/go/src/github.com/juju/juju/api/watcher/watcher.go:104 + +19 @ 0x42f59a 0x42f64e 0x406c62 0x40691b 0x9c6732 0x45b211 +# 0x9c6731 github.com/juju/juju/api/watcher.(*commonWatcher).commonLoop.func1+0x71 /home/tim/go/src/github.com/juju/juju/api/watcher/watcher.go:88 + +19 @ 0x42f59a 0x42f64e 0x43ff34 0x43fb59 0x4646a2 0x9c3438 0x45b211 +# 0x43fb58 sync.runtime_Semacquire+0x38 /snap/go/2130/src/runtime/sema.go:56 +# 0x4646a1 sync.(*WaitGroup).Wait+0x71 /snap/go/2130/src/sync/waitgroup.go:129 +# 0x9c3437 github.com/juju/juju/api/watcher.(*commonWatcher).commonLoop+0xf7 /home/tim/go/src/github.com/juju/juju/api/watcher/watcher.go:128 + +19 @ 0x42f59a 0x43f2b0 0x9c02d8 0x45b211 +# 0x9c02d7 github.com/juju/juju/worker/catacomb.(*Catacomb).add.func2+0x107 /home/tim/go/src/github.com/juju/juju/worker/catacomb/catacomb.go:181 + +15 @ 0x42f59a 0x43f2b0 0x9bffcd 0x45b211 +# 0x9bffcc github.com/juju/juju/worker/catacomb.Invoke.func2+0x14c /home/tim/go/src/github.com/juju/juju/worker/catacomb/catacomb.go:101 + +13 @ 0x42f59a 0x42f64e 0x406c62 0x40691b 0xe5ee92 0xe60115 0x45b211 +# 0xe5ee91 github.com/juju/juju/worker/fortress.(*fortress).Visit+0x191 /home/tim/go/src/github.com/juju/juju/worker/fortress/fortress.go:63 +# 0xe60114 github.com/juju/juju/worker/fortress.Occupy.func2+0x44 /home/tim/go/src/github.com/juju/juju/worker/fortress/occupy.go:50 + +11 @ 0x42f59a 0x42f64e 0x406c62 0x40695b 0x9c3813 0x9c6c73 0x45b211 +# 0x9c3812 github.com/juju/juju/api/watcher.(*notifyWatcher).loop+0x1c2 /home/tim/go/src/github.com/juju/juju/api/watcher/watcher.go:180 +# 0x9c6c72 github.com/juju/juju/api/watcher.NewNotifyWatcher.func1+0x52 /home/tim/go/src/github.com/juju/juju/api/watcher/watcher.go:160 + +7 @ 0x42f59a 0x43f2b0 0x9c0e35 0x9c1aca 0x9bfdd5 0x9c00a1 0x45b211 +# 0x9c0e34 github.com/juju/juju/watcher.(*NotifyWorker).loop+0x154 /home/tim/go/src/github.com/juju/juju/watcher/notify.go:90 +# 0x9c1ac9 github.com/juju/juju/watcher.(*NotifyWorker).(github.com/juju/juju/watcher.loop)-fm+0x29 /home/tim/go/src/github.com/juju/juju/watcher/notify.go:71 +# 0x9bfdd4 github.com/juju/juju/worker/catacomb.runSafely+0x54 /home/tim/go/src/github.com/juju/juju/worker/catacomb/catacomb.go:289 +# 0x9c00a0 github.com/juju/juju/worker/catacomb.Invoke.func3+0x80 /home/tim/go/src/github.com/juju/juju/worker/catacomb/catacomb.go:116 + +6 @ 0x42f59a 0x42f64e 0x406c62 0x40691b 0x951ada 0x9bf89d 0x9c11f1 
0xe5e49f 0xe5bbd7 0x45b211 +# 0x951ad9 gopkg.in/tomb%2ev1.(*Tomb).Wait+0x49 /home/tim/go/src/gopkg.in/tomb.v1/tomb.go:113 +# 0x9bf89c github.com/juju/juju/worker/catacomb.(*Catacomb).Wait+0x2c /home/tim/go/src/github.com/juju/juju/worker/catacomb/catacomb.go:204 +# 0x9c11f0 github.com/juju/juju/watcher.(*NotifyWorker).Wait+0x30 /home/tim/go/src/github.com/juju/juju/watcher/notify.go:138 +# 0xe5e49e github.com/juju/juju/worker/dependency.(*Engine).runWorker.func2+0x4ce /home/tim/go/src/github.com/juju/juju/worker/dependency/engine.go:464 +# 0xe5bbd6 github.com/juju/juju/worker/dependency.(*Engine).runWorker+0x1c6 /home/tim/go/src/github.com/juju/juju/worker/dependency/engine.go:468 + +6 @ 0x42f59a 0x42f64e 0x406c62 0x40691b 0x951ada 0x9bf89d 0x9c11f1 0xe600bb 0xe5f6e1 0x45b211 +# 0x951ad9 gopkg.in/tomb%2ev1.(*Tomb).Wait+0x49 /home/tim/go/src/gopkg.in/tomb.v1/tomb.go:113 +# 0x9bf89c github.com/juju/juju/worker/catacomb.(*Catacomb).Wait+0x2c /home/tim/go/src/github.com/juju/juju/worker/catacomb/catacomb.go:204 +# 0x9c11f0 github.com/juju/juju/watcher.(*NotifyWorker).Wait+0x30 /home/tim/go/src/github.com/juju/juju/watcher/notify.go:138 +# 0xe600ba github.com/juju/juju/worker/fortress.Occupy.func1+0xca /home/tim/go/src/github.com/juju/juju/worker/fortress/occupy.go:38 +# 0xe5f6e0 github.com/juju/juju/worker/fortress.guestTicket.complete+0x40 /home/tim/go/src/github.com/juju/juju/worker/fortress/fortress.go:151 + +[extra bits snipped for brevity] +``` + +To call on a unit agent, the agent name as defined in `/var/lib/juju/agents/` should be specified as the second argument. + +```bash +$ juju_goroutines unit-ubuntu-lite-2 +Querying @jujud-unit-ubuntu-lite-2 introspection socket: /debug/pprof/goroutine?debug=1 +goroutine profile: total 216 +19 @ 0x42f59a 0x43f2b0 0x9c02d8 0x45b211 +# 0x9c02d7 github.com/juju/juju/worker/catacomb.(*Catacomb).add.func2+0x107 /home/tim/go/src/github.com/juju/juju/worker/catacomb/catacomb.go:181 + +17 @ 0x42f59a 0x42f64e 0x406c62 0x40691b 0x951ada 0x9c34ed 0x9c0177 0x45b211 +# 0x951ad9 gopkg.in/tomb%2ev1.(*Tomb).Wait+0x49 /home/tim/go/src/gopkg.in/tomb.v1/tomb.go:113 +# 0x9c34ec github.com/juju/juju/api/watcher.(*commonWatcher).Wait+0x2c /home/tim/go/src/github.com/juju/juju/api/watcher/watcher.go:138 +# 0x9c0176 github.com/juju/juju/worker/catacomb.(*Catacomb).add.func1+0x86 /home/tim/go/src/github.com/juju/juju/worker/catacomb/catacomb.go:175 + +17 @ 0x42f59a 0x42f64e 0x406c62 0x40691b 0x9599f9 0xa20249 0x9c6673 0x9c69e9 0x45b211 +# 0x9599f8 github.com/juju/juju/rpc.(*Conn).Call+0x128 /home/tim/go/src/github.com/juju/juju/rpc/client.go:148 +# 0xa20248 github.com/juju/juju/api.(*state).APICall+0x1c8 /home/tim/go/src/github.com/juju/juju/api/apiclient.go:917 +# 0x9c6672 github.com/juju/juju/api/watcher.makeWatcherAPICaller.func1+0x142 /home/tim/go/src/github.com/juju/juju/api/watcher/watcher.go:54 +# 0x9c69e8 github.com/juju/juju/api/watcher.(*commonWatcher).commonLoop.func2+0xe8 /home/tim/go/src/github.com/juju/juju/api/watcher/watcher.go:104 + +17 @ 0x42f59a 0x42f64e 0x406c62 0x40691b 0x9c6732 0x45b211 +# 0x9c6731 github.com/juju/juju/api/watcher.(*commonWatcher).commonLoop.func1+0x71 /home/tim/go/src/github.com/juju/juju/api/watcher/watcher.go:88 + +17 @ 0x42f59a 0x42f64e 0x43ff34 0x43fb59 0x4646a2 0x9c3438 0x45b211 +# 0x43fb58 sync.runtime_Semacquire+0x38 /snap/go/2130/src/runtime/sema.go:56 +# 0x4646a1 sync.(*WaitGroup).Wait+0x71 /snap/go/2130/src/sync/waitgroup.go:129 +# 0x9c3437 github.com/juju/juju/api/watcher.(*commonWatcher).commonLoop+0xf7 
/home/tim/go/src/github.com/juju/juju/api/watcher/watcher.go:128 + +13 @ 0x42f59a 0x42f64e 0x406c62 0x40695b 0x9c3813 0x9c6c73 0x45b211 +# 0x9c3812 github.com/juju/juju/api/watcher.(*notifyWatcher).loop+0x1c2 /home/tim/go/src/github.com/juju/juju/api/watcher/watcher.go:180 +# 0x9c6c72 github.com/juju/juju/api/watcher.NewNotifyWatcher.func1+0x52 /home/tim/go/src/github.com/juju/juju/api/watcher/watcher.go:160 + +11 @ 0x42f59a 0x42f64e 0x406c62 0x40691b 0xe5ee92 0xe60115 0x45b211 +# 0xe5ee91 github.com/juju/juju/worker/fortress.(*fortress).Visit+0x191 /home/tim/go/src/github.com/juju/juju/worker/fortress/fortress.go:63 +# 0xe60114 github.com/juju/juju/worker/fortress.Occupy.func2+0x44 /home/tim/go/src/github.com/juju/juju/worker/fortress/occupy.go:50 + +10 @ 0x42f59a 0x42f64e 0x406c62 0x40695b 0xa2f2b8 0x45b211 +# 0xa2f2b7 gopkg.in/natefinch/lumberjack%2ev2.(*Logger).millRun+0x57 /home/tim/go/src/gopkg.in/natefinch/lumberjack.v2/lumberjack.go:379 + +10 @ 0x42f59a 0x43f2b0 0x9bffcd 0x45b211 +# 0x9bffcc github.com/juju/juju/worker/catacomb.Invoke.func2+0x14c /home/tim/go/src/github.com/juju/juju/worker/catacomb/catacomb.go:101 + +5 @ 0x42f59a 0x42f64e 0x406c62 0x40691b 0x951ada 0x9bf89d 0x9c11f1 0xe5e49f 0xe5bbd7 0x45b211 +# 0x951ad9 gopkg.in/tomb%2ev1.(*Tomb).Wait+0x49 /home/tim/go/src/gopkg.in/tomb.v1/tomb.go:113 +# 0x9bf89c github.com/juju/juju/worker/catacomb.(*Catacomb).Wait+0x2c /home/tim/go/src/github.com/juju/juju/worker/catacomb/catacomb.go:204 +# 0x9c11f0 github.com/juju/juju/watcher.(*NotifyWorker).Wait+0x30 /home/tim/go/src/github.com/juju/juju/watcher/notify.go:138 +# 0xe5e49e github.com/juju/juju/worker/dependency.(*Engine).runWorker.func2+0x4ce /home/tim/go/src/github.com/juju/juju/worker/dependency/engine.go:464 +# 0xe5bbd6 github.com/juju/juju/worker/dependency.(*Engine).runWorker+0x1c6 /home/tim/go/src/github.com/juju/juju/worker/dependency/engine.go:468 + +5 @ 0x42f59a 0x42f64e 0x406c62 0x40691b 0x951ada 0x9bf89d 0x9c11f1 0xe600bb 0xe5f6e1 0x45b211 +# 0x951ad9 gopkg.in/tomb%2ev1.(*Tomb).Wait+0x49 /home/tim/go/src/gopkg.in/tomb.v1/tomb.go:113 +# 0x9bf89c github.com/juju/juju/worker/catacomb.(*Catacomb).Wait+0x2c /home/tim/go/src/github.com/juju/juju/worker/catacomb/catacomb.go:204 +# 0x9c11f0 github.com/juju/juju/watcher.(*NotifyWorker).Wait+0x30 /home/tim/go/src/github.com/juju/juju/watcher/notify.go:138 +# 0xe600ba github.com/juju/juju/worker/fortress.Occupy.func1+0xca /home/tim/go/src/github.com/juju/juju/worker/fortress/occupy.go:38 +# 0xe5f6e0 github.com/juju/juju/worker/fortress.guestTicket.complete+0x40 /home/tim/go/src/github.com/juju/juju/worker/fortress/fortress.go:151 +[extra bits snipped for brevity] +``` + +------------------------- + +pedroleaoc | 2022-04-07 09:25:43 UTC | #2 + + + +------------------------- + +pedroleaoc | 2022-10-14 11:32:41 UTC | #3 + + + +------------------------- + diff --git a/tmp/t/1184.md b/tmp/t/1184.md new file mode 100644 index 000000000..2f82b02d4 --- /dev/null +++ b/tmp/t/1184.md @@ -0,0 +1,187 @@ +system | 2024-04-17 10:20:05 UTC | #1 + +> See also: [How to manage logs](/t/9151) + +A **log** is a computer-generated record about entities, activities, usage patterns, etc., within a system. In Juju, logs are produced by [`jujud`](/t/7319) and keep track of machine and unit agents, models, controllers, etc. + + +**Contents:** +- [Juju agent logs - machines](#heading--juju-agent-logs---machines) +- [Juju agent logs - Kubernetes](#heading--juju-agent-logs---kubernetes) + + + +

<a href="#heading--juju-agent-logs---machines"><h2 id="heading--juju-agent-logs---machines"> Juju agent logs - machines </h2></a>

+ +In machine deployments, Juju agent logs are organised into a number of files. These files are located on every machine that Juju creates, including the controller. Specifically, they can be found under `/var/log/juju`, and may include: + +- [Agent log files](#heading--agent-log-files) +- [Model log files](#heading--model-log-files) +- [The audit log file](#heading--the-audit-log-file) +- [The logsink log file](#heading--the-logsink-log-file) +- [The machine-lock log file](#heading--the-machine-lock-log-file) + +
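For orientation, a listing of that directory on a controller machine might look roughly as follows (a hypothetical example; the exact set of files depends on the deployment):

```bash
$ ls /var/log/juju
audit.log  logsink.log  machine-0.log  machine-lock.log  models  unit-controller-0.log
```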

<a href="#heading--agent-log-files"><h3 id="heading--agent-log-files"> Agent log files </h3></a>

+ +Agent log files (e.g., `/var/log/juju/unit-controller-0.log` ) contain the logs for the machine and unit [agents](/t/5466). + + +

<a href="#heading--model-log-files"><h3 id="heading--model-log-files"> Model log files </h3></a>

+ +Model log files (e.g., `/var/log/juju/models/admin-test-3850c8.log`) contain the logs for all the [workers](https://juju.is/docs/dev/worker) on a [model](/t/5456). + + + +

<a href="#heading--the-audit-log-file"><h3 id="heading--the-audit-log-file"> The audit log file </h3></a>

+ +The audit log file (`/var/log/juju/audit.log`) logs all the client commands and all the API calls and errors responses associated with a [controller](/t/5455), classified as one of the following: + +- *Conversation:* A collection of API methods associated with a single top-level CLI command. +- *Request:* A single API method. +- *ResponseErrors:* Errors resulting from an API method + +The audit log file can be found only on controller machines. + + + + + + + +

<a href="#heading--the-logsink-log-file"><h3 id="heading--the-logsink-log-file"> The logsink log file </h3></a>

+ +The logsink file (`logsink.log`) contains all the agent logs shipped to the [controller](/t/5455), in aggregated form. These logs will end up in Juju's internal database, MongoDB. + + + +[note type=information] +In a [controller high availability](/t/1066#heading--controller-high-availability) scenario, `logsink.log` is not guaranteed to contain all messages since agents have a choice of several controllers to send their logs to. The `debug-log` command should be used for accessing consolidated data across all controllers. +[/note] + +

<a href="#heading--the-machine-lock-log-file"><h3 id="heading--the-machine-lock-log-file"> The machine-lock log file </h3></a>

+ +The machine-lock log file (`machine-lock.log`) contains logs for the file lock that synchronises hook execution on Juju [machines](/t/5459). (A machine will only ever run one [hook](https://juju.is/docs/sdk/hook) at a time.) + + + +

<a href="#heading--juju-agent-logs---kubernetes"><h2 id="heading--juju-agent-logs---kubernetes"> Juju agent logs - Kubernetes </h2></a>

+ +In Kubernetes deployments, logs are written directly to the container's `stdout` and can be retrieved with native Kubernetes methods: `kubectl logs <pod-name> -n <namespace>`. + +By default, this fetches the logs from the main `charm` container. To fetch logs from another container, use the additional `-c` flag to specify it, i.e. `kubectl logs <pod-name> -c <container-name> -n <namespace>`. + + +
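For example, to follow the charm container's logs for the first unit of a hypothetical `demo-api-charm` application deployed in a model named `welcome-k8s` (both names are illustrative):

```bash
kubectl logs demo-api-charm-0 -c charm -n welcome-k8s --follow
```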
**Contributors:** @charlie4284, @manadart, @reneradoi, @tmihoc
+ +------------------------- + +pedroleaoc | 2021-06-08 18:06:58 UTC | #2 + + + +------------------------- + +pedroleaoc | 2022-10-14 11:32:02 UTC | #3 + + + +------------------------- + +charlie4284 | 2024-01-03 07:58:18 UTC | #4 + +I think it would be helpful to have a section on retrieving logs for kubernetes agents. Perhaps something like below? + +### Kubernetes agent logs + +Kubernetes logs are written directly to `stdout` of the container and can be retrieved with native Kubernetes methods. `kubectl logs `. By default, it will fetch the logs from the main container `charm` container. When fetching logs from other containers, use additional `-c` flag to specify the container, i.e. `kubectl logs -c `. + +------------------------- + +tmihoc | 2024-01-03 08:06:43 UTC | #5 + +@charlie4284 If you wish, you can make the change directly. (I've checked and your trust level is high enough to allow that.) Please also add a Contributors line and your name to it on the bottom of the doc, as follows: + +``` +
**Contributors:** @charlie4284
+``` +It's something we've started doing on all new edits to the docs, for any feedback factored into the edit. + +------------------------- + +reneradoi | 2024-04-17 09:59:17 UTC | #6 + +For the record: fixed a typo (: instead of .) and added the namespace-parameter to the kubectl commands in the Kubernetes agent logs section. + +------------------------- + +tmihoc | 2024-04-17 10:19:18 UTC | #7 + +Thanks! (I was going to add you to the contributors but I see you're already there -- good!) + +------------------------- + diff --git a/tmp/t/1187.md b/tmp/t/1187.md new file mode 100644 index 000000000..bc447fdc0 --- /dev/null +++ b/tmp/t/1187.md @@ -0,0 +1,71 @@ +system | 2024-09-06 10:56:00 UTC | #1 + +From the point of view of the user, there are four basic failure scenarios: + +1. Command that fails to return – things hang at some step (e.g., `bootstrap` or `deploy`) and eventually timeout with an error. +1. Command that returns an error. +1. Command that returns but, immediately after, `juju status` shows errors. +1. Things look fine but, at some later point, `juju status` shows errors. + +In all cases you'll want to understand what's causing the error so you can figure out the way out: + +- For (1)-(3) you can check the documentation for the specific procedure you were trying to perform right before the error -- you might find a troubleshooting box with the exact error message, what it means, and how you can solve the issue. + +> Example: The troubleshooting box at the end of [How to manage models > Migrate](/t/1155#heading--migrate-a-workload-model-to-another-controller). +- For (1)-(2) you can also retry the command with the global flags `--debug` and `--verbose` (best used together; for `bootstrap`, also use `--keep-broken` -- if a machine is provisioned, this will ensure that it is not destroyed upon bootstrap fail, which will enable you to examine the logs). +- For all of (1)-(4), you can examine the logs by + - running `juju debug-log` (best used with `--tail`, because some errors are transient so the last lines tend to be the most relevant; also with `–level=ERROR` and, if the point of failure is known, `–include ...` as well, to filter the output) or + - examining the log files directly. + +> See more: [`juju debug-log`](/t/10116), [How to manage logs > Stream the logs](/t/9151#heading--stream-the-logs), [How to manage logs > View the log files](/t/9151#heading--view-the-log-files), [Juju logs](/t/1184) + +- For (3)-(4) the error might also be coming from a particular hook or action. In that case, use `juju debug-hooks` to launch a tmux session that will intercept matching hooks and/or actions. Then you can fix the error by manually configuring the workload, or editing the charm code. Once it is fixed you can run `juju resolved` to inform the charm that you have fixed the issue and it can continue. + +> See more: [`juju debug-hooks`](/t/1116) , [`juju resolved`](/t/10144) + +If none of this helps, use the information you've gathered to ask for help on our public [Charmhub Matrix chat](https://matrix.to/#/#charmhub:ubuntu.com) or our public [Charmhub Discourse forum](https://discourse.charmhub.io/t/welcome-to-the-charmed-operator-community). + +
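For reference, the `debug-log` flags mentioned above can be combined along these lines (a sketch; the unit name is illustrative):

```bash
juju debug-log --tail --level ERROR --include unit-mysql-0
```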
+ +> **Contributors:** @aflynn, @barrettj12, @hmlanigan , @tmihoc + +------------------------- + +mdavistelstra | 2019-04-16 05:30:25 UTC | #2 + +What does it mean when a unit in `juju status` has an asteriks? + +What is the difference between `/0` and `/0*`? e.g. + +``` +Unit Workload Agent Machine Public address Ports Message +nova-cloud-controller/0* active idle 0/lxd/4 10.58.2.218 8774/tcp,8778/tcp Unit is ready +nova-compute/0 active idle 1 10.58.1.103 Unit is ready +``` + +------------------------- + +sborny | 2019-04-16 07:42:09 UTC | #3 + +The unit with an asterisk is the leader of that charm. It can for example be used inside the charm to make sure only one unit is making configuration changes. More info [here](https://docs.jujucharms.com/devel/en/authors-charm-leadership). + +------------------------- + +pmatulis | 2019-04-16 19:13:49 UTC | #4 + +I fixed this on the [Concepts](/t/concepts-and-terms/1144) page. Thanks for pointing out this lacking. + +------------------------- + +pedroleaoc | 2022-04-07 09:27:48 UTC | #5 + + + +------------------------- + +pedroleaoc | 2022-10-14 11:33:41 UTC | #6 + + + +------------------------- + diff --git a/tmp/t/1194.md b/tmp/t/1194.md new file mode 100644 index 000000000..2474f249b --- /dev/null +++ b/tmp/t/1194.md @@ -0,0 +1,246 @@ +system | 2024-09-24 07:52:27 UTC | #1 + + + +> [List of supported clouds](/t/6665) > MicroK8s + +This document describes details specific to using your a MicroK8s cloud with Juju. + +> See more: [Getting started on Microk8s](https://microk8s.io/docs/getting-started) + +When using the MicroK8s cloud with Juju, it is important to keep in mind that it is a (1) [Kubernetes cloud](/t/5454#heading--machine-clouds-vs--kubernetes-clouds) and (2) [not some other cloud](/t/5454#heading--cloud-foo-vs--cloud-bar). + +> See more: [Cloud differences in Juju](/t/5454#heading--cloud-differences), [Kubernetes clouds and Juju](/t/15621) + +As the differences related to (1) are already documented generically in our [Tutorial](/t/6559), [How-to guides](/t/5334), and [Reference](/t/5348) docs, here we record just those that follow from (2). + +## Requirements + +### MicroK8s snap + +Juju 3.x requires MicroK8s to operate in strict mode. +> See more: [MicroK8s | Strict MicroK8s](https://microk8s.io/docs/install-strict) + +### Services that must enabled + +- `dns` +- `hostpath-storage` + +
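These can be enabled one addon at a time, for example as below (recent MicroK8s releases warn against enabling multiple addons in a single command; prefix with `sudo` if your user is not in the `microk8s` group):

```bash
microk8s enable dns
microk8s enable hostpath-storage
```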
+ +> **Contributors:** @tmihoc, @wideawakening + +------------------------- + +sesopenko | 2020-06-24 00:45:12 UTC | #2 + +There's an open bug which results in the microk8s installation failing. microk8s needs to be installed before juju and the user has to be in the microk8s group before installing juju or else the installation will fail. + +See: https://bugs.launchpad.net/juju/+bug/1881769?comments=all + +------------------------- + +timClicks | 2020-06-25 03:47:42 UTC | #3 + +Thanks for mentioning this @sesopenko. I've swapped the order around to make it clear that MicroK8s should be installed before Juju. + +------------------------- + +sesopenko | 2020-06-25 03:51:32 UTC | #4 + +It also ran out of memory when trying to bootstrap it in a VM with only 4GB. Some minimum system requirements for microk8s with juju together would be helpful. + +------------------------- + +doismellburning | 2020-07-14 16:28:33 UTC | #5 + +[quote="system, post:1, topic:1194"] +For example, here we deploy a charm by requesting the use of the ‘mariadb-pv’ workload storage pool we just set up +[/quote] + +Nothing else refers to setting up such a storage pool (and indeed the charm deploys without having explicitly done so) - is this a holdover from a previous version somewhere? + +------------------------- + +timClicks | 2020-07-15 03:02:54 UTC | #6 + +[quote="doismellburning, post:5, topic:1194"] +is this a holdover from a previous version somewhere? +[/quote] + +Likely, yes. I'll look into it. + +------------------------- + +timClicks | 2020-07-15 03:05:30 UTC | #7 + +[quote="sesopenko, post:4, topic:1194, full:true"] +It also ran out of memory when trying to bootstrap it in a VM with only 4GB. Some minimum system requirements for microk8s with juju together would be helpful. +[/quote] + +This is quite unfortunate. 4GB RAM is the recommended minimum currently, I believe. I've encountered issues at this size on another cluster before because Kubernetes was unable to deploy some background services alongside the Juju controller.. The controller pod was deemed "unplaceable". + +------------------------- + +kelvin.liu | 2021-04-18 22:02:00 UTC | #8 + +It seems the videos are not found now. I guess the videos are still there somewhere, it's just the links are not working now. +So I just removed them for now but we can bring them back anytime when we fixed the links. + +* http://discourse.charmhub.io/uploads/default/original/1X/ca1da0a2e668d808ea8d078fc78beef452e85013.mp4 +* http://discourse.charmhub.io/uploads/default/original/1X/88e808884b25f675bc59c067a6f0f4a842a831da.mp4 +* http://discourse.charmhub.io/uploads/default/original/1X/5c9ee6d9a4c831a32657f2dfe0554b09c65dc1b9.mp4 + +hi @pedroleaoc do you have any ideas of how to fix those links? + +Cheers +Kelvin + +------------------------- + +pedroleaoc | 2021-06-08 18:06:53 UTC | #9 + + + +------------------------- + +nsklikas | 2022-07-13 15:53:46 UTC | #10 + +https://discourse.charmhub.io/t/how-to-use-microk8s-with-juju/1194#heading--join-the-microk8s-group says: +> Add your account to the `microk8s` group. This grants the account elevated privileges to the cluster, meaning that `sudo` will not be required to interact with `microk8s`: +> +> sudo usermod -a -G microk8s $USER + + +This is not enough. You need to also run: + +`sudo chown -f -R $USER ~/.kube` + +This is stated in the microk8s installation instructions (https://microk8s.io/docs/getting-started) as well. 
+ +------------------------- + +mcjaeger | 2022-08-31 12:46:20 UTC | #11 + +[quote="system, post:1, topic:1194"] +Now enable some MicroK8s addons for DNS and storage class support: + +``` +microk8s.enable dns storage +``` +[/quote] +a suggestion for the next refresh: enabling the `storage` add-on brings now a warning that `storage` is deprecated and `hostpath-storage` should be used instead + +------------------------- + +mthaddon | 2022-09-07 15:24:20 UTC | #12 + +It seems very odd that we're not suggesting/recommending that people install the `ingress` add-on here. Without it you won't have any kind of ingress controller configured so many http workloads won't be reachable without hitting the pod IP and port directly (or service port and IP), which seems less than ideal. + +Could we add that in? + +------------------------- + +pedroleaoc | 2022-10-14 11:30:11 UTC | #13 + + + +------------------------- + +axino | 2023-01-05 19:31:45 UTC | #14 + +Got this today while running this howto : + + $ juju bootstrap microk8s micro + Since Juju 3 is being run for the first time, it has downloaded the latest public cloud information. + ERROR "/var/snap/juju/21122/microk8s/credentials/client.config" does not exist: juju "3.0.0" can only work with strictly confined microk8s + + $ snap list + Name Version Rev Tracking Publisher Notes + [...] + juju 3.0.0 21122 latest/stable canonical✓ - + microk8s v1.25.4 4221 1.25/stable canonical✓ classic + +Refreshing the `juju` snap to `2.9/stable` worked. + +------------------------- + +mbeierl | 2023-07-20 18:06:18 UTC | #15 + + +[quote="system, post:1, topic:1194"] +``` +microk8s.enable hostpath-storage dns +``` +[/quote] +Microk8s no longer wants us to do that: +``` +WARNING: Do not enable or disable multiple addons in one command. + This form of chained operations on addons will be DEPRECATED in the future. + Please, enable one addon at a time: 'microk8s enable ' +``` + +[quote="system, post:1, topic:1194"] +``` +sudo snap install juju --classic +``` +[/quote] +results in +``` +Warning: flag --classic ignored for strictly confined snap juju +``` + +[quote="system, post:1, topic:1194"] +``` +juju bootstrap microk8s micro +``` +[/quote] +Results in +``` +ERROR "/var/snap/juju/23354/microk8s/credentials/client.config" does not exist: juju "3.1.5" can only work with strictly confined microk8s +``` + +------------------------- + +gbeuzeboc | 2023-08-17 14:19:20 UTC | #16 + +I am facing the exact same issue. Any solution? + +Edit: recent change in the doc solved the problem. It was a matter of version of microk8s. + +------------------------- + +itrue | 2024-03-11 13:35:47 UTC | #17 + +[quote="mbeierl, post:15, topic:1194"] +`ERROR "/var/snap/juju/23354/microk8s/credentials/client.config" does not exist: juju "3.1.5" can only work with strictly confined microk8s` +[/quote] + +You can work around this by manually adding the cluster to Juju: + +``` +$ sudo microk8s config | juju add-k8s my-k8s --client + +k8s substrate "microk8s/localhost" added as cloud "my-k8s". +You can now bootstrap to this cloud by running 'juju bootstrap my-k8s'. + +$ juju bootstrap my-k8s uk8sx +Creating Juju controller "uk8sx" on my-k8s/localhost +Bootstrap to Kubernetes cluster identified as microk8s/localhost +Creating k8s resources for controller "controller-uk8sx" +Downloading images +Starting controller pod +Bootstrap agent now started +Contacting Juju controller at 10.152.183.170 to verify accessibility... 
+ +Bootstrap complete, controller "uk8sx" is now available in namespace "controller-uk8sx" + +Now you can run + juju add-model +to create a new model to deploy k8s workloads. +``` + +I'm not really sure why it says that it doesn't support it when it in reality does, just with a different command. + +------------------------- + diff --git a/tmp/t/11961.md b/tmp/t/11961.md new file mode 100644 index 000000000..ac714304a --- /dev/null +++ b/tmp/t/11961.md @@ -0,0 +1,227 @@ +bschimke95 | 2024-09-26 07:32:15 UTC | #1 + +> [From Zero to Hero: Write your first Kubernetes charm](/t/7113) > Write a unit test for your charm +> +> **See previous: [Observe your charm with COS Lite](/t/7814)** + +[note type=information] +This document is part of a series, and we recommend you follow it in sequence. However, you can also jump straight in by checking out the code from the previous branches: + +```bash +git clone https://github.com/canonical/juju-sdk-tutorial-k8s.git +cd juju-sdk-tutorial-k8s +git checkout 07_cos_integration +git checkout -b 08_unit_testing +``` +[/note] + +When you're writing a charm, you will want to ensure that it will behave reliably as intended. + +For example, that the various components -- relation data, pebble services, or configuration files -- all behave as expected in response to an event. + +You can ensure all this by writing a rich battery of units tests. In the context of a charm we recommended using [`pytest`](https://pytest.org/) (but [`unittest`](https://docs.python.org/3/library/unittest.html) can also be used) and especially the operator framework's built-in testing library -- [`ops.testing.Harness`](https://ops.readthedocs.io/en/latest/harness.html#module-ops.testing). We will be using the Python testing tool [`tox`](https://tox.wiki/en/4.14.2/index.html) to automate our testing and set up our testing environment. + +In this chapter you will write a simple unit test to check that your workload container is initialised correctly. + +**Contents**: + +1. [Prepare your test environment](#heading--prepare-your-test-environment) +1. [Prepare your test directory](#heading--prepare-your-test-directory) +1. [Write your unit test](#heading--write-your-unit-test) +1. [Run the test](#heading--run-the-test) +1. [Review the final code](#heading--review-the-final-code) + +

<a href="#heading--prepare-your-test-environment"><h2 id="heading--prepare-your-test-environment"> Prepare your test environment </h2></a>

+ +Create a file called `tox.ini` in your charm project's root directory and add the following to configure your test environment: + +``` +[tox] +no_package = True +skip_missing_interpreters = True +min_version = 4.0.0 +env_list = unit + +[vars] +src_path = {tox_root}/src +tests_path = {tox_root}/tests + +[testenv] +set_env = + PYTHONPATH = {tox_root}/lib:{[vars]src_path} + PYTHONBREAKPOINT=pdb.set_trace + PY_COLORS=1 +pass_env = + PYTHONPATH + CHARM_BUILD_DIR + MODEL_SETTINGS + +[testenv:unit] +description = Run unit tests +deps = + pytest + cosl + coverage[toml] + -r {tox_root}/requirements.txt +commands = + coverage run --source={[vars]src_path} \ + -m pytest \ + --tb native \ + -v \ + -s \ + {posargs} \ + {[vars]tests_path}/unit + coverage report +``` +> Read more: [`tox.ini`](https://tox.wiki/en/latest/config.html#tox-ini) + +

<a href="#heading--prepare-your-test-directory"><h2 id="heading--prepare-your-test-directory"> Prepare your test directory </h2></a>

+ +In your project root, create a `tests/unit` directory: + +```bash +mkdir -p tests/unit +``` + +

<a href="#heading--write-your-unit-test"><h2 id="heading--write-your-unit-test"> Write your unit test </h2></a>

+ +In your `tests/unit` directory, create a file called `test_charm.py`. + +In this file, do all of the following: + +First, add the necessary imports: + +```python +import ops +import ops.testing +import pytest + +from charm import FastAPIDemoCharm +``` + +Then, add a test [fixture](https://docs.pytest.org/en/7.1.x/how-to/fixtures.html) that sets up the testing harness and makes sure that it will be cleaned up after each test: + +```python +@pytest.fixture +def harness(): + harness = ops.testing.Harness(FastAPIDemoCharm) + harness.begin() + yield harness + harness.cleanup() + +``` + +Finally, add a first test case as a function, as below. As you can see, this test case is used to verify that the deployment of the `fastapi-service` within the `demo-server` container is configured correctly and that the service is started and running as expected when the container is marked as `pebble-ready`. It also checks that the unit's status is set to active without any error messages. Note that we mock some methods of the charm because they do external calls that are not represented in the state of this unit test. + +```python +def test_pebble_layer( + monkeypatch: pytest.MonkeyPatch, harness: ops.testing.Harness[FastAPIDemoCharm] +): + monkeypatch.setattr(FastAPIDemoCharm, 'version', '1.0.0') + # Expected plan after Pebble ready with default config + expected_plan = { + 'services': { + 'fastapi-service': { + 'override': 'replace', + 'summary': 'fastapi demo', + 'command': 'uvicorn api_demo_server.app:app --host=0.0.0.0 --port=8000', + 'startup': 'enabled', + # Since the environment is empty, Layer.to_dict() will not + # include it. + } + } + } + + # Simulate the container coming up and emission of pebble-ready event + harness.container_pebble_ready('demo-server') + harness.evaluate_status() + + # Get the plan now we've run PebbleReady + updated_plan = harness.get_container_pebble_plan('demo-server').to_dict() + service = harness.model.unit.get_container('demo-server').get_service('fastapi-service') + # Check that we have the plan we expected: + assert updated_plan == expected_plan + # Check the service was started: + assert service.is_running() + # Ensure we set a BlockedStatus with appropriate message: + assert isinstance(harness.model.unit.status, ops.BlockedStatus) + assert 'Waiting for database' in harness.model.unit.status.message +``` + + +> Read more: [`ops.testing`](https://ops.readthedocs.io/en/latest/harness.html#module-ops.testing) + +

<a href="#heading--run-the-test"><h2 id="heading--run-the-test"> Run the test </h2></a>

+ +In your Multipass Ubuntu VM shell, run your unit test as below: + +```bash +ubuntu@charm-dev:~/fastapi-demo$ tox -e unit +``` + +You should get an output similar to the one below: + +```bash +unit: commands[0]> coverage run --source=/home/ubuntu/fastapi-demo/src -m pytest --tb native -v -s /home/ubuntu/fastapi-demo/tests/unit +=============================================================================================================================================================================== test session starts =============================================================================================================================================================================== +platform linux -- Python 3.10.13, pytest-8.0.2, pluggy-1.4.0 -- /home/ubuntu/fastapi-demo/.tox/unit/bin/python +cachedir: .tox/unit/.pytest_cache +rootdir: /home/ubuntu/fastapi-demo +collected 1 item + +tests/unit/test_charm.py::test_pebble_layer PASSED + +================================================================================================================================================================================ 1 passed in 0.30s ================================================================================================================================================================================ +unit: commands[1]> coverage report +Name Stmts Miss Cover +---------------------------------- +src/charm.py 118 49 58% +---------------------------------- +TOTAL 118 49 58% + unit: OK (0.99=setup[0.04]+cmd[0.78,0.16] seconds) + congratulations :) (1.02 seconds) +``` + +Congratulations, you have now successfully implemented your first unit test! + +[note type=caution] +As you can see in the output, the current tests cover 58% of the charm code. In a real-life scenario make sure to cover much more! +[/note] + +

<a href="#heading--review-the-final-code"><h2 id="heading--review-the-final-code"> Review the final code </h2></a>

+ +For the full code see: [08_unit_testing](https://github.com/canonical/juju-sdk-tutorial-k8s/tree/08_unit_testing) + +For a comparative view of the code before and after this doc see: [Comparison](https://github.com/canonical/juju-sdk-tutorial-k8s/compare/07_cos_integration...08_unit_testing) + +> **See next: [Write scenario tests for your charm](/t/12128)** + +
+ +> **Contributors:** @abatisse , @acsgn , @benhoyt , @bschimke95, @mylesjp, @pmatulis, @tony-meyer, @tmihoc, @james-garner + +------------------------- + +ppasotti | 2023-10-04 13:19:33 UTC | #2 + +Hi @bschimke95, thanks for the nice write-up! +Two notes: +- should we link to the existing howtos for working with scenario? https://discourse.charmhub.io/t/scenario-how-to-guides/10584 +- the TOC links at the top appear to be broken, but I'm not sure how to fix them. @tmihoc? + +------------------------- + +ppasotti | 2023-10-04 13:21:49 UTC | #3 + +also, [back in the day](https://discourse.charmhub.io/t/getting-started-with-charm-testing/6894) I thought it was a good idea to add, for each type of test, pointers at how to automate them in CI. Is that going to be addressed in a separate tutorial or should we add the same here? + +------------------------- + +tmihoc | 2023-10-04 13:27:08 UTC | #4 + +Hi @ppasotti, thanks for the tips, they all sound great! + +FYI following a chat with @beliaev-maksim, planning to move the scenario bit to its own separate chapter, to reflect the fact that charmers usually file it separately (as in, e.g., https://github.com/canonical/prometheus-k8s-operator/tree/main/tests ). + +------------------------- + diff --git a/tmp/t/1199.md b/tmp/t/1199.md new file mode 100644 index 000000000..142fb7126 --- /dev/null +++ b/tmp/t/1199.md @@ -0,0 +1,96 @@ +system | 2024-07-12 07:15:38 UTC | #1 + +> See also: [How to upgrade your Juju deployment from `2.9` to `3.x`](/t/7530) + +In Juju, upgrades can happen at the level of the `juju` CLI client, the controller, the model, the application, and the machine. + +> See more: +> - [How to upgrade the client](/t/1083#heading--upgrade-the-client) +> - [How to upgrade a controller](/t/1111#heading--upgrade-a-controller) +> - [How to upgrade a model](/t/1155#heading--upgrade-a-model) +> - [How to upgrade an application](/t/5476#heading--upgrade-an-application) +> - [How to upgrade a machine](/t/5886#heading--upgrade-a-machine) + +Upgrades to the client, the controller, and the model are typically related: You upgrade the client by refreshing the `juju` snap, then you upgrade the controller and the model, which is done as follows: + +1. If you're upgrading between + - patch versions (e.g. 2.9.25 -> 2.9.26) + - minor versions before 3.0 (e.g. 2.7 -> 2.8) + + you can upgrade in place via `upgrade-controller` and `upgrade-model`. + +2. If you're upgrading between + - major versions (e.g. 2.9 -> 3.0) + - minor versions after 3.0 (e.g. 3.0 -> 3.1) + + you need to bootstrap a new controller, migrate your models to it, and then run `upgrade-model`. (This is because upgrades are risky, and model migration is a relatively safer way to upgrade than upgrading in place.) It is also important to pay attention to the allowed upgrade paths -- for example, to update from `juju v2.2` to `juju v3.0`, one must first upgrade the client, controller, and model to `juju v2.9` and then perform a second upgrade to `juju v3.0`. + +Application upgrades and machine upgrades are usually completely independent of this and of each other -- the former concerns the version of a charm and the latter the version of Ubuntu running on a machine. The only exception (relevant for upgrades to `3.0`) is when you upgrade across versions where the, e.g., a new controller has dropped support for, e.g., base (OS, series) required by some charm. 
In that case, before upgrading the controller, you'll want to make sure that all the existing machines (usually already attached to some application) have been upgraded to a supported series (`upgrade-machine`; going away in Juju 4) and also that any new machines provisioned for an application will use a supported series (`refresh `, `set-application-base `). See [How to upgrade your Juju deployment from `2.9` to `3.x`](/t/7530). + + + + + +In general, the upgrade of the agent software is independent of the following: + +- Client software + + Although client and server software are independent, an upgrade of the agents is an opportune time to first upgrade the client software. + +- Charms + + Charms and agent versions are orthogonal to one another. There is no necessity to upgrade charms before or after an upgrade of the agents. + +- Running workloads + + Workloads running are independent of Juju so a downtime maintenance window is not normally required in order to perform an upgrade. + +

<a href="#heading--version-nomenclature-and-the-auto-selection-algorithm"><h2 id="heading--version-nomenclature-and-the-auto-selection-algorithm"> Version nomenclature and the auto-selection algorithm </h2></a>

+ +A version is denoted by: + +`major.minor.patch` + +For instance: `2.0.1` + +When not specifying a version to upgrade to ('--version') an algorithm will be used to auto-select a version. + +Rules: + +1. If the agent major version matches the client major version, the version selected is minor+1. If such a minor version is not available then the next patch version is chosen. + +2. If the agent major version does not match the client major version, the version selected is that of the client version. + +To demonstrate, let the available online versions be: 1.25.1, 2.02, 2.03, 2.1, and 2.2. This gives: + +- client 2.03, agent 2.01 -> upgrade to 2.02 +- client 1.25, agent 1.25 -> upgrade to 1.25.1 +- client 2.1, agent 1.25 -> upgrade to 2.1 + +The stable online agent software is found here: https://streams.canonical.com/juju/tools/agent/ + +------------------------- + +pedroleaoc | 2021-06-08 18:06:30 UTC | #2 + + + +------------------------- + +pedroleaoc | 2022-10-14 11:32:05 UTC | #3 + + + +------------------------- + diff --git a/tmp/t/11991.md b/tmp/t/11991.md new file mode 100644 index 000000000..10b681984 --- /dev/null +++ b/tmp/t/11991.md @@ -0,0 +1,268 @@ +bschimke95 | 2024-09-10 04:57:54 UTC | #1 + +> [From Zero to Hero: Write your first Kubernetes charm](/t/7113) > Write integration tests for your charm +> +> **See previous: [Write scenario tests for your charm](/t/11961)** + +[note type=information] +This document is part of a series, and we recommend you follow it in sequence. However, you can also jump straight in by checking out the code from the previous branches: + +``` +git clone https://github.com/canonical/juju-sdk-tutorial-k8s.git +cd juju-sdk-tutorial-k8s +git checkout 09_scenario_test +git checkout -b 10_integration_testing +``` +[/note] + +A charm should function correctly not just in a mocked environment but also in a real deployment. + +For example, it should be able to pack, deploy, and integrate without throwing exceptions or getting stuck in a `waiting` or a `blocked` status -- that is, it should correctly reach a status of `active` or `idle`. + +You can ensure this by writing integration tests for your charm. In the charming world, these are usually written with the [`pytest-operator`](https://github.com/charmed-kubernetes/pytest-operator) library. + +In this chapter you will write two small integration tests -- one to check that the charm packs and deploys correctly and one to check that the charm integrates successfully with the PostgreSQL database. + +**Contents**: + +1. [Prepare your test environment](#heading--prepare-your-test-environment) +1. [Prepare your test directory](#heading--prepare-your-test-directory) +1. [Write and run a pack-and-deploy integration test](#heading--write-and-run-a-pack-and-deploy-integration-test) +1. [Write and run an integrate-with-database integration test](#heading--write-and-run-an-integrate-with-database-integration-test) +1. [Review the final code](#heading--review-the-final-code) + +

<a href="#heading--prepare-your-test-environment"><h2 id="heading--prepare-your-test-environment"> Prepare your test environment </h2></a>

+ +In your `tox.ini` file, add the following new environment: + +``` +[testenv:integration] +description = Run integration tests +deps = + pytest + juju + pytest-operator + -r {tox_root}/requirements.txt +commands = + pytest -v \ + -s \ + --tb native \ + --log-cli-level=INFO \ + {posargs} \ + {[vars]tests_path}/integration +``` + +

<a href="#heading--prepare-your-test-directory"><h2 id="heading--prepare-your-test-directory"> Prepare your test directory </h2></a>

+ +Create a `tests/integration` directory: +```bash +mkdir ~/fastapi-demo/tests/integration + +``` + +

<a href="#heading--write-and-run-a-pack-and-deploy-integration-test"><h2 id="heading--write-and-run-a-pack-and-deploy-integration-test"> Write and run a pack-and-deploy integration test </h2></a>

+ +Let's begin with the simplest possible integration test, a [smoke test](https://en.wikipedia.org/wiki/Smoke_testing_(software)). This test will build and deploy the charm and verify that the installation hooks finish without any error. + + +In your `tests/integration` directory, create a file `test_charm.py` and add the following test case: + +```python +import asyncio +import logging +from pathlib import Path + +import pytest +import yaml +from pytest_operator.plugin import OpsTest + +logger = logging.getLogger(__name__) + +METADATA = yaml.safe_load(Path('./charmcraft.yaml').read_text()) +APP_NAME = METADATA['name'] + + +@pytest.mark.abort_on_fail +async def test_build_and_deploy(ops_test: OpsTest): + """Build the charm-under-test and deploy it together with related charms. + + Assert on the unit status before any relations/configurations take place. + """ + # Build and deploy charm from local source folder + charm = await ops_test.build_charm('.') + resources = { + 'demo-server-image': METADATA['resources']['demo-server-image']['upstream-source'] + } + + # Deploy the charm and wait for blocked/idle status + # The app will not be in active status as this requires a database relation + await asyncio.gather( + ops_test.model.deploy(charm, resources=resources, application_name=APP_NAME), + ops_test.model.wait_for_idle( + apps=[APP_NAME], status='blocked', raise_on_blocked=False, timeout=120 + ), + ) +``` + +In your Multipass Ubuntu VM, run the test: + +```bash +tox -e integration +``` + +The test takes some time to run as the `pytest-operator` running in the background will add a new model to an existing cluster (whose presence it assumes). If successful, it'll verify that your charm can pack and deploy as expected. + +
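Because the `integration` environment passes `{posargs}` through to `pytest`, additional `pytest-operator` options can be supplied after a `--` separator. For example, to reuse an existing model instead of having a fresh one created each run (the model name here is illustrative):

```bash
tox -e integration -- --model=my-test-model
```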

<a href="#heading--write-and-run-an-integrate-with-database-integration-test"><h2 id="heading--write-and-run-an-integrate-with-database-integration-test"> Write and run an integrate-with-database integration test </h2></a>

+ +The charm requires a database to be functional. Let's verify that this behaviour works as intended. For that, we need to deploy a database to the test cluster and integrate both applications. Finally, we should check that the charm reports an active status. + +In your `tests/integration/test_charm.py` file add the following test case: + +```python +@pytest.mark.abort_on_fail +async def test_database_integration(ops_test: OpsTest): + """Verify that the charm integrates with the database. + + Assert that the charm is active if the integration is established. + """ + await ops_test.model.deploy( + application_name='postgresql-k8s', + entity_url='postgresql-k8s', + channel='14/stable', + ) + await ops_test.model.integrate(f'{APP_NAME}', 'postgresql-k8s') + await ops_test.model.wait_for_idle( + apps=[APP_NAME], status='active', raise_on_blocked=False, timeout=120 + ) +``` + + +[note type=information] +But if you run the one and then the other (as separate `pytest ...` invocations, then two separate models will be created unless you pass `--model=some-existing-model` to inform pytest-operator to use a model you provide. +[/note] + +In your Multipass Ubuntu VM, run the test again: + + +```bash +ubuntu@charm-dev:~/fastapi-demo$ tox -e integration + +``` + +The test may again take some time to run. + +[note type=positive] +**Pro tip:** To make things faster, use the `--model=` to inform `pytest-operator` to use the model it has created for the first test. Otherwise, charmers often have a way to cache their pack or deploy results; an example is https://github.com/canonical/spellbook . +[/note] + +When it's done, the output should show two passing tests: + +``` +... + demo-api-charm/0 [idle] waiting: Waiting for database relation +INFO juju.model:model.py:2759 Waiting for model: + demo-api-charm/0 [idle] active: +PASSED +-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- live log teardown -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- +INFO pytest_operator.plugin:plugin.py:783 Model status: + +Model Controller Cloud/Region Version SLA Timestamp +test-charm-2ara main-controller microk8s/localhost 3.1.5 unsupported 09:45:56+02:00 + +App Version Status Scale Charm Channel Rev Address Exposed Message +demo-api-charm 1.0.1 active 1 demo-api-charm 0 10.152.183.99 no +postgresql-k8s 14.7 active 1 postgresql-k8s 14/stable 73 10.152.183.50 no + +Unit Workload Agent Address Ports Message +demo-api-charm/0* active idle 10.1.208.77 +postgresql-k8s/0* active idle 10.1.208.107 + +INFO pytest_operator.plugin:plugin.py:789 Juju error logs: + + +INFO pytest_operator.plugin:plugin.py:877 Resetting model test-charm-2ara... +INFO pytest_operator.plugin:plugin.py:866 Destroying applications demo-api-charm +INFO pytest_operator.plugin:plugin.py:866 Destroying applications postgresql-k8s +INFO pytest_operator.plugin:plugin.py:882 Not waiting on reset to complete. +INFO pytest_operator.plugin:plugin.py:855 Forgetting main... 
+
+
+========================================================================================================================================================================== 2 passed in 290.23s (0:04:50) ==========================================================================================================================================================================
+ integration: OK (291.01=setup[0.04]+cmd[290.97] seconds)
+ congratulations :) (291.05 seconds)
+```
+
+Congratulations, with this integration test you have verified that your charm's relation to PostgreSQL works as well!
+
+
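+
+If you'd rather reuse a single model across separate runs, as the note above suggests, you can forward the `--model` flag through tox in the same way (the model name below is just an example; use one that exists on your controller):
+
+```bash
+tox -e integration -- --model=test-charm-dev
+```
+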

Review the final code

+
+For the full code see: [10_integration_testing](https://github.com/canonical/juju-sdk-tutorial-k8s/tree/10_integration_testing)
+
+For a comparative view of the code before and after this doc see: [Comparison](https://github.com/canonical/juju-sdk-tutorial-k8s/compare/09_scenario_test...10_integration_testing)
+
+> **See next: [Open a Kubernetes port in your charm](/t/12244)**
+
+> Contributors: @bschimke95, @mylesjp, @tony-meyer, @tmihoc, @james-garner
+
+-------------------------
+
+ibraaoad | 2023-10-23 11:40:41 UTC | #2
+
+[quote="bschimke95, post:1, topic:11991"]
+```
+    await ops_test.model.deploy(
+        application_name="postgresql-k8s",
+        entity_url="https://charmhub.io/postgresql-k8s",
+        channel="14/stable",
+    )
+```
+[/quote]
+
+I think the above needs to have the trust param set to True to accommodate for Postgres RBAC, when running without it it returns the below
+
+> forbidden: User "system:serviceaccount:test-charm-uvfr:postgresql-k8s" cannot delete resource "endpoints" in API group "" in the namespace "test-charm-uvfr"
+
+-------------------------
+
+javierdelapuente | 2023-11-21 15:58:30 UTC | #3
+
+Setting the environment as stated in https://juju.is/docs/sdk/set-up-your-development-environment, when running the integration tests I get:
+
+`juju.errors.JujuError: invalid charm url schema https`
+
+I believe that in the integration test, instead of `entity_url="https://charmhub.io/postgresql-k8s"`, it should be:
+
+    await ops_test.model.deploy(
+        application_name="postgresql-k8s",
+        entity_url="ch://charmhub.io/postgresql-k8s",
+        channel="14/stable",
+    )
+
+-------------------------
+
+mylesjp | 2024-04-05 21:02:02 UTC | #4
+
+I believe the code here should reference `./charmcraft.yaml` rather than `./metadata.yaml` since Charmcraft 2.5 uses `charmcraft.yaml` and this tutorial does not have the user making a `metadata.yaml` file.
+```
+import asyncio
+import logging
+from pathlib import Path
+
+import pytest
+import yaml
+from pytest_operator.plugin import OpsTest
+
+logger = logging.getLogger(__name__)
+
+METADATA = yaml.safe_load(Path("./metadata.yaml").read_text())
+APP_NAME = METADATA["name"]
+```
+
+-------------------------
+
+tmihoc | 2024-04-25 11:22:52 UTC | #5
+
+Thanks for making the update -- I've added you to the list of contributors on the bottom of the doc.
+
+-------------------------
+
diff --git a/tmp/t/11998.md b/tmp/t/11998.md
new file mode 100644
index 000000000..57b5da3f4
--- /dev/null
+++ b/tmp/t/11998.md
@@ -0,0 +1,42 @@
+0x12b | 2023-09-29 08:38:37 UTC | #1
+
+Now that COS Lite has been generally available for a while, and we're seeing more and more users wanting to replace their current LMA setup with COS Lite, it feels like an appropriate time to provide some details on how to accomplish such a migration.
+
+First off, COS Lite is not a new version of LMA, but a completely new product that draws upon the lessons learned from LMA to create a heavily integrated, mainly automated, turn-key observability stack. The flip-side of that is that there isn't any direct, in-place migration path.
+
+This post aims to describe how to, in a way that's as safe as possible, go from LMA to COS, but as always with potentially destructive operations like these you should make sure you have up-to-date backups before trying this.
+
+Let's assume this is our, heavily simplified, existing environment:
+
+![image|690x226](upload://kzbrMOlogHXdAz3djsbpDv7s9KK.png)
+
+## 1. Upgrade your existing controller to Juju `>=2.9.44`
+
+As COS requires a Juju version which is equal to, or higher than, `3.1`, we first need to upgrade our existing controller to Juju `2.9.44` or newer. See the official [Juju docs](https://juju.is/docs/juju/juju-upgrade-controller) on how to perform this upgrade. The reason why we're picking `2.9.44` (or newer, if and when such versions are released) is that we need a version recent enough to support cross-controller relations with Juju 3, and then we might as well go to the latest version in the 2.9 track.
+
+## 2. Deploy COS to an isolated Microk8s instance
+This model needs to be running Juju 3.1. For instructions on how to deploy COS, see [our tutorial on the topic](https://charmhub.io/topics/canonical-observability-stack/tutorials/install-microk8s).
+
+It will now look somewhat like this:
+
+![LMA to COS (2)|690x464](upload://cAJI8o1cn3NDorlLBOvnJlh3nuh.png)
+
+## 3. Deploy `cos-proxy` and `grafana-agent` in your pre-existing model
+Deploy [`cos-proxy`](https://charmhub.io/cos-proxy) in your existing model and wire it up to all the same targets as you would with LMA. cos-proxy is designed to bridge the gap between your current LMA-enabled charms, which use filebeat and NRPE, and COS, which uses prometheus and loki/promtail. For Grafana Agent you only need to relate it to your principal charms.
+
+By now, you will have something that looks a little like this:
+
+![LMA to COS (1)|690x366](upload://v3wbTI2hsTnJQNMthixgGKM2KbL.png)
+
+[`cos-proxy`](https://charmhub.io/cos-proxy) and [`grafana-agent`](https://charmhub.io/grafana-agent) will continue to work on Juju 2.9 for the time being. This is mainly to support migrations from LMA to COS.
+
+## 4. Evaluate solution parity
+
+You'll now receive your telemetry in both LMA and COS, which is great as it allows you to evaluate, at your own pace, whether COS covers the checks and alarms you're used to from LMA before deciding to push the decommission button.
+
+## 5. Decommission LMA
+
+Now that you have COS Lite up and running and have verified that it works even better than what you had with LMA, you can start decommissioning your LMA setup. As this is a migration between solutions, none of your historical data in LMA will be migrated to COS, so if this is data you care about, make sure you keep the backups you took prior to following this tutorial until they're no longer relevant.
+
+-------------------------
+
diff --git a/tmp/t/12005.md b/tmp/t/12005.md
new file mode 100644
index 000000000..3e5d82509
--- /dev/null
+++ b/tmp/t/12005.md
@@ -0,0 +1,152 @@
+ppasotti | 2024-03-27 15:22:43 UTC | #1
+
+The [`cos-lite` bundle](https://github.com/canonical/cos-lite-bundle) is meant to be run by Juju. However, not all workloads that you may want to monitor are. The good news is that you can use `cos-lite` to monitor workloads that are not charmed (aka 'not managed by Juju'). The bad news? There isn't any: it's relatively straightforward to do so. Not bad at all.
+ + +**Contents:** + +- [Deploy `cos-lite`](#heading--deploy-cos-lite) +- [Deploy `grafana-agent`](#heading--deploy-grafana-agent) +- [Get the API endpoints from `traefik`](#heading--get-the-api-endpoints-from-traefik) +- [Add custom dashboards and alerts](#heading--add-custom-dashboards-and-alerts) +- [TLS](#heading--tls) +- [Known limitations and upcoming features](#heading--known-limitations-and-upcoming-features) + - [Identity](#heading--identity) + - [Tracing](#heading--tracing) +- [Only export metrics with `prometheus-scrape-target`](#heading--only-export-metrics-with-prometheus-scrape-target) + + + + +

Deploy `cos-lite`

+ + +The first step will be to get a hold of a machine, somewhere, and follow [this guide on how to get started with COS lite on microk8s](https://charmhub.io/topics/canonical-observability-stack/tutorials/install-microk8s). + +And be sure to follow the [best practices](/t/12012)! + +[note] Unless you're also planning to monitor some charmed applications with this cos-lite deployment, you will not need to use [the `offers` overlay](https://charmhub.io/topics/canonical-observability-stack/tutorials/install-microk8s#heading--deploy-the-cos-lite-bundle-with-overlays). [/note] + + +
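+
+For reference, once you have a Juju controller bootstrapped on your MicroK8s cluster (as covered in the linked tutorial), the deployment itself boils down to something like the following sketch:
+
+```bash
+# Create a dedicated model for the observability stack and deploy the bundle.
+juju add-model cos
+juju deploy cos-lite --trust
+
+# Watch the deployment until every application settles into active/idle.
+juju status --watch 5s
+```
+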

Deploy `grafana-agent`

+
+
+The Grafana agent will act as an intermediary between the applications you want to monitor and the `cos-lite` stack. It will gather telemetry from your applications and send it to `cos-lite`, where you will be able to inspect it through the Grafana dashboards.
+
+We recommend hosting the Grafana agent as close as possible to the workloads you intend to monitor, to minimise the risk of network faults and the resulting gaps in telemetry collection.
+Also, we recommend installing the Grafana agent via a handy snap we maintain:
+
+[![Get it from the Snap Store](https://snapcraft.io/static/images/badges/en/snap-store-black.svg)](https://snapcraft.io/grafana-agent)
+
+However, Grafana agent is also available as a single Go binary, and you are free to install it and run it the way you like. See the [official documentation](https://grafana.com/docs/agent/latest/) for the publisher's recommendations and guides.
+
+[note type=positive] Last but not least, we also have it [containerized](http://ghcr.io/canonical/grafana-agent) and [petrified](https://github.com/canonical/grafana-agent-rock/). [/note]
+
+Now that you have Grafana Agent up and running, you will need to configure it.
+

Get the API endpoints from `traefik`

+
+`cos-lite` includes a `traefik` instance that takes care of load balancing and ingressing the various observability components of the stack. Since `cos-lite` runs on Kubernetes, this allows you to talk to those components via `traefik` over a stable URL.
+
+[note type=caution]
+Before you can use Traefik from an external service such as Grafana agent, you will need to ensure that the Traefik URL is routable from the service host, and that the address is stable (e.g. not a dynamic IP).
+In other words, Traefik's own URL also needs to be stable.
+[/note]
+
+
+In the Juju model where `cos-lite` is installed, you can run:
+
+> `juju run traefik/0 show-proxied-endpoints`
+
+Assuming you have [configured the `traefik` charm](https://github.com/canonical/traefik-k8s-operator#configurations) to use an external hostname, for example `"traefik.url"`, you will see something like:
+
+```
+proxied-endpoints: '{
+    "prometheus/0": {"url": "https://traefik.url/mymodel-prometheus-0"},
+    "loki/0": {"url": "https://traefik.url/mymodel-loki-0"},
+    "alertmanager": {"url": "https://traefik.url/mymodel-alertmanager"},
+    "catalogue": {"url": "https://traefik.url/mymodel-catalogue"},
+}'
+```
+
+[note type=positive] You can also open `https://traefik.url/mymodel-catalogue` in a browser to see a page with links to all `cos-lite` components' user interfaces. [/note]
+
+At this point you will need to follow [the documentation on how to configure the Grafana agent](https://grafana.com/docs/agent/latest/static/configuration/#configure-static-mode).
+Use the URLs you obtained from Traefik to tell the agent where to send its telemetry.
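+
+As a rough illustration -- a sketch only, with `traefik.url` and `mymodel` being the example values from the output above, and the scrape targets entirely dependent on what you want to collect -- a minimal static-mode Grafana Agent configuration that forwards metrics to Prometheus and logs to Loki via the proxied endpoints could look like this:
+
+```yaml
+# grafana-agent static-mode configuration (sketch).
+server:
+  log_level: info
+
+metrics:
+  wal_directory: /tmp/grafana-agent-wal
+  global:
+    remote_write:
+      # Prometheus remote-write endpoint behind Traefik.
+      - url: https://traefik.url/mymodel-prometheus-0/api/v1/write
+  configs:
+    - name: local
+      scrape_configs:
+        - job_name: node
+          static_configs:
+            # e.g. a node-exporter running on this host.
+            - targets: ['localhost:9100']
+
+logs:
+  configs:
+    - name: local
+      clients:
+        # Loki push endpoint behind Traefik.
+        - url: https://traefik.url/mymodel-loki-0/loki/api/v1/push
+      positions:
+        filename: /tmp/grafana-agent-positions.yaml
+      scrape_configs:
+        - job_name: syslog
+          static_configs:
+            - targets: [localhost]
+              labels:
+                job: syslog
+                __path__: /var/log/syslog
+```
+
+Refer to the Grafana Agent documentation linked above for the full set of options.
+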

Add custom dashboards and alerts

+ + +In order to add your own dashboards and alerts to `cos-lite` you will need to deploy the [`cos-config` charm](https://github.com/canonical/cos-configuration-k8s-operator) on top of `cos-lite`. + +Follow [this guide](https://github.com/canonical/cos-configuration-k8s-operator#deployment) to set up `cos-config` in the same Juju model in which `cos-lite` is deployed. + + +
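+
+For illustration, a deployment could look something like the following sketch (the repository URL is a placeholder for your own configuration repository; see the charm's documentation for the full list of options and relations):
+
+```bash
+# Deploy cos-configuration-k8s in the same model as cos-lite, pointing it
+# at a git repository that holds your dashboards and alert rules.
+juju deploy cos-configuration-k8s --config git_repo=https://github.com/your-org/your-cos-config --config git_branch=main
+
+# Then integrate it with the relevant COS components, for example Grafana.
+juju integrate cos-configuration-k8s grafana
+```
+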

TLS

+ +You can deploy cos-lite with the [tls](https://github.com/canonical/cos-lite-bundle/pull/80) overlay to enable secure communications with and within COS Lite. + +You can follow [this guide](https://charmhub.io/traefik-k8s/docs/tls-termination) to enable TLS in Traefik and COS Lite. + + +
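+
+As a sketch, assuming you have saved the TLS overlay from the bundle repository locally (the exact file name may differ; check the repository), deploying with it looks like any other overlay deployment:
+
+```bash
+# Deploy cos-lite with the TLS overlay applied on top of the bundle.
+juju deploy cos-lite --trust --overlay ./tls-overlay.yaml
+```
+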

Known limitations and upcoming features

+ +

Identity

+ +We are "working towards"[citation needed] an integration with canonical's [IAM bundle](https://github.com/canonical/iam-bundle) to provide a charmed identity solution to support locking down your observability stack behind an identity provider. Stay tuned for updates! + +

Tracing

+ +We are "working towards"[citation needed] a [tracing overlay](https://github.com/canonical/cos-lite-bundle/pull/79) to add distributed tracing capabilities to cos-lite. Once that work is done, you will be able to add [Grafana Tempo](https://grafana.com/oss/tempo/) to the stack. + + +

Only export metrics with `prometheus-scrape-target`

+ + +In some rare circumstances, you might prefer to use `prometheus-scrape-target` instead of `grafana-agent`. +Namely: +- when you only need metrics (no logs, traces, etc...) +- when you'd rather make the necessary firewall changes in the workload you want to monitor, than ingress cos-lite +- when you're not able to install anything (or the grafana-agent anyway) on the workload you want to monitor + +If this is your situation, we've got you covered. You can deploy [`prometheus-scrape-target`](https://github.com/canonical/prometheus-scrape-target-k8s-operator) and configure it to scrape your workload. + +------------------------- + +ppasotti | 2023-10-02 09:20:46 UTC | #2 + + + +------------------------- + +ppasotti | 2023-10-04 08:24:55 UTC | #3 + +@tmihoc can you help get the TOC to work? @jose added it earlier yesterday but for some reason it doesn't seem to work + +------------------------- + +tmihoc | 2023-10-04 08:59:35 UTC | #4 + +Done. (Also fixed some typos.) + +------------------------- + +sombrafam | 2024-05-20 02:01:07 UTC | #5 + +[quote="ppasotti, post:1, topic:12005"] +Now that you have Grafana Agent up and running, you will need to configure it. +[/quote] + +Hey, can you provide a grafana-agent.yaml sample in a way it would connect to COS Lite? I can't really figure out which one of the proxyed endpoints I should use, and where I should set it in the agent. + +Having a simple metric collection from the host we are monitoring, for example, CPU usage, would be very desirable. + +------------------------- + +ppasotti | 2024-05-20 16:16:06 UTC | #6 + +Good point! I'll see if someone from the team can chip in a sample config for you. + +------------------------- + diff --git a/tmp/t/12012.md b/tmp/t/12012.md new file mode 100644 index 000000000..1798195b7 --- /dev/null +++ b/tmp/t/12012.md @@ -0,0 +1,143 @@ +0x12b | 2024-07-30 02:48:17 UTC | #1 + + +**Contents** + +- [Juju compatibility](#juju-compatibility) +- [Topology](#topology) + - [Deploy in isolation](#deploy-in-isolation) + - [COS Alerter](#cos-alerter) + - [Avoid pulling data cross-model](#avoid-pulling-data-cross-model) +- [Networking](#networking) + - [Ingress](#ingress) + - [Egress](#egress) + - [Controller routing](#controller-routing) +- [Storage](#storage) + - [Set up distributed storage](#set-up-distributed-storage) + - [Storage volume](#storage-volume) +- [Maintenance](#maintenance) + - [Known Issues](#known-issues) +- [Upgrading](#upgrading) + + + + +# Juju compatibility + +COS Lite requires Juju 3.1 to function properly. It is able to observe applications that are on Juju 2.9, but COS Lite itself needs to be deployed on a model that is Juju 3.1+. To be able to set up cross-model, cross-controller relations with existing Juju controllers and models, we therefore recommend upgrading your existing controllers (with applications that are to be observed by COS Lite) either to the latest Juju 3 version (at the time of writing 3.1.5), or to the latest version in the 2.9 track (at the time of writing: 2.9.44). + +# Topology + +## Deploy in isolation + +COS Lite should at the very least be deployed in its own model, but preferably even on its own substrate with its own controller. This limits the blast radius + of anything malfunctioning in the workloads you observe or the observability stack itself. We **strongly** recommend using [a separate three-node Microk8s cluster](https://microk8s.io/docs/high-availability). 
+ +## COS Alerter + +Apart from COS Lite itself, the [COS Alerter](https://github.com/canonical/cos-alerter) should be deployed on separate infrastructure, preferably on completely different hardware. The purpose of the alerter is to let operators know whenever the routing of notifications from COS Lite stops working, preventing a false sense of security. + +## Avoid pulling data cross-model + +Cross-model relations using the `prometheus_scrape` interface should be avoided. Instead, deploy a Grafana agent in each of the models you want to observe and let the agents be a fan-in point pushing the data to COS. This makes for a less error-prone networking topology that is easier to reason about, especially at scale. + +# Networking + +## Ingress + +MetalLB, or an equivalent load balancer, should be configured on the Kubernetes environment COS is running on. As part of the COS Lite bundle, Traefik is deployed and configured to provide network ingressing for the bundle components. Make sure the load balancer provides Traefik with **a static IP**, or some other identity that remains stable over time. + +## Egress + +Some charms require external connectivity for the COS Lite bundle to function correctly. + +As a common requirement, the environment should be able to reach: +* Charmhub; +* the Juju registry; +* Snapcraft. + +There are other charm-specific URLs that some charms access by default: +* https://objects.githubusercontent.com/, needed by [Loki](https://charmhub.io/loki-k8s#network-requirements-9); +* stats.grafana.org, needed by [Grafana](https://charmhub.io/grafana-k8s/docs/network-requirements) and [Grafana Agent](https://charmhub.io/grafana-agent-k8s#network-requirements-5). + +To disable the functionalities that require those URLs, please refer to linked docs for the relevant charms. + +## Controller routing + +If the network topology is anything other than flat, the Juju controllers will need to be bootstrapped with `--controller-external-ips`, `--controller-external-name`, or both, so that the controllers are able to communicate over routable identities for your cross--controller relations. For example: + +``` +juju bootstrap microk8s uk8s \ + --config controller-service-type=loadbalancer \ + --config controller-external-ips=[10.0.0.2] +``` + +Note that these config values can only be set at bootstrap time, and are read-only thereafter. + +# Storage + +## Set up distributed storage +[note] +Note: **Do not** use the [`hostpath-storage`](https://microk8s.io/docs/addon-hostpath-storage) microk8s addon in production: + * `PersistentVolumeClaims` created by the hostpath storage provisioner are bound to the local node, so it is *impossible to move them to a different node*. + * A hostpath volume can *grow beyond the capacity set in the volume claim manifest*. + +Instead, you could use the [`rook-ceph`](https://microk8s.io/docs/addon-rook-ceph) addon together with microceph. See the [microceph tutorial](https://charmhub.io/cos-lite/docs/tutorials/distributed-storage?channel=latest/edge). +[/note] + +## Storage volume + +You should come up with an appropriate [storage overlay](https://github.com/canonical/cos-lite-bundle/blob/main/overlays/storage-small-overlay.yaml) for your use case. For example, a deployment that handles roughly: + +- 1M samples/min with 150 targets +- 100k loglines/min for about 150 targets + +has a growth rate of about 50GB per day under normal operations. So, if you want a retention interval of about two months, you’ll need 3TB of storage only for the telemetry. 
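+
+For example (a sketch, using the `storage-small-overlay.yaml` linked above as a starting point; adjust the sizes to match your retention needs before deploying):
+
+```bash
+# Fetch the sample storage overlay, edit the sizes, then deploy with it.
+curl -fsSL -O https://raw.githubusercontent.com/canonical/cos-lite-bundle/main/overlays/storage-small-overlay.yaml
+juju deploy cos-lite --trust --overlay ./storage-small-overlay.yaml
+```
+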
+ +# Maintenance +Before restarting a Kubernetes node with COS applications on it, you should cordon and drain it so that the StatefulSets are moved to another node. This process will ensure the least amount of downtime. + +In the event that a node goes down unexpectedly and cannot be recovered, you can manually recover the COS units by force deleting the pod and any volumeattachments that existed on the inaccessible node. The pods will then be rescheduled to a working node. + +## Known issues +- High availability during maintenance is only possible on clusters utilizing distributed storage, such as MicroCeph. +- All of the COS applications use StatefulSets, so these pods will not self-heal and deploy to another node automatically. +- The juju controller needs to be up for COS pods to start, otherwise their charm container will fail, causing the pod to go into a crash loop. + +# Upgrading +Remember to `juju refresh` with `--trust`. If omitted, you would need to `juju trust X --scope=cluster`. + +------------------------- + +jose | 2023-09-28 19:09:08 UTC | #2 + +Great doc Simme, + +I would only add this link: + +```diff +- We recommend using a separate three-node Microk8s cluster. ++ [We recommend using a separate three-node Microk8s cluster.](https://microk8s.io/docs/high-availability) +``` + +------------------------- + +0x12b | 2023-09-29 08:33:16 UTC | #3 + +Good point, José! Thanks! + +------------------------- + +sed-i | 2023-11-22 23:00:57 UTC | #4 + +Has there been any experience around max num of file descriptors (`ulimit -n`)? + +------------------------- + +sed-i | 2024-04-08 14:27:51 UTC | #5 + +Reference bundles for deploying microceph: +https://github.com/canonical/mimir-coordinator-k8s-operator/issues/45#issuecomment-2042342350 + +------------------------- + diff --git a/tmp/t/12042.md b/tmp/t/12042.md new file mode 100644 index 000000000..c34fa686c --- /dev/null +++ b/tmp/t/12042.md @@ -0,0 +1,28 @@ +sed-i | 2023-10-03 16:34:38 UTC | #1 + +## TLS +COS Lite can be deployed unencrypted, with TLS termination only, or end-to-end encrypted. + +### Unencrypted COS Lite +The [cos-lite bundle](https://charmhub.io/cos-lite) deploys COS with workloads communicating using plain HTTP (unencrypted). + +### TLS-terminated COS Lite +The traefik charm can function as a TLS termination point by relating it to an external CA (integrator) charm. Within the COS model, charms would still communicate using plain HTTP (unencrypted). + +### COS Lite with end-to-end TLS +The cos-lite bundle together with the TLS overlay deploy an end-to-end encrypted COS. +- COS charms generate CSRs with the k8s fqdn as the SAN DNS and the internal CA signs. +- All COS charms trust the internal CA by installing the CA certificate in the charm and workload containers, using the `update-ca-certificates` tool. +- The external CA provides a certificate for traefik's external URL. +- Within the COS model, workloads communicate via k8s fqdn URLs. +- Requests coming from outside of the model, use the ingress URLs. +- Traefik is able to establish a secure connection with its proxied apps thanks to trusting the local CA. + +Note: currently there is a [known issue](https://github.com/canonical/operator/issues/970) due to which some COS relations are limited to in-cluster relations only. + +The end-to-end COS TLS design is described in the diagram below. The diagram is limited to prometheus and alertmanager for brevity and clarity. 
+ +![tls|690x327](upload://cSpBUYTLRbV26SlzNheN95e79FJ.png) + +------------------------- + diff --git a/tmp/t/12128.md b/tmp/t/12128.md new file mode 100644 index 000000000..44d7d79cb --- /dev/null +++ b/tmp/t/12128.md @@ -0,0 +1,248 @@ +bschimke95 | 2024-10-10 22:16:39 UTC | #1 + +> [From Zero to Hero: Write your first Kubernetes charm](/t/7113) > Write scenario tests for your charm +> +> **See previous: [Write unit tests for your charm](/t/11961)** + +[note type=information] +This document is part of a series, and we recommend you follow it in sequence. However, you can also jump straight in by checking out the code from the previous branches: + +``` +git clone https://github.com/canonical/juju-sdk-tutorial-k8s.git +cd juju-sdk-tutorial-k8s +git checkout 08_unit_testing +git checkout -b 09_scenario_testing +``` +[/note] + +In the previous chapter we checked the basic functionality of our charm by writing unit tests. + +However, there is one more type of test to cover, namely: state transition tests. + +In the charming world the current recommendation is to write state transition tests with the 'scenario' model popularised by the [`ops-scenario`](/t/10583) library. + +[note] Scenario is a state-transition testing SDK for operator framework charms. [/note] + +In this chapter you will write a scenario test to check that the `get_db_info` action that you defined in an earlier chapter behaves as expected. + + +**Contents**: + +1. [Prepare your test environment](#heading--prepare-your-test-environment) +1. [Prepare your test directory](#heading--prepare-your-test-directory) +1. [Write your scenario test](#heading--write-your-integration-test) +1. [Run the test](#heading--run-the-test) +1. [Review the final code](#heading--review-the-final-code) + + +

Prepare your test environment

+ +Install `ops-scenario`: + +```bash +pip install ops-scenario +``` +In your project root's existing `tox.ini` file, add the following: + +``` +... + +[testenv:scenario] +description = Run scenario tests +deps = + pytest + cosl + ops-scenario ~= 7.0 + coverage[toml] + -r {tox_root}/requirements.txt +commands = + coverage run --source={[vars]src_path} \ + -m pytest \ + --tb native \ + -v \ + -s \ + {posargs} \ + {[vars]tests_path}/scenario + coverage report +``` + +And adjust the `env_list` so that the Scenario tests will run with a plain `tox` command: + +``` +env_list = unit, scenario +``` + +

Prepare your test directory

+ +By convention, scenario tests are kept in a separate directory, `tests/scenario`. Create it as below: + +``` +mkdir -p tests/scenario +cd tests/scenario +``` + + +

Write your scenario test

+ +In your `tests/scenario` directory, create a new file `test_charm.py` and add the test below. This test will check the behaviour of the `get_db_info` action that you set up in a previous chapter. It will first set up the test context by setting the appropriate metadata, then define the input state, then run the action and, finally, check if the results match the expected values. + +```python +from unittest.mock import Mock + +import scenario +from pytest import MonkeyPatch + +from charm import FastAPIDemoCharm + + +def test_get_db_info_action(monkeypatch: MonkeyPatch): + monkeypatch.setattr('charm.LogProxyConsumer', Mock()) + monkeypatch.setattr('charm.MetricsEndpointProvider', Mock()) + monkeypatch.setattr('charm.GrafanaDashboardProvider', Mock()) + + # Use scenario.Context to declare what charm we are testing. + # Note that Scenario will automatically pick up the metadata from + # your charmcraft.yaml file, so you typically could just do + # `ctx = scenario.Context(FastAPIDemoCharm)` here, but the full + # version is included here as an example. + ctx = scenario.Context( + FastAPIDemoCharm, + meta={ + 'name': 'demo-api-charm', + 'containers': {'demo-server': {}}, + 'peers': {'fastapi-peer': {'interface': 'fastapi_demo_peers'}}, + 'requires': { + 'database': { + 'interface': 'postgresql_client', + } + }, + }, + config={ + 'options': { + 'server-port': { + 'default': 8000, + } + } + }, + actions={ + 'get-db-info': {'params': {'show-password': {'default': False, 'type': 'boolean'}}} + }, + ) + + # Declare the input state. + state_in = scenario.State( + leader=True, + relations={ + scenario.Relation( + endpoint='database', + interface='postgresql_client', + remote_app_name='postgresql-k8s', + local_unit_data={}, + remote_app_data={ + 'endpoints': '127.0.0.1:5432', + 'username': 'foo', + 'password': 'bar', + }, + ), + }, + containers={ + scenario.Container('demo-server', can_connect=True), + }, + ) + + # Run the action with the defined state and collect the output. + ctx.run(ctx.on.action('get-db-info', params={'show-password': True}), state_in) + + assert ctx.action_results == { + 'db-host': '127.0.0.1', + 'db-port': '5432', + 'db-username': 'foo', + 'db-password': 'bar', + } +``` + + +

Run the test

+ +In your Multipass Ubuntu VM shell, run your scenario test as below: + +```bash +ubuntu@charm-dev:~/juju-sdk-tutorial-k8s$ tox -e scenario +``` + +You should get an output similar to the one below: + +```bash +scenario: commands[0]> coverage run --source=/home/tameyer/code/juju-sdk-tutorial-k8s/src -m pytest --tb native -v -s /home/tameyer/code/juju-sdk-tutorial-k8s/tests/scenario +======================================= test session starts ======================================== +platform linux -- Python 3.11.9, pytest-8.3.3, pluggy-1.5.0 -- /home/tameyer/code/juju-sdk-tutorial-k8s/.tox/scenario/bin/python +cachedir: .tox/scenario/.pytest_cache +rootdir: /home/tameyer/code/juju-sdk-tutorial-k8s +plugins: anyio-4.6.0 +collected 1 item + +tests/scenario/test_charm.py::test_get_db_info_action PASSED + +======================================== 1 passed in 0.19s ========================================= +scenario: commands[1]> coverage report +Name Stmts Miss Cover +---------------------------------- +src/charm.py 129 57 56% +---------------------------------- +TOTAL 129 57 56% + scenario: OK (6.89=setup[6.39]+cmd[0.44,0.06] seconds) + congratulations :) (6.94 seconds) +``` + +Congratulations, you have written your first scenario test! + +

Review the final code

+ + +For the full code see: [09_scenario_testing](https://github.com/canonical/juju-sdk-tutorial-k8s/tree/09_scenario_test) + +For a comparative view of the code before and after this doc see: [Comparison](https://github.com/canonical/juju-sdk-tutorial-k8s/compare/08_unit_testing...09_scenario_test) + +> **See next: [Write integration tests for your charm](/t/11991)** + +> Contributors: @bschimke95, @james-garner, @tony-meyer + +------------------------- + +ppasotti | 2023-10-23 07:41:55 UTC | #2 + +Hi Benjamin, thanks for the excellent write-up. +I noticed a couple of things that I think should be improved: + +assuming this is a 'regular' charm project, i.e.: +- `./src/charm/charm.py` contains a `FastAPIDemoCharm` class, and +- there is a valid `./metadata.yaml` file, +- `actions.yaml`, +- etc... + +then Scenario is smart enough to find and populate the `meta=`, `actions=`, `config=` fields of `Context`. +In other words, all you need is + +> `ctx = Context(FastAPIDemoCharm)` + +Scenario will take care of the rest. No need to manually pass meta, config, or actions. + +Second point, `scenario.Relation` is smart enough to figure out the interface given the endpoint (again from metadata.yaml), so you typically omit the interface. + +Third point, unless that's intentional, as a way to show the API, I would omit the default-value fields. (`local_unit_data={}`). + +The rest looks great. + +One 'high-level' piece of feedback is that action tests are usually not very representative of Scenario tests because actions tend not to affect the charm state at all. And scenario is best at doing state transition testing, so picking an action event (one where no state transition occurs) to showcase it is, IMHO, a poor choice. + +You could do an 'assert the state has NOT changed' type of test, of course, but that's a bit +:man_shrugging: :man_shrugging: + +> `assert state_in == action_out.state` + +another option would be to make the action store something to stored data (cache it), and verify that the state of the cache has been updated correctly by using the `scenario.State.stored_state` API. + +But imho it's best to pick a different kind of event altogether. + +------------------------- + diff --git a/tmp/t/12244.md b/tmp/t/12244.md new file mode 100644 index 000000000..9cbd7678d --- /dev/null +++ b/tmp/t/12244.md @@ -0,0 +1,408 @@ +mmkay | 2024-09-10 05:02:31 UTC | #1 + +> [From Zero to Hero: Write your first Kubernetes charm](/t/7113) > Open a Kubernetes port in your charm +> +> **See previous: [Write integration tests for your charm](/t/11991)** + +[note type=information] +This document is part of a series, and we recommend you follow it in sequence. However, you can also jump straight in by checking out the code from the previous branches: + +``` +git clone https://github.com/canonical/juju-sdk-tutorial-k8s.git +cd juju-sdk-tutorial-k8s +git checkout 10_integration_testing +git checkout -b 11_open_port_k8s_service +``` +[/note] + +A deployed charm should be consistently accessible via a stable URL on a cloud. + +However, our charm is currently accessible only at the IP pod address and, if the pod gets recycled, the IP address will change as well. + +> See earlier chapter: [Make your charm configurable](/t/7401) + +In Kubernetes you can make a service permanently reachable under a stable URL on the cluster by exposing a service port via the `ClusterIP`. In Juju 3.1+, you can take advantage of this by using the `Unit.set_ports()` method. 
+ +> Read more: [ClusterIP](https://kubernetes.io/docs/concepts/services-networking/service/#type-clusterip) + +In this chapter of the tutorial you will extend the existing `server-port` configuration option to use Juju `open-port` functionality to expose a Kubernetes service port. Building on your experience from the previous testing chapters, you will also write tests to check that the new feature you've added works as intended. + +**Contents**: + +1. [Add a Kubernetes service port to your charm](#heading--add-a-kubernetes-service-port-to-your-charm) +1. [Test the new feature](#heading--test-the-new-feature) +1. [Validate your charm](#heading--validate-your-charm) +1. [Review the final code](#heading--review-the-final-code) + +

Add a Kubernetes service port to your charm

+
+In your `src/charm.py` file, do all of the following:
+
+In the `_on_config_changed` method, add a call to a new method:
+
+```python
+self._handle_ports()
+```
+
+Then, in the definition of the `FastAPIDemoCharm` class, define the method:
+
+```python
+def _handle_ports(self) -> None:
+    port = cast(int, self.config['server-port'])
+    self.unit.set_ports(port)
+```
+
+> See more: [`ops.Unit.set_ports`](https://ops.readthedocs.io/en/latest/#ops.Unit.set_ports)
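+
+Note that the tests in the next section also expect the charm to refuse port 22 and report a blocked status. In the tutorial's full source this validation lives alongside the charm's status handling; a minimal sketch of such a check (the method name and the message text are assumed here to match the tests below) might be:
+
+```python
+def _on_collect_status(self, event: ops.CollectStatusEvent) -> None:
+    port = cast(int, self.config['server-port'])
+    if port == 22:
+        # Port 22 is reserved for SSH, so refuse to open it.
+        event.add_status(ops.BlockedStatus('Invalid port number, 22 is reserved for SSH'))
+```
+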

Test the new feature

+ +- [Write a unit test](#heading--write-a-unit-test) +- [Write a scenario test](#heading--write-a-scenario-test) +- [Write an integration test](#heading--write-an-integration-test) + +

Write a unit test

+ + +[note type=information] +**If you've skipped straight to this chapter:**
Note that it builds on the earlier unit testing chapter. To catch up, see: [Write unit tests for your charm](/t/11961). +[/note] + +Let's write a unit test to verify that the port is opened. Open `tests/unit/test_charm.py` and add the following test function to the file. + +```python +@pytest.mark.parametrize( + 'port,expected_status', + [ + (22, ops.BlockedStatus('Invalid port number, 22 is reserved for SSH')), + (1234, ops.BlockedStatus('Waiting for database relation')), + ], +) +def test_port_configuration( + monkeypatch, harness: ops.testing.Harness[FastAPIDemoCharm], port, expected_status +): + # Given + monkeypatch.setattr(FastAPIDemoCharm, 'version', '1.0.1') + harness.container_pebble_ready('demo-server') + # When + harness.update_config({'server-port': port}) + harness.evaluate_status() + currently_opened_ports = harness.model.unit.opened_ports() + port_numbers = {port.port for port in currently_opened_ports} + server_port_config = harness.model.config.get('server-port') + unit_status = harness.model.unit.status + # Then + if port == 22: + assert server_port_config not in port_numbers + else: + assert server_port_config in port_numbers + assert unit_status == expected_status +``` + +[note type=information] +**Tests parametrisation**
Note that we used the `parametrize` decorator to run a single test against multiple sets of arguments. Adding a new test case, like making sure that the error message is informative given a negative or too big port number, would be as simple as extending the list in the decorator call. +See [How to parametrize fixtures and test functions](https://docs.pytest.org/en/8.0.x/how-to/parametrize.html). +[/note] + +Time to run the tests! + +In your Multipass Ubuntu VM shell, run the unit test: + +``` +ubuntu@charm-dev:~/fastapi-demo$ tox -re unit +``` + +If successful, you should get an output similar to the one below: + +``` +$ tox -re unit +unit: remove tox env folder /home/ubuntu/fastapi-demo/.tox/unit +unit: install_deps> python -I -m pip install cosl 'coverage[toml]' pytest -r /home/ubuntu/fastapi-demo/requirements.txt +unit: commands[0]> coverage run --source=/home/ubuntu/fastapi-demo/src -m pytest --tb native -v -s /home/ubuntu/fastapi-demo/tests/unit +========================================= test session starts ========================================= +platform linux -- Python 3.10.13, pytest-8.0.2, pluggy-1.4.0-- /home/ubuntu/fastapi-demo/.tox/unit/bin/python +cachedir: .tox/unit/.pytest_cache +rootdir: /home/ubuntu/fastapi-demo +collected 3 items + +tests/unit/test_charm.py::test_pebble_layer PASSED +tests/unit/test_charm.py::test_port_configuration[22-expected_status0] PASSED +tests/unit/test_charm.py::test_port_configuration[1234-expected_status1] PASSED + +========================================== 3 passed in 0.21s ========================================== +unit: commands[1]> coverage report +Name Stmts Miss Cover +---------------------------------- +src/charm.py 122 43 65% +---------------------------------- +TOTAL 122 43 65% + unit: OK (6.00=setup[5.43]+cmd[0.49,0.09] seconds) + congratulations :) (6.04 seconds) +``` + +

Write a scenario test

+ +Let's also write a scenario test! Add this test to your `tests/scenario/test_charm.py` file: + +```python +def test_open_port(monkeypatch: MonkeyPatch): + monkeypatch.setattr('charm.LogProxyConsumer', Mock()) + monkeypatch.setattr('charm.MetricsEndpointProvider', Mock()) + monkeypatch.setattr('charm.GrafanaDashboardProvider', Mock()) + + # Use scenario.Context to declare what charm we are testing. + ctx = scenario.Context( + FastAPIDemoCharm, + meta={ + 'name': 'demo-api-charm', + 'containers': {'demo-server': {}}, + 'peers': {'fastapi-peer': {'interface': 'fastapi_demo_peers'}}, + 'requires': { + 'database': { + 'interface': 'postgresql_client', + } + }, + }, + config={ + 'options': { + 'server-port': { + 'default': 8000, + } + } + }, + actions={ + 'get-db-info': {'params': {'show-password': {'default': False, 'type': 'boolean'}}} + }, + ) + state_in = scenario.State( + leader=True, + relations=[ + scenario.Relation( + endpoint='database', + interface='postgresql_client', + remote_app_name='postgresql-k8s', + local_unit_data={}, + remote_app_data={ + 'endpoints': '127.0.0.1:5432', + 'username': 'foo', + 'password': 'bar', + }, + ), + scenario.PeerRelation( + endpoint='fastapi-peer', + peers_data={'unit_stats': {'started_counter': '0'}}, + ), + ], + containers=[ + scenario.Container(name='demo-server', can_connect=True), + ], + ) + state1 = ctx.run('config_changed', state_in) + assert len(state1.opened_ports) == 1 + assert state1.opened_ports[0].port == 8000 + assert state1.opened_ports[0].protocol == 'tcp' +``` + +In your Multipass Ubuntu VM shell, run your scenario test as below: + +``` +ubuntu@charm-dev:~/fastapi-demo$ tox -re scenario +``` + +If successful, this should yield: +``` +scenario: remove tox env folder /home/ubuntu/fastapi-demo/.tox/scenario +scenario: install_deps> python -I -m pip install cosl 'coverage[toml]' ops-scenario pytest -r /home/ubuntu/fastapi-demo/requirements.txt +scenario: commands[0]> coverage run --source=/home/ubuntu/fastapi-demo/src -m pytest --tb native -v -s /home/ubuntu/fastapi-demo/tests/scenario +========================================= test session starts ========================================= +platform linux -- Python 3.10.13, pytest-8.0.2, pluggy-1.4.0 -- /home/ubuntu/fastapi-demo/.tox/scenario/bin/python +cachedir: .tox/scenario/.pytest_cache +rootdir: /home/ubuntu/fastapi-demo +collected 2 items + +tests/scenario/test_charm.py::test_get_db_info_action PASSED +tests/scenario/test_charm.py::test_open_port PASSED + +========================================== 2 passed in 0.31s ========================================== +scenario: commands[1]> coverage report +Name Stmts Miss Cover +---------------------------------- +src/charm.py 122 22 82% +---------------------------------- +TOTAL 122 22 82% + scenario: OK (6.66=setup[5.98]+cmd[0.59,0.09] seconds) + congratulations :) (6.69 seconds) +``` + +

Write an integration test

+ +In your `tests/integration` directory, create a `helpers.py` file with the following contents: + +```python +import socket +from pytest_operator.plugin import OpsTest + + +async def get_address(ops_test: OpsTest, app_name: str, unit_num: int = 0) -> str: + """Get the address for a the k8s service for an app.""" + status = await ops_test.model.get_status() + k8s_service_address = status['applications'][app_name].public_address + return k8s_service_address + + +def is_port_open(host: str, port: int) -> bool: + """check if a port is opened in a particular host""" + try: + with socket.create_connection((host, port), timeout=5): + return True # If connection succeeds, the port is open + except (ConnectionRefusedError, TimeoutError): + return False # If connection fails, the port is closed +``` + +In your existing `tests/integration/test_charm.py` file, import the methods defined in `helpers.py`: + +```python +from helpers import is_port_open, get_address +``` + +Now add the test case that will cover open ports: + +```python +@pytest.mark.abort_on_fail +async def test_open_ports(ops_test: OpsTest): + """Verify that setting the server-port in charm's config correctly adjust k8s service + + Assert blocked status in case of port 22 and active status for others + """ + app = ops_test.model.applications.get('demo-api-charm') + + # Get the k8s service address of the app + address = await get_address(ops_test=ops_test, app_name=APP_NAME) + # Validate that initial port is opened + assert is_port_open(address, 8000) + + # Set Port to 22 and validate app going to blocked status with port not opened + await app.set_config({'server-port': '22'}) + (await ops_test.model.wait_for_idle(apps=[APP_NAME], status='blocked', timeout=120),) + assert not is_port_open(address, 22) + + # Set Port to 6789 "Dummy port" and validate app going to active status with port opened + await app.set_config({'server-port': '6789'}) + (await ops_test.model.wait_for_idle(apps=[APP_NAME], status='active', timeout=120),) + assert is_port_open(address, 6789) +``` +In your Multipass Ubuntu VM shell, run the test as below: + +``` +ubuntu@charm-dev:~/fastapi-demo$ tox -re integration +``` + +This test will take longer as a new model needs to be created. If successful, it should yield something similar to the output below: + +``` +==================================== 3 passed in 234.15s (0:03:54) ==================================== + integration: OK (254.77=setup[19.55]+cmd[235.22] seconds) + congratulations :) (254.80 seconds) +``` + +

Validate your charm

+ +Congratulations, you've added a new feature to your charm, and also written tests to ensure that it will work properly. Time to give this feature a test drive! + +In your Multipass VM, repack and refresh your charm as below: + +```bash +ubuntu@charm-dev:~/fastapi-demo$ charmcraft pack +juju refresh \ + --path="./demo-api-charm_ubuntu-22.04-amd64.charm" \ + demo-api-charm --force-units --resource \ + demo-server-image=ghcr.io/canonical/api_demo_server:1.0.1 +``` + +Watch your charm deployment status change until deployment settles down: + +``` +juju status --watch 1s +``` + +Use `kubectl` to list the available services and verify that `demo-api-charm` service exposes the `ClusterIP` on the expected port: + + +``` +$ kubectl get services -n charm-model +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +modeloperator ClusterIP 10.152.183.231 17071/TCP 34m +demo-api-charm-endpoints ClusterIP None 19m +demo-api-charm ClusterIP 10.152.183.92 65535/TCP,8000/TCP 19m +postgresql-k8s-endpoints ClusterIP None 18m +postgresql-k8s ClusterIP 10.152.183.162 5432/TCP,8008/TCP 18m +postgresql-k8s-primary ClusterIP 10.152.183.109 8008/TCP,5432/TCP 18m +postgresql-k8s-replicas ClusterIP 10.152.183.29 8008/TCP,5432/TCP 18m +patroni-postgresql-k8s-config ClusterIP None 17m +``` + +Finally, `curl` the `ClusterIP` to verify that the `version` endpoint responds on the expected port: + +``` +$ curl 10.152.183.92:8000/version +{"version":"1.0.1"} +``` + +Congratulations, your service now exposes an external port that is independent of any pod / node restarts! + +

Review the final code

+ +For the full code see: [11_open_port_k8s_service](https://github.com/canonical/juju-sdk-tutorial-k8s/tree/11_open_port_k8s_service) + +For a comparative view of the code before and after this doc see: [Comparison](https://github.com/canonical/juju-sdk-tutorial-k8s/compare/10_integration_testing...11_open_port_k8s_service) + +> **See next: [Publish your charm on Charmhub](/t/12281)** + +> Contributors: @adithya-raj, @mmkay @ibraaoad, @tmihoc, @james-garner + +------------------------- + +beliaev-maksim | 2023-11-15 11:01:35 UTC | #2 + +[quote="mmkay, post:1, topic:12244"] +Unit.set.ports() +[/quote] + +typo? + +@tmihoc + +------------------------- + +mmkay | 2023-11-15 11:03:25 UTC | #3 + +Seems so, updated. + +------------------------- + +jedel | 2024-03-01 18:18:34 UTC | #4 + +Edited the error message for the value of `ops.BlockedStatus` on the parametrized test. The correct error message on `charm.py` has no capitalization on the first letter. + +------------------------- + +adithya-raj | 2024-06-14 14:55:48 UTC | #5 + +Edited the `test_port_configuration` function to properly update based on the new configuration and match the code provided on the GitHub. + +```python + # When + harness.update_config({"server-port": port}) + harness.evaluate_status() # Added this line + currently_opened_ports = harness.model.unit.opened_ports() + port_numbers = {p.port for p in currently_opened_ports} +``` + +------------------------- + +tmihoc | 2024-06-14 15:14:23 UTC | #6 + +Thanks, @adithya-raj ! PS Moved your name to the front of the contributors list as we do it alphabetically. + +------------------------- + diff --git a/tmp/t/12246.md b/tmp/t/12246.md new file mode 100644 index 000000000..558296ba1 --- /dev/null +++ b/tmp/t/12246.md @@ -0,0 +1,48 @@ +tmihoc | 2024-03-25 11:38:38 UTC | #1 + +Juju is designed with performance in mind. With Juju, your cloud operations become: + + +- **Quick and easy.** + +Juju is intuitive. To deploy an application, run `juju deploy`. To configure it, run `juju config`. And so on. + +> See more: [Get started with Juju](/t/6559) + +- **Powerful.** + +In Juju, application integration is a first-class citizen: To integrate, run `juju integrate`. With 160+ intuitive CLI commands, any operation is just one command line away. + +> See more: [Juju CLI commands](/t/10045) + +- **Optimizable.** + +When you `juju deploy`, Juju automatically provisions infrastructure for you. However, you can also fine-tune the CPU, memory, and network resources, or ssh into a machine or pod. And Juju applications ship with sensible defaults, but they also expose further knobs that you may wish to turn -- say hello to 'configurations' and 'actions'! + +> See more: [How to manage machines](/t/5886), [How to manage storage](/t/5892), [How to manage spaces](/t/6664) + +- **Scalable.** + +You need to make an application highly available? Just add a few more applications units! + +> See more: [How to scale an application](/t/5476#heading--scale-an-application) + +- **Portable.** + +Juju is model-driven. It separates application logic from business logic, and takes care of the former so you can focus on the latter. Whatever you want done, declare it in a model. The model is attached to a controller bootstrapped into a cloud. You can export and share it or migrate it to another controller on another cloud. You can also connect workloads on different models and even different clouds. 
With Juju supporting a long list of clouds -- public or private, machine or Kubernetes, branded or entirely ad hoc -- the possibilities are endless. + +> See more: +> - [How to migrate a model](/t/1155#heading--migrate-a-workload-model-to-another-controller) +> - [How to manage cross-model integrations](/t/1150) +> - [List of supported clouds](/t/6665) + +- **Responsive and efficient.** + +Juju is designed to be both concurrent and parallel. It can manage multiple applications, services, and environments responsively and efficiently. + +- **Observable.** + +Juju's performance can be monitored using built-in tools and third-party solutions. + +------------------------- + diff --git a/tmp/t/12248.md b/tmp/t/12248.md new file mode 100644 index 000000000..aca18d23d --- /dev/null +++ b/tmp/t/12248.md @@ -0,0 +1,94 @@ +tmihoc | 2024-10-02 14:42:49 UTC | #1 + +> See also: [Juju security](/t/15684) + +Juju ships with sensible security defaults. However, security doesn't stop there. + +## Harden the cloud + +Use a private cloud. + +> See more: [List of supported clouds](https://juju.is/docs/juju/juju-supported-clouds) + +If you want to go one step further, take your cloud (and the entire deployment) offline. + +> See more: [How to take your deployment offline](/t/14367) + +## Harden the client and the agent binaries + +When you install Juju (= the `juju` CLI client + the Juju agent binaries) on Linux, you're installing it from a strictly confined snap. Make sure to keep this snap up to date. + +> See more: [Snapcraft | Snap confinement](https://snapcraft.io/docs/snap-confinement), [Install and manage the client](https://juju.is/docs/juju/install-and-manage-the-client), [Roadmap & Releases](https://juju.is/docs/juju/roadmap) + + +## Harden the controller(s) + +In a typical Juju workflow you allow your client to read your locally stored cloud credentials, then copy them to the controller, so that the controller can use them to authenticate with the cloud. However, for some clouds Juju now supports a workflow where your (client and) controller doesn't need to know your credentials directly -- you can just supply an instance profile (AWS) or a managed identity (Azure). One way to harden your controller is to take advantage of this workflow. + +> See more: [Bootstrap a controller](https://juju.is/docs/juju/manage-controllers#heading--bootstrap-a-controller), [AWS and Juju](/t/1084), [Azure and Juju](/t/1086) + +(Like all the cloud resources provisioned through Juju,) the cloud resource(s) (machines or containers) that a controller is deployed on by default run the latest Ubuntu LTS. This Ubuntu is *not* CIS- and DISA-STIG-compliant (see more: [Ubuntu | The Ubuntu Security Guide](https://ubuntu.com/security/certifications/docs/usg)). However, it is by default behind a firewall, inside a VPC, with only the following three ports opened -- as well as hardened (through security groups) -- by default: + +- (always:) `17070`, to allow access from clients and agents; +- (in high-availability scenarios): mongo +- (In high-availability scenarios): `controller-api-port`, which can be turned off (see [Controller configuration keys > `controller-api-port`](https://juju.is/docs/juju/list-of-controller-configuration-keys#heading--controller-api-port)). 
+ +When a controller deploys a charm, all the traffic between the controller and the resulting application unit agent(s) is [TLS](https://en.wikipedia.org/wiki/Transport_Layer_Security)-encrypted (each agent starts out with a CA certificate from the controller and, when they connect to the controller, they get another certificate that is then signed by the preshared CA certificate). In addition to that, every unit agent authenticates itself with the controller using a password. + +> See more: [Wikipedia | TLS](https://en.wikipedia.org/wiki/Transport_Layer_Security) + + + + +## Harden the user(s) + +When you bootstrap a controller into a cloud, you automatically become a user with controller admin access. Make sure to change your password, and choose a strong password. + +Also, when you create other users (whether human or for an application), take advantage of Juju's granular access levels to grant access to clouds, controllers, models, or application offers only as needed. Revoke or remove any users that are no longer needed. + +> See more: [User](https://juju.is/docs/juju/user), [User access levels](https://juju.is/docs/juju/user-permissions), [Manage users](https://juju.is/docs/juju/manage-users) + +## Harden the model(s) + +Within a single controller, living on a particular cloud, you can have multiple users, each of which can have different models (i.e., workspaces or namespaces), each of which can be associated with a different credential for a different cloud. Juju thus supports multi-tenancy. + +You can also restrict user access to a model and also restrict the commands that any user can perform on a given model. + +> See more: [Manage models](https://juju.is/docs/juju/manage-models) + +## Harden the applications + +When you deploy (an) application(s) from a charm or a bundle, choose the charm / bundle carefully: + +- Choose charms / bundles that show up in the Charmhub search – that means they’ve passed formal review – and which have frequent releases -- that means they're actively maintained. + +- Choose charms that don’t require deployment with `--trust` (i.e., access to the cloud credentials). If not possible, make sure to audit those charms. + +- Choose charms whose `charmcraft.yaml > containers > uid` and `gid` are not 0 (do not require root access). If not possible, make sure to audit those charms. + +- *Starting with Juju 3.6:* Choose charms whose `charmcraft.yaml > containers > charm-user` field set to `non-root`. If not possible, make sure to audit those charms. + +- Choose charms that support secrets (see more: [Secret](/t/7286)). + +(Like all the cloud resources provisioned through Juju,) the cloud resource(s) (machines or containers) that an application is deployed on by default run the latest Ubuntu LTS. This Ubuntu is *not* CIS- and DISA-STIG-compliant (see more: [Ubuntu | The Ubuntu Security Guide](https://ubuntu.com/security/certifications/docs/usg)). However, it is by default behind a firewall, inside a VPC. Just make sure to expose application or application offer endpoints only as needed. + +Keep an application's charm up to date. + +> See more: [Manage applications](https://juju.is/docs/juju/manage-applications) + +## Audit and observe + +Juju generates agent logs that can help administrators perform auditing for troubleshooting, security maintenance, or compliance. + +> See more: [Logs](/t/1184) + +You can also easily collect metrics about or generally monitor and observe your deployment by deploying and integrating with the Canonical Observability Stack. 
+ +> See more: [Collect metrics about a controller](https://juju.is/docs/juju/manage-controllers#heading--collect-metrics-about-a-controller) (the same recipe -- integration with the [Canonical Observability Stack](https://charmhub.io/topics/canonical-observability-stack) bundle -- can be used to observe applications other than the controller) + +------------------------- + diff --git a/tmp/t/12281.md b/tmp/t/12281.md new file mode 100644 index 000000000..bdf08fdee --- /dev/null +++ b/tmp/t/12281.md @@ -0,0 +1,297 @@ +ibraaoad | 2024-08-30 10:36:17 UTC | #1 + +> [From Zero to Hero: Write your first Kubernetes charm](/t/7113) > Pushing your charm to charmhub +> +> **See previous: [Open a Kubernetes port in your charm ](/t/12244)** + +[note type=information] +This document is part of a series, and we recommend you follow it in sequence. However, you can also jump straight in by checking out the code from the previous branches: + +``` +git clone https://github.com/canonical/juju-sdk-tutorial-k8s.git +cd juju-sdk-tutorial-k8s +git checkout 11_open_port_k8s_service +``` +[/note] + +In this tutorial you've done a lot of work, and the result is an increasingly functional charm. + +You can enjoy this charm on your own, or pass it around to friends, but why not share it with the whole world? + +The Canonical way to share a charm publicly is to publish it on [Charmhub](https://charmhub.io/). Aside from making your charm more visible, this also means you can deploy it more easily, as Charmhub is the default source for `juju deploy`. Besides, Charmcraft is there to support you every step of the way. + +In this chapter of the tutorial you will use Charmcraft to release your charm on Charmhub. + +**Contents:** + +1. [Log in to Charmhub](#heading--log-in-to-charmhub) +1. [Register your charm's name](#heading--register-your-charms-name) +1. [Upload the charm and its resources](#heading--upload-the-charm-and-its-resources) +1. [Release the charm](#heading--release-the-charm) + +

Log in to Charmhub

+ +[note type=caution] +**You will need an Ubuntu SSO account.**
+If you don't have one yet, sign up on https://login.ubuntu.com/+login +[/note] + +[note type="information"] +Logging into Charmhub is typically a simple matter of running `charmcraft login` . However, here we are within a Multipass VM, so we have to take some extra steps. +[/note] + + +On your Multipass VM, run the code below: + +```text +ubuntu@charm-dev:~/fastapi-demo$ charmcraft login --export ~/secrets.auth +``` + +Once you've put in your login information, you should see something similar to the output below: + +```text +Opening an authorization web page in your browser. +If it does not open, please open this URL: + https://api.jujucharms.com/identity/login?did=48d45d919ca2b897a81470dc5e98b1a3e1e0b521b2fbcd2e8dfd414fd0e3fa96 +``` + +Copy-paste the provided web link into your web browser. Use your Ubuntu SSO to log in. + +When you're done, you should see in your terminal the following: + +```text +Login successful. Credentials exported to '~/secrets.auth'. +``` + +Now set an environment variable with the new token: + +```text +export CHARMCRAFT_AUTH=$(cat ~/secrets.auth) +``` + +Well done, you're now logged in to Charmhub! + +

+<a href="#heading--register-your-charms-name"><h2 id="heading--register-your-charms-name">Register your charm's name</h2></a>

+ +On your Multipass VM, generate a random 8-digit hexadecimal hash, then view it in the shell: + +```text +random_hash=$(cat /dev/urandom | tr -dc 'a-f0-9' | head -c 8) +echo "Random 8-digit hash: $random_hash" +``` +[note type=information] +Naming your charm is usually less random than that; see [Charm naming guidelines](/t/5364). However, here we are in a tutorial setting, so you just need to make sure to pick a unique name, any name. +[/note] + +Navigate to the `charmcraft.yaml` file of your charm and update the `name` field with the randomly generated name. + +Once done, prepare the charm for upload by executing `charmcraft pack`. This command will create a compressed file prefixed with the updated name, as discussed earlier. + +Now use this name to register your charm on Charmhub (in the command below, `<name>` is the name from your `charmcraft.yaml`): + +```text +$ charmcraft register <name> +Congrats! You are now the publisher of '<name>' +``` + +You're all set! + +

+<a href="#heading--upload-the-charm-and-its-resources"><h2 id="heading--upload-the-charm-and-its-resources">Upload the charm and its resources</h2></a>

+ +On your Multipass VM, run the code below. (The argument to `charmcraft upload` is the filepath to the `.charm` file.) + +```text +charmcraft upload <name>_ubuntu-22.04-amd64.charm +Revision 1 of '<name>' created +``` + +[note type="information"] +Every time a new binary is uploaded for a charm, a new revision is created on Charmhub. We can verify its current status easily by running `charmcraft revisions <name>`. +[/note] + + +Now upload the charm's resource -- in your case, the `demo-server-image` OCI image specified in your charm's `charmcraft.yaml` -- as follows. + + + +First, pull it locally: + +```text +docker pull ghcr.io/canonical/api_demo_server:1.0.1 +``` + +Then, take note of the image ID: + +```text +docker images ghcr.io/canonical/api_demo_server +``` + +This should produce output similar to the below: + +```text +REPOSITORY TAG IMAGE ID CREATED SIZE +ghcr.io/canonical/api_demo_server 1.0.1 <image-id> 6 months ago 532MB +``` + +Finally, upload the image as below, specifying first the charm name, then the resource name, then the `--image` flag with the image ID: + +```text +charmcraft upload-resource <name> demo-server-image --image=<image-id> +``` + +Sample output: + +```text +Revision 1 created of resource 'demo-server-image' for charm '<name>'. +``` + +All set! + +

+<a href="#heading--release-the-charm"><h2 id="heading--release-the-charm">Release the charm</h2></a>

+ +Release your charm as below. + +[note type=information] +**Do not worry:**
+While releasing a charm to Charmhub gives it a public URL, the charm will not appear in the Charmhub search results until it has passed formal review -- see [Requirements for public listing](/t/10632#heading--requirements-for-public-listing). +[/note] + + +```text +$ charmcraft release --revision=1 --channel=beta --resource=demo-server-image:1 +Revision 1 of charm '` released to beta +``` + +This releases it into a channel so it can become available for downloading. + +Just in case, also check your charm's status: + +```text +$ charmcraft status +Track Base Channel Version Revision Resources +latest ubuntu 22.04 (amd64) stable - - - + candidate - - - + beta 1 1 demo-server-image (r1) + edge ↑ ↑ ↑ +``` + +Congratulations, your charm has now been published to charmhub.io! + +You can view it at any time at `charmhub.io/`. + + +> Contributors: @abatisse, @ibraaoad, @mvlassis, @mylesjp, @tmihoc, @pik4ez + +------------------------- + +beliaev-maksim | 2023-10-24 09:43:10 UTC | #2 + +[quote="ibraaoad, post:1, topic:12281"] +Log in to Charmhub +[/quote] + +@tmihoc please assist with the login section with the way that was proposed in the past + +------------------------- + +beliaev-maksim | 2023-10-24 09:44:35 UTC | #3 + +[quote="ibraaoad, post:1, topic:12281"] +can pull it locally via +[/quote] + +is there a way to push an image straight from the upstream without downloading it ? + +------------------------- + +ibraaoad | 2023-10-24 11:12:28 UTC | #4 + +Not according to Charmcraft [source code](https://github.com/canonical/charmcraft/blob/18b4d9d74eaa62c01200f21339b9adde16677c9c/charmcraft/commands/store/__init__.py#L1855C39-L1855C56), when image isn't within the canonical registry it always tries to get from user's local registry + +------------------------- + +mylesjp | 2024-04-05 21:07:24 UTC | #5 + +As of Charmcraft 2.5 the charm information is in `charmcraft.yaml` rather than `metadata.yaml` so the following references should be updated: + +> Navigate to the `metadata.yaml` file of your charm and update the `name` field with the randomly generated name. + +> Now upload the charm’s resource – in your case, the `demo-server-image` OCI image specified in your charm’s `metadata.yaml` as follows: + +------------------------- + +jose | 2024-08-01 20:27:30 UTC | #6 + +Hi @ibraaoad + +If I check the `upload-resource` sub-command help I can see: + +```shell +$ charmcraft help upload-resource + +.... + +Options: + --image: The digest (remote or local) or id (local, exclude + "sha256:") of the OCI image +``` + + +So if I remove `sha256` and try to upload the resource I get: + +```shell +$ charmcraft upload-resource loki-worker-k8s loki-image --image=583ddc10f52bb4fe4806baa05ecbaa2a3246238f1423bd2a6c35b02fd87b446c +Unknown OCI image reference. +Recommended resolution: Pass a valid container transport string. +Full execution log: '/home/jose/.local/state/charmcraft/log/charmcraft-20240801-171033.641131.log' +``` + +The same happens if I do not remove `sha256:` + +```shell +$ charmcraft upload-resource loki-worker-k8s loki-image --image=sha256:583ddc10f52bb4fe4806baa05ecbaa2a3246238f1423bd2a6c35b02fd87b446c +Unknown OCI image reference. +Recommended resolution: Pass a valid container transport string. 
+Full execution log: '/home/jose/.local/state/charmcraft/log/charmcraft-20240801-171905.784405.log' +``` + +I've found an [old Facundo's reply mentioning](https://discourse.charmhub.io/t/charmcrafts-upload-resource-command/4580/16?u=jose) `IMAGE ID`, so: + + +```shell +$ docker images ubuntu/loki --digests +REPOSITORY TAG DIGEST IMAGE ID CREATED SIZE +ubuntu/loki 3.0-22.04 sha256:583ddc10f52bb4fe4806baa05ecbaa2a3246238f1423bd2a6c35b02fd87b446c 8dad11c50d75 5 weeks ago 351MB +``` + +And now using the `IMAGE ID` the image is uploaded: + +```shell +$ charmcraft upload-resource loki-worker-k8s loki-image --image=8dad11c50d75 +Revision 1 created of resource 'loki-image' for charm 'loki-worker-k8s'. +``` + +```shell +$ charmcraft resources loki-worker-k8s +Charm Rev Resource Type Optional +1 loki-image oci-image True +``` + +I'm using charmcraft `3.1.1` + +@sergiusens Do you know if this is just a documentation issue for the sub-command `upload-resource` or is an issue of the sub-command itself?? + +------------------------- + +lengau | 2024-10-10 16:33:39 UTC | #7 + +Hi @jose ! Yeah, this is an issue related to a behaviour change in Charmcraft 3. I've [created a bug report](https://github.com/canonical/charmcraft/issues/1953) for it. + +The short version is that when we changed from home-grown implementations to using docker-py and skopeo, we forgot to reimplement the piece that checks for a digest, so it only works with the image ID right now. + +Separately I've added [another issue](https://github.com/canonical/charmcraft/issues/1952) to document the new functionality this gives us. + +------------------------- + diff --git a/tmp/t/12372.md b/tmp/t/12372.md new file mode 100644 index 000000000..10fa59783 --- /dev/null +++ b/tmp/t/12372.md @@ -0,0 +1,55 @@ +sed-i | 2024-08-16 13:39:15 UTC | #1 + +# Solution matrix + +| | K8s charm | Machine charm | Legacy charms - LMA deps | Non-juju workload | +| ----------- | ------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------- | -------------------------------- | ------------------------------------------ | +| [Logs](/t/cos-lite-docs-logging-architecture/13926) | [LokiPushApiConsumer][loki_push_api] (for workloads that can communicate directly with Loki, [LogProxyConsumer] (installs promtail in the workload; not suitable for air-gapped), LogForwarder (pebble-native, Juju>=3.4.1) | [cos_agent] (subordinate, pulls logs from `/var/log` or from other snaps with a matching slot) | [cos-proxy] | [grafana-agent snap] (manually configured) | +| Metrics | [prometheus_scrape] (in-model), [prometheus_remote_write] (CMR) - with grafana-agent | [cos_agent] | [cos-proxy] | [scrape-target], [grafana-agent snap] | +| Traces | [tracing] (instrumented workloads), [charm_tracing] (for the charm itself) | [tracing] (CMR), [cos_agent] | N/A | [grafana-agent charm] | +| Dashboards | [grafana_dashboard], [cos-configuration] | [cos_agent], [cos-configuration] | [cos-proxy], [cos-configuration] | [cos-configuration] | +| Alert rules | via metrics and logs relations, [cos-configuration] | [cos_agent], [cos-configuration] | [cos-proxy], [cos-configuration] | [cos-configuration] | + +- The [COS Lite bundle](https://charmhub.io/cos-lite) does not include tracing by default; however, there's a bundle overlay for that. 
+ +[loki_push_api]: https://charmhub.io/loki-k8s/libraries/loki_push_api +[prometheus_scrape]: https://charmhub.io/prometheus-k8s/libraries/prometheus_scrape +[prometheus_remote_write]: https://charmhub.io/prometheus-k8s/libraries/prometheus_remote_write +[tracing]: https://charmhub.io/tempo-k8s/libraries/tracing +[charm_tracing]: https://charmhub.io/tempo-k8s/libraries/charm_tracing +[grafana_dashboard]: https://charmhub.io/grafana-k8s/libraries/grafana_dashboard +[cos-configuration]: https://charmhub.io/cos-configuration-k8s +[cos_agent]: https://charmhub.io/grafana-agent/libraries/cos_agent +[cos-proxy]: https://charmhub.io/cos-proxy +[grafana-agent snap]: https://snapcraft.io/grafana-agent +[grafana-agent charm]: https://charmhub.io/grafana-agent-k8s +[scrape-target]: https://charmhub.io/prometheus-scrape-target-k8s + [LogProxyConsumer]: https://charmhub.io/loki-k8s/libraries/loki_push_api + +## External links +- [Monitoring Agents Comparative Study](https://wiki.anuket.io/display/HOME/Monitoring+Agents+Comparative+Study) +- [How to integrate COS-Lite with non-juju workloads](/t/12005) + +------------------------- + +ppasotti | 2024-08-15 08:04:38 UTC | #2 + +@sed-i should we be adding grafana-agent to 'tracing/non-juju workloads' now that we have merged support for it? Also we could be link to https://discourse.charmhub.io/t/how-to-integrate-cos-lite-with-uncharmed-applications/12005 for more context? + +------------------------- + +sed-i | 2024-08-15 15:17:03 UTC | #3 + +Yes to both questions :slight_smile: + +I also think the table is not very readable with discourse's width limit. +Would you like to try splitting this page into sections? From first glance it seems that "Logs", "Metrics", ... could be the new section titles, but perhaps you discover something different. + +------------------------- + +ppasotti | 2024-08-16 13:35:43 UTC | #4 + +will give it a shot next week if I have time. For now adding gagent and the link + +------------------------- + diff --git a/tmp/t/12435.md b/tmp/t/12435.md new file mode 100644 index 000000000..6cbcedbd5 --- /dev/null +++ b/tmp/t/12435.md @@ -0,0 +1,602 @@ +tmihoc | 2024-09-24 15:29:30 UTC | #1 + +In this tutorial you will learn all the basic things that you need to know to start writing a [machine charm](https://juju.is/docs/juju/charmed-operator) for [Juju](https://juju.is/docs/juju). + +------ +**What you'll need:** +- A workstation, e.g., a laptop, with amd64 architecture and which has sufficient resources to launch a virtual machine with 4 CPUs, 8 GB RAM, and 50 GB disk space +- Familiarity with Linux +- Familiarity with [Juju](https://juju.is/docs/juju) +- Familiarity with object-oriented programming in Python + +**What you'll do:** + +- [Study your application](#heading--study-your-application) +- [Set up your development environment](#heading--set-up-your-development-environment) + +1. [Enable `juju deploy microsample-vm`](#heading--enable-juju-deploy-microsample-vm) +1. [Enable `juju deploy microsample-vm --config channel=`](#heading--enable-juju-deploy-microsample-vm---config-channelchannel) +1. 
[Enable `juju status` with `App Version`](#heading--enable-juju-status-with-app-version) + +- [Clean up: Destroy your test environment](#heading--clean-up-destroy-your-test-environment) +- [Next steps](#heading--next-steps) + +--------- + +[note type=positive status="At any point, to give feedback or ask for help"] +Don't hesitate to get in touch on [Matrix](https://matrix.to/#/#charmhub-charmdev:ubuntu.com) or [Discourse](https://discourse.charmhub.io/). +[/note] + +

+<a href="#heading--study-your-application"><h2 id="heading--study-your-application">Study your application</h2></a>

+ +In this tutorial we will be writing a charm for Microsample (`microsample`) -- a small educational application that delivers a Flask microservice. + +The application has been packaged and published as a snap ([https://snapcraft.io/microsample](https://snapcraft.io/microsample)). We will write our charm such that `juju deploy` will install it from this snap. This will make workload installation straightforward and upgrades automatic (they will happen through `snapd`). + +The application snap has been released into multiple channels -- `edge`, `beta`, `candidate`, and `stable`. We will write our charm such that a user can choose the channel they prefer by running `juju deploy microsample-vm --config channel=<channel>`. + +The application has other features that we can exploit, but for now this is enough to get us started with a simple charm. + +

+<a href="#heading--set-up-your-development-environment"><h2 id="heading--set-up-your-development-environment">Set up your development environment</h2></a>

+ +> See [Set up your development environment automatically](/t/4450#heading--set-up-your-development-environment-automatically) for instructions on how to set up your development environment so that it's ready for you to test-deploy your charm. At the charm directory step, call it `microsample-vm`. At the cloud step, choose LXD. + +[note type=information] +- Going forward: + - Use your host machine (on Linux, `cd ~/microsample-vm`) to create and edit your charm files. This will allow you to use your favorite local editor. + - Use the Multipass VM shell (on Linux, `ubuntu@charm-dev:~$ cd ~/microsample-vm`) to run Charmcraft and Juju commands. + + +- At any point: + - To exit the shell, press `mod key + C` or type `exit`. + - To stop the VM after exiting the VM shell, run `multipass stop charm-dev`. + - To restart the VM and re-open a shell into it, type `multipass shell charm-dev`. +[/note] + + +

+<a href="#heading--enable-juju-deploy-microsample-vm"><h2 id="heading--enable-juju-deploy-microsample-vm">Enable `juju deploy microsample-vm`</h2></a>

+ + +Let's charm our `microsample` application into a `microsample-vm` charm such that a user can successfully install it on any machine cloud simply by running `juju deploy microsample-vm`! + +In your Multipass VM shell, enter your charm directory, run `charmcraft init --profile machine` to initialise the file tree structure for your machine charm, and inspect the result. Sample session: + +```text +# Enter your charm directory: +ubuntu@charm-dev:~$ cd microsample-vm/ + +# Initialise the charm tree structure: +ubuntu@charm-dev:~/microsample-vm$ charmcraft init --profile machine +Charmed operator package file and directory tree initialised. + +Now edit the following package files to provide fundamental charm metadata +and other information: + +charmcraft.yaml +src/charm.py +README.md + +# Inspect the result: +ubuntu@charm-dev:~/microsample-vm$ ls -R +.: +CONTRIBUTING.md README.md pyproject.toml src tox.ini +LICENSE charmcraft.yaml requirements.txt tests + +./src: +charm.py + +./tests: +integration unit + +./tests/integration: +test_charm.py + +./tests/unit: +test_charm.py + +``` + +> See more: [How to set up a charm project](/t/5547), [List of files in a charm project](/t/4454) + +In your local editor, open the `charmcraft.yaml` file and customise its contents as below (you only have to edit the `title`, `summary`, and `description`): + +```text +# (Required) +name: microsample-vm + +# (Required) +type: charm + +# (Recommended) +title: Microsample VM Charm + +# (Required) +summary: A charm that deploys the microsample snap and allows for a configuration of the snap channel via juju config. + +# (Required) +description: | + A machine charm for the Microsample application, built on top of the `microsample` snap. + + The charm allows you to deploy the application via `juju deploy`. + It also defines a channel config that allows you to choose which snap channel to install from during deployment. + + This charm makes it easy to deploy the Microsample application on any machine cloud. + + The primary value of this charm is educational -- beginner machine charms can study it to learn how to build a machine charm. + +# (Required for 'charm' type) +bases: + - build-on: + - name: ubuntu + channel: "22.04" + run-on: + - name: ubuntu + channel: "22.04" + +``` + +> See more: [File `charmcraft.yaml`](/t/7132) + +Now open the `src/charm.py` file and update it as below (you'll have to add an import statement for `os` and an observer and handler for the `install` event -- in the definition of which you'll be using `os` and `ops`). 
+ +```text +#!/usr/bin/env python3 +import os +import logging +import ops + +logger = logging.getLogger(__name__) + +class MicrosampleVmCharm(ops.CharmBase): + + def __init__(self, *args): + super().__init__(*args) + self.framework.observe(self.on.start, self._on_start) + self.framework.observe(self.on.install, self._on_install) + + def _on_start(self, event: ops.StartEvent): + """Handle start event.""" + self.unit.status = ops.ActiveStatus() + + def _on_install(self, event: ops.InstallEvent): + """Handle install event.""" + self.unit.status = ops.MaintenanceStatus("Installing microsample snap") + os.system(f"snap install microsample --channel edge") + self.unit.status = ops.ActiveStatus("Ready") + + +if __name__ == "__main__": # pragma: nocover + ops.main(MicrosampleVmCharm) # type: ignore +``` + +> See more: [File `src/charm.py`](/t/7150), [Ops](/t/5527), [Event `start`](/t/6482), [Event `install`](/t/6469) + +Next, in your Multipass VM shell, inside your project directory, run `charmcraft pack` to pack the charm. It may take a few minutes the first time around but, when it's done, your charm project should contain a `.charm` file. Sample session: + + +```text +# Pack the charm into a '.charm' file: +ubuntu@charm-dev:~/microsample-vm$ charmcraft pack +Created 'microsample-vm_ubuntu-22.04-amd64.charm'. +Charms packed: + microsample-vm_ubuntu-22.04-amd64.charm + +# Inspect the results -- your charm's root directory should contain a .charm file: +ubuntu@charm-dev:~/microsample-vm$ ls +CONTRIBUTING.md charmcraft.yaml requirements.txt tox.ini +LICENSE microsample-vm_ubuntu-22.04-amd64.charm src +README.md pyproject.toml tests +``` + +> See more: [How to pack a charm](/t/5548) + +Now, open a new shell into your Multipass VM and use it to configure the Juju log verbosity levels and to start a live debug session: + +```text +# Set your logging verbosity level to `DEBUG`: +ubuntu@charm-dev:~$ juju model-config logging-config="=WARNING;unit=DEBUG" + +# Start a live debug session: +ubuntu@charm-dev:~$ juju debug-log +``` + +In your old VM shell, use Juju to deploy your charm. If all has gone well, you should see your App and Unit -- Workload status show as `active`: + +```text +# Deploy the Microsample VM charm as the 'microsample' application: +ubuntu@charm-dev:~/microsample-vm$ juju deploy ./microsample-vm_ubuntu-22.04-amd64.charm microsample +Located local charm "microsample-vm", revision 0 +Deploying "microsample" from local charm "microsample-vm", revision 0 on ubuntu@22.04/stable + +# Check the deployment status +# (use --watch 1s to update it automatically at 1s intervals): +ubuntu@charm-dev:~/microsample-vm$ juju status +Model Controller Cloud/Region Version SLA Timestamp +welcome-lxd lxd localhost/localhost 3.1.6 unsupported 12:49:26+01:00 + +App Version Status Scale Charm Channel Rev Exposed Message +microsample active 1 microsample-vm 0 no + +Unit Workload Agent Machine Public address Ports Message +microsample/0* active idle 1 10.122.219.101 + +Machine State Address Inst id Base AZ Message +1 started 10.122.219.101 juju-f25b73-1 ubuntu@22.04 Running + + +``` + +Finally, test that the service works by executing `curl` on your application unit: + +```text +ubuntu@charm-dev:~/microsample-vm$ juju exec --unit microsample/0 -- "curl -s http://localhost:8080" +Online +``` + +[note type=information status="If your deployment enters an `error` state"] +1. Fix the code in `src/charm.py`. +2. Rebuild the charm: `charmcraft pack` +3. 
Refresh the application from the repacked charm: `juju refresh microsample --path=./microsample-vm_ubuntu-22.04-amd64.charm --force-units` +4. Let the model know the issue is resolved (fixed): `juju resolved microsample/0`. +[/note] + + + + + +[note type=information status="In case you were wondering"] +The template content from `charmcraft init` was sufficient for the charm to pack and deploy successfully. However, our goal here was to make it run successfully, that is, to actually install the `microsample` application on our LXD cloud. With the edits above, this goal has been achieved. +[/note] + + +
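+
+If you would like to sanity-check the `install` handler without redeploying, you can also exercise it in a unit test with `ops.testing.Harness`. The snippet below is only a sketch and our own addition, not a step of this tutorial: it patches `os.system` so that no snap is actually installed, and it assumes you add it to the `tests/unit/test_charm.py` file that `charmcraft init` created (the test name is ours).
+
+```python
+# tests/unit/test_charm.py -- a possible extra test (sketch, with assumptions noted above)
+from unittest.mock import patch
+
+import ops
+import ops.testing
+
+from charm import MicrosampleVmCharm
+
+
+def test_install_sets_active_status():
+    harness = ops.testing.Harness(MicrosampleVmCharm)
+    harness.begin()
+    # Patch os.system so the test does not really install the snap:
+    with patch("charm.os.system") as mock_system:
+        harness.charm.on.install.emit()
+    mock_system.assert_called_once_with("snap install microsample --channel edge")
+    assert harness.charm.unit.status == ops.ActiveStatus("Ready")
+    harness.cleanup()
+```
+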

+<a href="#heading--enable-juju-deploy-microsample-vm---config-channelchannel"><h2 id="heading--enable-juju-deploy-microsample-vm---config-channelchannel">Enable `juju deploy microsample-vm --config channel=<channel>`</h2></a>

+ +Let's now evolve our charm so that a user can successfully choose which version of `microsample` they want installed by running `juju config microsample-vm channel=<channel>`! + +In your local editor, in your `charmcraft.yaml` file, define the configuration option as below: + +```text +config: + options: + channel: + description: | + Channel for the microsample snap. + default: "edge" + type: string +``` + +> See more: [File `charmcraft.yaml` > Key `config`](/t/7132#heading--config) + +Then, in the `src/charm.py` file, update the `_on_install` function to make use of the new configuration option, as below: + +```text +def _on_install(self, event: ops.InstallEvent): + """Handle install event.""" + self.unit.status = ops.MaintenanceStatus("Installing microsample snap") + channel = self.config.get('channel') + if channel in ['beta', 'edge', 'candidate', 'stable']: + os.system(f"snap install microsample --{channel}") + self.unit.status = ops.ActiveStatus("Ready") + else: + self.unit.status = ops.BlockedStatus("Invalid channel configured.") +``` + +Now, in your Multipass VM shell, inside your project directory, pack the charm, refresh it in the Juju model, and inspect the results: + +```text + +# Pack the charm: +ubuntu@charm-dev:~/microsample-vm$ charmcraft pack +Created 'microsample-vm_ubuntu-22.04-amd64.charm'. +Charms packed: + microsample-vm_ubuntu-22.04-amd64.charm + +# Refresh the application from the repacked charm: +ubuntu@charm-dev:~/microsample-vm$ juju refresh microsample --path=./microsample-vm_ubuntu-22.04-amd64.charm +Added local charm "microsample-vm", revision 1, to the model + +# Verify that the new configuration option is available: +ubuntu@charm-dev:~/microsample-vm$ juju config microsample +application: microsample +application-config: + trust: + default: false + description: Does this application have access to trusted credentials + source: default + type: bool + value: false +charm: microsample-vm +settings: + channel: + default: edge + description: | + Channel for the microsample snap. + source: default + type: string + value: edge + +``` + +Back in the `src/charm.py` file, in the `__init__` function of your charm, observe the `config-changed` event and pair it with an event handler: + +```text +self.framework.observe(self.on.config_changed, self._on_config_changed) +``` +> See more: [Event `config-changed`](/t/6465) + + +Next, in the body of the charm definition, define the event handler, as below: + +```text +def _on_config_changed(self, event: ops.ConfigChangedEvent): + channel = self.config.get('channel') + if channel in ['beta', 'edge', 'candidate', 'stable']: + os.system(f"snap refresh microsample --{channel}") + self.unit.status = ops.ActiveStatus("Ready at '%s'" % channel) + else: + self.unit.status = ops.BlockedStatus("Invalid channel configured.") +``` + +Now, in your Multipass VM shell, inside your project directory, pack the charm, refresh it in the Juju model, and inspect the results: + +```text +# Pack the charm: +ubuntu@charm-dev:~/microsample-vm$ charmcraft pack +Created 'microsample-vm_ubuntu-22.04-amd64.charm'. 
+Charms packed: + microsample-vm_ubuntu-22.04-amd64.charm + +# Refresh the application: +ubuntu@charm-dev:~/microsample-vm$ juju refresh microsample --path=./microsample-vm_ubuntu-22.04-amd64.charm +Added local charm "microsample-vm", revision 2, to the model + +# Change the 'channel' config to 'beta': +ubuntu@charm-dev:~/microsample-vm$ juju config microsample channel=beta + +# Inspect the Message column +# ('Ready at beta' is what we expect to see if the snap channel has been changed to 'beta'): +ubuntu@charm-dev:~/microsample-vm$ juju status +Model Controller Cloud/Region Version SLA Timestamp +welcome-lxd lxd localhost/localhost 3.1.6 unsupported 13:54:53+01:00 + +App Version Status Scale Charm Channel Rev Exposed Message +microsample active 1 microsample-vm 2 no Ready at 'beta' + +Unit Workload Agent Machine Public address Ports Message +microsample/0* active idle 1 10.122.219.101 Ready at 'beta' + +Machine State Address Inst id Base AZ Message +1 started 10.122.219.101 juju-f25b73-1 ubuntu@22.04 Running +``` + +Congratulations, your charm users can now deploy the application from a specific channel! + +
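+
+As an optional check, the new `channel` option and its validation can also be exercised in a unit test with `ops.testing.Harness`. The sketch below is our own addition, not a tutorial step; it patches `os.system` so no snap command really runs, and it passes the config schema to `Harness` explicitly in case your `ops` version does not pick it up from `charmcraft.yaml` automatically (the test name and the `CONFIG_YAML` constant are ours).
+
+```python
+# tests/unit/test_charm.py -- a possible extra test (sketch, with assumptions noted above)
+from unittest.mock import patch
+
+import ops
+import ops.testing
+
+from charm import MicrosampleVmCharm
+
+CONFIG_YAML = """
+options:
+  channel:
+    description: Channel for the microsample snap.
+    default: "edge"
+    type: string
+"""
+
+
+def test_channel_config_is_validated():
+    harness = ops.testing.Harness(MicrosampleVmCharm, config=CONFIG_YAML)
+    harness.begin()
+    with patch("charm.os.system") as mock_system:
+        # A valid channel triggers a snap refresh and an active status:
+        harness.update_config({"channel": "beta"})
+        assert harness.charm.unit.status == ops.ActiveStatus("Ready at 'beta'")
+        mock_system.assert_called_with("snap refresh microsample --beta")
+        # An invalid channel blocks the unit and runs no snap command:
+        mock_system.reset_mock()
+        harness.update_config({"channel": "nonsense"})
+        assert harness.charm.unit.status == ops.BlockedStatus("Invalid channel configured.")
+        mock_system.assert_not_called()
+    harness.cleanup()
+```
+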

+<a href="#heading--enable-juju-status-with-app-version"><h2 id="heading--enable-juju-status-with-app-version">Enable `juju status` with `App Version`</h2></a>

+ +Let's evolve our charm so that a user can see which version of the application has been installed simply by running `juju status`! + +In your local editor, update the `requirements.txt` file as below (you'll have to add the `requests` and `requests-unixsocket` lines): + +```text +ops ~= 2.5 +requests==2.28.1 +requests-unixsocket==0.3.0 +``` + +> See more: [File `requirements.txt`](/t/7148), [PyPI > Library `requests`](https://pypi.org/project/requests/), [PyPI > Library `requests-unixsocket`](https://pypi.org/project/requests-unixsocket/) + +Then, in your `src/charm.py` file, import the `requests_unixsocket` package, update the `_on_config_changed` function to set the workload version to the output of a function `_getWorkloadVersion`, and define the function to retrieve the Microsample workload version from the `snapd` API via a Unix socket, as below: + +```text +#!/usr/bin/env python3 +# Copyright 2023 Ubuntu +# See LICENSE file for licensing details. + +"""Charm the application.""" + +import os +import logging +import ops +import requests_unixsocket + +logger = logging.getLogger(__name__) + + +class MicrosampleVmCharm(ops.CharmBase): + """Charm the application.""" + + def __init__(self, *args): + super().__init__(*args) + self.framework.observe(self.on.start, self._on_start) + self.framework.observe(self.on.install, self._on_install) + self.framework.observe(self.on.config_changed, self._on_config_changed) + + def _on_start(self, event: ops.StartEvent): + """Handle start event.""" + self.unit.status = ops.ActiveStatus() + + def _on_install(self, event: ops.InstallEvent): + """Handle install event.""" + self.unit.status = ops.MaintenanceStatus("Installing microsample snap") + channel = self.config.get('channel') + if channel in ['beta', 'edge', 'candidate', 'stable']: + os.system(f"snap install microsample --{channel}") + self.unit.status = ops.ActiveStatus("Ready") + else: + self.unit.status = ops.BlockedStatus("Invalid channel configured.") + + def _on_config_changed(self, event: ops.ConfigChangedEvent): + channel = self.config.get('channel') + if channel in ['beta', 'edge', 'candidate', 'stable']: + os.system(f"snap refresh microsample --{channel}") + workload_version = self._getWorkloadVersion() + self.unit.set_workload_version(workload_version) + self.unit.status = ops.ActiveStatus("Ready at '%s'" % channel) + else: + self.unit.status = ops.BlockedStatus("Invalid channel configured.") + + def _getWorkloadVersion(self): + """Get the microsample workload version from the snapd API via unix-socket""" + snap_name = "microsample" + snapd_url = f"http+unix://%2Frun%2Fsnapd.socket/v2/snaps/{snap_name}" + session = requests_unixsocket.Session() + # Use the requests library to send a GET request over the Unix domain socket + response = session.get(snapd_url) + # Check if the request was successful + if response.status_code == 200: + data = response.json() + workload_version = data["result"]["version"] + else: + workload_version = "unknown" + print(f"Failed to retrieve Snap apps. Status code: {response.status_code}") + + # Return the workload version + return workload_version + +if __name__ == "__main__": # pragma: nocover + ops.main(MicrosampleVmCharm) # type: ignore +``` + + + +Finally, in your Multipass VM shell, pack the charm, refresh it in Juju, and check the Juju status -- it should now show the version of your workload. + +```text +# Pack the charm: +ubuntu@charm-dev:~/microsample-vm$ charmcraft pack +Created 'microsample-vm_ubuntu-22.04-amd64.charm'. 
+Charms packed: + microsample-vm_ubuntu-22.04-amd64.charm + +# Refresh the application: +ubuntu@charm-dev:~/microsample-vm$ juju refresh microsample --path=./microsample-vm_ubuntu-22.04-amd64.charm +Added local charm "microsample-vm", revision 3, to the model + +# Verify that the App Version now shows the version: +ubuntu@charm-dev:~/microsample-vm$ juju status +Model Controller Cloud/Region Version SLA Timestamp +welcome-lxd lxd localhost/localhost 3.1.6 unsupported 14:04:39+01:00 + +App Version Status Scale Charm Channel Rev Exposed Message +microsample 0+git.49ff7aa active 1 microsample-vm 3 no Ready at 'beta' + +Unit Workload Agent Machine Public address Ports Message +microsample/0* active idle 1 10.122.219.101 Ready at 'beta' + +Machine State Address Inst id Base AZ Message +1 started 10.122.219.101 juju-f25b73-1 ubuntu@22.04 Running + + +``` + +Congratulations, your charm user can view the version of the workload deployed from your charm! + +
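+
+The snapd query can also be covered by a unit test that stubs out the socket call. As before, this is only a sketch of our own and not a tutorial step: it patches `requests_unixsocket.Session` and `os.system`, passes the config schema to `Harness` explicitly, and then checks the workload version that the charm reports to Juju (the test name and the `CONFIG_YAML` constant are ours).
+
+```python
+# tests/unit/test_charm.py -- a possible extra test (sketch, with assumptions noted above)
+from unittest.mock import MagicMock, patch
+
+import ops.testing
+
+from charm import MicrosampleVmCharm
+
+CONFIG_YAML = """
+options:
+  channel:
+    description: Channel for the microsample snap.
+    default: "edge"
+    type: string
+"""
+
+
+def test_workload_version_is_read_from_snapd():
+    harness = ops.testing.Harness(MicrosampleVmCharm, config=CONFIG_YAML)
+    harness.begin()
+    # Fake the snapd API response instead of talking to the real socket:
+    fake_response = MagicMock()
+    fake_response.status_code = 200
+    fake_response.json.return_value = {"result": {"version": "0+git.49ff7aa"}}
+    with patch("charm.requests_unixsocket.Session") as session_cls, patch("charm.os.system"):
+        session_cls.return_value.get.return_value = fake_response
+        harness.update_config({"channel": "beta"})
+    assert harness.get_workload_version() == "0+git.49ff7aa"
+    harness.cleanup()
+```
+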

+<a href="#heading--clean-up-destroy-your-test-environment"><h2 id="heading--clean-up-destroy-your-test-environment">Clean up: Destroy your test environment</h2></a>

+ +> See [Set up your development environment automatically > Clean up](/t/4450#heading--automatic-clean-up) + +

+<a href="#heading--next-steps"><h2 id="heading--next-steps">Next steps</h2></a>

+ +By the end of this tutorial you will have built a machine charm and evolved it in a number of typical ways. But there is a lot more to explore: + +| If you are wondering...| visit...| +|--|--| +| "How do I...?" | [SDK How-to docs](/t/5521) | +| "What is...?" | [SDK Reference docs](/t/5522) | +| "Why...?", "So what?" | [SDK Explanation docs](/t/5523) | + + +--- + +
+ +>**Contributors:** @acsgn, @bittner, @erik-lonroth, @mcjaeger, @tmihoc + +------------------------- + +bittner | 2024-04-24 07:21:48 UTC | #2 + +[quote="tmihoc, post:1, topic:12435"] +Don’t hesitate to get in touch on [Mattermost](https://chat.charmhub.io/charmhub/channels/juju) or [Discourse](https://discourse.charmhub.io/). +[/quote] + +The "Mattermost" link needs to be updated to [Matrix](https://matrix.to/#/#charmhub:ubuntu.com), I believe. (Looks like I don't have permission to make that change myself, directly.) + +------------------------- + +tmihoc | 2024-04-24 07:43:53 UTC | #3 + +Fixed, thanks! PS Also added you to the list of contributors on the bottom of the doc. + +------------------------- + +merkata | 2024-06-12 13:56:24 UTC | #4 + +That's a great intro doc! :clap: + +I'm looking at the `os.system()` calls and wonder whether we could/shouldn't use the [operator-libs-linux](https://charmhub.io/operator-libs-linux) library? + +You would fetch the lib + +```bash +charmcraft fetch-lib charms.operator_libs_linux.v2.snap +``` + +Import it + +```python +from charms.operator_libs_linux.v2 import snap +``` + +Use like + +```python + try: + cache = snap.SnapCache() + charmed_microsample = cache["microsample"] + charmed_microsample.restart(reload=True) + except snap.SnapError as e: + error_msg = ( + f"An exception occurred when reloading microsample. Reason: {e}" + ) + logger.error(error_msg) + raise ReloadError(error_msg) from e +``` + +I believe you can get the snap revision as well and ditch the requests dependency as well. + +------------------------- + +tony-meyer | 2024-06-30 21:52:43 UTC | #5 + +[quote="merkata, post:4, topic:12435"] +I’m looking at the `os.system()` calls and wonder whether we could/shouldn’t use the [operator-libs-linux](https://charmhub.io/operator-libs-linux) library? +[/quote] + +We should definitely replace `os.system`, with `subprocess` if nothing else. + +Using the operator-libs-linux library is indeed what I'd recommend for a real charm. The only complication here is that you'll need to introduce charm libs (including `charmcraft fetch-lib` and the wacky way that `lib` gets added to PYTHONPATH) basically as soon as you're writing charm code. + +Overall, I think it's still worth doing. + +------------------------- + diff --git a/tmp/t/12562.md b/tmp/t/12562.md new file mode 100644 index 000000000..b911db2e7 --- /dev/null +++ b/tmp/t/12562.md @@ -0,0 +1,78 @@ +ppasotti | 2023-11-16 11:24:40 UTC | #1 + +> [Event](/t/6361) > [List of events > Ops events](/t/6657#heading--ops-events) > `collect-app-status` and `collect-unit-status` +> +> Source: [`CollectStatusEvent`](https://ops.readthedocs.io/en/latest/#ops.CollectStatusEvent) + +The `collect-app-status` and `collect-unit-status` events are produced not by Juju but Ops. + +The events are emitted on the charm, starting from `ops 2.7`, before the framework exits. The goal is to offer the charm a chance to uniformly set the application or unit status based on its internal state after processing the Juju event that triggered this execution. + +**Contents:** +- [Emission sequence](#heading--emission-sequence) +- [Observing these events in Ops](#heading--observing-these-events-in-ops) + + +

+<a href="#heading--emission-sequence"><h2 id="heading--emission-sequence">Emission sequence</h2></a>

+ +The `collect-app-status` and `collect-unit-status` events are fired once by the framework *after* any Juju event is emitted, and before any other framework event is emitted. The `-app-status` event is for setting application status, and the `-unit-status` event for setting unit status. + +For example, if the unit is processing a `config-changed` event, the charm will see: + +> `config-changed` -> **`collect-unit-status`** -> `pre-commit` -> `commit` + + +
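+
+Concretely, a handler for one of these events typically calls `add_status` one or more times and lets the framework keep the highest-priority result. The sketch below assumes a charm with a hypothetical `required-option` config key; the corresponding `observe` call is shown in the next section.
+
+```python
+import ops
+
+
+class MyCharm(ops.CharmBase):
+    def __init__(self, *args):
+        super().__init__(*args)
+        self.framework.observe(self.on.collect_unit_status, self._on_collect_unit_status)
+
+    def _on_collect_unit_status(self, event: ops.CollectStatusEvent):
+        # add_status() can be called several times; the framework keeps the
+        # highest-priority status (e.g. blocked takes precedence over active).
+        if not self.config.get("required-option"):  # hypothetical config key
+            event.add_status(ops.BlockedStatus("required-option is not set"))
+        event.add_status(ops.ActiveStatus())
+```
+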

+<a href="#heading--observing-these-events-in-ops"><h2 id="heading--observing-these-events-in-ops">Observing these events in Ops</h2></a>

+ +To observe the `collect-unit-status` event: + +```python +# in MyCharm.__init__ +self.framework.observe(self.on.collect_unit_status, self._on_collect_unit_status) +``` + +This will observe the event uniformly across the units of the charm. + + +To observe the `collect-app-status` event: + +```python +# in MyCharm.__init__ +self.framework.observe(self.on.collect_app_status, self._on_collect_app_status) +``` + +This will ensure that only the leader unit processes it, and the result becomes the overall application status. + +A [`CollectStatusEvent`](https://ops.readthedocs.io/en/latest/#ops.CollectStatusEvent) instance does not expose any specific attributes but exposes an `add_status` method to automatically manage statuses with different priorities. Read the [API reference docs](https://ops.readthedocs.io/en/latest/#ops.CollectStatusEvent) for more information and examples. + +------------------------- + +ppasotti | 2023-11-20 10:02:11 UTC | #2 + +@benhoyt @tony-meyer I see a challenge in documenting these events: there's a clear discrepancy with how these work and how all other events work. + +All other events in `ops` follow the same pattern: you observe the event in the charm's `__init__`, and you can be assured that this unit will be notified of that event, no matter what. +If you only want the leader unit to process the event (e.g. it's only interesting for the leader) you have one choice: + +```python +def _on_my_event(self, e): + if self.unit.is_leader(): + self._process(e) + return +``` + +(or you can, in `__init__`, do: `if self.unit.is_leader(): framework.observe(...)` but that's ugly.) + +In the case of collect-status, the event does that distinction for you. +Listen to collect-app-status: that method will only be called if this unit is leader. + +Can you elaborate on the rationale behind this choice? And maybe some thoughts on how to make this behaviour clear in the docs, at the moment I feel it might be hard to take in. + +------------------------- + +benhoyt | 2023-11-20 20:45:17 UTC | #3 + +It's a good point. The rationale is simply that you can only set application status from the leader unit, so it only makes sense for the framework to collect app statuses on the leader. I don't think it's particularly hard to document, see for example https://ops.readthedocs.io/en/latest/#ops.CollectStatusEvent + +------------------------- + diff --git a/tmp/t/12588.md b/tmp/t/12588.md new file mode 100644 index 000000000..95a9d49d9 --- /dev/null +++ b/tmp/t/12588.md @@ -0,0 +1,75 @@ +0x12b | 2023-11-17 12:29:05 UTC | #1 + +When deploying the Grafana Agent or Prometheus charms in large environments, you may sometimes bump into an issue where the large amount of scrape targets leads to the process hitting the max open files count, as set by ulimit. + +This issue can be identified by looking in your Grafana Agent logs, or Prometheus Scrape Targets in the UI, for the following kind of message: + +```plain +Get "http://10.0.0.1:9275/metrics": dial tcp 10.0.0.1:9275: socket: too many open files + +``` + +To resolve this, we need to increase the max open file limit of the Kubernetes deployment itself. For Microk8s, this would be done by increasing the limits in `/var/snap/microk8s/current/args/containerd-env`. + +### 1. Juju SSH into the machine + +``` +$ juju ssh uk8s/1 +``` +Substitute `uk8s/1` with the name of your MicroK8s unit. If you have more than one unit, you will need to repeat this for each of them. + +### 2. 
Open the `containerd-env` + +You can use whatever editor you prefer for this. In this how-to, we'll use `vim`. + +``` +$ vim /var/snap/microk8s/current/args/containerd-env +``` + +### 3. Increase the `ulimit`s + +```diff + +# Attempt to change the maximum number of open file descriptors +# this get inherited to the running containers +# +- ulimit -n 1024 || true ++ ulimit -n 65536 || true + +# Attempt to change the maximum locked memory limit +# this get inherited to the running containers +# +- ulimit -l 1024 || true ++ ulimit -l 16384 || true +``` + +### 4. Restart the microk8s machine + +Restart the machine the microk8s unit is deployed on and then wait for it to come back up. + +``` +$ sudo reboot +``` + +### 5. Validate + +Validate that the change made it through and had the desired effect once the machine is back up and running. + +```plain +$ juju ssh uk8s/1 cat /var/snap/microk8s/current/args/containerd-env + +[...] + +# Attempt to change the maximum number of open file descriptors +# this get inherited to the running containers +# +ulimit -n 65536 || true + +# Attempt to change the maximum locked memory limit +# this get inherited to the running containers +# +ulimit -l 16384 || true +``` + +------------------------- + diff --git a/tmp/t/12659.md b/tmp/t/12659.md new file mode 100644 index 000000000..6233dee5d --- /dev/null +++ b/tmp/t/12659.md @@ -0,0 +1,72 @@ +ppasotti | 2023-11-30 14:51:47 UTC | #1 + +> Also see: +> - [Interface tests](/t/12691) +> - [How to register an interface](/t/12689) +> - [How to write interface tests](/t/12690) + +[`charm-relation-interfaces`](https://github.com/canonical/charm-relation-interfaces) is a repository containing specifications, databag schemas and interface tests for Juju relation interfaces. In other words, it is the source of truth for data and behavior of providers and requirers of integrations. + +The purpose of this project is to provide uniformity in the landscape of all possible integrations and promote charm interoperability. + +Juju interfaces are untyped, which means that for juju to think two charms can be integrated all it looks at is whether the interface names of the two endpoints you're trying to connect are the same string. But it might be that the two charms have different, incompatible implementations of two different integrations that happen to have the same name. + +In order to prevent two separate charms from rolling their own integration with the same name, and prevent a sprawl of many subtly different interfaces with similar semantics and similar purposes, we introduced `charm-relation-interfaces`. + +## Using `charm-relation-interfaces` + +If you have a charm that provides a service, you should search `charm-relation-interfaces` (or directly charmhub in the future) and see if it exists already, or perhaps a similar one exists that lacks the semantics you need and can be extended to support it. + +Conversely, if the charm you are developing needs some service (a database, an ingress url, an authentication endpoint...) you should search `charm-relation-interfaces` to see if there is an interface you can use, and to find existing charms that provide it. + +There are three actors in play: + +* **the owner of the specification** of the interface, which also owns the tests that can be used to verify "does charm X 'really' support this interface?". This is the `charm-relation-interfaces` repo. +* **the owner of the implementation** of an interface. 
In practice, this often is the charm that owns the charm library with the reference implementation for an interface. +* **the interface user**: a charm that wants to use the interface (either as requirer or as provider). + +The interface user needs the implementation (typically, the provider also happens to be the owner and so it already has the implementation). This is addressed by `charmcraft fetch-lib`. + +The owner of the implementation needs the specification, to help check that the implementation is in fact compliant. + +## Repository structure + +For each interface, the charm-relation-interfaces repository hosts: +- the **specification**: a semi-formal definition of what the semantics of the interface is, and what its implementations are expected to do in terms of both the provider and the requirer +- a list of **reference charms**: these are the charms that implement this interface, typically, the owner of the charm library providing the original implementation. +- the **schema**: pydantic models unambiguously defining the accepted unit and application databag contents for provider and requirer. +- the **interface tests**: python tests that can be run to verify that a charm complies with the interface specification. + + +## Charm relation interfaces in Charmhub +In the future, Charmhub will have a searchable collection of integration interfaces. +Charmhub will, for all charms using the interface, verify that they implement it correctly (regardless of whether they use the 'official' implementation or they roll their own) in order to give the charm a happy checkmark on `charmhub.io`. In order to do that it will need to fetch the specification (from `charm-relation-interfaces`) *and* the charm repo, because we can't know what implementation they are using: we need the source code. + + +> Contributors: @ppasotti + +------------------------- + +carlcsaposs | 2023-11-28 11:47:03 UTC | #2 + +Not sure if this is worth mentioning now, but may be worth considering: + +Something I see currently missing from charm-relation-interfaces, at least for the database interfaces, is specification about mutability of the databags. + +For example, most of Data Platform's database charms are written in a way that expects the requirer to write to the databag once and to never change that data. On the provider side, the data can be changed (or sometimes removed) and the requirer is expected to handle that gracefully + +------------------------- + +ppasotti | 2023-11-28 13:03:04 UTC | #3 + +Very true. As it is the schemas are static, we don't fully capture yet the 'multi-stage' or back-and-forth interface chatter. +Can somewhat do that with interface tests, but not with schemas. + +------------------------- + +ppasotti | 2023-11-30 11:39:36 UTC | #4 + + + +------------------------- + diff --git a/tmp/t/12676.md b/tmp/t/12676.md new file mode 100644 index 000000000..ebcc167d8 --- /dev/null +++ b/tmp/t/12676.md @@ -0,0 +1,19 @@ +tmihoc | 2024-06-19 07:59:52 UTC | #1 + +The `dispatch` file in your charm is an executable shell script whose responsibility is to execute the `src/charm.py` file with certain environment variables. + +The file is created automatically by `charmcraft pack` and you can inspect it by unzipping the `.charm` archive (`unzip .charm` ) or by deploying the charm, SSHing into one its units, and inspecting the charm directory in there (e.g., for unit `0`: `ls agents/unit--0/charm`). 
+ +--- +[details=Expand to view contents of a sample dispatch file] +```bash +#!/bin/sh + +JUJU_DISPATCH_PATH="${JUJU_DISPATCH_PATH:-$0}" PYTHONPATH=lib:venv \ + exec ./src/charm.py +``` +[/details] +--- + +------------------------- + diff --git a/tmp/t/12689.md b/tmp/t/12689.md new file mode 100644 index 000000000..69aaa913b --- /dev/null +++ b/tmp/t/12689.md @@ -0,0 +1,222 @@ +ppasotti | 2024-06-26 08:26:50 UTC | #1 + +> Also see: +> - [Interface tests](/t/12691) +> - [How to write interface tests](/t/12690) +> - [How to test interface tests](/t/13062) + +Suppose you have determined that you need to create a new relation interface called `my_fancy_database`. + +Suppose that your interface specification has the following data model: +- the requirer app is supposed to forward a list of tables that it wants to be provisioned by the database provider +- the provider app (the database) at that point will reply with an API endpoint and, for each replica, it will provide a separate secret ID to authenticate the requests + +These are the steps you need to take in order to register it with [`charm-relation-interfaces`](/t/12659). + +--- +[details="Expand to preview some example results"] +- [A bare-minimum example](https://github.com/IronCore864/charm-relation-interfaces/tree/my-fancy-database/interfaces/my_fancy_database/v0) +- [A more realistic example](https://github.com/canonical/charm-relation-interfaces/tree/main/interfaces/ingress/v1): + - As you can see from the [`interface.yaml`](https://github.com/canonical/charm-relation-interfaces/blob/main/interfaces/ingress/v1/interface.yaml) file, the [`canonical/traefik-k8s-operator` charm](https://github.com/canonical/traefik-k8s-operator) plays the provider role in the interface. + - The schema of this interface is defined in [`schema.py`](https://github.com/canonical/charm-relation-interfaces/blob/main/interfaces/ingress/v1/schema.py). + - You can find out more information about this interface in the [README](https://github.com/canonical/charm-relation-interfaces/blob/main/interfaces/ingress/v1/README.md). + +[/details] + +--- + +## 1. Clone (a fork of) [the `charm-relation-interfaces` repo](https://github.com/canonical/charm-relation-interfaces) and set up an interface specification folder + +```bash +git clone https://github.com/canonical/charm-relation-interfaces +cd /path/to/charm-relation-interfaces +``` + +## 2. Make a copy of the template folder +Copy the template folder to a new folder called the same as your interface (with underscores instead of dashes). + +```bash +cp -r ./interfaces/__template__ ./interfaces/my_fancy_database +``` + +At this point you should see this directory structure: + +``` +# tree ./interfaces/my_fancy_database +./interfaces/my_fancy_database +└── v0 + ├── README.md + ├── interface.yaml + ├── interface_tests + └── schema.py +2 directories, 3 files +``` + +## 3. Edit `interface.yaml` + +Add to `interface.yaml` the charm that owns the reference implementation of the `my_fancy_database` interface. Assuming your `my_fancy_database_charm` plays the `provider` role in the interface, your `interface.yaml` will look like this: + +```yaml +# interface.yaml +providers: + - name: my-fancy-database-operator # same as metadata.yaml's .name + url: https://github.com/your-github-slug/my-fancy-database-operator +``` + +## 4. 
Edit `schema.py` + +Edit `schema.py` to contain: + +```python +# schema.py + +from interface_tester.schema_base import DataBagSchema +from pydantic import BaseModel, AnyHttpUrl, Field, Json +import typing + + +class ProviderUnitData(BaseModel): + secret_id: str = Field( + description="Secret ID for the key you need in order to query this unit.", + title="Query key secret ID", + examples=["secret:12312323112313123213"], + ) + + +class ProviderAppData(BaseModel): + api_endpoint: AnyHttpUrl = Field( + description="URL to the database's endpoint.", + title="Endpoint API address", + examples=["https://example.com/v1/query"], + ) + + +class ProviderSchema(DataBagSchema): + app: ProviderAppData + unit: ProviderUnitData + + +class RequirerAppData(BaseModel): + tables: Json[typing.List[str]] = Field( + description="Tables that the requirer application needs.", + title="Requested tables.", + examples=[["users", "passwords"]], + ) + + +class RequirerSchema(DataBagSchema): + app: RequirerAppData + # we can omit `unit` because the requirer makes no use of the unit databags +``` + +To verify that things work as they should, you can `pip install pytest-interface-tester` and then run `interface_tester discover --include my_fancy_database` from the `charm-relation-interfaces` root. + +You should see: +```yaml +- my_fancy_database: + - v0: + - provider: + - + - schema OK + - charms: + - my_fancy_database_charm (https://github.com/your-github-slug/my-fancy-database-operator) custom_test_setup=no + - requirer: + - + - schema OK + - +``` + +In particular pay attention to `schema`. If it says `NOT OK` then there is something wrong with the pydantic model. + +## 5. Edit `README.md` + +Edit the `README.md` file to contain: +```markdown +# `my_fancy_database` + +## Overview +This relation interface describes the expected behavior between of any charm claiming to be able to interface with a Fancy Database and the Fancy Database itself. +Other Fancy Database-compatible providers can be used interchangeably as well. + +## Usage + +Typically, you can use the implementation of this interface from [this charm library](https://github.com/your_org/my_fancy_database_operator/blob/main/lib/charms/my_fancy_database/v0/fancy.py), although charm developers are free to provide alternative libraries as long as they comply with this interface specification. + +## Direction +The `my_fancy_database` interface implements a provider/requirer pattern. +The requirer is a charm that wishes to act as a Fancy Database Service consumer, and the provider is a charm exposing a Fancy Database (-compatible API). + +/```mermaid +flowchart TD + Requirer -- tables --> Provider + Provider -- endpoint, access_keys --> Requirer +/``` + +## Behavior + +The requirer and the provider must adhere to a certain set of criteria to be considered compatible with the interface. + +### Requirer + +- Is expected to publish a list of tables in the application databag + + +### Provide + +- Is expected to publish an endpoint URL in the application databag +- Is expected to create and grant a Juju Secret containing the access key for each shard and publish its secret ID in the unit databags. + +## Relation Data + +See the [\[Pydantic Schema\]](./schema.py) + + +### Requirer + +The requirer publishes a list of tables to be created, as a json-encoded list of strings. + +#### Example +\```yaml +application_data: { + "tables": "['users', 'passwords']" +} +\``` + +### Provider + +The provider publishes an endpoint url and access keys for each shard. 
+ +#### Example +\``` +application_data: { + "api_endpoint": "https://foo.com/query" +}, +units_data : { + "my_fancy_unit/0": { + "secret_id": "secret:12312321321312312332312323" + }, + "my_fancy_unit/1": { + "secret_id": "secret:45646545645645645646545456" + } +} +\``` +``` + +## 6. Add interface tests + +See [How to write interface tests](/t/12690). + +## 7. Open a PR to [the `charm-relation-interfaces` repo](https://github.com/canonical/charm-relation-interfaces) + +Finally, open a pull request to the `charm-relation-interfaces` repo and drive it to completion, addressing any feedback or concerns that the maintainers may have. + +> Contributors: @ironcore864, @ppasotti + +------------------------- + +ppasotti | 2023-11-30 12:31:13 UTC | #2 + + + +------------------------- + diff --git a/tmp/t/12690.md b/tmp/t/12690.md new file mode 100644 index 000000000..30111ed97 --- /dev/null +++ b/tmp/t/12690.md @@ -0,0 +1,269 @@ +ppasotti | 2024-09-06 12:50:20 UTC | #1 + +> Also see: +> - [How to register an interface](/t/12689) +> - [Interface tests](/t/12691) +> - [How to write interface tests](/t/12690) +> - [How to test interface tests](/t/13062) + +Suppose you have an interface specification in [`charm-relation-interfaces`](/t/12659), or you are working on one, and you want to add [interface tests](/t/todo). These are the steps you need to take. + +We will continue from the running example from the previous HowTo on [How to register an interface](/t/12689). +Your starting setup should look like this: + +```bash +$ tree ./interfaces/my_fancy_database +./interfaces/my_fancy_database +└── v0 + ├── interface.yaml + ├── interface_tests + ├── README.md + └── schema.py + +2 directories, 3 files +``` + +# Write the tests + +## Create the test module + +Add a file to the `interface_tests` directory called `test_provider.py`. +> touch ./interfaces/my_fancy_database/interface_tests/test_provider.py + +## Write a test for the 'negative' path + +Write to `test_provider.py` the code below: + +```python +from interface_tester import Tester +from scenario import State, Relation + + +def test_nothing_happens_if_remote_empty(): + # GIVEN that the remote end has not published any tables + t = Tester( + State( + leader=True, + relations=[ + Relation( + endpoint="my-fancy-database", # the name doesn't matter + interface="my_fancy_database", + ) + ], + ) + ) + # WHEN the database charm receives a relation-joined event + state_out = t.run("my-fancy-database-relation-joined") + # THEN no data is published to the (local) databags + t.assert_relation_data_empty() +``` + +This test verifies part of a 'negative' path: it verifies that if the remote end did not (yet) comply with his part of the contract, then our side did not either. 
+ +## Write a test for the 'positive' path + +Append to `test_provider.py` the code below: + +```python +import json + +from interface_tester import Tester +from scenario import State, Relation + + +def test_contract_happy_path(): + # GIVEN that the remote end has requested tables in the right format + tables_json = json.dumps(["users", "passwords"]) + t = Tester( + State( + leader=True, + relations=[ + Relation( + endpoint="my-fancy-database", # the name doesn't matter + interface="my_fancy_database", + remote_app_data={"tables": tables_json}, + ) + ], + ) + ) + # WHEN the database charm receives a relation-changed event + state_out = t.run("my-fancy-database-relation-changed") + # THEN the schema is satisfied (the database charm published all required fields) + t.assert_schema_valid() +``` + +This test verifies that the databags of the 'my-fancy-database' relation are valid according to the pydantic schema you have specified in `schema.py`. + +To check that things work as they should, you can run `interface_tester discover --include my_fancy_database` from the `charm-relation-interfaces` root. + +[note] +Note that the `interface_tester` is installed in the previous how-to guide [How to register an interface](/t/12689). If you haven't done it yet, install it by running: `pip install pytest-interface-tester `. +[/note] + +You should see: + +```yaml +- my_fancy_database: + - v0: + - provider: + - test_contract_happy_path + - test_nothing_happens_if_remote_empty + - schema OK + - charms: + - my_fancy_database_charm (https://github.com/your-github-slug/my-fancy-database-operator) custom_test_setup=no + - requirer: + - + - schema OK + - +``` + +In particular, pay attention to the `provider` field. If it says `` then there is something wrong with your setup, and the collector isn't able to find your test or identify it as a valid test. + +Similarly, you can add tests for requirer in `./interfaces/my_fancy_database/v0/interface_tests/test_requirer.py`. Don't forget to edit the `interface.yaml` file in the "requirers" section to add the name of the charm and the URL. See the "Edit `interface.yaml`" section in the previous how-to guide [How to register an interface](/t/12689#edit-interfaceyaml-3) for more detail on editing `interface.yaml`. [Here](https://github.com/IronCore864/charm-relation-interfaces/tree/my-fancy-database/interfaces/my_fancy_database/v0) is an example of tests for requirers added. + +## Merge in charm-relation-interfaces + +You are ready to merge this files in the charm-relation-interfaces repository. Open a PR and drive it to completion. + +# Prepare the charm + +In order to be testable by charm-relation-interfaces, the charm needs to expose and configure a fixture. + +[note] +This is because the `fancy-database` interface specification is only supported if the charm is well-configured and has leadership, since it will need to publish data to the application databag. +Also, interface tests are Scenario tests and as such they are mock-based: there is no cloud substrate running, no Juju, no real charm unit in the background. So you need to patch out all calls that cannot be mocked by Scenario, as well as provide enough mocks through State so that the charm is 'ready' to support the interface you are testing. +[/note] + +Go to the Fancy Database charm repository root. 
+
+```bash
+cd path/to/my-fancy-database-operator
+```
+
+Create a `conftest.py` file under `tests/interface`:
+
+> mkdir ./tests/interface
+> touch ./tests/interface/conftest.py
+
+Write in `conftest.py`:
+
+```python
+import pytest
+from charm import MyFancyDatabaseCharm
+from interface_tester import InterfaceTester
+from scenario.state import State
+
+
+@pytest.fixture
+def interface_tester(interface_tester: InterfaceTester):
+    interface_tester.configure(
+        charm_type=MyFancyDatabaseCharm,
+        state_template=State(
+            leader=True,  # we need leadership
+        ),
+    )
+    # this fixture needs to yield (NOT RETURN!) interface_tester again
+    yield interface_tester
+```
+
+[note]
+This fixture overrides a pytest fixture of the same name that ships with `pytest-interface-tester`.
+[/note]
+
+
+[note]
+You can configure the fixture name, as well as its location, but that needs to happen in the `charm-relation-interfaces` repo. Example:
+```
+providers:
+  - name: my-fancy-database-provider
+    url: YOUR_REPO_URL
+    test_setup:
+      location: tests/interface/conftest.py
+      identifier: database_tester
+```
+[/note]
+
+
+## Verifying the `interface_tester` configuration
+
+To verify that the fixture is good enough to pass the interface tests, run the `run_matrix.py` script from the `charm-relation-interfaces` repo:
+
+```bash
+cd path/to/charm-relation-interfaces
+python run_matrix.py --include my_fancy_database
+```
+
+Unless you have already merged the interface tests PR to `charm-relation-interfaces`, this will fail with an error message telling you that it cannot collect the tests for the interface: by default, `pytest-interface-tester` tries to find the tests in the `main` branch of the `canonical/charm-relation-interfaces` repo.
+
+To run tests with a branch in your forked repo, run:
+
+```bash
+cd path/to/my-forked/charm-relation-interfaces
+python run_matrix.py --include my_fancy_database --repo https://github.com/your-github-slug/charm-relation-interfaces --branch my-fancy-database
+```
+
+[note]
+In the above command, remember to replace `your-github-slug` with your own slug, change the repo name accordingly (if you have renamed the forked repo), and replace the `my-fancy-database` branch name with the branch that contains your tests.
+[/note]
+
+The tests should now be collected and executed. You should get output similar to the following:
+
+```bash
+INFO:root:Running tests for interface: my_fancy_database
+INFO:root:Running tests for version: v0
+INFO:root:Running tests for role: provider
+
+...
+
++++ Results +++
+{
+  "my_fancy_database": {
+    "v0": {
+      "provider": {
+        "my-fancy-database-operator": true
+      },
+      "requirer": {
+        "my-fancy-database-operator": true
+      }
+    }
+  }
+}
+```
+
+For reference, [here](https://github.com/IronCore864/my-fancy-database-operator) is an example of a bare-minimum `my-fancy-database-operator` charm that makes the test pass. In the charm, application relation data and unit relation data are set according to our definition (see the beginning of the previous how-to guide [How to register an interface](/t/12689)).
+
+## Troubleshooting and debugging the tests
+
+### Your charm is missing some configurations/mocks
+
+The solution is to add the missing mocks/patches to the `interface_tester` fixture in `conftest.py`.
+Essentially, you need to make it so that the charm runtime 'thinks' that everything is normal and ready to process and accept the interface you are testing.
+This may mean mocking the presence and connectivity of a container, system calls, substrate API calls, and more.
+If you have Scenario or unit tests in your codebase, you most likely already have all the necessary patches scattered around, and it's a matter of collecting them.
+
+Remember that if you run your tests using `run_matrix.py` locally, in your troubleshooting you need to point `interface.yaml` to the branch where you committed your changes, since `run_matrix` fetches the charm repositories in order to run the charms:
+
+```
+requirers:
+  - name: my-fancy-database-operator
+    url: https://my-fancy-database-operator-repo
+    branch: branch-with-my-conftest-changes
+```
+Remember, however, to merge the changes in the operator repository first, before merging the pull request to `charm-relation-interfaces`.
+
+> See more:
+>
+> [Here](https://github.com/IronCore864/my-fancy-database-operator) is a minimal charm that both provides and requires the `my_fancy_database` interface from this how-to guide, and [this](https://github.com/IronCore864/my-fancy-database-operator/blob/main/tests/interface/conftest.py) is an example of a bare-minimum `conftest.py`. See the content of `test_provider.py` [here in a forked repo](https://github.com/IronCore864/charm-relation-interfaces/blob/my-fancy-database/interfaces/my_fancy_database/v0/interface_tests/test_provider.py) and `test_requirer.py` [here](https://github.com/IronCore864/charm-relation-interfaces/blob/my-fancy-database/interfaces/my_fancy_database/v0/interface_tests/test_requirer.py).
+>
+> For a more realistic reference, refer to the [`test_provider.py`](https://github.com/canonical/charm-relation-interfaces/blob/main/interfaces/ingress/v1/interface_tests/test_provider.py) for the ingress interface defined in the `charm-relation-interfaces` repository, and check out the [`traefik-k8s-operator` charm](https://github.com/canonical/traefik-k8s-operator) for the contents of its [`conftest.py`](https://github.com/canonical/traefik-k8s-operator/blob/main/tests/interface/conftest.py) file.
+
+> Contributors: @ppasotti @ironcore864 @mmkay
+
+-------------------------
+
+ppasotti | 2023-11-30 14:59:12 UTC | #2
+
+
+
+-------------------------
+
diff --git a/tmp/t/12691.md b/tmp/t/12691.md
new file mode 100644
index 000000000..61b718fcc
--- /dev/null
+++ b/tmp/t/12691.md
@@ -0,0 +1,45 @@
+ppasotti | 2024-01-30 11:51:43 UTC | #1
+
+> Also see:
+> - [Interface tests](/t/12691)
+> - [How to write interface tests](/t/12690)
+> - [How to test interface tests](/t/13062)
+
+Interface tests are tests that verify the compliance of a charm with an interface specification.
+Interface specifications, stored in [charm-relation-interfaces](/t/12659), are contract definitions that mandate how a charm should behave when integrated with another charm over a registered interface.
+
+Interface tests will allow `charmhub` to validate the integrations of a charm and verify that your charm indeed supports "the" `ingress` interface, and not just some interface that happens to share the name "ingress" with "the official `ingress` interface v2" registered in charm-relation-interfaces (see [here](https://github.com/canonical/charm-relation-interfaces/tree/main/interfaces/ingress/v2)).
+
+Also, they allow alternative implementations of an interface to validate themselves against the contractual specification stored in charm-relation-interfaces, and they help verify compliance with multiple versions of an interface.
+
+An interface test is a contract test powered by [`Scenario`](/t/10583) and a pytest plugin called [`pytest-interface-tester`](https://github.com/canonical/pytest-interface-tester). An interface test has the following pattern:
+1) **GIVEN** an initial state of the relation over the interface under test
+2) **WHEN** a specific relation event fires
+3) **THEN** the state of the databags is valid (e.g. it satisfies an expected pydantic schema)
+
+On top of databag state validity, one can check for more elaborate conditions.
+
+A typical interface test will look like this:
+
+```python
+from interface_tester import Tester
+from scenario import State, Relation
+
+def test_data_published_on_changed_remote_valid():
+    """This test verifies that if the remote end has published valid data and we receive a db-relation-changed event, then the schema is satisfied."""
+    # GIVEN that we have a relation over "db" and the remote end has published valid data
+    relation = Relation(endpoint='db', interface='db',
+                        remote_app_data={'model': '"bar"', 'port': '42', 'name': '"remote"', },
+                        remote_units_data={0: {'host': '"0.0.0.42"', }})
+    t = Tester(State(relations=[relation]))
+    # WHEN the charm receives a db-relation-changed event
+    state_out = t.run(relation.changed_event)
+    # THEN the schema is valid
+    t.assert_schema_valid()
+```
+
+This allows us to determine, independently of which charm we are testing, whether the behavioural specification of this interface is complied with.
+
+> Contributors: @ppasotti
+
+-------------------------
+
diff --git a/tmp/t/12731.md b/tmp/t/12731.md
new file mode 100644
index 000000000..5c31620b6
--- /dev/null
+++ b/tmp/t/12731.md
@@ -0,0 +1,58 @@
+tmihoc | 2023-12-05 15:49:55 UTC | #1
+
+The `pyproject.toml` file in your charm's root directory is a typical Python `pyproject.toml` file.
+
+> See more: [`pip` | `pyproject.toml`](https://pip.pypa.io/en/stable/reference/build-system/pyproject-toml/)
+
+This file is generated automatically by `charmcraft init` with the contents below:
+
+```text
+# Testing tools configuration
+[tool.coverage.run]
+branch = true
+
+[tool.coverage.report]
+show_missing = true
+
+[tool.pytest.ini_options]
+minversion = "6.0"
+log_cli_level = "INFO"
+
+# Formatting tools configuration
+[tool.black]
+line-length = 99
+target-version = ["py38"]
+
+# Linting tools configuration
+[tool.ruff]
+line-length = 99
+select = ["E", "W", "F", "C", "N", "D", "I001"]
+extend-ignore = [
+    "D203",
+    "D204",
+    "D213",
+    "D215",
+    "D400",
+    "D404",
+    "D406",
+    "D407",
+    "D408",
+    "D409",
+    "D413",
+]
+ignore = ["E501", "D107"]
+extend-exclude = ["__pycache__", "*.egg_info"]
+per-file-ignores = {"tests/*" = ["D100","D101","D102","D103","D104"]}
+
+[tool.ruff.mccabe]
+max-complexity = 10
+
+[tool.codespell]
+skip = "build,lib,venv,icon.svg,.tox,.git,.mypy_cache,.ruff_cache,.coverage"
+
+[tool.pyright]
+include = ["src/**.py"]
+```
+
+-------------------------
+
diff --git a/tmp/t/12732.md b/tmp/t/12732.md
new file mode 100644
index 000000000..5f27a8677
--- /dev/null
+++ b/tmp/t/12732.md
@@ -0,0 +1,99 @@
+tmihoc | 2023-12-05 15:50:26 UTC | #1
+
+The `tox.ini` file in your charm’s root directory is a typical Tox configuration file.
+
+> See more: [Tox | Configuration](https://tox.wiki/en/latest/user_guide.html#configuration)
+
+This file is generated automatically by `charmcraft init` with the contents below:
+
+```text
+# Copyright 2023 Ubuntu
+# See LICENSE file for licensing details.
+ +[tox] +no_package = True +skip_missing_interpreters = True +env_list = format, lint, static, unit +min_version = 4.0.0 + +[vars] +src_path = {tox_root}/src +tests_path = {tox_root}/tests +;lib_path = {tox_root}/lib/charms/operator_name_with_underscores +all_path = {[vars]src_path} {[vars]tests_path} + +[testenv] +set_env = + PYTHONPATH = {tox_root}/lib:{[vars]src_path} + PYTHONBREAKPOINT=pdb.set_trace + PY_COLORS=1 +pass_env = + PYTHONPATH + CHARM_BUILD_DIR + MODEL_SETTINGS + +[testenv:format] +description = Apply coding style standards to code +deps = + black + ruff +commands = + black {[vars]all_path} + ruff --fix {[vars]all_path} + +[testenv:lint] +description = Check code against coding style standards +deps = + black + ruff + codespell +commands = + # if this charm owns a lib, uncomment "lib_path" variable + # and uncomment the following line + # codespell {[vars]lib_path} + codespell {tox_root} + ruff {[vars]all_path} + black --check --diff {[vars]all_path} + +[testenv:unit] +description = Run unit tests +deps = + pytest + coverage[toml] + -r {tox_root}/requirements.txt +commands = + coverage run --source={[vars]src_path} \ + -m pytest \ + --tb native \ + -v \ + -s \ + {posargs} \ + {[vars]tests_path}/unit + coverage report + +[testenv:static] +description = Run static type checks +deps = + pyright + -r {tox_root}/requirements.txt +commands = + pyright {posargs} + +[testenv:integration] +description = Run integration tests +deps = + pytest + juju + pytest-operator + -r {tox_root}/requirements.txt +commands = + pytest -v \ + -s \ + --tb native \ + --log-cli-level=INFO \ + {posargs} \ + {[vars]tests_path}/integration +``` + +------------------------- + diff --git a/tmp/t/12733.md b/tmp/t/12733.md new file mode 100644 index 000000000..56a491429 --- /dev/null +++ b/tmp/t/12733.md @@ -0,0 +1,50 @@ +tmihoc | 2023-12-05 15:50:04 UTC | #1 + +The `tests/integration/test_charm.py` file is the companion to `src/charm.py` for integration testing. + +This file is created automatically by `charmcraft init` and it is pre-populated with standard constructs used by `pytest-operator`, similar to the below: + +```text +#!/usr/bin/env python3 +# Copyright 2023 Ubuntu +# See LICENSE file for licensing details. + +import asyncio +import logging +from pathlib import Path + +import pytest +import yaml +from pytest_operator.plugin import OpsTest + +logger = logging.getLogger(__name__) + +METADATA = yaml.safe_load(Path("./metadata.yaml").read_text()) +APP_NAME = METADATA["name"] + + +@pytest.mark.abort_on_fail +async def test_build_and_deploy(ops_test: OpsTest): + """Build the charm-under-test and deploy it together with related charms. + + Assert on the unit status before any relations/configurations take place. 
+ """ + # Build and deploy charm from local source folder + charm = await ops_test.build_charm(".") + resources = { + "some-container-image": METADATA["resources"]["some-container-image"]["upstream-source"] + } + + # Deploy the charm and wait for active/idle status + await asyncio.gather( + ops_test.model.deploy(charm, resources=resources, application_name=APP_NAME), + ops_test.model.wait_for_idle( + apps=[APP_NAME], status="active", raise_on_blocked=True, timeout=1000 + ), + ) + + +``` + +------------------------- + diff --git a/tmp/t/12734.md b/tmp/t/12734.md new file mode 100644 index 000000000..e1395ef51 --- /dev/null +++ b/tmp/t/12734.md @@ -0,0 +1,454 @@ +javierdelapuente | 2024-04-24 09:02:31 UTC | #1 + +> See also: [Testing > Integration](/t/11125#heading--integration-testing) + +This document shows how to write integration tests for a charm. + +[note type=information] +Integration testing is only one part of a comprehensive testing strategy. See [How to test a charm](/t/4461) for unit testing and [How to write a functional test](/t/10585) for functional tests. +[/note] + +The instructions all use the Juju `python-libjuju` client, either through the `pytest-operator` library or directly. + +> See more: [`python-libjuju`](https://github.com/charmed-kubernetes/pytest-operator), [`pytest-operator`](/t/12818) + + + +**Contents:** +1. [Prepare your environment](#heading--prepare-test-environment) +1. [Prepare the `tox.ini` configuration file](#heading--prepare-the-tox-ini-configuration-file) +1. [Create a test file](#heading--create-test-file) +1. [Build your tests](#heading--build-tests) + - [Test build and deploy](#heading--test-build-and-deploy) + - [Deploy your charm with resources](#heading--deploy-your-charm-with-resources) + - [Test a relation](#heading--test-a-relation) + - [Test a configuration](#heading--test-a-configuration) + - [Test an action](#heading--test-an-action) + - [Interact with the workload](#heading--interact-with-the-workload) + - [Run a subprocess command within Juju context](#heading--run-a-subprocess-command-within-juju-context) + - [Use several models](#heading--use-several-models) + - [Deploy a bundle](#heading--deploy-a-bundle) + - [Speed up `update_status` with `fast_forward`](#heading--fast-forward) +1. [Run your tests](#heading--run-tests) +1. [Generate crash dumps](#heading--crash-dumps) + +

Prepare your environment

+
+In order to run integration tests, you will need to have your environment set up with `tox` installed.
+
+> See more: [How to set up your development environment](/t/4450)
+
+

Prepare the `tox.ini` configuration file

+
+Check that the following configuration is in your `tox.ini` file. If you initialised the charm with `charmcraft init`, it should already be there.
+
+```
+[testenv:integration]
+description = Run integration tests
+deps =
+    pytest
+    juju
+    pytest-operator
+    -r {tox_root}/requirements.txt
+commands =
+    pytest -v \
+        -s \
+        --tb native \
+        --log-cli-level=INFO \
+        {posargs} \
+        {[vars]tests_path}/integration
+```
+

Create a test file

+ +By convention, integration tests are kept in the charm’s source tree, in a directory called `tests/integration`. + +If you initialised the charm with `charmcraft init`, your charm directory should already contain a `tests/integration/test_charm.py` file. Otherwise, create this directory structure manually (the test file can be called whatever you wish) and, inside the `.py` file, import `pytest` and, from the `pytest_operator.plugin`, the `OpsTest` class provided by the `ops_test` fixture: + +``` +import pytest +from pytest_operator.plugin import OpsTest +``` + +The `ops_test` fixture is your entry point to the `pytest-operator` library, and the preferred way of interacting with Juju in integration tests. This fixture will create a model for each test file -- if you write two tests that should not share a model, make sure to place them in different files. + +

Build your tests

+ +[note type=positive status="Pro tip"] +Use `pytest` custom markers to toggle which types of tests are being run so you can skip the destructive parts and focus on the business logic tests. See more: [Discourse | Pasotti: Classify tests with pytest custom markers for quick integration testing iterations](https://discourse.charmhub.io/t/classify-tests-with-pytest-custom-markers-for-quick-integration-testing-iterations/14006). +[/note] + +
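+As a sketch of that idea (the marker name and its registration are illustrative, not part of the `charmcraft init` template), you can register a custom marker in `tests/integration/conftest.py`:
+
+```python
+# tests/integration/conftest.py (sketch)
+def pytest_configure(config):
+    # Register the marker so pytest does not warn about it being unknown.
+    config.addinivalue_line("markers", "deploy: tests that build and (re)deploy applications")
+```
+
+Then decorate the destructive tests with `@pytest.mark.deploy` and deselect them while iterating on the business logic, for example with `tox -e integration -- -m "not deploy"`.
+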

Test build and deploy

+
+To build and deploy the current charm, in your integration test file, add the function below:
+```
+@pytest.mark.skip_if_deployed
+@pytest.mark.abort_on_fail
+async def test_build_and_deploy(ops_test: OpsTest):
+    charm = await ops_test.build_charm(".")
+    app = await ops_test.model.deploy(charm)
+
+    await ops_test.model.wait_for_idle(status="active", timeout=60)
+```
+
+Tests run sequentially in the order they are written in the file. It can be useful to put tests that build and deploy applications at the top of the file, since the applications can be used by other tests. For that reason, adding extra checks or `asserts` in this test is not recommended.
+
+The decorator `@pytest.mark.abort_on_fail` aborts all subsequent tests if something goes wrong. With the decorator `@pytest.mark.skip_if_deployed` you can skip that test if a `--model` is passed as a command-line parameter (see [Run your tests](#heading--run-tests) for more information).
+
+`ops_test.build_charm` builds the charm with charmcraft. `ops_test.model` is an instance of `python-libjuju`'s [Model](https://pythonlibjuju.readthedocs.io/en/latest/api/juju.model.html#juju.model.Model) class that references the active model tracked by `pytest-operator` for the current module.
+
+As an alternative to `wait_for_idle`, you can explicitly block until the application status is `active` or `error` and then assert that it is `active`.
+
+```
+    await ops_test.model.block_until(lambda: app.status in ("active", "error"), timeout=60)
+    assert app.status == "active"
+```
+
+> Example implementations: [charm-coredns](https://github.com/charmed-kubernetes/charm-coredns/blob/b1d83b6a31200924fefcd288336bc1f9323c6a72/tests/integration/test_integration.py#L21), [charm-calico](https://github.com/charmed-kubernetes/charm-calico/blob/e1dfdda92fefdba90b7b7e5247fbc861c34ad146/tests/integration/test_calico_integration.py#L18)
+
+> See more:
+> - [`pytest-operator` | `ops_test.build_charm`](https://github.com/charmed-kubernetes/pytest-operator/blob/ab50fc20320d3ea3d8a37495f92a004531a4023f/pytest_operator/plugin.py#L1020)
+> - [`python-libjuju` | `model.deploy`](https://github.com/juju/python-libjuju/blob/2581b0ced1df6201c6b7fd8cc0b20dcfa9d97c51/juju/model.py#L1658)
+

Deploy your charm with resources

+ +> See also: [Resource](/t/5609) + +A charm can require `file` or `oci-image` `resources` to work, that can be provided to `ops_test.model.deploy`. In Charmhub, resources have revision numbers. For file resources already stored in Charmhub, you can use `ops_test.download_resources`: + +``` +async def test_build_and_deploy(ops_test: OpsTest): + charm = await ops_test.build_charm(".") + arch_resources = ops_test.arch_specific_resources(charm) + resources = await ops_test.download_resources(charm, resources=arch_resources) + app = await ops_test.model.deploy(charm, resources=resources) + await ops_test.model.wait_for_idle(status="active", timeout=60) +``` + +You can also reference a file resource on the filesystem. You can also use [`ops_test.build_resources`](https://github.com/charmed-kubernetes/pytest-operator/blob/ab50fc20320d3ea3d8a37495f92a004531a4023f/pytest_operator/plugin.py#L1073) to build file resources from a build script. + +For `oci-images` you can reference an image registry. +``` + ... + resources = {"resource_name": "localhost:32000/image_name:latest"} + app = await ops_test.model.deploy(charm, resources=resources) + ... +``` + +> Example implementations: [kubernetes-control-plane](https://github.com/charmed-kubernetes/charm-kubernetes-control-plane/blob/8769db394bf377a03ce94066307ecf831b88ad17/tests/integration/test_kubernetes_control_plane_integration.py#L41), [synapse-operator](https://github.com/canonical/synapse-operator/blob/eb44f4959a00040f08b98470f8b17cae4cc616da/tests/integration/conftest.py#L119), [prometheus-k8s](https://github.com/canonical/prometheus-k8s-operator/blob/d29f323343a1e4906a8c71104fcd1de817b2c2e6/tests/integration/test_remote_write_with_zinc.py#L27) + +> +> See more: +> - [`pytest-operator` | `build_resources`](https://github.com/charmed-kubernetes/pytest-operator/blob/ab50fc20320d3ea3d8a37495f92a004531a4023f/pytest_operator/plugin.py#L1073) +> - [`pytest-operator` | `download_resources`](https://github.com/charmed-kubernetes/pytest-operator/blob/ab50fc20320d3ea3d8a37495f92a004531a4023f/pytest_operator/plugin.py#L1101) +> - [`python-libjuju` | `model.deploy`](https://github.com/juju/python-libjuju/blob/2581b0ced1df6201c6b7fd8cc0b20dcfa9d97c51/juju/model.py#L1658) + + +

Test a relation

+ +To test an integration between two applications, you can just integrate them through +the model. Both applications have to be deployed beforehand. + +``` + ... +async def test_my_integration(ops_test: OpsTest): + # both application_1 and application_2 have to be deployed + # in the current test or a previous one. + await ops_test.model.integrate("application_1:relation_name_1", "application_2:relation_name_2") + await ops_test.model.wait_for_idle(status="active", timeout=60) + # check any assertion here + .... +``` + +> Example implementations: [slurmd-operator](https://github.com/canonical/slurmd-operator/blob/ffb24b05bec1b10cc512c060a4739358bfea0df0/tests/integration/test_charm.py#L89) + +> See more: [`python-libjuju` | `model.integrate`](https://github.com/juju/python-libjuju/blob/2581b0ced1df6201c6b7fd8cc0b20dcfa9d97c51/juju/model.py#L1476) + +

Test a configuration

+ +> See also: [Configuration](/t/11150) + +You can set a configuration option in your application and check its results. + +``` +async def test_config_changed(ops_test: OpsTest): + ... + await ops_test.model.applications["synapse"].set_config({"server_name": "invalid_name"}) + # In this case, when setting server_name to "invalid_name" + # we could for example expect a blocked status. + await ops_test.model.wait_for_idle(status="blocked", timeout=60) + .... +``` +> See also: https://discourse.charmhub.io/t/how-to-add-a-configuration-option-to-a-charm/4458 +> +> See also: [python-libjuju | application.set_config](https://github.com/juju/python-libjuju/blob/2581b0ced1df6201c6b7fd8cc0b20dcfa9d97c51/juju/application.py#L591) + + + +

Test an action

+ +> See also: [Action](/t/11149) + +You can execute an action on a unit and get its results. + +``` +async def test_run_action(ops_test: OpsTest): + action_register_user = await ops_test.model.applications["myapp"].units[0].run_action("register-user", username="ubuntu") + await action_register_user.wait() + assert action_register_user.status == "completed" + password = action_register_user.results["user-password"] + # We could for example check here that we can login with the new user +``` + +> See also: [python-libjuju | unit.run_action](https://github.com/juju/python-libjuju/blob/2581b0ced1df6201c6b7fd8cc0b20dcfa9d97c51/juju/unit.py#L274) + +

Interact with the workload

+ +To interact with the workload, you need to have access to it. This is dependent on many aspects of your application, environment and network topology. + +You can get information from your application or unit addresses using `await ops_test.model.get_status`. That way, if your application exposes a public address you can reference it. You can also try to connect to a unit address or public address. + +``` +async def test_workload_connectivity(ops_test: OpsTest): + status = await ops_test.model.get_status() + address = status.applications['my_app'].public_address + # Or you can try to connect to a concrete unit + # address = status.applications['my_app'].units['my_app/0'].public_address + # address = status.applications['my_app'].units['my_app/0'].address + appurl = f"http://{address}/" + r = requests.get(appurl) + assert r.status_code == 200 +``` + +How you can connect to a private or public address is dependent on your configuration, so you may need a different approach. + +> Example implementations: [mongodb-k8s-operator](https://github.com/canonical/mongodb-k8s-operator/blob/8b9ebbee3f225ca98175c25781f1936dc4a62a7d/tests/integration/metrics_tests/test_metrics.py#L33), [tempo-k8s-operator](https://github.com/canonical/tempo-k8s-operator/blob/78a1143d99af99a1a56fe9ff82b1a3563e4fd2f7/tests/integration/test_integration.py#L69), [synapse](https://github.com/canonical/synapse-operator/blob/eb44f4959a00040f08b98470f8b17cae4cc616da/tests/integration/conftest.py#L170) + +> See more: +> - [Charm development best practices > Fetching network information](/t/5396#heading--fetching-network-information) +> - [`juju` CLI commands > juju expose](/t/1721) + +

Run a subprocess command within Juju context

+ +You can run a command within the Juju context with: + +``` + ... + command = ["microk8s", "version"] + returncode, stdout, stderr = await ops_test.run(*command, check=True) + ... +``` + +You can similarly invoke the Juju CLI. This can be useful for cases where `python-libjuju` sees things differently than the Juju CLI. By default the environment variable `JUJU_MODEL` is set, +so you don't need to include the `-m` parameter. + +``` + .... + command = ["secrets"] + returncode, stdout, stderr = await ops_test.juju(*command, check=True) + .... +``` + +> Example implementations: [prometheus-k8s-operator](https://github.com/canonical/prometheus-k8s-operator/blob/d29f323343a1e4906a8c71104fcd1de817b2c2e6/tests/integration/conftest.py#L86), [hardware-observer-operator](https://github.com/canonical/hardware-observer-operator/blob/08c50798ca1c133a5d8ba5c889e0bcb09771300b/tests/functional/conftest.py#L14) + + +> See more: +> - [`pytest-operator` | `run`](https://github.com/charmed-kubernetes/pytest-operator/blob/ab50fc20320d3ea3d8a37495f92a004531a4023f/pytest_operator/plugin.py#L576) +> - [`pytest-operator` | `juju`](https://github.com/charmed-kubernetes/pytest-operator/blob/ab50fc20320d3ea3d8a37495f92a004531a4023f/pytest_operator/plugin.py#L624) + +

Use several models

+
+You can use `pytest-operator` with several models, in the same cloud or in
+different clouds. This way you can, for example, integrate machine charms
+with Kubernetes charms easily.
+
+You can track a new model with:
+
+```
+    new_model = await ops_test.track_model("model_alias",
+                                           cloud_name="cloud_name",
+                                           credential_name="credentials")
+```
+
+`track_model` will track a model with the alias `model_alias` (not the real model name). It may be necessary to use `credential_name` if you do not use the same cloud as the controller.
+
+Using the new alias, you can switch context to the newly created model, similar to the `juju switch` command:
+
+```
+    with ops_test.model_context("model_alias"):
+        # Here ops_test.model refers to the model tracked under "model_alias"
+        # You can now use ops_test.model and it will apply to the model in the context
+```
+
+`pytest-operator` will handle the newly created model by default. If you want to, you can remove it from the controller at any point:
+
+```
+    await ops_test.forget_model("model_alias")
+```
+
+A sketch combining these pieces is shown at the end of this section.
+
+> Example implementations: [`charm-kubernetes-autoscaler`](https://github.com/charmed-kubernetes/charm-kubernetes-autoscaler/blob/8f4ddf5d66802ade73ed3aab2bb8d09fd9e4d63a/tests/integration/test_kubernetes_autoscaler.py#L31)
+
+> See more:
+> - [Juju offers](/t/1772)
+> - [How to manage clouds](/t/1100#heading--add-a-cloud)
+> - [pytest-operator | track_model](https://github.com/charmed-kubernetes/pytest-operator/blob/ab50fc20320d3ea3d8a37495f92a004531a4023f/pytest_operator/plugin.py#L720)
+> - [pytest-operator | model_context](https://github.com/charmed-kubernetes/pytest-operator/blob/ab50fc20320d3ea3d8a37495f92a004531a4023f/pytest_operator/plugin.py#L480)
+> - [pytest-operator | forget_model](https://github.com/charmed-kubernetes/pytest-operator/blob/ab50fc20320d3ea3d8a37495f92a004531a4023f/pytest_operator/plugin.py#L812)
+
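+Putting these pieces together, a cross-model test could look roughly like the sketch below (the `microk8s` cloud name and the `grafana-k8s` charm are illustrative):
+
+```python
+async def test_deploy_into_second_model(ops_test: OpsTest):
+    # Track a second model, on a possibly different cloud, under the alias "k8s"
+    await ops_test.track_model("k8s", cloud_name="microk8s")
+    with ops_test.model_context("k8s"):
+        # Inside the context, ops_test.model points at the "k8s" model
+        await ops_test.model.deploy("grafana-k8s")
+        await ops_test.model.wait_for_idle(status="active", timeout=600)
+```
+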

Deploy a bundle

+ +[note type="caution"] +It is not recommended to use `ops_test.build_bundle` and `ops_test.deploy_bundle` until this [issue](https://github.com/charmed-kubernetes/pytest-operator/issues/98) is closed, as it uses `juju-bundle` which is outdated. You can deploy bundles using `ops_test.model.deploy` or [`ops_test.juju`](/t/12734#heading--run-command). +[/note] + + +
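+For example, a minimal sketch that deploys a bundle file through the Juju CLI wrapper (the bundle path is illustrative):
+
+```python
+async def test_deploy_bundle(ops_test: OpsTest):
+    # Deploy a local bundle file with the juju CLI instead of juju-bundle
+    retcode, stdout, stderr = await ops_test.juju(
+        "deploy", "./tests/integration/bundle.yaml", check=True
+    )
+    await ops_test.model.wait_for_idle(status="active", timeout=600)
+```
+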

Render bundles and charms

+ +`pytest-operator` has utilities to template your charms and bundles using Jinja2. + +To render a kubernetes bundle with your current charm, create the file `./test/integration/bundle.yaml.j2` with this content: +``` +bundle: kubernetes +applications: + my-app: + charm: {{ charm }} + scale: {{ scale }} +``` + +You can now add the next integration test that will build an deploy the bundle with the current charm: +``` +async def test_build_and_deploy_bundle(ops_test: OpsTest): + charm = await ops_test.build_charm(".") + + bundle = ops_test.render_bundle( + 'tests/integration/bundle.yaml.j2', + charm=charm, + scale=1, + ) + juju_cmd = ["deploy", str(bundle)] + rc, stdout, stderr = await ops_test.juju(*juju_cmd) +``` + + +> Example implementations: [`hardware-observer-operator`](https://github.com/canonical/hardware-observer-operator/blob/47a79eb2872f6222099e7f48b8daafe8d20aa946/tests/functional/test_charm.py#L57) + + + +

Speed up `update_status` with `fast_forward`

+ +If your charm code depends on the `update_status` event, you can speed up its +firing rate with `fast_forward`. Inside the new async context you can put any code that will benefit from the new refresh rate so your test may execute faster. + +``` + ... + app = await ops_test.model.deploy(charm) + + async with ops_test.fast_forward(): + await ops_test.model.wait_for_idle(status="active", timeout=120) + .... +``` + +> Example implementations [`postgresql-k8s-operator`](https://github.com/canonical/postgresql-k8s-operator/blob/69b2c138fa6b974883aa6d3d15a3315189d321d8/tests/integration/ha_tests/test_upgrade.py#L58), [`synapse-operator`](https://github.com/canonical/synapse-operator/blob/05c00bb7666197d04f1c025c36d8339b10b64a1a/tests/integration/test_charm.py#L249) + + +> See more: +> - [Event `update-status`](/t/6484) +> - [`pytest-operator` | `fast_forward`](https://github.com/charmed-kubernetes/pytest-operator/blob/ab50fc20320d3ea3d8a37495f92a004531a4023f/pytest_operator/plugin.py#L1400) + + +

Run your tests

+
+By default you can run all your tests with:
+
+```
+tox -e integration
+```
+
+These tests will use the context of the current controller in Juju, and by default will create a new model per module, which will be destroyed when the test finishes. The cloud, controller and model name can be specified with the `--cloud`, `--controller` and `--model` parameters.
+
+If you specify the model name and do not delete the model on test teardown, using the `--keep-models` parameter, you can reuse a model from a previous test run, as in the following example:
+```
+# in the initial execution, the new model will be created
+tox -e integration -- --keep-models --model test-example-model
+# in the next execution it will reuse the model created previously:
+tox -e integration -- --keep-models --model test-example-model --no-deploy
+```
+
+The parameter `--no-deploy` will skip tests decorated with `@pytest.mark.skip_if_deployed`. That way you can iterate faster on integration tests, since applications need to be deployed only once.
+
+There are different ways of specifying a subset of tests to run using `pytest`. With the `-k` option you can specify different expressions. For example, the following command will run all tests in the `test_charm.py` file except the `test_one` function.
+```
+tox -e integration -- tests/integration/test_charm.py -k "not test_one"
+```
+
+> Example implementations: [`mysql-k8s-operator`]( https://github.com/canonical/mysql-k8s-operator/blob/39213226192a4cbb5396be755cd320b00b3b02c7/tests/integration/relations/test_database.py#L30 )
+
+> See more:
+> - [`pytest-operator` | `skip_if_deployed`](https://github.com/charmed-kubernetes/pytest-operator/blob/ab50fc20320d3ea3d8a37495f92a004531a4023f/pytest_operator/plugin.py#L139)
+> - [`pytest | How to invoke pytest`](https://docs.pytest.org/en/7.1.x/how-to/usage.html)
+

Generate crash dumps

+ +To generate crash dumps, you need the `juju-crashdump` tool . + + +You can install it with `sudo snap install --classic juju-crashdump`. + +By default, when tests are run, a crash dump file will be created in the current directory if a test fails and if `--keep-models` is `false`. This crash dump file will include the current configuration and also Juju logs. + +You can disable crash dump generation with `--crash-dump=never`. To always create a crash dump file (even when tests do not fail) to a specific location run: + +``` +tox -e integration -- --crash-dump=always --crash-dump-output=/tmp +``` + +> See more: +> - [`juju-crashdump`](https://github.com/juju/juju-crashdump) +> - [`pytest-operator` | `--crash-dump`](https://github.com/charmed-kubernetes/pytest-operator/blob/ab50fc20320d3ea3d8a37495f92a004531a4023f/pytest_operator/plugin.py#L97) + + +> Contributors: @natalia-nowakowska , @javierdelapuente + +------------------------- + +javierdelapuente | 2023-12-05 16:11:06 UTC | #2 + + + +------------------------- + +tmihoc | 2023-12-14 14:18:34 UTC | #3 + + + +------------------------- + +ppasotti | 2024-01-04 09:30:36 UTC | #4 + +When developing integration tests it's sometimes useful to run a specific test without executing all tests/setup functions that come before it. I often forget the syntax to do so because it involves some subtleties due to OpsTest implementation details. + +IIRC doing that requires either: + +- some `{posargs}` trickery in the tox env to let pytest pick up the argument so you can do `tox -e integration -k foo.py::test_bar` +- setting up some tox envvar so that OpsTest *thinks* we're running it from Tox (it will bork otherwise). Something like: `TOXENV=.tox/integration pytest -k foo.py::test_bar` + +Would be nice to add a section on this. When I find out again the details I'll add it + +------------------------- + +javierdelapuente | 2024-01-09 15:23:11 UTC | #5 + +Thanks @ppasotti! + +Just one thing about the OpsTest, I manage to run `pytest` without setting TOXENV. Maybe it is not necessary anymore? + +------------------------- + +ppasotti | 2024-01-09 16:22:50 UTC | #6 + +huh that's news! I'll try it out + +------------------------- + diff --git a/tmp/t/12818.md b/tmp/t/12818.md new file mode 100644 index 000000000..4d2e1ea8f --- /dev/null +++ b/tmp/t/12818.md @@ -0,0 +1,22 @@ +tmihoc | 2023-12-14 14:46:11 UTC | #1 + +The `pytest-operator` library is a Python library that provides Juju plugins for the generic Python +library `pytest` to facilitate the [integration testing](/t/11125#heading--integration-testing) of [charms](/t/5457). + +> See more: [`pytest-operator`](https://github.com/charmed-kubernetes/pytest-operator) + +It builds a fixture called `ops_test` that helps you interact with Juju through constructs that wrap around [`python-libjuju` ](https://pypi.org/project/juju/). + +> See more: +> - [`pytest-operator` > `ops_test`](https://github.com/charmed-kubernetes/pytest-operator/blob/main/docs/reference.md#ops_test) +> - [`pytest` > Fixtures](https://docs.pytest.org/en/6.2.x/fixture.html) + +It also provides convenient markers and command line parameters (e.g., the `@pytest.mark.skip_if_deployed` marker in combination with the `--no-deploy` configuration helps you skip, e.g., a deployment test in the case where you already have a deployment). 
+ + +> See more: +> - [`pytest-operator` > Markers](https://github.com/charmed-kubernetes/pytest-operator/blob/main/docs/reference.md#markers) +> - [`pytest-operator` > Command line parameters](https://github.com/charmed-kubernetes/pytest-operator/blob/main/docs/reference.md#command-line-parameters) + +------------------------- + diff --git a/tmp/t/13005.md b/tmp/t/13005.md new file mode 100644 index 000000000..40509f3dc --- /dev/null +++ b/tmp/t/13005.md @@ -0,0 +1,126 @@ +sed-i | 2024-01-23 13:16:26 UTC | #1 + +One of the goals for COS Lite is to be able to ingest considerable amount of data on modest hardware. Load testing is useful for gaining insight into how to size observability clusters appropriately. + +## Method +The test method is identical to the method used for [load-testing on 4cpu8gb](https://charmhub.io/cos-lite/docs/reference/performance/on-4cpu-8gb-ssd?channel=latest/edge). + +- No k8s resource limits set. The per-pod resource requirements are presented below and can be used by admins to set resource limits. +- MicroK8s 1.27, Juju 3.1.6. +- 20 virtual SREs (dashboard gazers) "looking" at panels with thousands of datapoints and log lines. This mimics an outage response, where 20 people are suddenly looking at a heavy dashboard at the same time. +- No receivers configured for alertmanager and no rules (other than self monitoring) are evaluated by prometheus. +- Load tests that run successfully for over 12h without incidents are marked as “PASSED”, and were used for constructing the datasheet. Passing tests are also used for curve fitting an approximation for resource usage. +- The latest results were obtained with the following charm versions: + +| App | Workload version | Charm revision | +|---------------|------------------|----------------| +| alertmanager | 0.25.0 | 96 | +| catalogue | n/a | 33 | +| cos-config | 3.5.0 | 44 | +| grafana | 9.2.1 | 97 | +| loki | 2.9.2 | 109 | +| prometheus | 2.48.0 | 163 | +| scrape-config | n/a | 45 | +| scrape-target | n/a | 32 | +| traefik | 2.10.4 | 166 | + +- Several IaC fixes were introduced in [cos-lite/84](https://github.com/canonical/cos-lite-bundle/pull/84) to address some necessary changes. +- Loki config changes were made ([loki-k8s/325](https://github.com/canonical/loki-k8s-operator/pull/325)) to improve query performance. + +## Results + +In a "lab" environment, COS Lite on an 8cpu16gb VM with a "performance" SSD disk was able to ingest: +- 6.6 million datapoints per minute +- (6 million datapoints + 3600 log lines) per minute +- (4.5 million data points + 320000 log lines) per minute. + +Note that the results above do not leave any leeway. For production you should probably use >10% margin. + +When COS Lite is in isolation and is only ingesting its own self-monitoring metrics ("idle" mode), it consumes 6% CPU (0.48 vCPU) and 16% memory (2.56 GB). + +![VM sizing from per-pod data (with 10% margin)|690x677](upload://qoQXxP4hKUIzdgKbG387pYTfAxD.png) + + +To calculate dynamically, use: + +``` +disk = 3.011e-4 * L + 3.823e-6 * M + 1.023 +cpu = 1.89 * arctan(1.365e-4 * L) + 1.059e-7 * M + 1.644 +mem = 2.063 * arctan(2.539e-3 * L) + 1.464e-6 * M + 3.3 +``` + +Where: +- disk is in GiB/day +- cpu is in vCPUs +- mem is in GB +- L is the number of ingested log lines per minute +- M is the number of ingested metric datapoints per minute + + +## Discussion +- For Loki, querying (data retrieval) is expensive: the resources required for ingestion were negligible compared to querying. 
As a result, Loki's resource requirements were constant (independent of ingestion rate). +- Major contributors to load are Loki retrieval (CPU-intensive) and Prometheus ingestion (memory-intensive). + +### Disk usage +- Disk usage as a function of logs ingestion rate has an excellent linear fit. The fit was made using data from Loki 2.9.2 only, using a 12h average. +- The data spread for metrics ingestion rate isn't great, and may be due to a human error of inconsistent recording of 6h vs 12h averages (I switched from 6 to 12h during the experiment). The linear fit gives good coverage for high ingestion rates. More accurate results will be published soon after refactoring the load test. +- Disk usage (GiB/day) can be calculated as follows: `(3.011e-4 * L + 2.447e-3) + (3.823e-6 * M + 1.021)`, where `L` is the number of log lines per minute, and `M` is the number of metrics data points per minute. The time scale of `1m` was chosen to match the default scrape interval of charmed prometheus. Self-monitoring contributes the `1.021` (GiB/day) to the overall usage. The `2.447` (MiB/day) is likely just a minor fitting error (effectively equals zero). + +![Disk size estimation (GiB/day)|690x401](upload://2PCmVMimx5pjB0KAbnLjADdmZMl.png) + + +### Per-pod resource usage +- Per-pod resource usage is interesting because it gives better insight into how compute resources are consumed across components. + +![Per-pod resource usage|690x401](upload://aBZFg4d7VQMSlg6biynbTPclcyB.png) + + +#### Loki +- Ingestion load is negligible to query load. That is why resources saturate at the same level for a broad range of ingestion rate. +- The difference in trend between the current experiment (8cpu16gb) and the previous experiment (4cpu8gb) can be explained by: + - Different Loki configuration. + - Different Loki version (more memory efficient). +- For some reason that is currently unknown to me, Charmed Loki wasn't able to ingest more than 360k log lines per minute, even though VM resources were not exhausted. This is likely to do with a Loki configuration option that I haven't discovered yet. +- For fitting purposes, the `arctan` function was used in order to capture the behaviour near the origin. The choice of `arctan` is arbitrary. +- Resource usage calculation (`L` is the number of log lines per minute): + - CPU usage (vCPUs): `1.442e-1 + 1.89 * arctan(1.365e-4 * L)` + - Memory usage (GB): `4.851e-2 + 2.063 * arctan(2.539e-3 * L)` + +#### Prometheus +- Total load is strongly linear with ingestion rate, indicating that querying is cheaper than ingestion. +- The difference in trend between the current experiment (8cpu16gb) and the previous experiment (4cpu8gb) can be explained by the different Prometheus version used, which included improvements in memory and cpu usage. +- Resource usage calculation (`M` is the number of metric data points per minute): + - CPU usage (vCPUs): `1.059e-7 * M + 1.696e-1` + - Memory usage (GB): `1.464e-6 * M + 2.51e-1` + +#### Everything else +For all other pods, resource consumption is fairly constant: + +| Component | vCPUs | Memory (GB) | +| --------------------------------------------- | ----- | ----------- | +| Grafana | 0.25 | 0.2 | +| Traefik | 0.08 | 0.2 | +| Everything else (alertmanager, MicroK8s, ...) | 1.0 | 2.6 | + + +## Conclusions +- The Grafana Labs stack that COS Lite is based on can ingest a substantial amount of data with fairly moderate compute resources. +- Resource requirements are sensitive to Loki configuration. 
While default config values for Loki are likely to meet most users' needs, further tweaking will result in better tailored performance. +- Additional work is needed to produce more repeatable and accurate results. + +## Data & code +See https://github.com/canonical/cos-lite-bundle/pull/84. + +## Future plans +- Figure out why loki ingestion rate saturates at around 300k log lines / min, regardless of available resources. +- Switch from flood-element and locust to k6. +- Use the terraform juju provider instead of pure cloud-init runcmd. +- Repeat tests with end-to-end TLS enabled. +- Add juju metrics (juju exporter?) to load test dashboard. + +## References +- [GCP disk io limits for `pd-ssd`](https://cloud.google.com/compute/docs/disks/performance#pd-ssd) +- [cos-lite/84](https://github.com/canonical/cos-lite-bundle/pull/84): Add datasheet for 8cpu16gb + +------------------------- + diff --git a/tmp/t/13019.md b/tmp/t/13019.md new file mode 100644 index 000000000..9c841db86 --- /dev/null +++ b/tmp/t/13019.md @@ -0,0 +1,21 @@ +benhoyt | 2024-02-01 06:08:48 UTC | #1 + +> [Event](/t/6361) > [List of events](/t/6657) > [Lifecycle events](/t/4455) > `-pebble-custom-notice` +> +> Source: [`ops.PebbleCustomNoticeEvent`](https://ops.readthedocs.io/en/latest/index.html#ops.PebbleCustomNoticeEvent) + + + +Juju emits the `-pebble-custom-notice` event when a Pebble notice of type "custom" occurs for the first time or repeats. There is one `-pebble-custom-notice` event for each container defined in `charmcraft.yaml`. This event allows the charm to respond to custom events that happen in the workload container. + +[note] +This event is specific to Kubernetes sidecar charms and is only ever fired on Kubernetes deployments. +[/note] + +> See more: [How to use custom notices from the workload container](/t/4554#heading--use-custom-notices-from-the-workload-container) + +------------------------- + diff --git a/tmp/t/13046.md b/tmp/t/13046.md new file mode 100644 index 000000000..90ac1114a --- /dev/null +++ b/tmp/t/13046.md @@ -0,0 +1,51 @@ +tmihoc | 2024-01-29 09:43:39 UTC | #1 + +## Usage: +```text +charmcraft set-resource-architectures [options] +``` + +## Summary: + +Set the architectures for a resource revision in Charmhub. + +Each resource revision is tagged with one or more architectures. If a revision is incorrectly tagged, this command can modify the architecture tags for that resource revision. 
+ +For example: + +```text +$ charmcraft resource-revisions my-charm my-resource +Revision Created at Size Architectures +1 2020-11-15 T11:13:15Z 183151 riscv64 +$ charmcraft set-resource-architectures my-charm my-resource --revision=1 arm64,armhf +Revision 1 of 'my-resource' on charm 'my-charm' set to architectures: arm64,armhf +$ charmcraft resource-revisions my-charm my-resource +Revision Created at Size Architectures +1 2020-11-15 T11:13:15Z 183151 arm64,armhf +``` + +## Options: +| | | +|-|-| +| `-h, --help` | Show this help message and exit | +| `-v, --verbose` | Show debug information and be more verbose | +| `-q, --quiet` | Only show warnings and errors, not progress | +| `--verbosity` | Set the verbosity level to 'quiet', 'brief', 'verbose', 'debug' or 'trace' | +| `-V, --version` | Show the application version and exit | +| `-p, --project-dir` | Specify the project's directory (defaults to current) | +| `--format` | Produce the result in the specified format (currently only 'json') | +| `--revision` | A revision to update | + +## See also: +- `close` +- `promote-bundle` +- `release` +- `resource-revisions` +- `resources` +- `revisions` +- `status` +- `upload` +- `upload-resource` + +------------------------- + diff --git a/tmp/t/13070.md b/tmp/t/13070.md new file mode 100644 index 000000000..6dca3232d --- /dev/null +++ b/tmp/t/13070.md @@ -0,0 +1,27 @@ +tmihoc | 2024-02-01 07:48:45 UTC | #1 + +**Pebble** is a lightweight, API-driven process supervisor. In the charm SDK, it is used through [Ops](/t/5527) ([`ops.pebble.Client`](/t/6366)) to give workload containers something akin to an `init` system that will allow the charm container to interact with them. + +> See more: [GitHub | Pebble](https://github.com/canonical/pebble) + + +Pebble is the recommended way to create Kubernetes charms using the sidecar pattern. + +## Pebble notices + +> See also: [How to use custom notices from the workload container](/t/4554#heading--use-custom-notices-from-the-workload-container) + +In Pebble, a **notice** is an aggregated event to record when custom events happen in the workload container or in Pebble itself. + +> See more: [GitHub | Pebble > Notices](https://github.com/canonical/pebble#notices) + +Pebble notices are supported in Juju starting with version 3.4. Juju polls each workload container's Pebble server for new notices, and fires an event to the charm when a notice first occurs as well as each time it repeats. + +Each notice has a *type* and *key*, the combination of which uniquely identifies it. A notice's count of occurrences is incremented every time a notice with that type and key combination occurs. + +Currently, the only notice type is "custom". These are custom notices recorded by a user of Pebble; in future, other notice types may be recorded by Pebble itself. When a custom notice occurs, Juju fires a [`PebbleCustomNoticeEvent`](https://ops.readthedocs.io/en/latest/#ops.PebbleCustomNoticeEvent) event whose [`workload`](https://ops.readthedocs.io/en/latest/#ops.WorkloadEvent.workload) attribute is set to the relevant container. + +Custom notices allow the workload to wake up the charm when something interesting happens with the workload, for example, when a PostgreSQL backup process finishes, or some kind of alert occurs. 
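+
+For example, a workload script might record a notice with `pebble notify canonical.com/postgresql/backup-done`, and the charm can react to the resulting event. A minimal sketch (the container name, notice key and charm class are illustrative):
+
+```python
+import logging
+
+import ops
+
+logger = logging.getLogger(__name__)
+
+
+class MyCharm(ops.CharmBase):
+    def __init__(self, *args):
+        super().__init__(*args)
+        # "workload" is the container name declared in charmcraft.yaml (illustrative)
+        self.framework.observe(
+            self.on["workload"].pebble_custom_notice, self._on_pebble_custom_notice
+        )
+
+    def _on_pebble_custom_notice(self, event: ops.PebbleCustomNoticeEvent) -> None:
+        # The notice key tells the charm what happened inside the workload container
+        if event.notice.key == "canonical.com/postgresql/backup-done":
+            logger.info("backup finished in container %s", event.workload.name)
+```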
+ +------------------------- + diff --git a/tmp/t/13086.md b/tmp/t/13086.md new file mode 100644 index 000000000..fa9b54b5e --- /dev/null +++ b/tmp/t/13086.md @@ -0,0 +1,15 @@ +tmihoc | 2024-03-14 17:29:51 UTC | #1 + +> See also: [How to manage the client > `terraform`](/t/1083) + +When used with the `juju` provider plugin, the `terraform` CLI is a CLI [client](/t/13146) of Juju that allows you to use Juju through Terraform plans. + +> See more: [Terraform](https://developer.hashicorp.com/terraform/docs), [Terraform | CLI](https://developer.hashicorp.com/terraform/cli/commands), [Terraform | Juju Provider](https://registry.terraform.io/providers/juju/juju/latest/docs) + + +[note type=caution] +The `juju` provider is currently a beta product and not feature-complete. It does not have full parity with the [Juju CLI](/t/5465). +[/note] + +------------------------- + diff --git a/tmp/t/13089.md b/tmp/t/13089.md new file mode 100644 index 000000000..56f88e3fb --- /dev/null +++ b/tmp/t/13089.md @@ -0,0 +1,25 @@ +tmihoc | 2024-07-04 08:13:44 UTC | #1 + +[note type=caution] +**When Juju 4.0 is released, if you upgrade your `juju` client to 4+:** That will introduce a change that will break the `python-libjuju` client. This issue will be addressed in an upcoming `python-libjuju` release. Stay tuned and make sure to update! See more: [Discourse | Juju 4.0 AllWatcher API changes](https://discourse.charmhub.io/t/juju-4-0-allwatcher-api-changes/14614). + +[/note] + +`python-libjuju` is one of the [client](/t/13146) libraries of Juju that allows interaction with a Juju controller via Python scripts. + +Install it on your Python setup (e.g. `pip install juju`), and you have the full control of your infrastructure that you can control via your scripts. `python-libjuju` often proves to be very useful in automated testing, as well as quick interactions with Juju entities using just the Python repl. + +> Source: [GitHub | `python-libjuju`](https://github.com/juju/python-libjuju) +> +> Example uses: [GitHub | python-libjuju/examples](https://github.com/juju/python-libjuju/tree/master/examples) +> +> Reference docs: [Read the docs | `python-libjuju`](https://pythonlibjuju.readthedocs.io/en/latest/) +> +> Project: [PyPi Project | Juju](https://pypi.org/project/juju/) + +
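+As a quick illustration of the kind of script this enables (a sketch; it assumes you are already logged in to a controller and have a current model):
+
+```python
+import asyncio
+
+from juju.model import Model
+
+
+async def main():
+    model = Model()
+    await model.connect()  # connect to the currently active model
+    try:
+        # List the applications deployed in the model
+        print(list(model.applications))
+    finally:
+        await model.disconnect()
+
+
+asyncio.run(main())
+```
+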
+ +> **Contributors:** @cderici, @simonrichardson , @tmihoc + +------------------------- + diff --git a/tmp/t/13130.md b/tmp/t/13130.md new file mode 100644 index 000000000..a01712626 --- /dev/null +++ b/tmp/t/13130.md @@ -0,0 +1,11 @@ +tmihoc | 2024-02-08 08:34:23 UTC | #1 + +In Juju, **`jujuc`** is a binary that provides a collection of command-line tools that charms can use during their hook executions to interact with the Juju environment. It comes with your Juju installation. + + +
+ +> **Contributors:** @anvial + +------------------------- + diff --git a/tmp/t/13132.md b/tmp/t/13132.md new file mode 100644 index 000000000..cac43320a --- /dev/null +++ b/tmp/t/13132.md @@ -0,0 +1,29 @@ +tmihoc | 2024-02-23 12:39:54 UTC | #1 + +> See also: [How to manage offers](/t/1150) + +In Juju, an **offer** represents an [application](/t/5471) that has been made available for [cross-model integrations](/t/5464). + +When you are integrating an application with an offer, what you're doing is consume + integrate, where consume = validate that your user has permission to consume the offer + create a local application proxy for the application and integrate is the usual local integrate. + + + +------------------------- + diff --git a/tmp/t/13137.md b/tmp/t/13137.md new file mode 100644 index 000000000..83faa9e61 --- /dev/null +++ b/tmp/t/13137.md @@ -0,0 +1,20 @@ +tmihoc | 2024-02-09 12:42:18 UTC | #1 + +> See also: [How to scale an application](/t/5476#heading--scale-an-application) + +In the context of a cloud deployment in general, **scaling** means modifying the amount of resources thrown at an application, which can be done *vertically* (modifying the memory, CPU, or disk for a cloud resource) or *horizontally* (modifying the number of resources), where each can be *up* (more) or down (*less*). In the context of Juju, scaling means exactly the same, with the mention that + +- Vertical scaling is handled through [constraints](/t/6184) and horizontal scaling through [units](/t/5460). +- Horizontal scaling up can be used to achieve [high availability (HA)](/t/1066) -- though, depending on whether the charm delivering the application supports HA natively or not, you may also have to perform additional steps. + + + + +------------------------- + diff --git a/tmp/t/13146.md b/tmp/t/13146.md new file mode 100644 index 000000000..d1dfc9107 --- /dev/null +++ b/tmp/t/13146.md @@ -0,0 +1,14 @@ +tmihoc | 2024-03-14 14:10:58 UTC | #1 + +> See also: [How to manage the client](/t/1083) + +A Juju **client** is any software that implements the Juju client apiserver contract. + +This currently includes: + +- the Juju CLI ([`juju`](/t/5465)) +- the Terraform CLI when used with the `juju` provider plug-in ([`terraform juju`](/t/13086)) +- the `python-libjuju` library ([`python-libjuju`](/t/13089)) + +------------------------- + diff --git a/tmp/t/13249.md b/tmp/t/13249.md new file mode 100644 index 000000000..92e420d87 --- /dev/null +++ b/tmp/t/13249.md @@ -0,0 +1,109 @@ +tmihoc | 2024-03-19 08:40:34 UTC | #1 + +Juju supports both traditional machine clouds as well as Kubernetes clouds. If you are familiar with Kubernetes, there's a mapping between Kubernetes and Juju concepts: + +| Kubernetes | Juju | +|-|-| +| [namespace](https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/) | [model](/t/5456) | +| [node](https://kubernetes.io/docs/concepts/architecture/nodes/) | [machine](/t/5459); Juju does not manage this for Kubernetes | +| [pod](https://kubernetes.io/docs/concepts/workloads/pods/) | [unit](/t/5460) | +| container | process in a unit | +| [service](https://kubernetes.io/docs/concepts/services-networking/service/) | [application](/t/5471) | + + +The rest of this document expands on this mapping. + +**Contents:** + +- [Namespace and Model](#heading--namespace-and-model) +- [Node and Machine](#heading--node-and-machine) +- [Pod and Unit](#heading--pod-and-unit) +- [Service and Application](#heading--service-and-application) + +

Namespace and Model

+ +Both namespaces and models allow for the aggregation of a set of resources into a common "context". However, a model must be part of a Juju [cloud](/t/5454), whereas a namespace is +not part of any higher grouping; there is not an equivalent concept of a cloud on Kubernetes. + +

Node and Machine

+ +While **nodes** and **machines** are equivalent in definition +(a physical or virtual machine where you can run a workload on), Juju does not internally represent +nodes as machines. Instead, it delegates the work of handling nodes to the Kubernetes cluster, +and only manages **pods** directly. + +

Pod and Unit

+ +**Pods** and **units** are essentially the same, since they +deploy code into a container or process. However, units in an **application** will always have a leader unit, which will be the unit handling the lifecycle of the application. Pods lack this functionality, meaning you would need to manually implement leader election to enable this type of pod architecture in Kubernetes. + +

<a href="#heading--service-and-application"><h2 id="heading--service-and-application">Service and Application</h2></a>

Similar in concept, **services** and **applications** both allow integration with other services/applications within the cluster and can also be exposed to enable access to the cluster from the outside world. A key difference is that applications can be integrated automatically with other applications, provided that the applications' [endpoints](/t/5462) are compatible with each other. The integration between services, on the other hand, must be done manually, using the services' IP addresses or DNS names as their integration points.
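To make the contrast concrete (hypothetical application names), two charmed applications with compatible endpoints are integrated with a single command, whereas two Kubernetes services have to be wired together by hand:

```text
# Juju matches compatible endpoints and sets up the integration:
juju integrate wordpress mysql

# With plain Kubernetes services you would instead configure one workload
# manually with the other's DNS name, e.g. mysql.<namespace>.svc.cluster.local
```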
+ +**Contributors:** @anvial, @jedel , @tmihoc + +------------------------- + +jedel | 2024-03-14 20:08:12 UTC | #2 + +I think this document could be expanded a bit to explain why those mappings are in place. It should also help on relating both concepts between them. I'm sending a revision of the document here for your review: + +Juju supports both traditional machine clouds as well as Kubernetes clouds. If you are familiar with Kubernetes, there's a mapping between Kubernetes and Juju concepts: + +| Kubernetes | Juju | +|-|-| +| namespace | model | +| node | machine; Juju does not manage this for Kubernetes| +| pod | unit | +| container | process in a unit | +| service | application | + + +## Namespace and Model + +Both [**namespaces**](https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/) +and [**models**](https://discourse.charmhub.io/t/model/5456) allow for the aggregation of a set of +resources into a common "context". However, a model must be part of a +Juju [**cloud**](https://discourse.charmhub.io/t/cloud-substrate/5454), whereas a namespace is +not part of any higher grouping; there is not an equivalent concept of a cloud on Kubernetes. + + +## Node and Machine + +While [**nodes**](https://kubernetes.io/docs/concepts/architecture/nodes/) and +[**machines**](https://discourse.charmhub.io/t/machine/5459) are equivalent in definition +(a physical or virtual machine where you can run a workload on), Juju does not internally represent +nodes as machines. Instead, it delegates the work of handling nodes to the Kubernetes cluster, +and only manages **pods** directly. + + +## Pod and Unit + +[**Pods**](https://kubernetes.io/docs/concepts/workloads/pods/) and +[**units**](https://discourse.charmhub.io/t/unit/5460) are essentially the same, since they +deploy code into a container or process. However, units in an **application** will always have a leader unit, which will be the unit handling the lifecycle of the application. Pods lack this functionality, meaning you would need to manually implement leader election to enable this type of pod architecture in Kubernetes. + + +## Service and Application +Both similar in concept, [**services**](https://kubernetes.io/docs/concepts/services-networking/service/) and +[**applications**](https://discourse.charmhub.io/t/application/5471) allow the integration of other services/applications within the cluster and can also +be exposed to enable access from the external world to the cluster. +A key difference is that applications can be automatically integrated with other applications, provided that the applications' [**endpoints**](https://discourse.charmhub.io/t/endpoint/5462) are +compatible with each other. On the other hand, the integration between services must be done +manually, using the services' IP addresses or DNS names as their integration points. + +------------------------- + +tmihoc | 2024-03-15 16:33:57 UTC | #3 + +@jedel This is great. I'll incorporate it into the doc right away and add your name to the list of contributors. Thanks! + +------------------------- + diff --git a/tmp/t/13276.md b/tmp/t/13276.md new file mode 100644 index 000000000..8f6d5c523 --- /dev/null +++ b/tmp/t/13276.md @@ -0,0 +1,158 @@ +tmihoc | 2024-07-19 14:29:40 UTC | #1 + + + +When Juju creates a controller it needs two critical pieces of information: + +- *(For machine clouds:)* **Metadata regarding the LXD container image and LXD VM image to use:** The unique identifier of the image to use when spawning a new machine (instance). 
- *(For all clouds:)* **Metadata regarding the agent binaries:** The URL from which to download the correct Juju agent.

This metadata is stored in a JSON format called 'Simplestreams'. The image metadata is available by default for all the public clouds that Juju supports but needs to be generated if you're setting up your own private cloud. The agent binary metadata is available by default for all clouds, but developers may want to generate it for testing (though an alternative is `juju sync-agent-binaries`).

This document shows how to manage this metadata in Juju.

> See also: [Plugin `juju-metadata`](/t/6877)

**Contents:**

- [Generate metadata](#heading--generate-metadata)
- [Validate metadata](#heading--validate-metadata)
- [Add metadata](#heading--add-metadata)
- [Sign metadata](#heading--sign-metadata)
- [View all the known metadata](#heading--view-all-the-known-metadata)
- [Delete metadata](#heading--delete-metadata)

<a href="#heading--generate-metadata"><h2 id="heading--generate-metadata">Generate metadata</h2></a>

**For cloud images.** To generate metadata for cloud images, use the `metadata` plugin with the `generate-image` subcommand. This is useful for creating metadata for custom images. The metadata is stored in *Simplestreams*, a data format designed to provide a standardized way to represent and discover metadata about cloud resources.

```text
juju metadata generate-image
```

The cloud specification comes from the current Juju model, but it is possible to override certain cloud attributes, including the region, endpoint, and base, using command arguments. The architecture defaults to "amd64", but this option can also be adjusted for other architectures.

> See more: [`juju metadata > generate-image`](/t/6877)

The generated image metadata can then be used to speed up bootstrap and deployment.

> See more: [`juju bootstrap`](/t/command-bootstrap/10132), [Bootstrapping](/t/bootstrapping/6209), [VMware vSphere and Juju](/t/vmware-vsphere-and-juju/1099)

**For agent binaries.** To create metadata for Juju agent binaries, use the `metadata` plugin with the `generate-agent-binaries` subcommand. This generates simplestreams metadata for agent binaries, facilitating their discovery and use.

```text
juju metadata generate-agent-binaries -d <metadata-dir>
```

The simplestreams stream for which metadata is generated is specified using the `--stream` parameter (default is "released"). Metadata can be generated for any supported stream - released, proposed, testing, devel.

```text
juju metadata generate-agent-binaries -d <metadata-dir> --stream proposed
```

Newly generated metadata will be merged with any existing metadata that is already there. To first remove metadata for the specified stream before generating new metadata, use the `--clean` option.

```text
juju metadata generate-agent-binaries -d <metadata-dir> --stream proposed --clean
```

> See more: [`juju metadata > generate-agent-binaries`](/t/6877)
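To tie the image workflow above together, a rough sketch (the image ID, region, and directory below are placeholders, and flag spellings may vary between Juju versions) might look like this:

```text
# Generate image metadata for a custom image into a local directory:
juju metadata generate-image -i <image-id> -r <region> -d ./simplestreams

# Point bootstrap at the generated metadata:
juju bootstrap <cloud> --metadata-source ./simplestreams
```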

<a href="#heading--validate-metadata"><h2 id="heading--validate-metadata">Validate metadata</h2></a>

**For images.** To validate image metadata and ensure the specified image or images exist for a model, use the `metadata` plugin with the `validate-images` subcommand.

```text
juju metadata validate-images
```

The key model attributes may be overridden using command arguments, so that the validation may be performed on arbitrary metadata.

> See more: [`juju metadata > validate-images`](/t/6877)

**For agent binaries.** To ensure that the compressed tar archives (.tgz) for the Juju agent binaries are available and correct, use the `metadata` plugin with the `validate-agent-binaries` subcommand. For example:

```text
juju metadata validate-agent-binaries
```

It is also possible to indicate the OS type to validate for, the cloud provider, the region, and the endpoint. It is also possible to specify a local directory containing agent metadata, in which case cloud attributes like the provider type, region, etc. are optional.

> See more: [`juju metadata > validate-agent-binaries`](/t/6877)

<a href="#heading--add-metadata"><h2 id="heading--add-metadata">Add metadata</h2></a>

**For images.** To add custom image metadata to your model, use the `metadata` plugin with the `add-image` subcommand, followed by the unique image identifier and the base. This is useful when you have specific cloud images that you want Juju to use for creating instances. For example:

```text
juju metadata add-image <image-id> --base <base>
```

It is also possible to pass various options to specify the image architecture, the model to operate in, the cloud region where the image exists, and so on.

> See more: [`juju metadata > add-image`](/t/6877)
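For instance, a hypothetical invocation (the image ID and region are illustrative; run `juju metadata add-image --help` to confirm the exact option names in your Juju version) might be:

```text
juju metadata add-image ami-0abc1234 --base ubuntu@22.04 --region us-east-1 --arch amd64
```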

<a href="#heading--sign-metadata"><h2 id="heading--sign-metadata">Sign metadata</h2></a>

If you need to sign your simplestreams metadata for security purposes, use the `metadata` plugin with the `sign` subcommand.

```text
juju metadata sign -d <metadata-dir> -k <keyring-file>
```

The specified keyring file is expected to contain an armored private key. If the key is encrypted, then a passphrase should be specified using the command option `--passphrase` to decrypt the key.

> See more: [`juju metadata > sign`](/t/6877)

<a href="#heading--view-all-the-known-metadata"><h2 id="heading--view-all-the-known-metadata">View all the known metadata</h2></a>

**For images.** To view a list of the cloud image metadata currently used by Juju, use the `metadata` plugin with the `images` (or its alias `list-images`) subcommand. This shows the images Juju considers when choosing an image to start an instance from.

```text
juju metadata images
```
```text
juju metadata list-images --format yaml
```

The resulting list can be filtered to show only the images for a given region, architecture, or set of bases (specified using the OS name and version). For example:

```text
juju metadata images --bases ubuntu@22.04 --region eu-west-1 --model mymodel
```

> See more: [`juju metadata > images`](/t/6877)

<a href="#heading--delete-metadata"><h2 id="heading--delete-metadata">Delete metadata</h2></a>

**For images.** To remove previously added image metadata from a Juju environment, use the `metadata` plugin with the `delete-image` subcommand followed by the ID of the image.

```text
juju metadata delete-image <image-id>
```

The command also allows you to specify whether this operation should show verbose output or no informational output at all. The `--model` option can be used to specify the model to operate in.

> See more: [`juju metadata > delete-image`](/t/6877)
+ +**Contributors: @manadart, @simonrichardson, @tmihoc, @toaksoy** + +------------------------- + diff --git a/tmp/t/13400.md b/tmp/t/13400.md new file mode 100644 index 000000000..d09c7a944 --- /dev/null +++ b/tmp/t/13400.md @@ -0,0 +1,67 @@ +tmihoc | 2024-08-02 22:07:54 UTC | #1 + +To add extra configuration files and/or binaries/libraries in your charm (e.g., to support more functions), in your `charmcraft.yaml`, under the `parts` key, define a part and, in the part properties, set the `plugin` key to `dump` or `nil`. For example: + +```yaml +parts: + libs: + plugin: dump + source: /usr/local/lib/ + organize: + "libxxx.so*": lib/ + prime: + - lib/ +``` + +This libs part will copy the locally built libxxx to the charm lib directory. + +[note type=prime] +**If your charm currently uses the `prime` key in a `charm` part to include extra files:** + +Note that, starting with Charmcraft 3.0, the behaviour of this keyword changes, with changes affecting existing bases. + +An example of how to change an existing charm to work may be found [here](https://github.com/canonical/mongodb-operator/pull/449). + +While in Charmcraft 2.x this was valid: + +```yaml +parts: + my-charm: + plugin: charm + source: . + prime: + - charm_version + - charm_internal_version + - workload_version +``` + +Starting in Charmcraft 3.0, these additional files must be primed using the `dump` plugin: + +```yaml +parts: + my-charm: + plugin: charm + source: . + version_data: + plugin: dump + source: . + prime: + - charm_version + - charm_internal_version + - workload_version +``` + +[details=Why did this change?] +The behaviour in Charmcraft 3 is the intended behaviour of all craft applications. [snapcraft](https://snapcraft.io/docs/snapcraft-parts-metadata#prime) and [rockcraft](https://documentation.ubuntu.com/rockcraft/en/stable/common/craft-parts/reference/part_properties/#prime) both use the behaviour in Charmcraft 3. As Charmcraft 3 contained a major rewrite, the decision was made to change this behaviour. +[/details] + +[/note] + +> See more: [File `charmcraft.yaml` > parts](/t/7132#heading--parts) + +
+ + **Contributors:** @lengau, @syu-w , @tmihoc + +------------------------- + diff --git a/tmp/t/13403.md b/tmp/t/13403.md new file mode 100644 index 000000000..e9e4a215d --- /dev/null +++ b/tmp/t/13403.md @@ -0,0 +1,338 @@ +tmihoc | 2024-09-02 10:00:46 UTC | #1 + +This document shows how to set up a Juju test environment -- complete with a sandbox (Ubuntu VM), a local cloud (LXD for machine charms and MicroK8s for Kubernetes charms), and Juju -- and then how to tear it all down once you're done playing around. + +There are two ways to get all this set up: automatically or manually. + +- [Set up / tear down automatically](#heading--set-up---tear-down-automatically) +- [Set up / tear down manually](#heading--set-up---tear-down-manually) + +

<a href="#heading--set-up---tear-down-automatically"><h2 id="heading--set-up---tear-down-automatically">Set up / tear down automatically</h2></a>

+ +- [Set up automatically](#heading--set-up-automatically) +- [Tear down automatically](#heading--tear-down-automatically) + +

<a href="#heading--set-up-automatically"><h3 id="heading--set-up-automatically">Set up automatically</h3></a>

+ +1. [Install Multipass](https://multipass.run/docs/how-to-install-multipass). + +[note type=caution] +**If on Windows:** Note that Multipass can only be installed on Windows 10 Pro or Enterprise. If you are using a different version, please follow the [Set up / tear down manually](#heading--set-up---tear-down-manually) guide, omitting the Multipass step. +[/note] + +For example, on Linux (assumes you have `snapd`): + +```text +sudo snap install multipass +``` + +2. Use Multipass with the `charm-dev` blueprint to launch a Juju-ready Ubuntu VM (below `my-juju-vm`): + +```text +multipass launch --cpus 4 --memory 8G --disk 50G --name my-juju-vm charm-dev +``` + +[note type=information] +This step may take a few minutes to complete (e.g., 10 mins). + +This is because the command downloads, installs, (updates,) and configures a number of packages, and the speed will be affected by network bandwidth (not just your own, but also that of the package sources). + +However, once it’s done, you’ll have everything you’ll need – all in a nice isolated environment that you can clean up easily. + +> See more: [GitHub > `multipass-blueprints` > `charm-dev.yaml`](https://github.com/canonical/multipass-blueprints/blob/ae90147b811a79eaf4508f4776390141e0195fe7/v1/charm-dev.yaml#L134) + +**Troubleshooting:** If this fails, run `multipass delete --purge my-juju-vm` to clean up, then try the `launch` line again. +[/note] + + +3. Open a shell into the VM: + +```text +$ multipass shell my-juju-vm +Welcome to Ubuntu 22.04.4 LTS (GNU/Linux 5.15.0-100-generic x86_64) + + * Documentation: https://help.ubuntu.com + * Management: https://landscape.canonical.com + * Support: https://ubuntu.com/pro + + System information as of Mon Mar 18 17:11:59 CET 2024 + + System load: 0.0 Processes: 117 + Usage of /: 5.6% of 28.89GB Users logged in: 1 + Memory usage: 3% IPv4 address for ens3: 10.238.98.63 + Swap usage: 0% + + * Strictly confined Kubernetes makes edge and IoT secure. Learn how MicroK8s + just raised the bar for easy, resilient and secure K8s cluster deployment. + + https://ubuntu.com/engage/secure-kubernetes-at-the-edge + +Expanded Security Maintenance for Applications is not enabled. + +16 updates can be applied immediately. +1 of these updates is a standard security update. +To see these additional updates run: apt list --upgradable + +Enable ESM Apps to receive additional future security updates. +See https://ubuntu.com/esm or run: sudo pro status + + +Last login: Mon Mar 18 16:09:16 2024 from 10.238.98.1 +To run a command as administrator (user "root"), use "sudo ". +See "man sudo_root" for details. + +ubuntu@my-juju-vm:~$ + +``` + +4. (Optional:) Verify that the VM has indeed come pre-equipped with everything you'll need: + +Verify that you have Juju, MicroK8s (for machine charms) / LXD (for machine charms), a MicroK8s / LXD cloud (`microk8s` / `localhost`), a controller on that cloud (`microk8s` / `lxd`), and a workload model on that controller (`welcome-k8s` / `welcome-lxd`) by switching to the workload model: + +---- +[details=Expand to see the instructions for MicroK8s] +```text +ubuntu@my-juju-vm:~$ juju switch microk8s:welcome-k8s +``` +[/details] + + +---- +[details=Expand to see the instructions for LXD] +```text +ubuntu@my-juju-vm:~$ juju switch lxd:welcome-lxd +``` +[/details] + +----- + +Done! + +[note type=information] + +- Going forward: + - Use the Multipass VM shell to run all commands. + + +- At any point: + - To exit the shell, press `mod key + C` or type `exit`. 
+ - To stop the VM after exiting the VM shell, run `multipass stop my-juju-vm`. + - To restart the VM and re-open a shell into it, type `multipass shell my-juju-vm`. +[/note] + +

<a href="#heading--tear-down-automatically"><h3 id="heading--tear-down-automatically">Tear down automatically</h3></a>

+ +Delete the Multipass VM (below, `my-juju-vm`): + +```text +multipass delete --purge my-juju-vm +``` + +[Uninstall Multipass](https://multipass.run/docs/install-multipass#uninstall). + +

<a href="#heading--set-up---tear-down-manually"><h2 id="heading--set-up---tear-down-manually">Set up / tear down manually</h2></a>

+ +- [Set up manually](#heading--set-up-manually) +- [Tear down manually](#heading--tear-down-manually) + +

<a href="#heading--set-up-manually"><h3 id="heading--set-up-manually">Set up manually</h3></a>

+ +1. [(Optional) Set up an Ubuntu VM with Multipass](#heading--manual-optional-set-up-an-ubuntu-vm-with-multipass) +1. [Set up your cloud](#heading--manual-set-up-your-cloud) +1. [Set up Juju](#heading--manual-set-up-juju) + +

<a href="#heading--manual-optional-set-up-an-ubuntu-vm-with-multipass"><h4 id="heading--manual-optional-set-up-an-ubuntu-vm-with-multipass">(Optional) Set up an Ubuntu VM with Multipass</h4></a>

+ +1. Install Multipass: [Linux](https://multipass.run/docs/installing-on-linux) | [macOS](https://multipass.run/docs/installing-on-macos) | [Windows](https://multipass.run/docs/installing-on-windows). On Linux (assumes you have `snapd`): + +```text +sudo snap install multipass +``` + +2. Use Multipass to launch an Ubuntu VM (below, `my-juju-vm`): + +```text +multipass launch --cpus 4 --memory 8G --disk 30G --name my-juju-vm +``` + +3. Open a shell into the VM: + +```text +multipass shell my-juju-vm +``` + +

<a href="#heading--manual-set-up-your-cloud"><h4 id="heading--manual-set-up-your-cloud">Set up your cloud</h4></a>

+ +Depending on whether you want to develop a Kubernetes / machine charm, you will have to set up the MicroK8s / LXD localhost cloud. For example, on Linux: + +----------------- +[details=Expand to set up your MicroK8s cloud] +``` +# Install MicroK8s package: +$ sudo snap install microk8s --channel 1.28-strict + +# Add your user to the `microk8s` group for unprivileged access: +$ sudo adduser $USER snap_microk8s + +# Give your user permissions to read the ~/.kube directory: +$ sudo chown -f -R $USER ~/.kube + +# Wait for MicroK8s to finish initialising: +$ sudo microk8s status --wait-ready + +# Enable the 'storage' and 'dns' addons: +# (required for the Juju controller) +$ sudo microk8s enable hostpath-storage dns + +# Alias kubectl so it interacts with MicroK8s by default: +$ sudo snap alias microk8s.kubectl kubectl + +# Ensure your new group membership is apparent in the current terminal: +# (Not required once you have logged out and back in again) +$ newgrp snap_microk8s +``` +[/details] + +-------- + +[details=Expand to set up your LXD cloud] + +```text +# LXD should already be there from the Charmcraft setup step; in case not: +$ lxd init --auto +$ lxc network set lxdbr0 ipv6.address none +``` +[/details] + +---------------------- + +

<a href="#heading--manual-set-up-juju"><h4 id="heading--manual-set-up-juju">Set up Juju</h4></a>

On your Ubuntu VM, install Juju. It will automatically recognise your local LXD / MicroK8s cloud. Bootstrap a controller into LXD / MicroK8s, then create a model:

```text
# Install Juju:
sudo snap install juju --channel 3.1/stable
# >>> juju (3.1/stable) 3.1.2 from Canonical✓ installed

# Since the juju package is strictly confined, you also need to manually create a path:
mkdir -p ~/.local/share

# For MicroK8s, if you are working with an existing snap installation, and it is not strictly confined
# (https://microk8s.io/docs/strict-confinement), you must also:
#
# # Share the MicroK8s config with Juju:
# sudo sh -c "microk8s config | tee /var/snap/juju/current/microk8s/credentials/client.config"
#
# # Give the current user permission to this file:
# sudo chown -f -R $USER:$USER /var/snap/juju/current/microk8s/credentials/client.config

# Register your MicroK8s / LXD cloud with Juju:
# Not necessary -- Juju recognises a local MicroK8s / LXD cloud automatically, as you can see by running 'juju clouds'.
juju clouds
# >>> Cloud      Regions  Default    Type  Credentials  Source    Description
# >>> localhost  1        localhost  lxd   0            built-in  LXD Container Hypervisor
# >>> microk8s   1        localhost  k8s   1            built-in  A Kubernetes Cluster
# (If for any reason this doesn't happen, you can register it manually using 'juju add-k8s microk8s'.)

# Replace <cloud> with 'microk8s' or 'localhost'
# to bootstrap a Juju controller into your MicroK8s / LXD cloud.
# We'll name our controller "my-controller".
juju bootstrap <cloud> my-controller

# Create a workspace, or 'model', on this controller.
# We'll call ours "my-model".
# (In Kubernetes this corresponds to a namespace "my-model".)
juju add-model my-model

# Check status:
juju status
# >>> Model     Controller     Cloud/Region        Version  SLA          Timestamp
# >>> my-model  my-controller  microk8s/localhost  3.1.2    unsupported  16:05:03+01:00

# >>> Model "admin/my-model" is empty.

# There's your charm model!
```

<a href="#heading--tear-down-manually"><h3 id="heading--tear-down-manually">Tear down manually</h3></a>

+ +1. Tear down Juju: + +```text +# Destroy any models you've created: +$ juju destroy-model my-model + +# Destroy any controllers you've created: +$ juju destroy-controller my-controller + +# Uninstall juju. For example: +$ sudo snap remove juju +``` + +Tear down the MicroK8s cloud: + +```text +# Reset Microk8s: +$ sudo microk8s reset + +# Uninstall Microk8s: +$ sudo snap remove microk8s + +# Remove your user from the snap_microk8s group: +$ sudo gpasswd -d $USER snap_microk8s +``` + +
**Contributors:** @deezzir, @gbeuzeboc, @gzanchi, @ismailkayi, @jnsgruk, @kos.tsakalozos, @rbarry, @facundo, @saviq, @sed-i, @shrishtikarkera, @tmihoc, @zxhdaze
+ +------------------------- + +zxhdaze | 2024-04-11 19:00:08 UTC | #2 + +Hi! I've just added some instructions for tearing down manually, please let me know if there is any doc bug. + +------------------------- + +tmihoc | 2024-04-15 12:17:11 UTC | #3 + +@zxhdaze I've reordered the steps (tear down juju, then microk8s), but other than that it was good -- thanks! + +------------------------- + +shrishtikarkera | 2024-05-23 21:23:46 UTC | #4 + +I found a typo in the documentation. So I would like to kindly report it. +In the "4. (Optional:)" section, the heading following it- "Verify that the VM has indeed come pre-equipped with you’ll need" should be "all you need" in the end of the statement. + +Thanks! + +------------------------- + +deezzir | 2024-08-29 20:31:25 UTC | #5 + +Another typo in the doc at the end of [Set up Automatically section](https://juju.is/docs/juju/set-up--tear-down-your-test-environment#heading--set-up-automatically): + +- Going forward: + - Use the Multipass VM shell to run all commands. +- At any point: + - To exit the shell, press mod key + C or type exit. + - To stop the VM after exiting the VM shell, run multipass stop charm-dev-vm. + - To restart the VM and re-open a shell into it, type multipass shell charm-dev-vm. + +The block refers to a **charm-dev-vm**, while the actual name of the VM created in the tutorial with multipass is **my-juju-vm**. Thanks + +------------------------- + +tmihoc | 2024-08-30 06:56:49 UTC | #6 + +Fixed, thanks! (Also added you to the list of contributors at the end of the doc.) + +------------------------- + +tmihoc | 2024-08-30 06:58:00 UTC | #7 + +Fixed, thanks! (Also added you to the list of contributors at the end of the doc.) + +------------------------- + diff --git a/tmp/t/13541.md b/tmp/t/13541.md new file mode 100644 index 000000000..96b1bfff0 --- /dev/null +++ b/tmp/t/13541.md @@ -0,0 +1,50 @@ +tony-meyer | 2024-03-20 13:57:47 UTC | #1 + +Deferring an event is a common pattern, and when used appropriately is a convenient tool for charmers. However, there are limitations to `defer()` - in particular, that the charm has no way to specify when the handler will be re-run, and that event ordering and context move away from the expected pattern. Our advice is that `defer()` is a good solution for some problems, but is best avoided for others. + +# Good: retrying on temporary failure + +If the charm encounters a temporary failure (such as working with a container or an external API), and expects that the failure may be very short lived, our recommendation is to retry several times for up to a second. If the failure continues, but the charm still expects that it will be resolved without any intervention from a human, then deferring the event handler is often a good choice - along with placing the unit or app in waiting status. + +Note that it’s important to consider that when the deferred handler is run again, the Juju context may not be exactly the same as it was when the event was first emitted, so the charm code needs to be aware of this. + +If the temporary failure is because the workload is busy, and the charm is deployed to a Kubernetes sidecar controller, you might be able to avoid the defer using a [Pebble custom notice](https://juju.is/docs/sdk/interact-with-pebble#heading--use-custom-notices-from-the-workload-container). 
For example, if the code can't continue because the workload is currently restarting, and you can have a post-completion hook for the restart that executes `pebble notify`, then you can ensure that the charm is 'woken up' at the right time to handle the work.

In the future, we hope to see a Juju 'request re-emit event' feature that will let the charm tell Juju when it expects the problem to be resolved.

# Reconsider: sequencing

There are some situations where sequencing of units needs to be arranged - for example, to restart replicas before a primary is restarted. Deferring a handler can be used to manage this situation. However, sequencing can also be arranged using a peer relation, and there's a convenient [rolling-ops charm lib](https://github.com/canonical/charm-rolling-ops) that implements this for you, and we recommend using that approach first.

Using a peer relation to orchestrate the rolling operation allows for more fine-grained control than a simple defer, and avoids the issue of not having control over when the deferred handler will be re-run.

# Reconsider: waiting for a collection of events

It's common for charms to need a collection of information in order to configure the application (for example, to write a configuration file). For example, the configuration might require a user-set config value, a secret provided by a relation, and a Kubernetes sidecar container to be ready.

Rather than having the handlers for each of these events (`config-changed`, `secret-changed` and/or `relation-changed`, `pebble-ready`) defer if other parts of the configuration are not yet available, it's best to have the charm observe all three events, set the unit or app state to waiting, maintenance, or blocked status (or have the `collect-status` handler do this), and return. When the last piece of information is available, the handler that notifies the charm of that will complete the work. This is commonly called the "holistic" event handling pattern.

Avoiding defer means that there isn't a queue of deferred handlers that all do the same work - for example, if `config-changed`, `relation-changed`, and `pebble-ready` were all deferred, then when everything was ready they would all run and each repeat the same work. This is particularly important when the work is expensive - such as an application restart after writing the configuration - and so should not be done unnecessarily.

# Ok: waiting without expecting a follow-up event

In some situations, the charm is waiting for a system to be ready, but it's not one that will trigger a Juju event (as in the case above). For example, the charm might need the workload application to be fully started up, and that might happen after all of the initial `start`, `config-changed`, `relation-joined`, `pebble-ready`, etc. events.

Deferring the work here is ok, but it's important to consider the delay between deferring the event and its eventual re-emitting - it's not safe to assume that this will be a small period of time, unless you know that another event can be expected.

For a Kubernetes charm, if the charm is waiting on the workload and it's possible to have the workload execute a command when it's ready, then using a [Pebble custom notice](https://juju.is/docs/sdk/interact-with-pebble#heading--use-custom-notices-from-the-workload-container) is much better than deferring. This then becomes another example of "waiting for a collection of events", described above.
+ +# Not Possible: actions, shutting down, framework generated events, secrets + +In some situations, it’s not possible to defer an event, and attempting to do so will raise a `RuntimeError`. + +In some cases, this is because the events are run with every Juju hook event, such as `pre-commit`, `commit`, and `update-status`. In others, it’s because Juju provides a built-in retry mechanism, such as `secret-expired` and `secret-rotate`. + +With actions, there’s an expectation that the action either succeeds or fails immediately, and there are mechanisms for communicating directly with the user that initiated the action (`event.log` and `event.set_results`). This means that deferring an action event doesn’t make sense. + +Finally, when doing cleanup during the shutdown phase of a charm’s lifecycle, deferring isn’t practical with the current implementation, where it’s tied to future events. For `remove`, for example, the unit will no longer exist after the event, so there will not be any future events that can trigger the deferred one - if there’s work that has to be done before the unit is gone, then you’ll need to enter an error state instead. The stop event is followed by remove, and possibly a few other events, but likewise has few chances to be re-emitted. + +Note that all deferred events vanish when the unit is removed, so the charm code needs to take this into consideration. + +------------------------- + diff --git a/tmp/t/13557.md b/tmp/t/13557.md new file mode 100644 index 000000000..bb56c6e59 --- /dev/null +++ b/tmp/t/13557.md @@ -0,0 +1,72 @@ +lengau | 2024-07-30 06:03:32 UTC | #1 + +> Starting with Charmcraft 3+ + +## Usage: +```text +charmcraft remote-build [options] +``` + +## Summary: + +Command remote-build sends the current project to be built remotely. After the build is complete, packages for each architecture are retrieved and will be available in the local filesystem. + +Interrupted remote builds can be resumed using the `--recover` option, followed by the build number informed when the remote build was originally dispatched. The current state of the remote build for each architecture can be checked using the `--status` option. + +To set a timeout on the remote-build command, use the option ``--launchpad-timeout=``. The timeout is local, so the build on launchpad will continue even if the local instance is interrupted or times out. + +## Options: +| | | +|-|-| +| `-h, --help` | Show this help message and exit | +| `-v, --verbose` | Show debug information and be more verbose | +| `-q, --quiet` | Only show warnings and errors, not progress | +| `--verbosity` | Set the verbosity level to 'quiet', 'brief', 'verbose', 'debug' or 'trace' | +| `--recover` | recover an interrupted build | +| `--launchpad-accept-public-upload` | acknowledge that uploaded code will be publicly available. | +| `--launchpad-timeout` | Time in seconds to wait for Launchpad to build. | + + +## See also: +- [`pack`](/t/6129) + +------------------------- + +samuel_allan | 2024-07-26 02:50:12 UTC | #2 + +Which versions of charmcraft support this? latest/stable reports no such command: + +``` +$ snap list | grep charmcraft +charmcraft 2.7.1 4162 latest/stable canonical** classic + +$ charmcraft version +2.7.1 + +$ charmcraft remote-build +Usage: charmcraft [options] command [args]... +Try 'charmcraft -h' for help. 
+ +Error: no such command 'remote-build', maybe you meant 'promote-bundle' +``` + +------------------------- + +lengau | 2024-07-29 14:32:26 UTC | #3 + +This is only included in Charmcraft 3+ + +------------------------- + +samuel_allan | 2024-07-29 23:30:30 UTC | #4 + +Thanks. Could this version compatibility be added to the doc please? Otherwise it can be misleading. + +------------------------- + +tmihoc | 2024-07-30 06:04:01 UTC | #5 + +Added, thanks! + +------------------------- + diff --git a/tmp/t/13778.md b/tmp/t/13778.md new file mode 100644 index 000000000..4e96f679a --- /dev/null +++ b/tmp/t/13778.md @@ -0,0 +1,37 @@ +tmihoc | 2024-04-04 14:24:33 UTC | #1 + +## Usage: +```text +charmcraft build [options] +``` + +## Summary: + +Build artifacts defined for a part. If part names are specified only those parts will be built, otherwise all parts will be built. + +## Options: +| | | +|-|-| +| `-h, --help` | Show this help message and exit | +| `-v, --verbose` | Show debug information and be more verbose | +| `-q, --quiet` | Only show warnings and errors, not progress | +| `--verbosity` | Set the verbosity level to 'quiet', 'brief', 'verbose', 'debug' or 'trace' | +| `-V, --version` | Show the application version and exit | +| `--destructive-mode` | Build in the current host | +| `--use-lxd` | Build in a LXD container. | +| `--shell` | Shell into the environment in lieu of the step to run. | +| `--shell-after` | Shell into the environment after the step has run. | +| `--debug` | Shell into the environment if the build fails. | +| `--platform` | Set platform to build for | +| `--build-for` | Set architecture to build for | + +## See also: +- `clean` +- `pack` +- `prime` +- `pull` +- `remote-build` +- `stage` + +------------------------- + diff --git a/tmp/t/13779.md b/tmp/t/13779.md new file mode 100644 index 000000000..bc9997719 --- /dev/null +++ b/tmp/t/13779.md @@ -0,0 +1,28 @@ +tmihoc | 2024-04-04 14:25:21 UTC | #1 + +## Usage: +```text +charmcraft expand-extensions [options] +``` + +## Summary: + +Expand charmcraft.yaml using the extensions specified in the file and output the resulting configuration to the terminal. + +This allows you to see how the extensions used modify your existing charmcraft.yaml file. + +## Options: +| | | +|-|-| +| `-h, --help` | Show this help message and exit | +| `-v, --verbose` | Show debug information and be more verbose | +| `-q, --quiet` | Only show warnings and errors, not progress | +| `--verbosity` | Set the verbosity level to 'quiet', 'brief', 'verbose', 'debug' or 'trace' | +| `-V, --version` | Show the application version and exit | + +## See also: +- `extensions` +- `list-extensions` + +------------------------- + diff --git a/tmp/t/13780.md b/tmp/t/13780.md new file mode 100644 index 000000000..1c7e886c5 --- /dev/null +++ b/tmp/t/13780.md @@ -0,0 +1,27 @@ +tmihoc | 2024-04-04 14:25:48 UTC | #1 + +## Usage: +```text +charmcraft list-extensions [options] +``` + +## Summary: + +List available extensions and their corresponding bases. 
+ +## Options: +| | | +|-|-| +| `-h, --help` | Show this help message and exit | +| `-v, --verbose` | Show debug information and be more verbose | +| `-q, --quiet` | Only show warnings and errors, not progress | +| `--verbosity` | Set the verbosity level to 'quiet', 'brief', 'verbose', 'debug' or 'trace' | +| `-V, --version` | Show the application version and exit | +| `--format` | Produce the result in the specified format (currently only 'json') | + +## See also: +- `expand-extensions` +- `extensions` + +------------------------- + diff --git a/tmp/t/13781.md b/tmp/t/13781.md new file mode 100644 index 000000000..cd12ee1d6 --- /dev/null +++ b/tmp/t/13781.md @@ -0,0 +1,37 @@ +tmihoc | 2024-04-04 14:26:47 UTC | #1 + +## Usage: +```text +charmcraft pull [options] +``` + +## Summary: + +Download or retrieve artifacts defined for a part. If part names are specified only those parts will be pulled, otherwise all parts will be pulled. + +## Options: +| | | +|-|-| +| `-h, --help` | Show this help message and exit | +| `-v, --verbose` | Show debug information and be more verbose | +| `-q, --quiet` | Only show warnings and errors, not progress | +| `--verbosity` | Set the verbosity level to 'quiet', 'brief', 'verbose', 'debug' or 'trace' | +| `-V, --version` | Show the application version and exit | +| `--destructive-mode` | Build in the current host | +| `--use-lxd` | Build in a LXD container. | +| `--shell` | Shell into the environment in lieu of the step to run. | +| `--shell-after` | Shell into the environment after the step has run. | +| `--debug` | Shell into the environment if the build fails. | +| `--platform` | Set platform to build for | +| `--build-for` | Set architecture to build for | + +## See also: +- `build` +- `clean` +- `pack` +- `prime` +- `remote-build` +- `stage` + +------------------------- + diff --git a/tmp/t/13782.md b/tmp/t/13782.md new file mode 100644 index 000000000..8d01c786e --- /dev/null +++ b/tmp/t/13782.md @@ -0,0 +1,37 @@ +tmihoc | 2024-04-04 14:27:11 UTC | #1 + +## Usage: +```text +charmcraft prime [options] +``` + +## Summary: + +Prepare the final payload to be packed, performing additional processing and adding metadata files. If part names are specified only those parts will be primed. The default is to prime all parts. + +## Options: +| | | +|-|-| +| `-h, --help` | Show this help message and exit | +| `-v, --verbose` | Show debug information and be more verbose | +| `-q, --quiet` | Only show warnings and errors, not progress | +| `--verbosity` | Set the verbosity level to 'quiet', 'brief', 'verbose', 'debug' or 'trace' | +| `-V, --version` | Show the application version and exit | +| `--destructive-mode` | Build in the current host | +| `--use-lxd` | Build in a LXD container. | +| `--shell` | Shell into the environment in lieu of the step to run. | +| `--shell-after` | Shell into the environment after the step has run. | +| `--debug` | Shell into the environment if the build fails. | +| `--platform` | Set platform to build for | +| `--build-for` | Set architecture to build for | + +## See also: +- `build` +- `clean` +- `pack` +- `pull` +- `remote-build` +- `stage` + +------------------------- + diff --git a/tmp/t/13783.md b/tmp/t/13783.md new file mode 100644 index 000000000..6d5c61989 --- /dev/null +++ b/tmp/t/13783.md @@ -0,0 +1,39 @@ +tmihoc | 2024-04-04 14:27:48 UTC | #1 + +## Usage: +```text +charmcraft promote-bundle [options] +``` + +## Summary: + +Promote a bundle to another channel in the Store. 
+ +This command must be run from the bundle project directory to be promoted. + +## Options: +| | | +|-|-| +| `-h, --help` | Show this help message and exit | +| `-v, --verbose` | Show debug information and be more verbose | +| `-q, --quiet` | Only show warnings and errors, not progress | +| `--verbosity` | Set the verbosity level to 'quiet', 'brief', 'verbose', 'debug' or 'trace' | +| `-V, --version` | Show the application version and exit | +| `--from-channel` | The channel from which to promote the bundle | +| `--to-channel` | The target channel for the promoted bundle | +| `--output-bundle` | A path where the created bundle.yaml file can be written | +| `--exclude` | Any charms to exclude from the promotion process | + +## See also: +- `close` +- `release` +- `resource-revisions` +- `resources` +- `revisions` +- `set-resource-architectures` +- `status` +- `upload` +- `upload-resource` + +------------------------- + diff --git a/tmp/t/13784.md b/tmp/t/13784.md new file mode 100644 index 000000000..aa77fff42 --- /dev/null +++ b/tmp/t/13784.md @@ -0,0 +1,37 @@ +tmihoc | 2024-04-04 14:29:36 UTC | #1 + +## Usage: +```text +charmcraft stage [options] +``` + +## Summary: + +Stage built artifacts into a common staging area. If part names are specified only those parts will be staged. The default is to stage all parts. + +## Options: +| | | +|-|-| +| `-h, --help` | Show this help message and exit | +| `-v, --verbose` | Show debug information and be more verbose | +| `-q, --quiet` | Only show warnings and errors, not progress | +| `--verbosity` | Set the verbosity level to 'quiet', 'brief', 'verbose', 'debug' or 'trace' | +| `-V, --version` | Show the application version and exit | +| `--destructive-mode` | Build in the current host | +| `--use-lxd` | Build in a LXD container. | +| `--shell` | Shell into the environment in lieu of the step to run. | +| `--shell-after` | Shell into the environment after the step has run. | +| `--debug` | Shell into the environment if the build fails. | +| `--platform` | Set platform to build for | +| `--build-for` | Set architecture to build for | + +## See also: +- `build` +- `clean` +- `pack` +- `prime` +- `pull` +- `remote-build` + +------------------------- + diff --git a/tmp/t/13785.md b/tmp/t/13785.md new file mode 100644 index 000000000..85218a656 --- /dev/null +++ b/tmp/t/13785.md @@ -0,0 +1,40 @@ +tmihoc | 2024-04-04 14:32:50 UTC | #1 + +## Usage: +```text +charmcraft unregister [options] +``` + +## Summary: + +Unregister a name in the Store. + +Unregister a name from Charmhub if no revisions have been uploaded. + +A package cannot be unregistered if something has been uploaded to the name. This command is only for unregistering names that have never been used. Unregistering must be done by the publisher. Attempting to unregister a charm or bundle as a collaborator will fail. 
+ +We discuss registrations on Charmhub's Discourse: + +```text +https://discourse.charmhub.io/c/charm +``` + +## Options: +| | | +|-|-| +| `-h, --help` | Show this help message and exit | +| `-v, --verbose` | Show debug information and be more verbose | +| `-q, --quiet` | Only show warnings and errors, not progress | +| `--verbosity` | Set the verbosity level to 'quiet', 'brief', 'verbose', 'debug' or 'trace' | +| `-V, --version` | Show the application version and exit | + +## See also: +- `login` +- `logout` +- `names` +- `register` +- `register-bundle` +- `whoami` + +------------------------- + diff --git a/tmp/t/13788.md b/tmp/t/13788.md new file mode 100644 index 000000000..18e282007 --- /dev/null +++ b/tmp/t/13788.md @@ -0,0 +1,214 @@ +tmihoc | 2024-08-06 13:49:50 UTC | #1 + +> See also: [Extension](/t/15011) + +**Contents:** + +- [View all the available extensions](#heading--view-all-the-available-extensions) +- [View details about the extension in use](#heading--view-details-about-the-extension-in-use) + +

<a href="#heading--view-all-the-available-extensions"><h2 id="heading--view-all-the-available-extensions">View all the available extensions</h2></a>

+ +To view all the available Rockcraft / Charmcraft extensions, run the `rockcraft list-extensions` / `charmcraft list-extensions` command. Sample session: + +```text +$ charmcraft list-extensions +Extension name Supported bases Experimental bases +---------------- ----------------- -------------------- +flask-framework ubuntu@22.04 +``` + +> See more: [Rockcraft | `rockcraft list-extensions`](https://canonical-rockcraft.readthedocs-hosted.com/en/latest/reference/commands/list-extensions/), [`charmcraft list-extensions`](/t/13780) + +

<a href="#heading--view-details-about-the-extension-in-use"><h2 id="heading--view-details-about-the-extension-in-use">View details about the extension in use</h2></a>

+ +Suppose you've initialised a rock / charm with a profile that comes with an extension (currently, `flask-framework`), and your `rockcraft.yaml` / `charmcraft.yaml > extensions` lists this extension. + +---- +[details=See sample context] + +```text +$ mkdir my-flask-app-k8s +$ cd my-flask-app-k8s/ +$ charmcraft init --profile flask-framework +Charmed operator package file and directory tree initialised. + +Now edit the following package files to provide fundamental charm metadata +and other information: + +charmcraft.yaml +src/charm.py +README.md + +user@ubuntu:~/my-flask-app-k8s$ ls -R +.: +charmcraft.yaml requirements.txt src + +./src: +charm.py + +$ cat charmcraft.yaml +# This file configures Charmcraft. +# See https://juju.is/docs/sdk/charmcraft-config for guidance. + +name: my-flask-app-k8s + +type: charm + +bases: + - build-on: + - name: ubuntu + channel: "22.04" + run-on: + - name: ubuntu + channel: "22.04" + +# (Required) +summary: A very short one-line summary of the flask application. + +# (Required) +description: | + A comprehensive overview of your Flask application. + +extensions: + - flask-framework + +# Uncomment the integrations used by your application +# requires: +# mysql: +# interface: mysql_client +# limit: 1 +# postgresql: +# interface: postgresql_client +# limit: 1 + + +``` +[/details] +----- + +To view details about what that extension is adding to your rock / charm, set the `ROCKCRAFT_ENABLE_EXPERIMENTAL_EXTENSIONS` / `CHARMCRAFT_ENABLE_EXPERIMENTAL_EXTENSIONS` to `1`, then run the `rockcraft expand-extensions` / `charmcraft expand-extensions` command. For example: + + +```text +CHARMCRAFT_ENABLE_EXPERIMENTAL_EXTENSIONS=1 charmcraft expand-extensions +``` + +---- +[details=See effect given sample context] +``` +$ CHARMCRAFT_ENABLE_EXPERIMENTAL_EXTENSIONS=1 charmcraft expand-extensions +*EXPERIMENTAL* extension 'flask-framework' enabled +name: my-flask-app-k8s +summary: A very short one-line summary of the flask application. +description: | + A comprehensive overview of your Flask application. +parts: + charm: + source: . + charm-entrypoint: src/charm.py + charm-binary-python-packages: [] + charm-python-packages: [] + charm-requirements: + - requirements.txt + charm-strict-dependencies: false + plugin: charm +type: charm +bases: +- build-on: + - name: ubuntu + channel: '22.04' + run-on: + - name: ubuntu + channel: '22.04' +actions: + rotate-secret-key: + description: Rotate the flask secret key. Users will be forced to log in again. + This might be useful if a security breach occurs. +assumes: +- k8s-api +containers: + flask-app: + resource: flask-app-image +peers: + secret-storage: + interface: secret-storage +provides: + metrics-endpoint: + interface: prometheus_scrape + grafana-dashboard: + interface: grafana_dashboard +requires: + logging: + interface: loki_push_api + ingress: + interface: ingress + limit: 1 +resources: + flask-app-image: + type: oci-image + description: flask application image. +config: + options: + webserver-keepalive: + type: int + description: Time in seconds for webserver to wait for requests on a Keep-Alive + connection. + webserver-threads: + type: int + description: Run each webserver worker with the specified number of threads. + webserver-timeout: + type: int + description: Time in seconds to kill and restart silent webserver workers. + webserver-workers: + type: int + description: The number of webserver worker processes for handling requests. 
+ flask-application-root: + type: string + description: Path in which the application / web server is mounted. This configuration + will set the FLASK_APPLICATION_ROOT environment variable. Run `app.config.from_prefixed_env()` + in your Flask application in order to receive this configuration. + flask-debug: + type: boolean + description: Whether Flask debug mode is enabled. + flask-env: + type: string + description: What environment the Flask app is running in, by default it's 'production'. + flask-permanent-session-lifetime: + type: int + description: Time in seconds for the cookie to expire in the Flask application + permanent sessions. This configuration will set the FLASK_PERMANENT_SESSION_LIFETIME + environment variable. Run `app.config.from_prefixed_env()` in your Flask application + in order to receive this configuration. + flask-preferred-url-scheme: + type: string + default: HTTPS + description: Scheme for generating external URLs when not in a request context + in the Flask application. By default, it's "HTTPS". This configuration will + set the FLASK_PREFERRED_URL_SCHEME environment variable. Run `app.config.from_prefixed_env()` + in your Flask application in order to receive this configuration. + flask-secret-key: + type: string + description: The secret key used for securely signing the session cookie and + for any other security related needs by your Flask application. This configuration + will set the FLASK_SECRET_KEY environment variable. Run `app.config.from_prefixed_env()` + in your Flask application in order to receive this configuration. + flask-session-cookie-secure: + type: boolean + description: Set the secure attribute in the Flask application cookies. This + configuration will set the FLASK_SESSION_COOKIE_SECURE environment variable. + Run `app.config.from_prefixed_env()` in your Flask application in order to + receive this configuration. + +``` +[/details] +----- + +> See more: [`rockcraft expand-extensions`](https://canonical-rockcraft.readthedocs-hosted.com/en/latest/reference/commands/expand-extensions/), [`charmcraft expand-extensions`](/t/13779) + +
+ +> **Contributors:** @lengau , @tmihoc + +------------------------- + diff --git a/tmp/t/13789.md b/tmp/t/13789.md new file mode 100644 index 000000000..ed0c45e5f --- /dev/null +++ b/tmp/t/13789.md @@ -0,0 +1,117 @@ +jose | 2024-06-17 20:31:35 UTC | #1 + + +**Table of Contents** + +- [Single-node MicroCeph installation](#single-node-microceph-installation) + - [Install MicroCeph](#install-microceph) + - [Add storage](#add-storage) + - [Connect MicroCeph to MicroK8s](#connect-microceph-to-microk8s) + + + + +[MicroCeph](https://canonical-microceph.readthedocs-hosted.com/) is a lightweight way of deploying a Ceph cluster with a focus on reduced ops that can be used with MicroK8s. + + +# Single-node MicroCeph installation + +## Install MicroCeph + + +Install the most recent stable release of MicroCeph: + +```shell +sudo snap install microceph +``` + +Next, as [MicroCeph creator recommend](https://canonical-microceph.readthedocs-hosted.com/en/latest/tutorial/single-node/#install-the-software), prevent the software from being auto-updated: + +```shell +sudo snap refresh --hold microceph +``` + + +> :warning: Allowing the snap to be auto-updated can lead to unintended consequences. In enterprise environments especially, it is better to research the ramifications of software changes before those changes are implemented. + +We need to bootstrap the Ceph cluster: + +```shell +sudo microceph cluster bootstrap +``` + + +At this point we can check the status of the cluster and query the list of available disks that should be empty. The disk status is queried with: +``` +sudo microceph.ceph status +``` + +Its output should look like: +``` + cluster: + id: 9539a8ee-825a-462a-94fa-15613c09cab1 + health: HEALTH_WARN + mon charm-dev-juju-34 is low on available space + + services: + mon: 1 daemons, quorum charm-dev-juju-34 (age 8s) + mgr: charm-dev-juju-34(active, since 3s) + osd: 0 osds: 0 up, 0 in + + data: + pools: 0 pools, 0 pgs + objects: 0 objects, 0 B + usage: 0 B used, 0 B / 0 B avail + pgs: +``` + + +## Add storage + + +Three OSDs will be required to form a minimal Ceph cluster. In a production system, typically we would [assign a physical block device to an OSD](https://canonical-microceph.readthedocs-hosted.com/en/latest/tutorial/multi-node/#add-storage). However for this tutorial, we will make use of file backed OSDs for simplicity. + +Add the three file-backed OSDs to the cluster by using the disk add command. 
In the example, three 4GiB files are being created: + +```shell +sudo microceph disk add loop,4G,3 +``` + +## Connect MicroCeph to MicroK8s + +Enable `rook-ceph` plugin by executing: + +```shell +sudo microk8s enable rook-ceph +``` + +> See more: [rook-ceph addon](https://microk8s.io/docs/addon-rook-ceph) + +As we have already setup MicroCeph having it managed by rook is done with just: + +```shell +sudo microk8s connect-external-ceph +``` + +At the end of this process you should have a storage class ready to use: + +```shell +NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE +ceph-rbd rook-ceph.rbd.csi.ceph.com Delete Immediate true 1s +``` + +## See also +- [Charmed Ceph](https://ubuntu.com/ceph/docs) +- [Charmed Microceph](https://charmhub.io/microceph) + +## References +- [How to setup MicroK8s with (Micro)Ceph storage](https://microk8s.io/docs/how-to-ceph) + +------------------------- + +jose | 2024-04-04 19:44:21 UTC | #2 + + + +------------------------- + diff --git a/tmp/t/13899.md b/tmp/t/13899.md new file mode 100644 index 000000000..cbaf9c2ac --- /dev/null +++ b/tmp/t/13899.md @@ -0,0 +1,547 @@ +tmihoc | 2024-10-29 09:36:30 UTC | #1 + +Imagine you have a Flask application backed up by a database such as PostgreSQL and need to deploy it. In a traditional setup, this can be quite a challenge, but with Juju you’ll find yourself deploying, configuring, scaling, integrating, monitoring, etc., your Flask application in no time. Let’s get started! + +In this tutorial we will build a rock and Kubernetes charm for a Flask application using the charm SDK, so we can have a Flask application up and running with Juju in about 90 minutes. + +[note type=information status] + +:open_book: **rock**
An Ubuntu LTS-based, OCI-compatible container image designed to meet security, stability, and reliability requirements for cloud-native software.

:open_book: **charm**
+ +A package consisting of YAML files + Python code that will automate every aspect of an application's lifecycle so it can be easily orchestrated with Juju. + +:open_book: **Juju**
An orchestration engine for software operators that enables the deployment, integration and lifecycle management of applications using charms. +[/note] + +**What you’ll need:** + +- A workstation, e.g., a laptop, with amd64 or arm64 architecture which has sufficient resources to launch a virtual machine with 4 CPUs, 4 GB RAM, and a 50 GB disk +- Familiarity with Linux + +**What you’ll do:** + +- [Set things up](#heading--set-things-up) +- [Create the Flask application](#heading--create-the-flask-application) +- [Run the Flask application locally](#heading--run-the-flask-application-locally) +- [Pack the Flask application into a rock](#heading--pack-the-flask-application-into-a-rock) +- [Create the charm](#heading--create-the-charm) +- [Deploy the Flask application](#heading--deploy-the-flask-application) +- [Enable `juju config flask-hello-world greeting=`](#heading--enable-juju-config-flask-hello-world-greetinggreeting) +- [Integrate with a database](#heading--integrate-with-a-database) +- [Expose the app using ingress](#heading--expose-the-app-using-ingress) +- [Clean up environment](#heading--clean-up-environment) + +[note type=positive status="At any point, to give feedback or ask for help"] +Don't hesitate to get in touch on [Matrix](https://matrix.to/#/#12-factor-charms:ubuntu.com) or [Discourse](https://discourse.charmhub.io/) (or follow the "Help improve this document in the forum" on the bottom of this doc to comment directly on the doc). +[/note] + + +

<a href="#heading--set-things-up"><h2 id="heading--set-things-up">Set things up</h2></a>

+ +Install Multipass. + +> See more: [Multipass | How to install Multipass](https://multipass.run/docs/install-multipass) + +Use Multipass to launch an Ubuntu VM with the name `charm-dev` from the 24.04 blueprint: + +```bash +multipass launch --cpus 4 --disk 50G --memory 4G --name charm-dev 24.04 +``` + +Once the VM is up, open a shell into it: + +```bash +multipass shell charm-dev +``` + +In order to create the rock, you'll need to install Rockcraft: + +```bash +sudo snap install rockcraft --classic +``` + +`LXD` will be required for building the rock. Make sure it is installed and initialised: + +```bash +sudo snap install lxd +lxd init --auto +``` + +In order to create the charm, you'll need to install Charmcraft: + +```bash +sudo snap install charmcraft --channel latest/edge --classic +``` + +[note type=information status] +This tutorial requires version `3.0.0` or later of Charmcraft. Check the version of Charmcraft using `charmcraft --version` If you have an older version of Charmcraft installed, use `sudo snap refresh charmcraft --channel latest/edge` to get the latest edge version of Charmcraft. +[/note] + +MicroK8s is required to deploy the Flask application on Kubernetes. Install MicroK8s: + +```bash +sudo snap install microk8s --channel 1.31-strict/stable +sudo adduser $USER snap_microk8s +newgrp snap_microk8s +``` + +Wait for MicroK8s to be ready using `sudo microk8s status --wait-ready`. Several MicroK8s add-ons are required for deployment: + +```bash +sudo microk8s enable hostpath-storage +# Required to host the OCI image of the Flask application +sudo microk8s enable registry +# Required to expose the Flask application +sudo microk8s enable ingress +``` + +Juju is required to deploy the Flask application. Install Juju and bootstrap a development controller: + +```bash +sudo snap install juju --channel 3.5/stable +mkdir -p ~/.local/share +juju bootstrap microk8s dev-controller +``` + +Finally, create a new directory for this tutorial and go inside it: + +```bash +mkdir flask-hello-world +cd flask-hello-world +``` + +

Create the Flask application

+ +Start by creating the "Hello, world" Flask application that will be used for this tutorial. + +Create a `requirements.txt` file, copy the following text into it and then save it: + +``` +Flask +``` + +In the same directory, copy and save the following into a text file called `app.py`: + +```python +import flask + +app = flask.Flask(__name__) + +@app.route("/") +def index(): + return "Hello, world!\n" + +if __name__ == "__main__": + app.run() +``` + +
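+If you'd like a quick sanity check of the application without starting a server, Flask's built-in test client can be used. The following is an optional, illustrative sketch (the `test_app.py` file name and the use of `pytest` are assumptions for illustration, not part of the tutorial):
+
+```python
+# test_app.py - optional sanity check, not required by the rest of the tutorial.
+from app import app
+
+
+def test_index_returns_greeting():
+    client = app.test_client()
+    response = client.get("/")
+    assert response.status_code == 200
+    assert response.data == b"Hello, world!\n"
+```
+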

Run the Flask application locally

+ +Install `python3-venv` and create a virtual environment: + +```bash +sudo apt-get update && sudo apt-get install python3-venv -y +python3 -m venv .venv +source .venv/bin/activate +pip install -r requirements.txt +``` + +Now that we have a virtual environment with all the dependencies, let's run the Flask application to verify that it works: + +```bash +flask run -p 8000 +``` + +Test the Flask application by using `curl` to send a request to the root endpoint. You may need a new terminal for this; if you are using Multipass use `multipass shell charm-dev` to get another terminal: + +```bash +curl localhost:8000 +``` + +The Flask application should respond with `Hello, world!`. The Flask application looks good, so we can stop for now using Ctrl + c. + +

Pack the Flask application into a rock

+
+First, we'll need a `rockcraft.yaml` file. Rockcraft will automate its creation and tailoring for a Flask application by using the `flask-framework` profile:
+
+```bash
+rockcraft init --profile flask-framework
+```
+
+The `rockcraft.yaml` file will be created automatically, with its `name` set based on your working directory. Open the file in a text editor and check that the `name` is `flask-hello-world`. Ensure that `platforms` includes the architecture of your host. For example, if your host uses the ARM architecture, include `arm64` in `platforms`.
+
+[note type=information status]
+For this tutorial, we'll use the `name` "flask-hello-world" and assume you are on the `amd64` platform. Check the architecture of your system using `dpkg --print-architecture`. Choosing a different name or running on a different platform will influence the names of the files generated by Rockcraft.
+[/note]
+
+Pack the rock:
+
+```bash
+rockcraft pack
+```
+
+[note type=information status]
+Depending on your system and network, this step can take a couple of minutes to finish.
+[/note]
+
+Once Rockcraft has finished packing the Flask rock, you'll find a new file in your working directory with the `.rock` extension:
+
+```bash
+ls *.rock -l
+```
+
+[note type=information status]
+If you changed the `name` or `version` in `rockcraft.yaml` or are not on an `amd64` platform, the name of the `.rock` file will be different for you.
+[/note]
+
+The rock needs to be copied to the MicroK8s registry so that it can be deployed in the Kubernetes cluster:
+
+```bash
+rockcraft.skopeo --insecure-policy copy --dest-tls-verify=false \
+  oci-archive:flask-hello-world_0.1_amd64.rock \
+  docker://localhost:32000/flask-hello-world:0.1
+```
+
+> See more: [skopeo^](https://manpages.ubuntu.com/manpages/jammy/man1/skopeo.1.html)
+
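+If you'd like to confirm that the image really landed in the MicroK8s registry, the registry speaks the standard OCI registry HTTP API on port 32000. The following is an optional sketch (it assumes the registry add-on is reachable on `localhost:32000` and that the standard tags-list endpoint is available):
+
+```python
+# check_registry.py - optional sketch, not part of the tutorial.
+import json
+import urllib.request
+
+URL = "http://localhost:32000/v2/flask-hello-world/tags/list"
+
+with urllib.request.urlopen(URL) as response:
+    # Expected output is something like: {"name": "flask-hello-world", "tags": ["0.1"]}
+    print(json.load(response))
+```
+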

Create the charm

+ +Create a new directory for the charm and go inside it: + +```bash +mkdir charm +cd charm +``` + +We'll need a `charmcraft.yaml`, `requirements.txt` and source code for the charm. The source code contains the logic required to operate the Flask application. Charmcraft will automate the creation of these files by using the `flask-framework` profile: + +```bash +charmcraft init --profile flask-framework --name flask-hello-world +``` + +The files will automatically be created in your working directory. Pack the charm: + +```bash +charmcraft pack +``` + +[note type=information status] +Depending on your system and network, this step can take a couple of minutes to finish. +[/note] + +Once Charmcraft has finished packing the charm, you'll find a new file in your working directory with the `.charm` extension: + +```bash +ls *.charm -l +``` + +[note type=information status] +If you changed the name in charmcraft.yaml or are not on the amd64 platform, the name of the `.charm` file will be different for you. +[/note] + +

Deploy the Flask application

+ +A Juju model is needed to deploy the application. Let's create a new model: + +```bash +juju add-model flask-hello-world +``` + +[note type=information status] +If you are not on a host with the amd64 architecture, you will need to include a constraint to the Juju model to specify your architecture. For example, for the arm64 architecture, use `juju set-model-constraints -m flask-hello-world arch=arm64`. Check the architecture of your system using `dpkg --print-architecture`. +[/note] + +Now the Flask application can be deployed using [Juju](/t/1087): + +```bash +juju deploy ./flask-hello-world_ubuntu-22.04-amd64.charm \ + flask-hello-world --resource \ + flask-app-image=localhost:32000/flask-hello-world:0.1 +``` + +[note type=information status] +It will take a few minutes to deploy the Flask application. You can monitor the progress using `juju status --watch 5s`. Once the status of the App has gone to `active`, you can stop watching using ctrl + c. + +> See more: [Command 'juju status'](/t/10173) +[/note] + +The Flask application should now be running. We can monitor the status of the deployment using `juju status` which should be similar to the following output: + +``` +Model Controller Cloud/Region Version SLA Timestamp +flask-hello-world dev-controller microk8s/localhost 3.1.8 unsupported 17:04:11+10:00 + +App Version Status Scale Charm Channel Rev Address Exposed Message +flask-hello-world active 1 flask-hello-world 0 10.152.183.166 no + +Unit Workload Agent Address Ports Message +flask-hello-world/0* active idle 10.1.87.213 +``` + +The deployment is finished when the status shows `active`. Let's expose the application using ingress. Deploy the `nginx-ingress-integrator` charm and integrate it with the Flask app: + +```bash +juju deploy nginx-ingress-integrator --channel=latest/edge +juju integrate nginx-ingress-integrator flask-hello-world +``` + +The hostname of the app needs to be defined so that it is accessible via the ingress. We will also set the default route to be the root endpoint: + +```bash +juju config nginx-ingress-integrator \ + service-hostname=flask-hello-world path-routes=/ +``` + +Monitor `juju status` until everything has a status of `active`. Test the deployment using `curl http://flask-hello-world --resolve flask-hello-world:80:127.0.0.1` to send a request via the ingress to the root endpoint. It should still be returning the `Hello, world!` greeting. + +[note type=information status] +The `--resolve flask-hello-world:80:127.0.0.1` option to the `curl` command is a way of resolving the hostname of the request without setting a DNS record. +[/note] + +
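+The same check can also be done from Python: sending the request to `127.0.0.1` with a `Host` header of `flask-hello-world` is roughly what the `--resolve` option does. This is an optional sketch (it assumes the ingress is listening on port 80 of the machine you run it on), not part of the tutorial:
+
+```python
+# Optional sketch: a rough Python equivalent of
+# `curl http://flask-hello-world --resolve flask-hello-world:80:127.0.0.1`.
+import urllib.request
+
+request = urllib.request.Request(
+    "http://127.0.0.1/", headers={"Host": "flask-hello-world"}
+)
+with urllib.request.urlopen(request) as response:
+    print(response.read().decode())  # Expected: "Hello, world!\n"
+```
+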

Configure the Flask application

+ +Now let's customise the greeting using a configuration option. We will expect this configuration option to be available in the Flask app configuration under the keyword `GREETING`. Go back out to the root directory of the project using `cd ..` and copy the following code into `app.py`: + +```python +import flask + +app = flask.Flask(__name__) +app.config.from_prefixed_env() + + +@app.route("/") +def index(): + greeting = app.config.get("GREETING", "Hello, world!") + return f"{greeting}\n" + + +if __name__ == "__main__": + app.run() +``` + +Open `rockcraft.yaml` and update the version to `0.2`. Run `rockcraft pack` again, then upload the new OCI image to the MicroK8s registry: + +```bash +rockcraft.skopeo --insecure-policy copy --dest-tls-verify=false \ + oci-archive:flask-hello-world_0.2_amd64.rock \ + docker://localhost:32000/flask-hello-world:0.2 +``` + +Change back into the charm directory using `cd charm`. The `flask-framework` Charmcraft extension supports adding configurations to `charmcraft.yaml` which will be passed as environment variables to the Flask application. Add the following to the end of the `charmcraft.yaml` file: + +```yaml +config: + options: + greeting: + description: | + The greeting to be returned by the Flask application. + default: "Hello, world!" + type: string +``` + +[note type=information status] +Configuration options are automatically capitalised and `-` are replaced by `_`. A `FLASK_` prefix will also be added which will let Flask identify which environment variables to include when running `app.config.from_prefixed_env()` in `app.py`. +[/note] + +Run `charmcraft pack` again. The deployment can now be refreshed to make use of the new code: + +```bash +juju refresh flask-hello-world \ + --path=./flask-hello-world_ubuntu-22.04-amd64.charm \ + --resource flask-app-image=localhost:32000/flask-hello-world:0.2 +``` + +[note type=information status] +For the refresh command, the `--constraints` option is not required if you are not running on an `amd64` host as Juju will remember the constraint for the life of the application deployment. +[/note] + +Wait for `juju status` to show that the App is `active` again. Verify that the new configuration has been added using `juju config flask-hello-world | grep -A 6 greeting:` which should show the configuration option. + +[note type=information status] +The `grep` command extracts a portion of the configuration to make it easier to check whether the configuration option has been added. +[/note] + +Using `curl http://flask-hello-world --resolve flask-hello-world:80:127.0.0.1` shows that the response is still `Hello, world!` as expected. The greeting can be changed using Juju: + +```bash +juju config flask-hello-world greeting='Hi!' +``` + +`curl http://flask-hello-world --resolve flask-hello-world:80:127.0.0.1` now returns the updated `Hi!` greeting. + +[note type=information status] +It might take a short time for the configuration to take effect. +[/note] + +
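+To see how the naming described in the note above plays out, here is a small, self-contained sketch (not part of the tutorial) of how `app.config.from_prefixed_env()` turns a `FLASK_GREETING` environment variable -- which is, in effect, what the charm sets for the `greeting` option -- into `app.config["GREETING"]`:
+
+```python
+# Optional sketch: how Flask picks up FLASK_-prefixed environment variables.
+import os
+
+import flask
+
+# Roughly what the charm arranges for `juju config flask-hello-world greeting='Hi!'`.
+os.environ["FLASK_GREETING"] = "Hi!"
+
+app = flask.Flask(__name__)
+app.config.from_prefixed_env()
+print(app.config["GREETING"])  # -> Hi!
+```
+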

Integrate with a database

+ +Now let's keep track of how many visitors your application has received. This will require integration with a database to keep the visitor count. This will require a few changes: + +* We will need to create a database migration that creates the `visitors` table +* We will need to keep track how many times the root endpoint has been called in the database +* We will need to add a new endpoint to retrieve the number of visitors from the database + +The charm created by the `flask-framework` extension will execute the `migrate.py` script if it exists. This script should ensure that the database is initialised and ready to be used by the application. We will create a `migrate.py` file containing this logic. + +Go back out to the tutorial root directory using `cd ..`. Open the `migrate.py` file using a text editor and paste the following code into it: + +```python +import os + +import psycopg2 + + +DATABASE_URI = os.environ["POSTGRESQL_DB_CONNECT_STRING"] + + +def migrate(): + with psycopg2.connect(DATABASE_URI) as conn, conn.cursor() as cur: + cur.execute(""" + CREATE TABLE IF NOT EXISTS visitors ( + timestamp TIMESTAMP NOT NULL, + user_agent TEXT NOT NULL + ); + """) + conn.commit() + + +if __name__ == "__main__": + migrate() +``` + +[note type=information status] +The charm will pass the Database connection string in the `POSTGRESQL_DB_CONNECT_STRING` environment variable once postgres has been integrated with the charm. +[/note] + +Open the `rockcraft.yaml` file in a text editor and update the version to `0.3`. + +To be able to connect to postgresql from the Flask app the `psycopg2-binary` dependency needs to be added in `requirements.txt`. The app code also needs to be updated to keep track of the number of visitors and to include a new endpoint to retrieve the number of visitors to the app. Open `app.py` in a text editor and replace its contents with the following code: + +```python +import datetime +import os + +import flask +import psycopg2 + +app = flask.Flask(__name__) +app.config.from_prefixed_env() + +DATABASE_URI = os.environ["POSTGRESQL_DB_CONNECT_STRING"] + + +@app.route("/") +def index(): + with psycopg2.connect(DATABASE_URI) as conn, conn.cursor() as cur: + user_agent = flask.request.headers.get('User-Agent') + timestamp = datetime.datetime.now() + + cur.execute( + "INSERT INTO visitors (timestamp, user_agent) VALUES (%s, %s)", + (timestamp, user_agent) + ) + conn.commit() + + + greeting = app.config.get("GREETING", "Hello, world!") + return f"{greeting}\n" + + +@app.route("/visitors") +def visitors(): + with psycopg2.connect(DATABASE_URI) as conn, conn.cursor() as cur: + cur.execute("SELECT COUNT(*) FROM visitors") + total_visitors = cur.fetchone()[0] + + return f"{total_visitors}\n" + + +if __name__ == "__main__": + app.run() +``` + +Run `rockcraft pack` and upload the newly created rock to the MicroK8s registry: + +```bash +rockcraft.skopeo --insecure-policy copy --dest-tls-verify=false \ + oci-archive:flask-hello-world_0.3_amd64.rock \ + docker://localhost:32000/flask-hello-world:0.3 +``` + +Go back into the charm directory using `cd charm`. The Flask app now requires a database which needs to be declared in the `charmcraft.yaml` file. 
Open `charmcraft.yaml` in a text editor and add the following section to the end: + +```yaml +requires: + postgresql: + interface: postgresql_client + optional: false +``` + +Pack the charm using `charmcraft pack` and refresh the deployment using Juju: + +```bash +juju refresh flask-hello-world \ + --path=./flask-hello-world_ubuntu-22.04-amd64.charm \ + --resource flask-app-image=localhost:32000/flask-hello-world:0.3 +``` + +Deploy `postgresql-k8s` using Juju and integrate it with `flask-hello-world`: + +```bash +juju deploy postgresql-k8s --trust +juju integrate flask-hello-world postgresql-k8s +``` + +Wait for `juju status` to show that the App is `active` again. Running `curl http://flask-hello-world --resolve flask-hello-world:80:127.0.0.1` should still return the `Hi!` greeting. To check the total visitors, use `curl http://flask-hello-world/visitors --resolve flask-hello-world:80:127.0.0.1` which should return `1` after the previous request to the root endpoint and should be incremented each time the root endpoint is requested. If we perform another request to `curl http://flask-hello-world --resolve flask-hello-world:80:127.0.0.1`, `curl http://flask-hello-world/visitors --resolve flask-hello-world:80:127.0.0.1` will return `2`. + +
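+If you're curious, you can also inspect the `visitors` table directly with a few lines of Python. This is an optional sketch that assumes it is run somewhere the `POSTGRESQL_DB_CONNECT_STRING` environment variable is available (for example, inside the workload container) and that `psycopg2-binary` is installed:
+
+```python
+# Optional sketch: peek at the most recent rows in the visitors table.
+import os
+
+import psycopg2
+
+DATABASE_URI = os.environ["POSTGRESQL_DB_CONNECT_STRING"]
+
+with psycopg2.connect(DATABASE_URI) as conn, conn.cursor() as cur:
+    cur.execute(
+        "SELECT timestamp, user_agent FROM visitors ORDER BY timestamp DESC LIMIT 5"
+    )
+    for timestamp, user_agent in cur.fetchall():
+        print(timestamp, user_agent)
+```
+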

Clean up environment

+ +We've reached the end of this tutorial. We have created a Flask application, deployed it locally, exposed it via ingress and integrated it with a database! + +If you'd like to reset your working environment, you can run the following in the root directory for the tutorial: + +```bash +# exit and delete the virtual environment +deactivate +rm -rf charm .venv __pycache__ +# delete all the files created during the tutorial +rm flask-hello-world_0.1_amd64.rock flask-hello-world_0.2_amd64.rock \ + flask-hello-world_0.3_amd64.rock rockcraft.yaml app.py \ + requirements.txt migrate.py +# Remove the juju model +juju destroy-model flask-hello-world --destroy-storage +``` + +If you created an instance using Multipass, you can also clean it up. Start by exiting it: + +```bash +exit +``` + +And then you can proceed with its deletion: + +```bash +multipass delete charm-dev +multipass purge +``` + + +

Next steps

+ +| If you are wondering...| visit...| +|---------------------------------|----------------------------------------------------| +| "How do I...?" | [SDK How-to docs](/t/5521) | +| "How do I debug?" | [Charm debugging tools](/t/8047) | +| "What is...?" | [SDK Reference docs](/t/5522) | +| "Why...?", "So what?" | [SDK Explanation docs](/t/5523) | + +
+
+**Contributors:** @econley, @jdkandersson, @tmihoc, @weii-wang
+
+-------------------------
+
+-------------------------
+
diff --git a/tmp/t/13926.md b/tmp/t/13926.md
new file mode 100644
index 000000000..cc9801d6a
--- /dev/null
+++ b/tmp/t/13926.md
@@ -0,0 +1,258 @@
+sed-i | 2024-04-17 17:33:51 UTC | #1
+
+In COS Lite, Grafana Loki is the storage and querying backend for logs. Loki is optimised for write performance (ingestion speed), at the cost of slower random reads. This means that filtering structured logs by labels is fast, but full-text search is slower.
+
+Log lines must be pushed into Loki, as Loki does not actively collect anything on its own.
+
+Charmed operators are programmed to automatically add [juju topology labels](/t/juju-topology-labels/8874) to all telemetry, including logs. This enables you to differentiate telemetry and associated alerts if you happen to have multiple deployments of the same application.
+
+## Send logs to Loki
+
+In a typical COS Lite deployment, Loki would be running in a separate model from the monitored applications. While charms can be related directly to Loki using multiple cross-model relations (CMRs), we recommend funnelling all model telemetry through regular in-model relations to grafana agent, and only one CMR from grafana agent to Loki.
+
+```mermaid
+flowchart LR
+
+subgraph COS
+loki[Charmed Loki]
+end
+
+subgraph K8s model
+loki-client["Loki client\n(LokiPushApiConsumer)"] ---|"logging\n(loki_push_api)"| grafana-agent-k8s
+non-loki-client["Any workload + promtail\n(LogProxyConsumer)"] ---|"logging\n(loki_push_api)"| grafana-agent-k8s
+end
+
+
+grafana-agent-k8s[Charmed\ngrafana-agent] ---|"logging\n(loki_push_api)"| loki
+
+click grafana-agent-k8s "https://charmhub.io/grafana-agent-k8s"
+click loki "https://charmhub.io/loki-k8s"
+
+
+subgraph VM model
+vm-charm[VM charm] ---|"cos-agent\n(cos_agent)"| grafana-agent[Charmed\ngrafana agent]
+any-vm-charm[Any VM charm] ---|"juju-info\n(juju-info)"| grafana-agent
+legacy-vm-charm[Legacy VM charm] ---|"filebeat\n(elastic-beats)"| cos-proxy
+end
+
+grafana-agent ---|"logging\n(loki_push_api)"| loki
+cos-proxy ---|"logging\n(loki_push_api)"| loki
+
+click grafana-agent "https://charmhub.io/grafana-agent"
+click cos-proxy "https://charmhub.io/cos-proxy"
+```
+
+### Send logs from k8s charms
+Depending on your workload, you could choose one of the following [charm libraries](https://charmhub.io/loki-k8s/libraries/loki_push_api) (see the sketch after this list):
+- `LokiPushApiConsumer`, for workloads that can speak Loki's push api.
+- `LogProxyConsumer`, which would automatically inject a promtail binary into the workload containers of interest.
+- `LogForwarder`: This object can be used by any Charmed Operator which needs to send the workload standard output (stdout) through Pebble's log forwarding mechanism.
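+
+As a rough illustration (not taken from any specific charm; check the `loki_push_api` library page on Charmhub for the authoritative import path and constructor arguments), wiring one of these libraries into a Kubernetes charm looks roughly like this:
+
+```python
+# Rough sketch only - consult the loki_push_api library docs for the exact API.
+import ops
+from charms.loki_k8s.v1.loki_push_api import LogForwarder
+
+
+class MyWorkloadCharm(ops.CharmBase):
+    def __init__(self, *args):
+        super().__init__(*args)
+        # Forward the workload's stdout/stderr (via Pebble) over the
+        # "logging" relation to Loki or grafana-agent.
+        self._log_forwarder = LogForwarder(self, relation_name="logging")
+```
+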
+ +#### Example: postgresql +[Charmed postgresql-k8s](https://charmhub.io/postgresql-k8s) is [using LogProxyConsumer](https://github.com/canonical/postgresql-k8s-operator/blob/978080424255e109c7a7c4f4d23a5b3d5aba12a6/src/charm.py#L188) to tell promtail to [collect logs from](https://github.com/canonical/postgresql-k8s-operator/blob/978080424255e109c7a7c4f4d23a5b3d5aba12a6/src/constants.py#L23): +```text +[ + "/var/log/pgbackrest/*", + "/var/log/postgresql/patroni.log", + "/var/log/postgresql/postgresql*.log", +] +``` + +When related to loki, +```yaml +bundle: kubernetes +applications: + loki: + charm: loki-k8s + channel: edge + scale: 1 + trust: true + pgsql: + charm: postgresql-k8s + channel: 14/edge + scale: 1 + trust: true +relations: +- - pgsql:logging + - loki:logging +``` + +this results in an auto-render promtail config file with three scrape jobs, one for each "filename": +```bash +$ juju ssh --container postgresql pgsql/0 cat /etc/promtail/promtail_config.yaml +``` + +```yaml +clients: +- url: http://loki-0.loki-endpoints.test.svc.cluster.local:3100/loki/api/v1/push +positions: + filename: /opt/promtail/positions.yaml +scrape_configs: +- job_name: system + static_configs: + - labels: + __path__: /var/log/pgbackrest/* + job: juju_test_6a8318db_pgsql + juju_application: pgsql + juju_charm: postgresql-k8s + juju_model: test + juju_model_uuid: 6a8318db-33e9-487b-8065-4d95bfdabbdb + juju_unit: pgsql/0 + targets: + - localhost + - labels: + __path__: /var/log/postgresql/patroni.log + job: juju_test_6a8318db_pgsql + juju_application: pgsql + juju_charm: postgresql-k8s + juju_model: test + juju_model_uuid: 6a8318db-33e9-487b-8065-4d95bfdabbdb + juju_unit: pgsql/0 + targets: + - localhost + - labels: + __path__: /var/log/postgresql/postgresql*.log + job: juju_test_6a8318db_pgsql + juju_application: pgsql + juju_charm: postgresql-k8s + juju_model: test + juju_model_uuid: 6a8318db-33e9-487b-8065-4d95bfdabbdb + juju_unit: pgsql/0 + targets: + - localhost +server: + grpc_listen_port: 9095 + http_listen_port: 9080 +``` + +### Send logs from VM (“machine”) models + +Use charmed [grafana-agent](https://charmhub.io/grafana-agent), which is a subordinate charm. +- When related over `juju-info`, it will pick up all logs from `/var/log/*` without any additional setup. +- When related over `cos-agent`, it will collect the logs specified in charm code, as well as built-in alert rules and dashboards. +#### Example: nova-compute +[nova-compute](https://charmhub.io/nova-compute) does not make use of the charm libraries provided by charmed loki, so the method of integration is over the `juju-info` interface. 
+ +```yaml +series: jammy +applications: + agent: + charm: grafana-agent + channel: edge + nc: + charm: nova-compute + channel: yoga/stable + num_units: 1 +relations: +- - agent:juju-info + - nc:juju-info +``` + +This results in an auto-generated `grafana-agent.yaml` config file with juju topology labels and the default scrape jobs for `/var/log/**/*log` and `journalctl`: + +```bash +$ juju ssh agent/0 cat /etc/grafana-agent.yaml +``` +```yaml +integrations: + agent: + enabled: true + relabel_configs: + - regex: (.*) + replacement: juju_test_608018cd-d625-40c8-8e27-8ac7eef7d94f_agent_self-monitoring + target_label: job + - regex: (.*) + replacement: juju_f7d94f_0_lxd + target_label: instance + - replacement: grafana-agent + source_labels: + - __address__ + target_label: juju_charm + - replacement: test + source_labels: + - __address__ + target_label: juju_model + - replacement: 608018cd-d625-40c8-8e27-8ac7eef7d94f + source_labels: + - __address__ + target_label: juju_model_uuid + - replacement: agent + source_labels: + - __address__ + target_label: juju_application + - replacement: agent/0 + source_labels: + - __address__ + target_label: juju_unit + node_exporter: + # ... + prometheus_remote_write: [] +logs: + configs: + - clients: [] + name: log_file_scraper + scrape_configs: + - job_name: varlog + pipeline_stages: + - drop: + expression: .*file is a directory.* + static_configs: + - labels: + __path__: /var/log/**/*log + instance: juju_f7d94f_0_lxd + juju_application: agent + juju_model: test + juju_model_uuid: 608018cd-d625-40c8-8e27-8ac7eef7d94f + juju_unit: agent/0 + targets: + - localhost + - job_name: syslog + journal: + labels: + instance: juju_f7d94f_0_lxd + juju_application: agent + juju_model: test + juju_model_uuid: 608018cd-d625-40c8-8e27-8ac7eef7d94f + juju_unit: agent/0 + pipeline_stages: + - drop: + expression: .*file is a directory.* + positions_directory: ${SNAP_DATA}/grafana-agent-positions +metrics: + # ... +server: + log_level: info +``` +### Send logs from legacy charms +Legacy charms are charms that do not have COS relations in place, and are using older, "legacy" relations instead, such as "http", "prometheus", etc. Legacy charms relate to COS via the [cos-proxy](https://charmhub.io/cos-proxy) charm. + +### Send logs manually (no-juju solution) +You can set up [any client](https://grafana.com/docs/loki/latest/send-data/) that can speak Loki's [push api], for example: [grafana-agent snap](https://snapcraft.io/grafana-agent). + +## Inspecting log lines ingested by Loki + +### Manually querying Loki API endpoints + +You can [query loki](/t/loki-k8s-docs-http-api/13440) to obtain logs via [HTTP API](https://grafana.com/docs/loki/latest/reference/api/#query-logs-within-a-range-of-time). + +### Displaying in a grafana panel +A Loki [datasource](https://grafana.com/docs/grafana/latest/datasources/loki/) is automatically created in grafana when a relation is formed [between loki and grafana](https://charmhub.io/interfaces/grafana_datasource). + +You can visualise logs in grafana using [LogQL expressions](https://grafana.com/docs/loki/latest/query/). Grafana does not keep a copy of the Loki database. It queries loki for data, based on the `expr` in the panels. + +## Retention policy for logs in Loki + +Loki does not have a size-based retention policy. Instead, they rely on a retention period. This makes sense from operational approach standpoint, especially since the design assumption is that user would be using S3 storage. + +The default retention period is 30 days. 
At the moment the Loki charmed operator does not support modifying this.
+
+
+[push api]: https://grafana.com/docs/loki/latest/reference/api/
+
+## References
+- [Collect logs with Promtail](https://grafana.com/docs/grafana-cloud/send-data/logs/collect-logs-with-promtail/)
+- [Collect logs with Grafana Agent](https://grafana.com/docs/grafana-cloud/send-data/logs/collect-logs-with-agent/)
+- [Loki HTTP API][push api]
+
+-------------------------
+
diff --git a/tmp/t/13927.md b/tmp/t/13927.md
new file mode 100644
index 000000000..792297347
--- /dev/null
+++ b/tmp/t/13927.md
@@ -0,0 +1,113 @@
+sed-i | 2024-04-15 20:41:43 UTC | #1
+
+A COS Lite deployment is made up of numerous Juju relations.
+
+For clarity and readability, the bundle topology is depicted here using several separate diagrams. Each line indicates a separate Juju relation.
+
+## Ingress view
+The workloads that make up COS Lite are servers that need to be reachable from outside the model they are deployed in.
+- Grafana ("ingress-to-leader") is the main UI, amalgamating telemetry from all datasources into dashboards.
+- Prometheus and Loki (both "ingress-per-unit") ingest telemetry pushed by grafana agent from another model.
+- Alertmanager ("ingress per app") has a UI for acknowledging or silencing alerts.
+
+```mermaid
+graph LR
+
+subgraph cos_lite["COS Lite"]
+
+    alrt[Alertmanager]
+    click alrt "https://github.com/canonical/alertmanager-k8s-operator"
+
+    graf[Grafana]
+    click graf "https://github.com/canonical/grafana-k8s-operator"
+
+    prom[Prometheus]
+    click prom "https://github.com/canonical/prometheus-k8s-operator"
+
+    loki[Loki]
+    click loki "https://github.com/canonical/loki-k8s-operator"
+
+    trfk[Traefik]
+    click trfk "https://github.com/canonical/traefik-k8s-operator"
+
+    ctlg[Catalogue]
+    click ctlg "https://github.com/canonical/catalogue-k8s-operator"
+
+    trfk --- |ipu| loki
+    trfk --- |ipu| prom
+    trfk --- |route| graf
+    trfk --- |ipa| alrt
+
+    prom --- |catalogue| ctlg
+    alrt --- |catalogue| ctlg
+    graf --- |catalogue| ctlg
+
+end
+```
+
+## Datasource view
+The workloads that make up COS Lite are datasources for each other:
+- Grafana queries Loki and Prometheus for telemetry, and Alertmanager for alerts.
+- Prometheus and Loki evaluate alert rules and post alerts to Alertmanager.
+
+```mermaid
+graph LR
+
+subgraph cos_lite["COS Lite"]
+
+    alrt[Alertmanager]
+    click alrt "https://github.com/canonical/alertmanager-k8s-operator"
+
+    graf[Grafana]
+    click graf "https://github.com/canonical/grafana-k8s-operator"
+
+    prom[Prometheus]
+    click prom "https://github.com/canonical/prometheus-k8s-operator"
+
+    loki[Loki]
+    click loki "https://github.com/canonical/loki-k8s-operator"
+
+    prom --- |alerting| alrt
+    loki --- |alerting| alrt
+    graf --- |source| prom
+    graf --- |source| alrt
+    graf --- |source| loki
+end
+```
+
+## Self-monitoring view
+We need to be made aware if the observability solution itself is not functioning properly. Self-monitoring relations within COS Lite, together with [cos-alerter](https://github.com/canonical/cos-alerter), are meant to alert for outages of the observability stack itself.
+ +```mermaid +graph TD + +subgraph cos_lite["COS Lite"] + + alrt[Alertmanager] + click alrt "https://github.com/canonical/alertmanager-k8s-operator" + + graf[Grafana] + click graf "https://github.com/canonical/grafana-k8s-operator" + + prom[Prometheus] + click prom "https://github.com/canonical/prometheus-k8s-operator" + + loki[Loki] + click loki "https://github.com/canonical/loki-k8s-operator" + + trfk[Traefik] + click trfk "https://github.com/canonical/traefik-k8s-operator" + + trfk --- |metrics| prom + alrt --- |metrics| prom + loki --- |metrics| prom + graf --- |metrics| prom + + graf --- |dashboard| loki + graf --- |dashboard| prom + graf --- |dashboard| alrt +end +``` + +------------------------- + diff --git a/tmp/t/13953.md b/tmp/t/13953.md new file mode 100644 index 000000000..6029448e0 --- /dev/null +++ b/tmp/t/13953.md @@ -0,0 +1,128 @@ +tmihoc | 2024-07-02 07:05:14 UTC | #1 + +When you build and own a charm, you wear multiple hats -- developer, QA specialist, technical author, marketing specialist. This document walks you through these roles. + +[note type=information] + +Although the rough steps associated with these roles are presented sequentially, reflecting the logic "you need to develop something before test, document, and publish", we strongly recommend you iterate -- develop something, test right away, document right away, market right away, repeat. +[/note] + + + + + + +## Set things up + +> See more: +> - [How to set up your development environment](/t/4450) +> - [How to set up a charm project](/t/5547) +> - [How to create a bundle](/t/1058#heading--create-a-bundle) + +## Develop + +For a charm, consider resources, application lifecycle management, actions, configurations, relations (integrations), secrets. + +> See more: +> - [How to use resources](/t/4468) +> - [How to run workloads - machines](/t/4457), [How to run workloads - Kubernetes](/t/4554) +> - [How to support actions](/t/4459) +> - [How to support configurations](/t/4458) +> - [How to support relations (integrations)](/t/1051) +> - [How to support secrets](/t/7221) + +For a bundle, iterate on the `bundle.yaml` file to optimise. + + +## Test and debug + +For a charm, consider unit, scenario, integration, and end-to-end tests. + +> See more: +> - [How to get started with charm testing](/t/6894) +> - [How to write unit test for a charm](/t/4461) +> - [How to write scenario tests for a charm](/t/10585) +> - [How to write integration tests for a charm](/t/12734) +> - the testing sections at the end of some of the "support [feature]" docs +> - [How to pack](/t/5548) +> - [How to deploy](/t/5597) +> - [How to debug](/t/4837) + + + +## Document + +You should always document each feature of your charm as you develop (e.g., as you define an action in `charmcraft.yaml`, make sure to provide a useful description). However, at the end, also take stock of your charm overall and add any other materials that you think might be needed. 
+ +> See more: [How to add docs to a charm on Charmhub](/t/3784) + + +## Market + +### Add an icon + +> See more: [How to add an icon](/t/1041) + +### Register a name in Charmhub + +> See more: [How to publish> Register](/t/4462) + +### Create a channel track + +> See more: +> - [How to create a channel track for a charm](/t/11102) + + +### Publish on Charmhub + +> See more: +> - [How to publish > Upload](/t/4462) +> - [How to publish > Release](/t/4462) + +### Promote to a more stable channel risk level + +To promote a charm to a more stable channel risk level of the same track, release it again specifying the revision number and the channel, including track and, especially, the target risk level. For example: + +```text +charmcraft release --revision 118 --channel=5/candidate +``` + +To promote a bundle to a more stable risk level of the same track, run `charmcraft promote-bundle`. + +### Request review and public listing + +> See more: [Requirements for public listing](/t/10632#heading--requirements-for-public-listing) + +### Advertise + +[Write a Discourse post to announce your release.](https://discourse.charmhub.io/tags/c/announcements-and-community/33/none) + +[Schedule a community workshop to demo your charm's capabilities.](https://discourse.charmhub.io/tag/community-workshop) + +[Chat about it with your charmer friends.](https://matrix.to/#/#charmhub-charmdev:ubuntu.com) + + +
+ +> Contributors: @lengau, @jdkandersson, @mmkay, @tmihoc, @weii-wang + +------------------------- + diff --git a/tmp/t/13990.md b/tmp/t/13990.md new file mode 100644 index 000000000..9fc3f51dc --- /dev/null +++ b/tmp/t/13990.md @@ -0,0 +1,8 @@ +tmihoc | 2024-04-22 14:09:15 UTC | #1 + +Rockcraft is a command line tool that you can use to package an application as a 'rock'. In Juju, it is the recommended way to create a container image for a workload you want to charm. + +> See more: [Rockcraft | Docs](https://canonical-rockcraft.readthedocs-hosted.com/en/latest/) + +------------------------- + diff --git a/tmp/t/14018.md b/tmp/t/14018.md new file mode 100644 index 000000000..28e2ac472 --- /dev/null +++ b/tmp/t/14018.md @@ -0,0 +1,23 @@ +tmihoc | 2024-10-15 19:30:19 UTC | #1 + +A **12-Factor app charm** is a [charm](/t/5457) that has been created using certain coordinated pairs of [Rockcraft](/t/5528) and [Charmcraft](/t/5528) [profiles](/t/15010) designed to give you most of the content you will need to generate a [rock^](https://documentation.ubuntu.com/rockcraft/en/latest/explanation/rocks/) for a charm, and then the charm itself, for a particular type of workload (e.g., an application developed with Flask). + +[note type=positive] +**Did you know?** The OCI images produced by the 12-Factor-app-geared Rockcraft extension are designed to work standalone and are also well integrated with the rest of the Flask framework tooling. +[/note] + +When you initialise a rock with a 12-Factor-app-charm-geared profile, the initialisation will generate all the basic structure and content you'll need for the rock, including a [`rockcraft.yaml`^](https://canonical-rockcraft.readthedocs-hosted.com/en/latest/reference/rockcraft.yaml/#) prepopulated with an extension matching the profile. Similarly, when you initialise a charm with a 12-Factor-app-charm-geared profile, that will generate all the basic structure content you'll need for the charm, including a [`charmcraft.yaml`](/t/7132) pre-populated with an extension matching the profile as well as a `src/charm.py` pre-loaded with a library (`paas_charm`) with constructs matching the profile and the extension. + + +At present, there are four pairs of profiles: +- `flask-framework` ([Rockcraft extension 'flask-framework'](/t/15017), [Charmcraft extension 'flask-framework'](/t/15012)) +- `django-framework` ([Rockcraft extension 'django-framework'](/t/15652), [Charmcraft extension 'django-framework'](/t/15653)) +- `fastapi-framework` ([Rockcraft extension 'fastapi-framework'](/t/15752), [Charmcraft extension 'fastapi-framework'](/t/15753)) +- `go-framework` (Rockcraft extension 'go-framework', [Charmcraft extension 'go-framework'](/t/15809)) + +
+
+> **Contributors:** @econley, @jdkandersson, @javierdelapuente, @tmihoc
+
+-------------------------
+
diff --git a/tmp/t/14043.md b/tmp/t/14043.md
new file mode 100644
index 000000000..9ece729c3
--- /dev/null
+++ b/tmp/t/14043.md
@@ -0,0 +1,33 @@
+tmihoc | 2024-04-26 09:01:24 UTC | #1
+
+## Usage:
+```text
+charmcraft fetch-libs [options]
+```
+
+## Summary:
+
+Fetch charm libraries defined in charmcraft.yaml.
+
+For each library in the top-level `charm-libs` key, fetch the latest library version matching those requirements.
+
+For example:
+
+    charm-libs:
+      # Fetch lib with API version 0.
+      # If `fetch-libs` is run and a newer minor version is available,
+      # it will be fetched from the store.
+      - lib: postgresql.postgres_client
+        version: "0"
+      # Always fetch precisely version 0.57.
+      - lib: mysql.client
+        version: "0.57"
+
+## Options:
+| | |
+|-|-|
+| `-h, --help` | Show this help message and exit |
+| `-v, --verbose` | Show debug information and be more verbose |
+| `-q, --quiet` | Only show warnings and errors, not progress |
+| `--verbosity` | Set the verbosity level to 'quiet', 'brief', 'verbose', 'debug' or 'trace' |
+| `-V, --version` | Show the application version and exit |
+| `--format` | Produce the result in the specified format (currently only 'json') |
+
+## See also:
+- `create-lib`
+- `fetch-lib`
+- `list-lib`
+- `publish-lib`
+
+-------------------------
+
diff --git a/tmp/t/14056.md b/tmp/t/14056.md
new file mode 100644
index 000000000..807045ca0
--- /dev/null
+++ b/tmp/t/14056.md
@@ -0,0 +1,78 @@
+benhoyt | 2024-04-28 23:28:11 UTC | #1
+
+
+Charm developers have had many discussions about "holistic" charms compared to "delta" charms, and which approach is better. First, let's define those terms:
+
+* A *delta-based* charm is when the charm handles each kind of Juju hook with a separate handler function, which does the minimum necessary to process that kind of event.
+* A *holistic* charm handles some or all Juju hooks using a common code path such as `_update_charm`, which queries the charm config and relation data and "rewrites the world", that is, rewrites application configuration and restarts necessary services.
+
+Juju itself nudges charm authors in the direction of delta-based charms, because it provides specific event kinds that signal that one "thing" changed: `config-changed` says that a config value changed, `relation-changed` says that relation data has changed, `pebble-ready` signals that the Pebble container is ready, and so on.
+
+However, this only goes so far: `config-changed` doesn't tell the charm which config keys changed, and `relation-changed` doesn't tell the charm how the relation data changed.
+
+In addition, the charm may receive an event like `config-changed` before it's ready to handle it, for example, if the container is not yet ready (`pebble-ready` has not yet been triggered). In such cases, charms could try to wait for both events to occur, possibly storing state to track which events have occurred -- but that is error-prone.
+
+Alternatively, a charm can use a holistic approach and handle both `config-changed` and `pebble-ready` with a single code path, as in this example:
+
+```python
+class MyCharm(ops.CharmBase):
+    def __init__(self, framework: ops.Framework):
+        super().__init__(framework)
+        framework.observe(self.on.config_changed, self._update_charm)
+        framework.observe(self.on['redis'].pebble_ready, self._update_charm)
+
+    def _update_charm(self, _: ops.EventBase):  # event parameter isn't used
+        redis_port = self.config.get('redis-port')
+        if not redis_port:
+            # pebble-ready happened first, wait for config-changed
+            return
+
+        # If both the Pebble container and config are ready, rewrite the
+        # container's config file and restart Redis if needed.
+        container = self.unit.get_container('redis')
+        try:
+            self._update_redis_config(container, redis_port)
+        except ops.pebble.ConnectionError:
+            # config-changed happened first, wait for pebble-ready
+            return
+```
+
+
+## When to use the holistic approach
+
+If a charm is waiting for a collection of events, as in the example above, it makes sense to group those events together and handle them holistically, with a single code path.
+
+In other words, when writing a charm, it's not so much "should the *charm* be holistic?" as "does it make sense for *these events* to be handled holistically?"
+
+Using the holistic approach is normally centred around configuring an application. Various events that affect configuration use a common handler, to simplify writing an application config file and restarting the application. This is common for events like `config-changed`, `relation-changed`, `secret-changed`, and `pebble-ready`.
+
+Many existing charms use holistic event handling. A few examples are:
+
+- [`alertmanager-k8s` uses a `_common_exit_hook` method to unify several event handlers](https://github.com/canonical/alertmanager-k8s-operator/blob/561f1d8eb1dc6e4511c1c0b3cba444a3ec399464/src/charm.py#L390)
+- [`hello-kubecon` is a simple charm that handles `config-changed` and `pebble-ready` holistically](https://github.com/jnsgruk/hello-kubecon/blob/dbd133466dde59ee64f20a732a8f3d2e560ec3b8/src/charm.py#L32-L33)
+- [`prometheus-k8s` uses a common `_configure` method to handle various events](https://github.com/canonical/prometheus-k8s-operator/blob/84c6a406ed585cdb7ba40e01a258864987d6f67f/src/charm.py#L221-L230)
+- [`sdcore-gnbsim-k8s` also uses a common `_configure` method](https://github.com/canonical/sdcore-gnbsim-k8s-operator/blob/ea2afe069346757b1eb6c02de5b4f50f90e81698/src/charm.py#L84-L92)
+
+
+## Which events can be handled holistically?
+
+Only some events make sense to handle holistically. For example, `remove` is triggered when a unit is about to be terminated, so it doesn't make sense to handle it holistically.
+
+Similarly, events like `secret-expired` and `secret-rotate` don't make sense to handle holistically, because the charm must do something specific in response to the event. For example, Juju will keep triggering `secret-expired` until the charm creates a new secret revision by calling [`event.secret.set_content()`](https://ops.readthedocs.io/en/latest/#ops.Secret.set_content).
+
+This is very closely related to [which events can be `defer`red](https://juju.is/docs/sdk/how-and-when-to-defer-events). A good rule of thumb is this: if an event can be deferred, it may make sense to handle it holistically.
+
+On the other hand, if an event cannot be deferred, the charm cannot handle it holistically.
This applies to action "events", `stop`, `remove`, `secret-expired`, `secret-rotate`, and Ops-emitted events such as `collect-status`.
+
+-------------------------
+
+ca-scribner | 2024-05-07 16:21:11 UTC | #2
+
+Love this. It is easy to mentally subscribe to one pattern or the other, but they're complementary.
+
+For me, I usually default to doing everything holistic unless it feels like it won't work. The reason being that I've found:
+* things that **should be handled as a delta** usually feel obviously wrong when you try to handle them holistically, so you notice the problem and fix it
+* things that **should be handled holistically** often result in subtle, hard to find bugs when handled as a delta, so you don't notice the problem until it is too late
+
+-------------------------
+
diff --git a/tmp/t/14367.md b/tmp/t/14367.md
new file mode 100644
index 000000000..78153bdf6
--- /dev/null
+++ b/tmp/t/14367.md
@@ -0,0 +1,137 @@
+tmihoc | 2024-07-05 13:47:12 UTC | #1
+
+
+
+To take your deployment offline (to be more precise, in a proxy-restricted environment):
+
+
+1. Use a private cloud.
+
+> See more: [List of supported clouds](/t/6665)
+
+2. Figure out the list of external services required for your deployment and set up proxies / local mirrors for them. Depending on whether your deployment is on machines or Kubernetes, and whether it is on a localhost cloud (and which one), these services may include:
+
+    - [https://streams.canonical.com](https://streams.canonical.com/) for agent binaries and LXD container and VM images;
+    - [https://charmhub.io/](https://charmhub.io/) for charms, including the Juju controller charm;
+    - [https://snapcraft.io/store](https://snapcraft.io/store) for Juju's internal database;
+    - [http://cloud-images.ubuntu.com](http://cloud-images.ubuntu.com/) for base Ubuntu cloud machine images, and [http://archive.ubuntu.com](http://archive.ubuntu.com/) and [http://security.ubuntu.com](http://security.ubuntu.com/) for machine image upgrades;
+    - a container image registry:
+        - [https://hub.docker.com/](https://hub.docker.com/)
+        - [public.ecr.aws/juju](https://public.ecr.aws/juju)
+        - [https://ghcr.io/juju](https://ghcr.io/juju)
+
+> See more: [Bootstrapping](/t/6209), [Deploying](/t/11285)
+
+3. 
Configure Juju to make use of the proxies / local mirrors you've set up by means of the following model configuration keys:
+
+- [`agent-metadata-url`](/t/7068#heading--agent-metadata-url)
+- [`apt-ftp-proxy`](/t/7068#heading--apt-ftp-proxy)
+- [`apt-http-proxy`](/t/7068#heading--apt-http-proxy)
+- [`apt-https-proxy`](/t/7068#heading--apt-https-proxy)
+- [`apt-mirror`](/t/7068#heading--apt-mirror)
+- [`apt-no-proxy`](/t/7068#heading--apt-no-proxy)
+- [`container-image-metadata-url`](/t/7068#heading--container-image-metadata-url)
+- [`ftp-proxy`](/t/7068#heading--ftp-proxy)
+- [`http-proxy`](/t/7068#heading--http-proxy)
+- [`https-proxy`](/t/7068#heading--https-proxy)
+- [`image-metadata-url`](/t/7068#heading--image-metadata-url)
+- [`juju-ftp-proxy`](/t/7068#heading--juju-ftp-proxy)
+- [`juju-http-proxy`](/t/7068#heading--juju-http-proxy)
+- [`juju-https-proxy`](/t/7068#heading--juju-https-proxy)
+- [`juju-no-proxy`](/t/7068#heading--juju-no-proxy)
+- [`no-proxy`](/t/7068#heading--no-proxy)
+- [`snap-http-proxy`](/t/7068#heading--snap-http-proxy)
+- [`snap-https-proxy`](/t/7068#heading--snap-https-proxy)
+- [`snap-store-assertions`](/t/7068#heading--snap-store-assertions)
+- [`snap-store-proxy`](/t/7068#heading--snap-store-proxy)
+- [`snap-store-proxy-url`](/t/7068#heading--snap-store-proxy-url)
+
+**Examples:**
+
+[details=Configure the client to use an HTTP proxy]
+
+Set up an HTTP proxy, export it to an environment variable, then use the `http-proxy` model configuration key to point the client to that value.
+
+
+[/details]
+
+[details="Configure all models to use an APT mirror"]
+
+Set up an APT mirror, export it to the environment variable $MIRROR_APT, then set the `apt-mirror` model config key to point to that environment variable. For example, for a controller on AWS:
+
+```text
+juju bootstrap --model-default apt-mirror=$MIRROR_APT aws
+```
+[/details]
+
+[details="Have all models use local resources for both Juju agent binaries and cloud images"]
+
+Get the resources for Juju agent binaries and cloud images locally; define and export environment variables pointing to them; then set the `agent-metadata-url` and `image-metadata-url` model configuration keys to point to those environment variables. For example:
+
+```text
+juju bootstrap \
+    --model-default agent-metadata-url=$LOCAL_AGENTS \
+    --model-default image-metadata-url=$LOCAL_IMAGES \
+    localhost
+```
+[/details]
+
+
+[details="Set up HTTP and HTTPS proxies but exclude the localhost cloud"]
+
+Set up HTTP and HTTPS proxies and define and export environment variables pointing to them (below, `PROXY_HTTP` and `PROXY_HTTPS`); define and export an environment variable holding the IP addresses for your `localhost` cloud (below, `PROXY_NO`); then bootstrap setting the `http-proxy`, `https-proxy`, and `no-proxy` model configuration keys to the corresponding environment variables. For example:
+
+```text
+$ export PROXY_HTTP=http://squid.internal:3128
+$ export PROXY_HTTPS=http://squid.internal:3128
+$ export PROXY_NO=$(echo localhost 127.0.0.1 10.245.67.130 10.44.139.{1..255} | sed 's/ /,/g')
+
+$ export http_proxy=$PROXY_HTTP
+$ export https_proxy=$PROXY_HTTPS
+$ export no_proxy=$PROXY_NO
+
+$ juju bootstrap \
+--model-default http-proxy=$PROXY_HTTP \
+--model-default https-proxy=$PROXY_HTTPS \
+--model-default no-proxy=$PROXY_NO \
+localhost lxd
+```
+[/details]
+
+ +> **Contributors:** @nvinuesa , @tmihoc + +------------------------- + diff --git a/tmp/t/14532.md b/tmp/t/14532.md new file mode 100644 index 000000000..5d34a3b79 --- /dev/null +++ b/tmp/t/14532.md @@ -0,0 +1,72 @@ +sed-i | 2024-06-24 20:46:24 UTC | #1 + +Integrating a charm with [COS](https://charmhub.io/topics/canonical-observability-stack) means: +- having your app's metrics and corresponding alert rules reach [prometheus](https://charmhub.io/prometheus-k8s/) +- having your app's logs and corresponding alert rules reach [loki](https://charmhub.io/loki-k8s/) +- having your app's dashboards reach [grafana](https://charmhub.io/grafana-k8s/) + +The COS team is responsible for some aspects of testing, and some aspects of testing belong to the charms integrating with COS. + +## Tests for the built-in alert rules +### Unit tests +- You can use [`promtool test rules`](https://prometheus.io/docs/prometheus/latest/configuration/unit_testing_rules/) to make sure they fire when you expect them to fire. As part of the test you hard-code the time series values you are testing for. +- [`promtool check rules`](https://prometheus.io/docs/prometheus/latest/command-line/promtool/#promtool-check) +- [`cos-tool validate`](https://github.com/canonical/cos-tool). The advantage of cos-tool is that the same executable can validate both prometheus and loki rules. +- Make sure your alerts manifest matches the output of: + - `juju ssh prometheus/0 curl localhost:9090/api/v1/rules | jq -r '.data.groups | .[] | .rules | .[] | .name'`. + - `juju ssh loki/0 curl localhost:3100/loki/api/v1/rules` + +### Integration tests +- A fresh deployment shouldn't fire alerts, e.g. due to missing past data that is interpreted as 0. + +## Tests for the metrics endpoint and scrape job +### Integration tests +- [`promtool check metrics`](https://prometheus.io/docs/prometheus/latest/command-line/promtool/#promtool-check) to lint the the metrics endpoint, e.g. `curl -s http://localhost:8080/metrics | promtool check metrics`. +- For scrape targets: when related to prometheus, and after a scrape interval elapses (default: 1m), all prometheus targets listed in `GET /api/v1/targets` should be `"health": "up"`. Repeat the test with/without ingress, TLS. +- For remote-write (and scrape targets): when related to prometheus, make sure that `GET /api/v1/labels` and `GET /api/v1/label/juju_unit` have your charm listed. +- Make sure that the metric names in your alert rules have matching metrics in your own `/metrics` endpoint. + +## Tests for log lines +### Integration tests +- When related to loki, make sure your logging sources are listed in: + - `GET /loki/api/v1/label/filename/values` + - `GET /loki/api/v1/label/juju_unit/values`. + +## Tests for dashboards +### Unit tests +- json lint + +### Integration tests +- Make sure the dashboards manifest you have in the charm matches `juju ssh grafana/0 curl http://admin:password@localhost:3000/api/search`. + +## Additional thoughts +- A rock's CI could dump a record of the `/metrics` endpoint each time the rock is built. This way some integration tests could turn into unit tests. + +## See also +- https://discourse.charmhub.io/t/prometheus-k8s-docs-troubleshooting-integrations/14351 +- https://discourse.charmhub.io/t/loki-k8s-docs-troubleshooting-missing-logs/14187 + +------------------------- + +dnplas | 2024-06-25 20:23:13 UTC | #2 + +Thanks @sed-i for this useful information! 
+ +What would be the best recommendation for integration tests: to deploy prometheus, grafana and loki charms alongside the charms under test or just deploy the grafana-agent charm, knowing it will go into `BlockedStatus` if it is not connected to any of the consumers? + +For our team it is important to know this because we recommend our users to deploy the grafana-agent charm in the same model as our application (Charmed Kubeflow) and then deploy COS in its own model, so we'd like to keep our integration tests closer to what users would get, but it does seem weird to test with a charm that is "blocked". + +------------------------- + +sed-i | 2024-06-26 03:51:28 UTC | #3 + +Tough call. On one hand, having itests depend on a series of two external charms (grafana agent and prometheus/loki/grafana) is heavy and risky. On the other hand, a blocked grafana agent may in the future stop the pebble service, so it's not ideal to rely on grafana-agent workload running in blocked. + +Possible approach: +- Accept that you cannot test everything. +- Remove unnecessary tests. +- Move as much as possible to unit/static tests. +- Instead of multiple semi-heavy integration tests, have one heavier end-to-end test that runs separately. + +------------------------- + diff --git a/tmp/t/146.md b/tmp/t/146.md new file mode 100644 index 000000000..6ff73c939 --- /dev/null +++ b/tmp/t/146.md @@ -0,0 +1,116 @@ +thumper | 2024-08-22 13:12:47 UTC | #1 + +> See also: [Agent introspection](/t/117) + +The engine report is a window into the internals of the agent. This is primarily useful to developers to help debug problems that may be occurring in deployed systems. + +In order to manage complexity in the juju agents, there are *workers* that have very distinct and limited purpose. Workers can have dependencies on other workers. The [dependency engine](https://godoc.org/gopkg.in/juju/worker.v1/dependency) is the entity that runs the workers and deals with those dependencies. The `juju_engine_report` is the current view into the dependency engine running the agent's workers. + +# Usage +Can be run on any juju machine, expected state is different for controller machines, ha, and machines running workloads. +```code +juju_engine_report +``` +## Example output +```text +manifolds: + agent: + inputs: [] + report: + agent: machine-0 + model-uuid: 1b13f1f5-c0cf-47c5-86ae-55c393e19405 + resource-log: [] + start-count: 1 + started: 2018-08-09 22:01:39 + state: started + api-address-updater: + inputs: + - agent + - api-caller + - migration-fortress + - migration-inactive-flag + report: + servers: + - - 10.173.141.131:17070 + - 127.0.0.1:17070 + - '[::1]:17070' + resource-log: + - name: migration-inactive-flag + type: '*engine.Flag' + - name: migration-fortress + type: '*fortress.Guest' + - name: agent + type: '*agent.Agent' + - name: api-caller + type: '*base.APICaller' + start-count: 1 + started: 2018-08-09 22:01:41 + state: started + api-caller: + inputs: + - agent + - api-config-watcher + resource-log: + - name: agent + type: '*agent.Agent' + start-count: 1 + started: 2018-08-09 22:01:40 + state: started +# and many more +``` + +# Interesting Output + +* Dependencies with a larger `start_count` than others. This can indicate that the worker is bouncing. + +* Dependencies that are stopped when they should be started. Perhaps the `inputs` are not starting. + +* Dependencies that are started which should be stopped. Can prevent a unit from upgrading or migrating if the workers do not quiesce. 
+ +* A controllers engine report will contain the model cache contents as of 2.9 + +* The report from an individual unit contains the local-state and relation, formerly in a file on the unit: +``` + report: + local-state: + hook-kind: continue + hook-step: pending + installed: true + leader: true + removed: false + started: true + stopped: false + relations: + "0": + application-members: + ntp: 0 + dying: false + endpoint: ntp-peers + is-peer: false + members: {} + relation: ntp:ntp-peers + "1": + application-members: + ubuntu: 0 + dying: false + endpoint: juju-info + is-peer: false + members: + ubuntu/0: 0 + relation: ntp:juju-info ubuntu:juju-info +``` + +------------------------- + +pedroleaoc | 2022-04-07 09:25:34 UTC | #2 + + + +------------------------- + +pedroleaoc | 2022-10-14 11:32:27 UTC | #3 + + + +------------------------- + diff --git a/tmp/t/14612.md b/tmp/t/14612.md new file mode 100644 index 000000000..ab45a2848 --- /dev/null +++ b/tmp/t/14612.md @@ -0,0 +1,26 @@ +barrettj12 | 2024-08-26 15:33:17 UTC | #1 + +Commonly, you may have to work with multiple versions of Juju at once. This document describes the compatibility rules between different versions of Juju. + +## [`juju` CLI client](/t/5465), controllers and [agents](https://juju.is/docs/juju/agent) + +Juju controllers, agents, and the `juju` CLI client all are [semantically versioned](https://semver.org/). This means: +- Controllers/agents/clients **in the same major/minor series** (e.g. 3.5.0 and 3.5.2) are fully compatible. +- Controllers/agents/clients **in the same major series** (e.g. 3.4 and 3.5) are compatible, but older versions may be lacking features present in newer versions. +- Controllers/agents/clients with different major versions (e.g. 2.8 and 3.1) are **not guaranteed to be compatible.** The one exception is that we guarantee a basic set of operations (e.g. status and migration) is compatible between **the last minor in a major series** and the next major. This enables users to upgrade their existing deployments to the next major version. Related to that: A Juju client only bootstraps a Juju controller of the same major/minor version. + + + +## [python-libjuju](/t/13089) + +- For a 2.9.x controller, you should use the latest python-libjuju in the 2.9 track. +- For 3.x controllers, you should use the latest version of python-libjuju. + +## [Terraform Juju provider](/t/13086) + +The latest version of the Terraform Juju provider should be compatible with all Juju controllers. + +------------------------- + diff --git a/tmp/t/14936.md b/tmp/t/14936.md new file mode 100644 index 000000000..fb8c184e7 --- /dev/null +++ b/tmp/t/14936.md @@ -0,0 +1,159 @@ +tony-meyer | 2024-09-26 07:32:57 UTC | #1 + +> See first: [Juju | Leader](https://juju.is/docs/juju/leader) + +**Contents:** + +- [Observe the `leader-elected` event and define an event handler](#heading--observe-the-leader-elected-event-and-define-an-event-handler) +- [Test leadership management](#heading--test-leadership-management) + +

Observe the `leader-elected` event and define an event handler

+
+In the `src/charm.py` file, in the `__init__` function of your charm, set up an observer for the `leader-elected` event and pair that with an event handler. For example:
+
+```python
+self.framework.observe(self.on.leader_elected, self._on_leader_elected)
+```
+
+> See more: [`ops.LeaderElectedEvent`](https://ops.readthedocs.io/en/latest/#ops.LeaderElectedEvent)
+
+Now, in the body of the charm definition, define the event handler. For example, the handler below will update a configuration file:
+
+```python
+def _on_leader_elected(self, event: ops.LeaderElectedEvent):
+    self.reconfigure(leader=self.unit)
+```
+
+> Examples: [Tempo reconfiguring ingress on leadership change](https://github.com/canonical/tempo-k8s-operator/blob/3f94027b6173f436968a4736a1f2d89a1f17b2e1/src/charm.py#L263), [Kubeflow Dashboard using a holistic handler to configure on leadership change and other events](https://github.com/canonical/kubeflow-dashboard-operator/blob/02caa736a6ea8986b8cba23b63c08a12aaedb86c/src/charm.py#L82)
+
+To have the leader notify other units about leadership changes, change data in a peer relation.
+
+> See more: [Peer Relations](https://juju.is/docs/juju/relation#heading--peer)
+
+[note status="Use the peer relation rather than `leader-settings-changed`"]
+In the past, this was done by observing a `leader-settings-changed` event, which is now deprecated.
+[/note]
+
+Commonly, other event handlers will need to check for leadership. For example,
+only the leader unit can change charm application secrets, so leadership checks
+are needed to guard against non-leaders:
+
+```python
+if self.unit.is_leader():
+    secret = self.model.get_secret(label="my-label")
+    secret.set_content({"username": "user", "password": "pass"})
+```
+
+Note that Juju guarantees leadership for only 30 seconds after a `leader-elected`
+event or an `is-leader` check. If the charm code might run for longer than that,
+extra `is_leader()` calls should be made to ensure that the unit is still the leader.
+
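+For example, a long-running handler that performs several privileged operations
+can re-check leadership before each step. The following is only a sketch: the
+action, secret labels, and `generate_password` helper are hypothetical, not part
+of any real charm.
+
+```python
+def _on_rotate_credentials_action(self, event: ops.ActionEvent):
+    for label in ("db-password", "api-token"):  # hypothetical secret labels
+        # The 30-second leadership guarantee may have lapsed since the last
+        # check, so confirm leadership before each privileged operation.
+        if not self.unit.is_leader():
+            event.fail("Lost leadership while rotating credentials")
+            return
+        secret = self.model.get_secret(label=label)
+        secret.set_content({"password": generate_password()})  # hypothetical helper
+```
+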

Test leadership management

+ +> See first: [Get started with charm testing](/t/6894) + +You'll want to add three levels of tests: + +- [Write unit tests](#heading--write-unit-tests) +- [Write scenario tests](#heading--write-scenario-tests) +- [Write integration tests](#heading--write-integration-tests) + +

Write unit tests

+
+> See first: [How to write unit tests for a charm](/t/4461)
+
+When using Harness for unit tests, use the `set_leader()` method to control whether the unit is the leader. For example, to verify that a leadership change is handled correctly:
+
+```python
+@pytest.fixture()
+def harness():
+    harness = ops.testing.Harness(MyCharm)
+    yield harness
+    harness.cleanup()
+
+
+def test_new_leader(harness):
+    # Before the test, the unit is not the leader.
+    harness.set_leader(False)
+    harness.begin()
+    # Simulate Juju electing the unit as leader.
+    harness.set_leader(True)
+    # Assert that it was handled correctly.
+    assert ...
+
+
+def test_leader_sets_secrets(harness):
+    # The unit is the leader throughout the test, and no leader-elected event
+    # is emitted.
+    harness.set_leader(True)
+    harness.begin()
+    secret_id = harness.add_model_secret(APP_NAME, content={"secret": "sssh"})
+    harness.update_config({"secret_option": secret_id})
+    # Assert that the config-changed handler set additional secret metadata:
+    assert ...
+```
+
+> See more: [`ops.testing.Harness.set_leader`](https://ops.readthedocs.io/en/latest/harness.html#ops.testing.Harness.set_leader)
+

Write scenario tests

+ +> See first: [How to write scenario tests for a charm](/t/10585) + +When using Scenario for unit tests, pass the leadership status to the `State`. For example: + +```python +class MyCharm(ops.CharmBase): + def __init__(self, framework): + super().__init__(framework) + framework.observe(self.on.start, self._on_start) + + def _on_start(self, _): + if self.unit.is_leader(): + self.unit.status = ops.ActiveStatus('I rule') + else: + self.unit.status = ops.ActiveStatus('I am ruled') + + +@pytest.mark.parametrize('leader', (True, False)) +def test_status_leader(leader): + ctx = scenario.Context(MyCharm, meta={"name": "foo"}) + out = ctx.run('start', scenario.State(leader=leader)) + assert out.unit_status == ops.ActiveStatus('I rule' if leader else 'I am ruled') +``` + +

Write integration tests

+ +> See first: [How to write integration tests for a charm](/t/12734) + +Juju is in sole control over which unit is the leader, so leadership changes are +not usually tested with integration tests. If this is required, then the test +needs to remove the leader unit (machine charms) or run `juju_stop_unit` in the +charm container (Kubernetes charms). The test then needs to wait up to 60 seconds +for Juju to elect a new leader. + +More commonly, an integration test might want to verify that leader and non-leader behaviour is +as expected. For example: + +```python +async def get_leader_unit(ops_test, app, model=None): + """Utility method to get the current leader unit.""" + leader_unit = None + if model is None: + model = ops_test.model + for unit in model.applications[app].units: + if await unit.is_leader_from_status(): + leader_unit = unit + break + + return leader_unit +``` + +> Examples: [Zookeeper testing upgrades](https://github.com/canonical/zookeeper-operator/blob/106f9c2cd9408a172b0e93f741d8c9f860c4c38e/tests/integration/test_upgrade.py#L22), [postgresql testing password rotation action](https://github.com/canonical/postgresql-k8s-operator/blob/62645caa89fd499c8de9ac3e5e9598b2ed22d619/tests/integration/test_password_rotation.py#L38) + +> See more: [`juju.unit.Unit.is_leader_from_status`](https://pythonlibjuju.readthedocs.io/en/latest/api/juju.unit.html#juju.unit.Unit.is_leader_from_status) + +
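+
+Building on a helper like the one above, a test can then assert leader-specific
+behaviour. This is a sketch only: the `set-password` action and the `APP_NAME`
+constant are assumptions about the charm under test.
+
+```python
+async def test_only_leader_accepts_password_change(ops_test):
+    leader_unit = await get_leader_unit(ops_test, APP_NAME)
+    assert leader_unit is not None, "expected Juju to have elected a leader"
+
+    # Hypothetical action that the charm only allows on the leader unit.
+    action = await leader_unit.run_action("set-password", password="hunter2")
+    await action.wait()
+    assert action.status == "completed"
+```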
+ +> **Contributors:**@tmihoc, @tony-meyer + +------------------------- + diff --git a/tmp/t/14937.md b/tmp/t/14937.md new file mode 100644 index 000000000..dc5ffa633 --- /dev/null +++ b/tmp/t/14937.md @@ -0,0 +1,264 @@ +tony-meyer | 2024-09-26 07:24:20 UTC | #1 + +> See first: [Juju | How to manage storage](https://juju.is/docs/juju/manage-storage) + +**Contents:** + +- [Use storage volumes](#heading--use-storage-volumes) +- [Test storage volume use](#heading--test-storage-volume-use) + +

Use storage volumes

+ +- [Declare the storage in `charmcraft.yaml`](#heading--declare-the-storage-in-charmcraft-yaml) +- [Observe the `storage-attached` event and define an event handler](#heading--observe-the-attached-event-and-define-an-event-handler) + +

Declare the storage in `charmcraft.yaml`

+
+To define the storage that can be provided to the charm, define a `storage` section in `charmcraft.yaml` that lists the storage volumes and information about each one. For example, for a transient filesystem storage mounted to `/cache/` that is at least 1GB in size:
+
+```yaml
+storage:
+  cache:
+    type: filesystem
+    description: Somewhere to cache files locally.
+    location: /cache/
+    minimum-size: 1G
+    properties:
+      - transient
+```
+
+> See more: [File `charmcraft.yaml` > `storage`](/t/7132#heading--storage)
+
+For Kubernetes charms, you also need to define where on the workload container the volume will be mounted. For example, to mount a similar cache filesystem in `/var/cache/`:
+
+```yaml
+storage:
+  cache:
+    type: filesystem
+    description: Somewhere to cache files locally.
+    # The location is not required here, because it defines the location on
+    # the charm container, not the workload container.
+    minimum-size: 1G
+    properties:
+      - transient
+
+containers:
+  web-service:
+    resource: app-image
+    mounts:
+      - storage: cache
+        location: /var/cache
+```
+
+> See more: [File `charmcraft.yaml` > `containers`](/t/7132#heading--containers)
+

Observe the `storage-attached` event and define an event handler

+
+In the `src/charm.py` file, in the `__init__` function of your charm, set up an observer for the `storage-attached` event associated with your storage and pair that with an event handler, typically a holistic one. For example:
+
+```python
+self.framework.observe(self.on.cache_storage_attached, self._update_configuration)
+```
+
+> See more: [`ops.StorageAttachedEvent`](https://ops.readthedocs.io/en/latest/#ops.StorageAttachedEvent), [Juju SDK | Holistic vs delta charms](https://juju.is/docs/sdk/holistic-vs-delta-charms)
+
+Storage volumes will be automatically mounted into the charm container at either the path specified in the `location` field in the metadata, or the default location `/var/lib/juju/storage/`. However, your charm code should not hard-code the location, and should instead use the `.location` property of the storage object.
+
+Now, in the body of the charm definition, define the event handler, or adjust an existing holistic one. For example, to provide the location of the attached storage to the workload configuration:
+
+```python
+def _update_configuration(self, event: ops.EventBase):
+    """Update the workload configuration."""
+    # model.storages maps each storage name to a list of attached instances.
+    cache = self.model.storages["cache"]
+    if not cache or cache[0].location is None:
+        # This must be one of the other events. Return and wait for the storage-attached event.
+        logger.info("Storage is not yet ready.")
+        return
+    try:
+        self.push_configuration(cache_dir=cache[0].location)
+    except ops.pebble.ConnectionError:
+        # Pebble isn't ready yet. Return and wait for the pebble-ready event.
+        logger.info("Pebble is not yet ready.")
+        return
+```
+
+> Examples: [ZooKeeper ensuring that permission and ownership is correct](https://github.com/canonical/zookeeper-operator/blob/106f9c2cd9408a172b0e93f741d8c9f860c4c38e/src/charm.py#L247), [Kafka configuring additional storage](https://github.com/canonical/kafka-k8s-operator/blob/25cc5dd87bc2246c38fc511ac9c52f35f75f6513/src/charm.py#L298)
+

Observe the detaching event and define an event handler

+
+In the `src/charm.py` file, in the `__init__` function of your charm, set up an observer for the detaching event associated with your storage and pair that with an event handler. For example:
+
+```python
+self.framework.observe(self.on.cache_storage_detaching, self._on_storage_detaching)
+```
+
+> See more: [`ops.StorageDetachingEvent`](https://ops.readthedocs.io/en/latest/#ops.StorageDetachingEvent)
+
+Now, in the body of the charm definition, define the event handler, or adjust an existing holistic one. For example, to warn users that data won't be cached:
+
+```python
+def _on_storage_detaching(self, event: ops.StorageDetachingEvent):
+    """Handle the storage being detached."""
+    self.unit.status = ops.ActiveStatus("Caching disabled; provide storage to boost performance")
+```
+
+> Examples: [MySQL handling cluster management](https://github.com/canonical/mysql-k8s-operator/blob/4c575b478b7ae2a28b09dde9cade2d3370dd4db6/src/charm.py#L823), [MongoDB updating the set before storage is removed](https://github.com/canonical/mongodb-operator/blob/b33d036173f47c68823e08a9f03189dc534d38dc/src/charm.py#L596)
+

Request additional storage

+
+[note type=information status="Machine charms only"]
+Juju only supports adding multiple instances of the same storage volume on machine charms. Kubernetes charms may only have a single instance of each volume.
+[/note]
+
+If the charm needs additional instances of a storage, it can request them with the `storages.request`
+method. The storage must be defined in the metadata as allowing multiple instances, for
+example:
+
+```yaml
+storage:
+  scratch:
+    type: filesystem
+    location: /scratch
+    multiple:
+      range: 1-10
+```
+
+For example, if the charm needs to request two additional instances of this storage:
+
+```python
+self.model.storages.request("scratch", 2)
+```
+
+The storage will not be available immediately after that call - the charm should
+observe the `storage-attached` event and handle any remaining setup once Juju
+has attached the new storage.
+
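+For instance, a charm might scale its storage from a config option and request
+the difference. A minimal sketch, assuming a hypothetical `scratch-count` integer
+config option:
+
+```python
+def _on_config_changed(self, event: ops.ConfigChangedEvent):
+    wanted = int(self.config["scratch-count"])  # hypothetical option
+    attached = len(self.model.storages["scratch"])
+    if attached < wanted:
+        # Juju provisions the volumes asynchronously and emits a
+        # scratch-storage-attached event for each one once it is ready.
+        self.model.storages.request("scratch", wanted - attached)
+```
+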

Test storage volume use

+ +> See first: [Get started with charm testing](/t/6894) + +You'll want to add three levels of tests: + +- [Write unit tests](#heading--write-unit-tests) +- [Write scenario tests](#heading--write-scenario-tests) +- [Write integration tests](#heading--write-integration-tests) + +

Write unit tests

+
+> See first: [How to write unit tests for a charm](/t/4461)
+
+When using Harness for unit tests, use the `add_storage()` method to simulate Juju adding storage to the charm. You can either have the method also simulate attaching the storage, or do that explicitly with the `attach_storage()` method. In this example, we verify that the charm responds as expected to the storage being attached and detached:
+
+```python
+@pytest.fixture()
+def harness():
+    harness = ops.testing.Harness(MyCharm)
+    yield harness
+    harness.cleanup()
+
+
+def test_storage_attached(harness):
+    # Add one instance of the expected storage to the charm. This is before `.begin()` is called,
+    # so will not trigger any events.
+    storage_id = harness.add_storage("cache", 1)[0]
+    harness.begin()
+    # Simulate Juju attaching the storage, which will trigger a storage-attached event on the charm.
+    harness.attach_storage(storage_id)
+    # Assert that it was handled correctly.
+    assert ...
+
+
+def test_storage_detaching(harness):
+    storage_id = harness.add_storage("cache", 1, attach=True)[0]
+    harness.begin()
+    # Simulate the storage being detached (.remove_storage() would simulate it being removed
+    # entirely).
+    harness.detach_storage(storage_id)
+    # Assert that it was handled correctly.
+    assert ...
+```
+
+> See more: [`ops.testing.Harness.add_storage`](https://ops.readthedocs.io/en/latest/harness.html#ops.testing.Harness.add_storage), [`ops.testing.Harness.attach_storage`](https://ops.readthedocs.io/en/latest/harness.html#ops.testing.Harness.attach_storage), [`ops.testing.Harness.detach_storage`](https://ops.readthedocs.io/en/latest/harness.html#ops.testing.Harness.detach_storage), [`ops.testing.Harness.remove_storage`](https://ops.readthedocs.io/en/latest/harness.html#ops.testing.Harness.remove_storage)
+

Write scenario tests

+
+> See first: [How to write scenario tests for a charm](/t/10585)
+
+When using Scenario for unit tests, to verify that the charm state is as expected after storage changes, use the `run` method of the Scenario `Context` object. For example, to provide the charm with mock storage:
+
+```python
+# Some charm with a 'foo' filesystem-type storage defined in its metadata:
+ctx = scenario.Context(MyCharm)
+storage = scenario.Storage("foo")
+
+# Set up storage with some content:
+(storage.get_filesystem(ctx) / "myfile.txt").write_text("helloworld")
+
+with ctx.manager("update-status", scenario.State(storage=[storage])) as mgr:
+    foo = mgr.charm.model.storages["foo"][0]
+    loc = foo.location
+    path = loc / "myfile.txt"
+    assert path.exists()
+    assert path.read_text() == "helloworld"
+
+    myfile = loc / "path.py"
+    myfile.write_text("helloworlds")
+
+# Verify that the contents are as expected afterwards.
+assert (
+    storage.get_filesystem(ctx) / "path.py"
+).read_text() == "helloworlds"
+```
+
+If a charm requests adding more storage instances while handling some event, you
+can inspect that from the `Context.requested_storages` API.
+
+```python
+ctx = scenario.Context(MyCharm)
+ctx.run('some-event-that-will-request-more-storage', scenario.State())
+
+# The charm has requested two 'foo' storage volumes to be provisioned:
+assert ctx.requested_storages['foo'] == 2
+```
+
+Requesting storage volumes has no other consequence in Scenario. In real life,
+this request will trigger Juju to provision the storage and execute the charm
+again with a `foo-storage-attached` event. So a natural follow-up Scenario test
+for this case would be:
+
+```python
+ctx = scenario.Context(MyCharm)
+foo_0 = scenario.Storage('foo')
+# The charm is notified that one of the storage volumes it has requested is ready:
+ctx.run(foo_0.attached_event, scenario.State(storage=[foo_0]))
+
+foo_1 = scenario.Storage('foo')
+# The charm is notified that the other storage is also ready:
+ctx.run(foo_1.attached_event, scenario.State(storage=[foo_0, foo_1]))
+```
+
+> See more: [Scenario storage testing](https://github.com/canonical/ops-scenario/#storage)
+

Write integration tests

+ +> See first: [How to write integration tests for a charm](/t/12734) + +To verify that adding and removing storage works correctly against a real Juju instance, write an integration test with `pytest_operator`. For example: + +```python +# This assumes there is a previous test that handles building and deploying. +async def test_storage_attaching(ops_test): + # Add a 1GB "cache" storage: + await ops_test.model.applications[APP_NAME].units[0].add_storage("cache", size=1024*1024) + + await ops_test.model.wait_for_idle( + apps=[APP_NAME], status="active", timeout=600 + ) + + # Assert that the storage is being used appropriately. +``` + +
+
+> **Contributors:**@tmihoc, @tony-meyer
+
+-------------------------
+
diff --git a/tmp/t/14983.md b/tmp/t/14983.md
new file mode 100644
index 000000000..5599b5e6a
--- /dev/null
+++ b/tmp/t/14983.md
@@ -0,0 +1,36 @@
+ppasotti | 2024-09-04 09:48:02 UTC | #1
+
+This document provides cryptographic documentation for the COS-Lite bundle.
+Its purpose is to track the exposure of charm code to cryptographic attack vectors.
+
+What is not included in this document and regarded as out of scope:
+- Workload code (refer to the workloads' cryptographic documentation)
+- Data at rest encryption
+
+The COS-Lite charms have a very similar exposure. Unless specified otherwise in the charm's own documentation, this cryptographic documentation applies to all of them.
+
+
+# Usage of cryptographic technology
+COS-Lite charm code uses cryptographic technology mainly for two purposes:
+- enabling TLS communication between their workloads
+- securing admin login to their workloads
+
+## Cryptographic use internal to COS-Lite
+COS-Lite charms can use cryptographic technology to generate a private key to sign their TLS certificate requests. They do so via the [tls-certificates-interface](https://github.com/canonical/tls-certificates-interface), which in turn uses the [cryptography](https://pypi.org/project/cryptography/) Python library and Juju secrets to exchange data with the CA.
+
+Also, charms that deal with large configuration files use SHA-256 to efficiently detect diffs in them.
+
+## Cryptographic use in how COS-Lite communicates externally
+COS-Lite charm users use passwords generated by charm code that depends on Python's [`secrets`](https://docs.python.org/3/library/secrets.html) module. No configuration is exposed to the user.
+These passwords secure admin login to the user-facing server provided by the workload. For example, in [grafana-k8s](https://github.com/canonical/grafana-k8s-operator/blob/main/src/charm.py#L1289).
+
+Additionally, charms supporting BasicAuth, [such as traefik](/t/15407), accept a `<user>:<hashed-password>` config option by which the user can configure basic authentication. The supported hashing algorithms are MD5, SHA1, and BCrypt, [as per the official documentation](https://doc.traefik.io/traefik/middlewares/http/basicauth/#general). Also following the official guidelines, we recommend that cloud admins use `htpasswd` to hash the password and format the configuration string.
+
+# List of packages and cryptographic tech used
+- to generate private keys for setting up TLS communication: the `rsa.generate_private_key` function from the [`rsa` package](https://stuvel.eu/software/rsa/). The charms use the following parameters (hardcoded, not user-configurable):
+  - `key_size = 2048`
+  - `public_exponent = 65537`
+- to generate admin passwords for user admin login: the [`secrets`](https://docs.python.org/3/library/secrets.html) module from the Python standard library. See for example: [usage in grafana](https://github.com/canonical/grafana-k8s-operator/blob/main/src/charm.py#L1289).
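+
+For illustration, password generation with the standard-library `secrets` module
+follows this general shape (a simplified sketch, not the exact grafana-k8s code):
+
+```python
+import secrets
+import string
+
+
+def generate_password(length: int = 24) -> str:
+    """Generate a random admin password using a cryptographically secure RNG."""
+    alphabet = string.ascii_letters + string.digits
+    return "".join(secrets.choice(alphabet) for _ in range(length))
+```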
+
+-------------------------
+
diff --git a/tmp/t/15010.md b/tmp/t/15010.md
new file mode 100644
index 000000000..b63feab40
--- /dev/null
+++ b/tmp/t/15010.md
@@ -0,0 +1,10 @@
+tmihoc | 2024-08-06 14:00:37 UTC | #1
+
+In the context of building a charm, in Rockcraft and Charmcraft, a **profile** is a name you can pass as an argument during rock / charm initialisation that will create all the usual rock/charm project files in a form customised for a specific purpose (e.g., a Kubernetes charm, or a Kubernetes charm for a Flask application), in order to speed up development.
+
+The customisation often takes the shape of a specific [extension](/t/15011) in the charm's `charmcraft.yaml` file.
+
+> See more: [How to set up a charm project](/t/5547)
+
+-------------------------
+
diff --git a/tmp/t/15011.md b/tmp/t/15011.md
new file mode 100644
index 000000000..09b1dcb2d
--- /dev/null
+++ b/tmp/t/15011.md
@@ -0,0 +1,10 @@
+tmihoc | 2024-08-06 13:54:25 UTC | #1
+
+> See also: [How to manage extensions](/t/13788)
+
+In the context of building a charm, in Rockcraft and Charmcraft, an **extension** is a name you can pass to the `extensions` key of a rock's `rockcraft.yaml` file or a charm's `charmcraft.yaml` file that will include the usual keys, customised for a particular purpose.
+
+An extension is usually associated with a particular [profile](/t/15010).
+
+-------------------------
+
diff --git a/tmp/t/15012.md b/tmp/t/15012.md
new file mode 100644
index 000000000..566710ca3
--- /dev/null
+++ b/tmp/t/15012.md
@@ -0,0 +1,233 @@
+tmihoc | 2024-10-17 13:22:16 UTC | #1
+
+The `flask-framework` Charmcraft [extension](/t/15011) includes configuration options customised for a Flask application. This document describes all the keys that a user may interact with.
+
+[note type=positive]
+**If you'd like to see the full contents contributed by this extension:**
See [How to manage extensions](/t/13788). +[/note] + +## `charmcraft.yaml` > `config` > `options` + +You can use the predefined options (run `charmcraft expand-extensions` for details) but also add your own, as needed. + +In the latter case, any option you define will be used to generate environment variables; a user-defined option `config-option-name` will generate an environment variable named `FLASK_CONFIG_OPTION_NAME` where the option name is converted to upper case and dashes are converted to underscores. + +In either case, you will be able to set it in the usual way by running `juju config