hydro_deploy/azure.rs
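//! Azure backend for Hydro Deploy. [`AzureHost`] generates the Terraform
//! resources needed to provision an Azure virtual machine, and
//! [`LaunchedVirtualMachine`] represents the machine once it has been
//! provisioned and is reachable over SSH.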

use std::any::Any;
use std::collections::HashMap;
use std::fmt::Debug;
use std::sync::{Arc, Mutex, OnceLock};

use anyhow::Result;
use nanoid::nanoid;
use serde_json::json;

use super::terraform::{TERRAFORM_ALPHABET, TerraformOutput, TerraformProvider};
use super::{ClientStrategy, Host, HostTargetType, LaunchedHost, ResourceBatch, ResourceResult};
use crate::ssh::LaunchedSshHost;
use crate::{BaseServerStrategy, HostStrategyGetter, PortNetworkHint};

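/// An Azure virtual machine that has been provisioned via Terraform, along with
/// the IP addresses and SSH user needed to connect to it.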
pub struct LaunchedVirtualMachine {
    resource_result: Arc<ResourceResult>,
    user: String,
    pub internal_ip: String,
    pub external_ip: Option<String>,
}

impl LaunchedSshHost for LaunchedVirtualMachine {
    fn get_external_ip(&self) -> Option<String> {
        self.external_ip.clone()
    }

    fn get_internal_ip(&self) -> String {
        self.internal_ip.clone()
    }

    fn get_cloud_provider(&self) -> String {
        "Azure".to_string()
    }

    fn resource_result(&self) -> &Arc<ResourceResult> {
        &self.resource_result
    }

    fn ssh_user(&self) -> &str {
        self.user.as_str()
    }
}

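/// A deployment [`Host`] backed by an Azure virtual machine, provisioned through
/// Terraform (resource group, virtual network, subnet, NIC, public IP, and
/// network security group) and exposed as a [`LaunchedVirtualMachine`] once launched.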
pub struct AzureHost {
    /// ID from [`crate::Deployment::add_host`].
    id: usize,

    project: String,
    os_type: String, // linux or windows
    machine_size: String,
    image: Option<HashMap<String, String>>,
    target_type: HostTargetType,
    region: String,
    user: Option<String>,
    pub launched: OnceLock<Arc<LaunchedVirtualMachine>>, // TODO(mingwei): fix pub
    external_ports: Mutex<Vec<u16>>,
}

impl AzureHost {
    #[expect(clippy::too_many_arguments, reason = "used via builder pattern")]
    pub fn new(
        id: usize,
        project: String,
        os_type: String, // linux or windows
        machine_size: String,
        image: Option<HashMap<String, String>>,
        target_type: HostTargetType,
        region: String,
        user: Option<String>,
    ) -> Self {
        Self {
            id,
            project,
            os_type,
            machine_size,
            image,
            target_type,
            region,
            user,
            launched: OnceLock::new(),
            external_ports: Mutex::new(Vec::new()),
        }
    }
}

impl Debug for AzureHost {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.write_fmt(format_args!("AzureHost({})", self.id))
    }
}

impl Host for AzureHost {
    fn target_type(&self) -> HostTargetType {
        self.target_type
    }

    fn request_port_base(&self, bind_type: &BaseServerStrategy) {
        match bind_type {
            BaseServerStrategy::UnixSocket => {}
            BaseServerStrategy::InternalTcpPort(_) => {}
            BaseServerStrategy::ExternalTcpPort(port) => {
                let mut external_ports = self.external_ports.lock().unwrap();
                if !external_ports.contains(port) {
                    if self.launched.get().is_some() {
                        todo!("Cannot adjust firewall after host has been launched");
                    }
                    external_ports.push(*port);
                }
            }
        }
    }

    fn request_custom_binary(&self) {
        self.request_port_base(&BaseServerStrategy::ExternalTcpPort(22));
    }

    fn id(&self) -> usize {
        self.id
    }

    fn collect_resources(&self, resource_batch: &mut ResourceBatch) {
        if self.launched.get().is_some() {
            return;
        }

        let project = self.project.as_str();

        // first, we import the providers we need
        resource_batch
            .terraform
            .terraform
            .required_providers
            .insert(
                "azurerm".to_string(),
                TerraformProvider {
                    source: "hashicorp/azurerm".to_string(),
                    version: "3.67.0".to_string(),
                },
            );

        resource_batch
            .terraform
            .terraform
            .required_providers
            .insert(
                "local".to_string(),
                TerraformProvider {
                    source: "hashicorp/local".to_string(),
                    version: "2.3.0".to_string(),
                },
            );

        resource_batch
            .terraform
            .terraform
            .required_providers
            .insert(
                "tls".to_string(),
                TerraformProvider {
                    source: "hashicorp/tls".to_string(),
                    version: "4.0.4".to_string(),
                },
            );

        // we use a single SSH key for all VMs
        resource_batch
            .terraform
            .resource
            .entry("tls_private_key".to_string())
            .or_default()
            .insert(
                "vm_instance_ssh_key".to_string(),
                json!({
                    "algorithm": "RSA",
                    "rsa_bits": 4096
                }),
            );

        resource_batch
            .terraform
            .resource
            .entry("local_file".to_string())
            .or_default()
            .insert(
                "vm_instance_ssh_key_pem".to_string(),
                json!({
                    "content": "${tls_private_key.vm_instance_ssh_key.private_key_pem}",
                    "filename": ".ssh/vm_instance_ssh_key_pem",
                    "file_permission": "0600"
                }),
            );

        let vm_key = format!("vm-instance-{}", self.id);
        let vm_name = format!("hydro-vm-instance-{}", nanoid!(8, &TERRAFORM_ALPHABET));

        // Handle provider configuration
        resource_batch.terraform.provider.insert(
            "azurerm".to_string(),
            json!({
                "skip_provider_registration": "true",
                "features": {},
            }),
        );

        // Handle resources
        resource_batch
            .terraform
            .resource
            .entry("azurerm_resource_group".to_string())
            .or_default()
            .insert(
                vm_key.to_string(),
                json!({
                    "name": project,
                    "location": self.region.clone(),
                }),
            );

        resource_batch
            .terraform
            .resource
            .entry("azurerm_virtual_network".to_string())
            .or_default()
            .insert(
                vm_key.to_string(),
                json!({
                    "name": format!("{vm_key}-network"),
                    "address_space": ["10.0.0.0/16"],
                    "location": self.region.clone(),
                    "resource_group_name": format!("${{azurerm_resource_group.{vm_key}.name}}")
                }),
            );

        resource_batch
            .terraform
            .resource
            .entry("azurerm_subnet".to_string())
            .or_default()
            .insert(
                vm_key.to_string(),
                json!({
                    "name": "internal",
                    "resource_group_name": format!("${{azurerm_resource_group.{vm_key}.name}}"),
                    "virtual_network_name": format!("${{azurerm_virtual_network.{vm_key}.name}}"),
                    "address_prefixes": ["10.0.2.0/24"]
                }),
            );

        resource_batch
            .terraform
            .resource
            .entry("azurerm_public_ip".to_string())
            .or_default()
            .insert(
                vm_key.to_string(),
                json!({
                    "name": "hydropubip",
                    "resource_group_name": format!("${{azurerm_resource_group.{vm_key}.name}}"),
                    "location": format!("${{azurerm_resource_group.{vm_key}.location}}"),
                    "allocation_method": "Static",
                }),
            );

        resource_batch
            .terraform
            .resource
            .entry("azurerm_network_interface".to_string())
            .or_default()
            .insert(
                vm_key.to_string(),
                json!({
                    "name": format!("{vm_key}-nic"),
                    "location": format!("${{azurerm_resource_group.{vm_key}.location}}"),
                    "resource_group_name": format!("${{azurerm_resource_group.{vm_key}.name}}"),
                    "ip_configuration": {
                        "name": "internal",
                        "subnet_id": format!("${{azurerm_subnet.{vm_key}.id}}"),
                        "private_ip_address_allocation": "Dynamic",
                        "public_ip_address_id": format!("${{azurerm_public_ip.{vm_key}.id}}"),
                    }
                }),
            );

        // Define network security rules - for now, accept all connections
        resource_batch
            .terraform
            .resource
            .entry("azurerm_network_security_group".to_string())
            .or_default()
            .insert(
                vm_key.to_string(),
                json!({
                    "name": "primary_security_group",
                    "location": format!("${{azurerm_resource_group.{vm_key}.location}}"),
                    "resource_group_name": format!("${{azurerm_resource_group.{vm_key}.name}}"),
                }),
            );

        resource_batch
            .terraform
            .resource
            .entry("azurerm_network_security_rule".to_string())
            .or_default()
            .insert(
                vm_key.to_string(),
                json!({
                    "name": "allowall",
                    "priority": 100,
                    "direction": "Inbound",
                    "access": "Allow",
                    "protocol": "Tcp",
                    "source_port_range": "*",
                    "destination_port_range": "*",
                    "source_address_prefix": "*",
                    "destination_address_prefix": "*",
                    "resource_group_name": format!("${{azurerm_resource_group.{vm_key}.name}}"),
                    "network_security_group_name": format!("${{azurerm_network_security_group.{vm_key}.name}}"),
                })
            );

        resource_batch
            .terraform
            .resource
            .entry("azurerm_subnet_network_security_group_association".to_string())
            .or_default()
            .insert(
                vm_key.to_string(),
                json!({
                    "subnet_id": format!("${{azurerm_subnet.{vm_key}.id}}"),
                    "network_security_group_id": format!("${{azurerm_network_security_group.{vm_key}.id}}"),
                })
            );

        let user = self.user.as_ref().cloned().unwrap_or("hydro".to_string());
        let os_type = format!("azurerm_{}_virtual_machine", self.os_type.clone());
        // Default to Canonical's Ubuntu 22.04 LTS ("jammy") image if none is specified.
        let image = self.image.as_ref().cloned().unwrap_or(HashMap::from([
            ("publisher".to_string(), "Canonical".to_string()),
            (
                "offer".to_string(),
                "0001-com-ubuntu-server-jammy".to_string(),
            ),
            ("sku".to_string(), "22_04-lts".to_string()),
            ("version".to_string(), "latest".to_string()),
        ]));

        resource_batch
            .terraform
            .resource
            .entry(os_type.clone())
            .or_default()
            .insert(
                vm_key.clone(),
                json!({
                    "name": vm_name,
                    "resource_group_name": format!("${{azurerm_resource_group.{vm_key}.name}}"),
                    "location": format!("${{azurerm_resource_group.{vm_key}.location}}"),
                    "size": self.machine_size.clone(),
                    "network_interface_ids": [format!("${{azurerm_network_interface.{vm_key}.id}}")],
                    "admin_ssh_key": {
                        "username": user,
                        "public_key": "${tls_private_key.vm_instance_ssh_key.public_key_openssh}",
                    },
                    "admin_username": user,
                    "os_disk": {
                        "caching": "ReadWrite",
                        "storage_account_type": "Standard_LRS",
                    },
                    "source_image_reference": image,
                }),
            );

        // Export the VM's IP addresses as Terraform outputs so `provision` can read them back.
        resource_batch.terraform.output.insert(
            format!("{vm_key}-public-ip"),
            TerraformOutput {
                value: format!("${{azurerm_public_ip.{vm_key}.ip_address}}"),
            },
        );

        resource_batch.terraform.output.insert(
            format!("{vm_key}-internal-ip"),
            TerraformOutput {
                value: format!("${{azurerm_network_interface.{vm_key}.private_ip_address}}"),
            },
        );
    }

    fn launched(&self) -> Option<Arc<dyn LaunchedHost>> {
        self.launched
            .get()
            .map(|a| a.clone() as Arc<dyn LaunchedHost>)
    }

    fn provision(&self, resource_result: &Arc<ResourceResult>) -> Arc<dyn LaunchedHost> {
        self.launched
            .get_or_init(|| {
                let id = self.id;

                // Read back the IP addresses exported as Terraform outputs in
                // `collect_resources`.
                let internal_ip = resource_result
                    .terraform
                    .outputs
                    .get(&format!("vm-instance-{id}-internal-ip"))
                    .unwrap()
                    .value
                    .clone();

                let external_ip = resource_result
                    .terraform
                    .outputs
                    .get(&format!("vm-instance-{id}-public-ip"))
                    .map(|v| v.value.clone());

                Arc::new(LaunchedVirtualMachine {
                    resource_result: resource_result.clone(),
                    user: self.user.as_ref().cloned().unwrap_or("hydro".to_string()),
                    internal_ip,
                    external_ip,
                })
            })
            .clone()
    }

    fn strategy_as_server<'a>(
        &'a self,
        client_host: &dyn Host,
        network_hint: PortNetworkHint,
    ) -> Result<(ClientStrategy<'a>, HostStrategyGetter)> {
        if matches!(network_hint, PortNetworkHint::Auto)
            && client_host.can_connect_to(ClientStrategy::UnixSocket(self.id))
        {
            // Client runs on this same host: use a Unix socket.
            Ok((
                ClientStrategy::UnixSocket(self.id),
                Box::new(|_| BaseServerStrategy::UnixSocket),
            ))
        } else if matches!(
            network_hint,
            PortNetworkHint::Auto | PortNetworkHint::TcpPort(_)
        ) && client_host.can_connect_to(ClientStrategy::InternalTcpPort(self))
        {
            // Client can reach this host's internal IP directly.
            Ok((
                ClientStrategy::InternalTcpPort(self),
                Box::new(move |_| {
                    BaseServerStrategy::InternalTcpPort(match network_hint {
                        PortNetworkHint::Auto => None,
                        PortNetworkHint::TcpPort(port) => port,
                    })
                }),
            ))
        } else if matches!(network_hint, PortNetworkHint::Auto)
            && client_host.can_connect_to(ClientStrategy::ForwardedTcpPort(self))
        {
            // Fall back to forwarding an internal port over SSH.
            Ok((
                ClientStrategy::ForwardedTcpPort(self),
                Box::new(|me| {
                    me.downcast_ref::<AzureHost>()
                        .unwrap()
                        .request_port_base(&BaseServerStrategy::ExternalTcpPort(22)); // needed to forward
                    BaseServerStrategy::InternalTcpPort(None)
                }),
            ))
        } else {
            anyhow::bail!("Could not find a strategy to connect to Azure instance")
        }
    }

    fn can_connect_to(&self, typ: ClientStrategy) -> bool {
        match typ {
            ClientStrategy::UnixSocket(id) => {
                #[cfg(unix)]
                {
                    self.id == id
                }

                #[cfg(not(unix))]
                {
                    let _ = id;
                    false
                }
            }
            ClientStrategy::InternalTcpPort(target_host) => {
                // Internal TCP connectivity is only assumed between hosts in the
                // same Azure project.
                if let Some(provider_target) = <dyn Any>::downcast_ref::<AzureHost>(target_host) {
                    self.project == provider_target.project
                } else {
                    false
                }
            }
            ClientStrategy::ForwardedTcpPort(_) => false,
        }
    }
}