Deploy Proxmox Physical Host
Warning
Reserve data and iscsi IP addresses in Netbox in order to avoid any IP address conflicts
Install Proxmox
- Login to the vmc console of the physical host
- Keep with the naming conventions
- Example: jnb1srvdscocsdellpeprx90.cloudlet.cloud
- Edit the internal DNS to 10.10.2.199
- Edit the default gateway to be 10.10.2.254
- Set a strong password that can be remembered
Setup network interfaces
1. Setup basic network interfaces config
Edit the file /etc/network/interfaces and change the content to
auto lo
iface lo inet loopback
auto networkcard1
iface networkcard1 inet manual
mtu 9000
auto networkcard2
iface networkcard2 inet manual
mtu 9000
auto bond0
iface bond0 inet manual
bond-slaves networkcard1 networkcard2
bond-miimon 100
bond-mode 802.3ad
mtu 9000
auto bond0.1
iface bond0.1 inet manual
auto vmbr5
iface vmbr5 inet static
address yourip
gateway 10.10.2.254
bridge-ports bond0.1
bridge-stp off
bridge-fd 0
bridge-vlan-aware yes
bridge-vids 1
#VLAN1 - Internal Control
2. Access server using ssh
3. Setup full network interface config
Edit the file /etc/network/interfaces and change the content to the following, changing the network card names to those of the corresponding host, as well as the IP addresses of vmbr5, vmbr8, and vmbr9 to the host IP address.
Warning
Copy this network file from another host, not the provided example, to ensure all of the required bonds and bridges are the same, whilst making note of the network card naming, and change the data and iSCSI IP addresses to the new host IP address to avoid conflicts with the production system
Shortened example of the network card names and data/iscsi ip addresses
auto lo
iface lo inet loopback
auto eno1np0
iface eno1np0 inet manual
mtu 9000
auto eno2np1
iface eno2np1 inet manual
mtu 9000
auto eno3np2
iface eno3np2 inet manual
mtu 9000
auto eno4np3
iface eno4np3 inet manual
mtu 9000
auto bond0
iface bond0 inet manual
bond-slaves eno1np0 eno2np1
bond-miimon 100
bond-mode 802.3ad
mtu 9000
auto bond0.1
iface bond0.1 inet manual
auto bond0.10
iface bond0.10 inet manual
auto vmbr0
iface vmbr0 inet static
bridge-ports bond0.10
bridge-stp off
bridge-fd 0
bridge-vlan-aware yes
bridge-vids 10
mtu 1500
#VLAN 10 - Guest (GenPop)
auto vmbr1
iface vmbr1 inet manual
bridge-ports bond0.101
bridge-stp off
bridge-fd 0
bridge-vlan-aware yes
bridge-vids 101
mtu 1500
#VLAN 101 - Public (Echo)
auto vmbr4
iface vmbr4 inet manual
bridge-ports bond0.100
bridge-stp off
bridge-fd 0
bridge-vlan-aware yes
bridge-vids 100
mtu 1500
#VLAN 100 - Public - (Touchnet)
auto vmbr5
iface vmbr5 inet static
address 10.10.2.223/24
gateway 10.10.2.254
bridge-ports bond0.1
bridge-stp off
bridge-fd 0
bridge-vlan-aware yes
bridge-vids 1
#VLAN 1 - Internal Control
auto vmbr8
iface vmbr8 inet manual
address 10.13.1.223/24
bridge-ports eno3np2.13
bridge-stp off
bridge-fd 0
bridge-vlan-aware yes
bridge-vids 13
mtu 9000
#VLAN 13 - iSCSI A
auto vmbr9
iface vmbr9 inet manual
address 10.14.1.223/24
bridge-ports eno4np3.14
bridge-stp off
bridge-fd 0
bridge-vlan-aware yes
bridge-vids 14
mtu 9000
#VLAN 14 - iSCSI B
source /etc/network/interfaces.d/*
4. Restart the networking
Setup Proxmox caching server
Edit /etc/hosts and add the following below the top list of ip addresses
Setting up Multipath
1. Install multipath
2. Setting up multipath config
Edit/create /etc/multipath.conf and add
# This is a basic configuration file with some examples, for device mapper
# multipath.
#
# For a complete list of the default configuration values, run either
# multipath -t
# or
# multipathd show config
#
# For a list of configuration options with descriptions, see the multipath.conf
# man page
## By default, devices with vendor = "IBM" and product = "S/390.*" are
## blacklisted. To enable mulitpathing on these devies, uncomment the
## following lines.
#blacklist_exceptions {
# device {
# vendor "IBM"
# product "S/390.*"
# }
#}
## Use user friendly names, instead of using WWIDs as names.
defaults {
user_friendly_names yes
find_multipaths yes
}
##
## Here is an example of how to configure some standard options.
##
#
#defaults {
# polling_interval 10
# path_selector "round-robin 0"
# path_grouping_policy multibus
# uid_attribute ID_SERIAL
# prio alua
# path_checker readsector0
# rr_min_io 100
# max_fds 8192
# rr_weight priorities
# failback immediate
# no_path_retry fail
# user_friendly_names yes
#}
##
## The wwid line in the following blacklist section is shown as an example
## of how to blacklist devices by wwid. The 2 devnode lines are the
## compiled in default blacklist. If you want to blacklist entire types
## of devices, such as all scsi devices, you should use a devnode line.
## However, if you want to blacklist specific devices, you should use
## a wwid line. Since there is no guarantee that a specific device will
## not change names on reboot (from /dev/sda to /dev/sdb for example)
## devnode lines are not recommended for blacklisting specific devices.
##
#blacklist {
# wwid 26353900f02796769
# devnode "^(ram|raw|loop|fd|md|dm-|sr|scd|st)[0-9]*"
# devnode "^hd[a-z]"
#}
#multipaths {
# multipath {
# wwid 3600508b4000156d700012000000b0000
# alias yellow
# path_grouping_policy multibus
# path_selector "round-robin 0"
# failback manual
# rr_weight priorities
# no_path_retry 5
# }
# multipath {
# wwid 1DEC_____321816758474
# alias red
# }
#}
#devices {
# device {
# vendor "COMPAQ "
# product "HSV110 (C)COMPAQ"
# path_grouping_policy multibus
# path_checker readsector0
# path_selector "round-robin 0"
# hardware_handler "0"
# failback 15
# rr_weight priorities
# no_path_retry queue
# }
# device {
# vendor "COMPAQ "
# product "MSA1000 "
# path_grouping_policy multibus
# }
#}
devices {
device {
vendor "PURE"
product "FlashArray"
path_grouping_policy "multibus"
path_selector "queue-length 0"
path_checker "tur"
features "0"
hardware_handler "0"
prio "const"
failback immediate
fast_io_fail_tmo 10
dev_loss_tmo 60
user_friendly_names yes
}
}
3. Check multipath
Restart multipath
Connecting to Pure
1. Add host to pure
- Create a host
- Retrieve iscsi initiator name with
- Add initiator name into host ports
- Configure IQNs
- Add host to host group
2. Connect iscsi to pure
Change the settings in /etc/iscsi/iscsid.conf startup settings to look like the below (Comment manual and uncomment automatic)
#*****************
# Startup settings
#*****************
# To request that the iscsi service scripts startup a session, use "automatic":
node.startup = automatic
#
# To manually startup the session, use "manual". The default is manual.
#node.startup = manual
Depending on the Pure that you are trying to connect to, run the appropriate command listed below:
-
Pure01
# Connect this host to the Pure01 array: register every iSCSI portal IP for
# the target IQN, set the session to start automatically on boot, and enable
# discoveryd polling (every 30s) so new portals are picked up.
ip_addresses=("10.15.1.108" "10.15.1.109" "10.15.1.118" "10.15.1.119" "10.16.1.108" "10.16.1.109" "10.16.1.118" "10.16.1.119")
target_iqn="iqn.2010-06.com.purestorage:flasharray.6b8e2cb1e2a96bb2"
for ip in "${ip_addresses[@]}"; do
  echo "Connecting to target iSCSI IP Address: $ip"
  # Create the node record for this portal.
  iscsiadm --mode node --targetname "$target_iqn" -p "$ip" -o new
  # --op update is required for -n/-v to modify the node DB settings.
  iscsiadm --mode node --targetname "$target_iqn" -p "$ip" --op update -n node.startup -v automatic
  iscsiadm --mode node --targetname "$target_iqn" -p "$ip" --op update -n discovery.sendtargets.use_discoveryd -v Yes
  iscsiadm --mode node --targetname "$target_iqn" -p "$ip" --op update -n discovery.sendtargets.use_discoveryd_poll_inval -v 30
done
Pure02
# Connect this host to the Pure02 array: register every iSCSI portal IP for
# the target IQN, set the session to start automatically on boot, and enable
# discoveryd polling (every 30s).
# NOTE: the original had comma-separated array elements; in bash the commas
# become part of each element, producing malformed portal addresses.
ip_addresses=("10.15.1.104" "10.15.1.114" "10.16.1.105" "10.16.1.115")
target_iqn="iqn.2010-06.com.purestorage:flasharray.3b51fa4b419dd6cc"
for ip in "${ip_addresses[@]}"; do
  echo "Connecting to target iSCSI IP Address: $ip"
  # Create the node record for this portal.
  iscsiadm --mode node --targetname "$target_iqn" -p "$ip" -o new
  # --op update is required for -n/-v to modify the node DB settings.
  iscsiadm --mode node --targetname "$target_iqn" -p "$ip" --op update -n node.startup -v automatic
  iscsiadm --mode node --targetname "$target_iqn" -p "$ip" --op update -n discovery.sendtargets.use_discoveryd -v Yes
  iscsiadm --mode node --targetname "$target_iqn" -p "$ip" --op update -n discovery.sendtargets.use_discoveryd_poll_inval -v 30
done
Pure04
# Connect this host to the Pure04 array: register every iSCSI portal IP for
# the target IQN, set the session to start automatically on boot, and enable
# discoveryd polling (every 30s).
# NOTE: the original had comma-separated array elements; in bash the commas
# become part of each element, producing malformed portal addresses.
ip_addresses=("10.13.1.108" "10.13.1.109" "10.13.1.118" "10.13.1.119" "10.14.1.108" "10.14.1.109" "10.14.1.118" "10.14.1.119")
target_iqn="iqn.2010-06.com.purestorage:flasharray.4c3e35a66f7dd789"
for ip in "${ip_addresses[@]}"; do
  echo "Connecting to target iSCSI IP Address: $ip"
  # Create the node record for this portal.
  iscsiadm --mode node --targetname "$target_iqn" -p "$ip" -o new
  # --op update is required for -n/-v to modify the node DB settings.
  iscsiadm --mode node --targetname "$target_iqn" -p "$ip" --op update -n node.startup -v automatic
  iscsiadm --mode node --targetname "$target_iqn" -p "$ip" --op update -n discovery.sendtargets.use_discoveryd -v Yes
  iscsiadm --mode node --targetname "$target_iqn" -p "$ip" --op update -n discovery.sendtargets.use_discoveryd_poll_inval -v 30
done
Log into pure from the host
Example of what the output should look like
Logging in to [iface: default, target: iqn.2010-06.com.purestorage:flasharray.4c3e35a66f7dd789, portal: 10.14.1.109,3260]
Logging in to [iface: default, target: iqn.2010-06.com.purestorage:flasharray.4c3e35a66f7dd789, portal: 10.13.1.108,3260]
Logging in to [iface: default, target: iqn.2010-06.com.purestorage:flasharray.4c3e35a66f7dd789, portal: 10.13.1.119,3260]
Logging in to [iface: default, target: iqn.2010-06.com.purestorage:flasharray.4c3e35a66f7dd789, portal: 10.14.1.118,3260]
Logging in to [iface: default, target: iqn.2010-06.com.purestorage:flasharray.4c3e35a66f7dd789, portal: 10.13.1.118,3260]
Logging in to [iface: default, target: iqn.2010-06.com.purestorage:flasharray.4c3e35a66f7dd789, portal: 10.14.1.119,3260]
Logging in to [iface: default, target: iqn.2010-06.com.purestorage:flasharray.4c3e35a66f7dd789, portal: 10.13.1.109,3260]
Logging in to [iface: default, target: iqn.2010-06.com.purestorage:flasharray.4c3e35a66f7dd789, portal: 10.14.1.108,3260]
Login to [iface: default, target: iqn.2010-06.com.purestorage:flasharray.4c3e35a66f7dd789, portal: 10.14.1.109,3260] successful.
Login to [iface: default, target: iqn.2010-06.com.purestorage:flasharray.4c3e35a66f7dd789, portal: 10.13.1.108,3260] successful.
Login to [iface: default, target: iqn.2010-06.com.purestorage:flasharray.4c3e35a66f7dd789, portal: 10.13.1.119,3260] successful.
Login to [iface: default, target: iqn.2010-06.com.purestorage:flasharray.4c3e35a66f7dd789, portal: 10.14.1.118,3260] successful.
Login to [iface: default, target: iqn.2010-06.com.purestorage:flasharray.4c3e35a66f7dd789, portal: 10.13.1.118,3260] successful.
Login to [iface: default, target: iqn.2010-06.com.purestorage:flasharray.4c3e35a66f7dd789, portal: 10.14.1.119,3260] successful.
Login to [iface: default, target: iqn.2010-06.com.purestorage:flasharray.4c3e35a66f7dd789, portal: 10.13.1.109,3260] successful.
Login to [iface: default, target: iqn.2010-06.com.purestorage:flasharray.4c3e35a66f7dd789, portal: 10.14.1.108,3260] successful.
Check if multipath is running correctly
Example of expected output
mpatha (3624a93704ac249c7bd574702001e4952) dm-72 PURE,FlashArray
size=50T features='0' hwhandler='0' wp=rw
`-+- policy='queue-length 0' prio=0 status=active
|- 15:0:0:254 sdb 8:16 active undef running
|- 16:0:0:254 sdc 8:32 active undef running
|- 17:0:0:254 sdd 8:48 active undef running
|- 18:0:0:254 sde 8:64 active undef running
|- 19:0:0:254 sdf 8:80 active undef running
|- 20:0:0:254 sdg 8:96 active undef running
|- 21:0:0:254 sdh 8:112 active undef running
`- 22:0:0:254 sdi 8:128 active undef running
Change Password
Using ssh update the password using
Change the password and save it to Bitwarden with the naming convention: "Proxmox Host - jnb1srvdscocsdellpeprx90". The Website field should be set to https://ipaddress:8006
Add the password to the DataSciences backend collection
Post Proxmox Installation
Run the following command on the new server:
bash -c "$(curl -fsSL https://raw.githubusercontent.com/community-scripts/ProxmoxVE/main/tools/pve/post-pve-install.sh)"
The following options will be asked (Headings Listed Below): - Sources: yes - PVE-Enterprise: yes - PVE-no-subscription: yes - ceph-package-repos: yes - pvetest (Add PVE test to repo): no - Subscription-nag: yes - HA-Disable: no - Update: yes
Add box to Proxmox
Log into Proxmox cluster - Click on Cluster -> Join Info -> Copy Information
Log into Host Proxmox https://ipaddress:8006
- Click on Cluster -> Join Cluster
- Paste Information
- Add all other required information
- Click join button to join cluster
- Check other nodes firewall groups and add that to the new server
- Add the server to the corresponding HA group
Adding IP and ssh to management server
- Log into management server
- Add the ip address to the file servername.conf in the directory .ocs/config/
- Copy root ssh key into the new host
Add Physical and Virtual configuration to Netbox
1. Add Physical configuration
Add the new device to Netbox - Add device - Input Device name - Select Device role - Select Device type - Add all physical server details - Add it to the correct cluster
Add IP - Add a new IP - Import new IPs and paste the following with the corresponding server IP
address,status,tenant,device,interface,is_primary,is_oob,dns_name
10.10.2.90/24,active,Global,jnb1srvdscocsdellpeprx90,vmbr5,true,false,
10.13.1.90/24,active,Global,jnb1srvdscocsdellpeprx90,vmbr8,false,false,
10.14.1.90/24,active,Global,jnb1srvdscocsdellpeprx90,vmbr9,false,false,
10.10.1.90/24,active,Global,jnb1srvdscocsdellpeprx90,jnb1srvdscocsdellpeprx90-BMC,false,true,
Add Interfaces - Add a new interface - Import new interfaces and paste the following with the corresponding network card names
name,device,enabled,type,mgmt_only,mtu,speed,mode,mark_connected,parent,bridge,lag
vmbr0,jnb1srvdscocsdellpeprx90,true,bridge,false,1500,,tagged,false,,,
vmbr1,jnb1srvdscocsdellpeprx90,true,bridge,false,1500,,tagged,false,,,
vmbr4,jnb1srvdscocsdellpeprx90,true,bridge,false,1500,,tagged,false,,,
vmbr5,jnb1srvdscocsdellpeprx90,true,bridge,false,1500,,tagged,false,,,
vmbr6,jnb1srvdscocsdellpeprx90,true,bridge,false,1500,,tagged,false,,,
vmbr7,jnb1srvdscocsdellpeprx90,true,bridge,false,1500,,tagged,false,,,
vmbr8,jnb1srvdscocsdellpeprx90,true,bridge,false,1500,,tagged,false,,,
vmbr9,jnb1srvdscocsdellpeprx90,true,bridge,false,1500,,tagged,false,,,
vmbr10,jnb1srvdscocsdellpeprx90,true,bridge,false,1500,,tagged,false,,,
vmbr303,jnb1srvdscocsdellpeprx90,true,bridge,false,9000,,tagged,false,,,
vmbr304,jnb1srvdscocsdellpeprx90,true,bridge,false,9000,,tagged,false,,,
vmbr305,jnb1srvdscocsdellpeprx90,true,bridge,false,9000,,tagged,false,,,
vmbr306,jnb1srvdscocsdellpeprx90,true,bridge,false,9000,,tagged,false,,,
vmbr309,jnb1srvdscocsdellpeprx90,true,bridge,false,9000,,tagged,false,,,
vmbr310,jnb1srvdscocsdellpeprx90,true,bridge,false,9000,,tagged,false,,,
vmbr311,jnb1srvdscocsdellpeprx90,true,bridge,false,9000,,tagged,false,,,
vmbr312,jnb1srvdscocsdellpeprx90,true,bridge,false,9000,,tagged,false,,,
vmbr601,jnb1srvdscocsdellpeprx90,true,bridge,false,1500,,tagged,false,,,
vmbr602,jnb1srvdscocsdellpeprx90,true,bridge,false,1500,,tagged,false,,,
vmbr609,jnb1srvdscocsdellpeprx90,true,bridge,false,1500,,tagged,false,,,
vmbr610,jnb1srvdscocsdellpeprx90,true,bridge,false,1500,,tagged,false,,,
vmbr611,jnb1srvdscocsdellpeprx90,true,bridge,false,1500,,tagged,false,,,
vmbr612,jnb1srvdscocsdellpeprx90,true,bridge,false,1500,,tagged,false,,,
vmbr613,jnb1srvdscocsdellpeprx90,true,bridge,false,1500,,tagged,false,,,
vmbr614,jnb1srvdscocsdellpeprx90,true,bridge,false,1500,,tagged,false,,,
vmbr615,jnb1srvdscocsdellpeprx90,true,bridge,false,1500,,tagged,false,,,
vmbr616,jnb1srvdscocsdellpeprx90,true,bridge,false,1500,,tagged,false,,,
vmbr617,jnb1srvdscocsdellpeprx90,true,bridge,false,1500,,tagged,false,,,
vmbr618,jnb1srvdscocsdellpeprx90,true,bridge,false,1500,,tagged,false,,,
vmbr619,jnb1srvdscocsdellpeprx90,true,bridge,false,1500,,tagged,false,,,
vmbr620,jnb1srvdscocsdellpeprx90,true,bridge,false,1500,,tagged,false,,,
vmbr702,jnb1srvdscocsdellpeprx90,true,bridge,false,1500,,tagged,false,,,
bond0,jnb1srvdscocsdellpeprx90,true,lag,false,1500,20000000,,false,,,
bond0.1,jnb1srvdscocsdellpeprx90,true,virtual,false,,,tagged,false,bond0,vmbr5,
bond0.10,jnb1srvdscocsdellpeprx90,true,virtual,false,,,tagged,false,bond0,vmbr0,
bond0.52,jnb1srvdscocsdellpeprx90,true,virtual,false,,,tagged,false,bond0,vmbr6,
bond0.100,jnb1srvdscocsdellpeprx90,true,virtual,false,,,tagged,false,bond0,vmbr4,
bond0.101,jnb1srvdscocsdellpeprx90,true,virtual,false,,,tagged,false,bond0,vmbr1,
bond0.106,jnb1srvdscocsdellpeprx90,true,virtual,false,,,tagged,false,bond0,vmbr10,
bond0.502,jnb1srvdscocsdellpeprx90,true,virtual,false,,,tagged,false,bond0,vmbr7,
bond0.601,jnb1srvdscocsdellpeprx90,true,virtual,false,,,tagged,false,bond0,vmbr601,
bond0.602,jnb1srvdscocsdellpeprx90,true,virtual,false,,,tagged,false,bond0,vmbr602,
bond0.609,jnb1srvdscocsdellpeprx90,true,virtual,false,,,tagged,false,bond0,vmbr609,
bond0.610,jnb1srvdscocsdellpeprx90,true,virtual,false,,,tagged,false,bond0,vmbr610,
bond0.611,jnb1srvdscocsdellpeprx90,true,virtual,false,,,tagged,false,bond0,vmbr611,
bond0.612,jnb1srvdscocsdellpeprx90,true,virtual,false,,,tagged,false,bond0,vmbr612,
bond0.613,jnb1srvdscocsdellpeprx90,true,virtual,false,,,tagged,false,bond0,vmbr613,
bond0.614,jnb1srvdscocsdellpeprx90,true,virtual,false,,,tagged,false,bond0,vmbr614,
bond0.615,jnb1srvdscocsdellpeprx90,true,virtual,false,,,tagged,false,bond0,vmbr615,
bond0.616,jnb1srvdscocsdellpeprx90,true,virtual,false,,,tagged,false,bond0,vmbr616,
bond0.617,jnb1srvdscocsdellpeprx90,true,virtual,false,,,tagged,false,bond0,vmbr617,
bond0.618,jnb1srvdscocsdellpeprx90,true,virtual,false,,,tagged,false,bond0,vmbr618,
bond0.619,jnb1srvdscocsdellpeprx90,true,virtual,false,,,tagged,false,bond0,vmbr619,
bond0.620,jnb1srvdscocsdellpeprx90,true,virtual,false,,,tagged,false,bond0,vmbr620,
eno1np0,jnb1srvdscocsdellpeprx90,true,10gbase-t,false,1500,10000000,,false,,,bond0
eno2np1,jnb1srvdscocsdellpeprx90,true,10gbase-t,false,1500,10000000,,false,,,bond0
eno3np2,jnb1srvdscocsdellpeprx90,true,10gbase-t,false,1500,10000000,,false,,,
eno4np3,jnb1srvdscocsdellpeprx90,true,10gbase-t,false,1500,10000000,,false,,,
eno3np2.13,jnb1srvdscocsdellpeprx90,true,virtual,false,,,tagged,false,eno3np2,vmbr8,
eno4np3.14,jnb1srvdscocsdellpeprx90,true,virtual,false,,,tagged,false,eno4np3,vmbr9,
eno3np2.303,jnb1srvdscocsdellpeprx90,true,virtual,false,,,tagged,false,eno3np2,vmbr303,
eno4np3.304,jnb1srvdscocsdellpeprx90,true,virtual,false,,,tagged,false,eno4np3,vmbr304,
eno3np2.305,jnb1srvdscocsdellpeprx90,true,virtual,false,,,tagged,false,eno3np2,vmbr305,
eno4np3.306,jnb1srvdscocsdellpeprx90,true,virtual,false,,,tagged,false,eno4np3,vmbr306,
eno3np2.309,jnb1srvdscocsdellpeprx90,true,virtual,false,,,tagged,false,eno3np2,vmbr309,
eno4np3.310,jnb1srvdscocsdellpeprx90,true,virtual,false,,,tagged,false,eno4np3,vmbr310,
eno3np2.311,jnb1srvdscocsdellpeprx90,true,virtual,false,,,tagged,false,eno3np2,vmbr311,
eno4np3.312,jnb1srvdscocsdellpeprx90,true,virtual,false,,,tagged,false,eno4np3,vmbr312,
jnb1srvdscocsdellpeprx90-BMC,jnb1srvdscocsdellpeprx90,true,1000base-t,true,,1000000,access,false,,,
Install node_exporter
Run the following commands: