Skip to content
Open
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
322 changes: 322 additions & 0 deletions scenarios/reproducers/dt-sharded-compact.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,322 @@
---
cifmw_architecture_scenario: dt-sharded

# Automation section. Most of these parameters are passed to the
# controller-0 as-is and consumed by the `deploy-va.sh` script.
# Please note: all paths are on the controller-0, i.e. managed by the
# Framework. Please do not edit them!
# Short alias for the architecture repository checkout; used below to build
# the values-file paths.
_arch_repo: "{{ cifmw_architecture_repo }}"
# File where the generated Ceph client variables are written.
cifmw_ceph_client_vars: /tmp/ceph_client.yml
# Post-Ceph values file: src and dst are identical, so the file is edited
# in place on the controller-0.
cifmw_ceph_client_values_post_ceph_path_src: >-
  {{ _arch_repo }}/examples/dt/dt-sharded/values.yaml
cifmw_ceph_client_values_post_ceph_path_dst: >-
  {{ cifmw_ceph_client_values_post_ceph_path_src }}
# Same in-place pattern for the control-plane service-values file.
cifmw_ceph_client_service_values_post_ceph_path_src: >-
  {{ _arch_repo }}/examples/dt/dt-sharded/control-plane/service-values.yaml
cifmw_ceph_client_service_values_post_ceph_path_dst: >-
  {{ cifmw_ceph_client_service_values_post_ceph_path_src }}


# workaround https://issues.redhat.com/browse/OSPRH-6675
# Pin the Ceph spec public network to the ctlplane subnet defined in
# cifmw_networking_definition below.
cifmw_ceph_spec_public_network: "{{ cifmw_networking_definition.networks.ctlplane.network }}"

# HERE, if you want to override the kustomization, you can uncomment this
# parameter and provide the data structure you want to apply.
# cifmw_architecture_user_kustomize:
# stage_0:
# 'network-values':
# data:
# starwars: Obiwan

# HERE, if you want to stop the deployment loop at any stage, you can uncomment
# the following parameter and update the value to match the stage you want to
# reach. Known stages are:
# pre_kustomize_stage_INDEX
# pre_apply_stage_INDEX
# post_apply_stage_INDEX
#
# cifmw_deploy_architecture_stopper:

# NOTE(review): presumably allows the libvirt VMs to reach the OpenStack
# API network — confirm against the role consuming this flag.
cifmw_allow_vms_to_reach_osp_api: true

# Full networking definition including designate network
cifmw_networking_definition:
  networks:
    # Flat provisioning network; every VLAN network below is trunked over
    # it on the nodes (see the trunk-parent entries in group-templates).
    ctlplane:
      network: "192.168.122.0/24"
      gateway: "192.168.122.1"
      dns:
        - "192.168.122.1"
      mtu: 1500
      # Per-tool allocation pools. "start"/"end" are host offsets within
      # the network (assumption — confirm against the networking mapper
      # that consumes this definition).
      tools:
        multus:
          ranges:
            - start: 30
              end: 70
        netconfig:
          ranges:
            - start: 100
              end: 120
            - start: 150
              end: 170
        metallb:
          ranges:
            - start: 80
              end: 90
    internalapi:
      network: "172.17.0.0/24"
      vlan: 20
      mtu: 1500
      tools:
        multus:
          ranges:
            - start: 30
              end: 70
        metallb:
          ranges:
            - start: 80
              end: 99
        netconfig:
          ranges:
            - start: 100
              end: 250
    storage:
      network: "172.18.0.0/24"
      vlan: 21
      mtu: 1500
      tools:
        multus:
          ranges:
            - start: 30
              end: 70
        metallb:
          ranges:
            - start: 80
              end: 90
        netconfig:
          ranges:
            - start: 100
              end: 250
    tenant:
      network: "172.19.0.0/24"
      vlan: 22
      mtu: 1500
      tools:
        multus:
          ranges:
            - start: 30
              end: 70
        metallb:
          ranges:
            - start: 80
              end: 90
        netconfig:
          ranges:
            - start: 100
              end: 250
    storagemgmt:
      network: "172.20.0.0/24"
      vlan: 23
      mtu: 1500
      tools:
        netconfig:
          ranges:
            - start: 100
              end: 250
    designate:
      network: "172.26.0.0/24"
      vlan: 24
      mtu: 1500
      tools:
        multus:
          ranges:
            - start: 30
              end: 70
        metallb:
          ranges:
            - start: 80
              end: 90
        netconfig:
          ranges:
            - start: 100
              end: 250
    designateext:
      network: "172.34.0.0/24"
      vlan: 34
      mtu: 1500
      tools:
        multus:
          ranges:
            - start: 30
              end: 70
        metallb:
          ranges:
            - start: 80
              end: 90
        netconfig:
          ranges:
            - start: 100
              end: 250
    # NOTE(review): VLAN ID 22 is also used by the tenant network above —
    # confirm the duplicate is intentional (different subnets, and external
    # is not trunked in any group-template below).
    external:
      network: "10.0.0.0/24"
      vlan: 22
      mtu: 1500
      tools:
        netconfig:
          ranges:
            - start: 100
              end: 250
  # Per-node-group templates. "range" reserves a contiguous run of host
  # offsets for each group (ocps: 10-19, computes: 100-120, cephs:
  # 150-170 — these fit inside the ctlplane netconfig pools above), and
  # trunk-parent appears to attach each VLAN network on top of the node's
  # ctlplane interface.
  group-templates:
    ocps:
      network-template:
        range:
          start: 10
          length: 10
      networks:
        ctlplane: {}
        internalapi:
          trunk-parent: ctlplane
        tenant:
          trunk-parent: ctlplane
        storage:
          trunk-parent: ctlplane
        designate:
          trunk-parent: ctlplane
        designateext:
          trunk-parent: ctlplane
    computes:
      network-template:
        range:
          start: 100
          length: 21
      networks:
        ctlplane: {}
        internalapi:
          trunk-parent: ctlplane
        tenant:
          trunk-parent: ctlplane
        storage:
          trunk-parent: ctlplane
        storagemgmt:
          trunk-parent: ctlplane
    cephs:
      network-template:
        range:
          start: 150
          length: 21
      networks:
        ctlplane: {}
        internalapi:
          trunk-parent: ctlplane
        storage:
          trunk-parent: ctlplane
        storagemgmt:
          trunk-parent: ctlplane
        tenant:
          trunk-parent: ctlplane
  # Static addressing for specific nodes.
  instances:
    controller-0:
      networks:
        ctlplane:
          ip: "192.168.122.9"

# HCI requires bigger size to hold OCP on OSP disks
cifmw_block_device_size: 100G
# Compute node sizing; these feed the minimum clamps (max filters) in
# cifmw_libvirt_manager_configuration below. Units are presumably
# GB / GB / vCPUs — confirm against the libvirt_manager role.
cifmw_libvirt_manager_compute_disksize: 160
cifmw_libvirt_manager_compute_memory: 50
cifmw_libvirt_manager_compute_cpus: 8

cifmw_libvirt_manager_configuration:
  # Libvirt networks, given as raw libvirt network XML block scalars.
  networks:
    # NAT network carrying the ctlplane traffic (and the trunked VLANs);
    # its gateway address and prefix are derived from the ctlplane subnet
    # above (nthhost(1) -> first host, ipaddr('prefix') -> mask length).
    osp_trunk: |
      <network>
        <name>osp_trunk</name>
        <forward mode='nat'/>
        <bridge name='osp_trunk' stp='on' delay='0'/>
        <dns enable="no"/>
        <ip family='ipv4'
         address='{{ cifmw_networking_definition.networks.ctlplane.network |
                     ansible.utils.nthhost(1) }}'
         prefix='{{ cifmw_networking_definition.networks.ctlplane.network |
                    ansible.utils.ipaddr('prefix') }}'>
        </ip>
      </network>
    # NAT network for the OCP "baremetal" side, with a fixed subnet.
    ocpbm: |
      <network>
        <name>ocpbm</name>
        <forward mode='nat'/>
        <bridge name='ocpbm' stp='on' delay='0'/>
        <dns enable="no"/>
        <ip family='ipv4' address='192.168.111.1' prefix='24'>
        </ip>
      </network>
    # Bridge-mode network (no NAT, no IP) — presumably the OCP
    # provisioning network; confirm against the devscripts setup.
    ocppr: |
      <network>
        <name>ocppr</name>
        <forward mode='bridge'/>
        <bridge name='ocppr'/>
      </network>
  # VM definitions per role.
  vms:
    # Three OCP nodes built from the local "ocp_master" image.
    ocp:
      amount: 3
      admin_user: core
      image_local_dir: "{{ cifmw_basedir }}/images/"
      disk_file_name: "ocp_master"
      # NOTE(review): quoted "100" (string) while controller uses bare 50
      # (int) — consumers appear to cast, but consider normalizing.
      disksize: "100"
      cpus: 16
      memory: 64
      root_part_id: 4
      uefi: true
      nets:
        - ocppr
        - ocpbm
        - osp_trunk
    # Compute nodes; the max filters clamp the tunables above to sane
    # minimums (>= 3 nodes, >= 50 disk, >= 8 memory, >= 4 cpus).
    compute:
      uefi: "{{ cifmw_use_uefi }}"
      root_part_id: "{{ cifmw_root_partition_id }}"
      amount: "{{ [cifmw_libvirt_manager_compute_amount|int, 3] | max }}"
      image_url: "{{ cifmw_discovered_image_url }}"
      sha256_image_name: "{{ cifmw_discovered_hash }}"
      image_local_dir: "{{ cifmw_basedir }}/images/"
      disk_file_name: "compute-base-os.qcow2"
      disksize: "{{ [cifmw_libvirt_manager_compute_disksize|int, 50] | max }}"
      memory: "{{ [cifmw_libvirt_manager_compute_memory|int, 8] | max }}"
      cpus: "{{ [cifmw_libvirt_manager_compute_cpus|int, 4] | max }}"
      nets:
        - ocpbm
        - osp_trunk
    # Controller VM; "amount" is not set here, so the role default
    # applies (presumably 1 — confirm).
    controller:
      uefi: "{{ cifmw_use_uefi }}"
      root_part_id: "{{ cifmw_root_partition_id }}"
      image_url: "{{ cifmw_discovered_image_url }}"
      sha256_image_name: "{{ cifmw_discovered_hash }}"
      image_local_dir: "{{ cifmw_basedir }}/images/"
      disk_file_name: "base-os.qcow2"
      disksize: 50
      memory: 8
      cpus: 4
      nets:
        - ocpbm
        - osp_trunk

## devscript support for OCP deploy
cifmw_devscripts_config_overrides:
  # Enable FIPS mode only when the job requests it; defaults to false.
  fips_mode: "{{ cifmw_fips_enabled | default(false) | bool }}"

# Note: with the extra_network_names entry "osp_trunk", we instruct the
# devscripts role to create a new network and associate it with the
# OCP nodes. It is a "private network" that will hold the VLANs used
# for network isolation.

# Please create a custom env file to provide:
# cifmw_devscripts_ci_token:
# cifmw_devscripts_pull_secret:

# Exercise object (RGW), file (CephFS) and NFS storage on top of the
# default block storage; keep the Ceph dashboard disabled.
cifmw_ceph_daemons_layout:
  ceph_nfs_enabled: true
  cephfs_enabled: true
  dashboard_enabled: false
  rgw_enabled: true

cifmw_deploy_obs: true
Loading