Commit c1ce4d29 authored by kaiyou

Prepare for more versatile CI jobs

parent 660b34b7
Pipeline #29260 passed
@@ -10,7 +10,7 @@
       environment:
         KUBECONFIG: "{{ kubeconfig }}"
       retries: 10
-      delay: 15
+      delay: 5
     - name: Dump cluster info
       ansible.builtin.shell: |
@@ -27,3 +27,4 @@
         helm upgrade --install hepto /tmp/hepto-bootstrap -f /tmp/cluster_info
       environment:
         KUBECONFIG: "{{ kubeconfig }}"
+---
+# These tasks upload the build artifact from CI to some S3
+# bucket, for two main reasons:
+# - it is complex and unsupported to download the artifact of a current build directly from Gitlab
+# - uploading from the CI to many cloud providers can become costly
+- name: Try and get the url
+  amazon.aws.s3_object:
+    endpoint_url: "{{ s3_endpoint }}"
+    bucket: "{{ s3_bucket }}"
+    region: "{{ s3_region }}"
+    access_key: "{{ s3_access_key }}"
+    secret_key: "{{ s3_secret_key }}"
+    object: "hepto.{{ lookup('env', 'CI_PIPELINE_ID') }}"
+    mode: geturl
+  register: get
+  ignore_errors: true
+- name: Upload the file when necessary
+  when: get.failed
+  amazon.aws.s3_object:
+    endpoint_url: "{{ s3_endpoint }}"
+    bucket: "{{ s3_bucket }}"
+    region: "{{ s3_region }}"
+    access_key: "{{ s3_access_key }}"
+    secret_key: "{{ s3_secret_key }}"
+    object: "hepto.{{ lookup('env', 'CI_PIPELINE_ID') }}"
+    src: "{{ lookup('env', 'PWD') }}/hepto"
+    mode: put
+    encrypt: false
+  register: put
+# This is hacky as hell, yet required for the fact to be properly altered on
+# all hosts. The when clause makes it possible to call this outside the playbook
+# during CI warmup
+- name: Set the hepto download url for nodes
+  delegate_to: "{{ item }}"
+  delegate_facts: true
+  when: "'nodes' in groups"
+  with_items: "{{ groups['nodes'] }}"
+  set_fact:
+    hepto_url: "{{ put.url if put.changed else get.url }}"
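The guard on 'nodes' is what makes these tasks callable on their own during CI warmup, as the comment says. A minimal sketch of such a standalone warmup playbook, assuming the ciupload.yaml path used elsewhere in this commit; the warmup.yaml name is a hypothetical for illustration, not part of the commit:

---
# warmup.yaml (hypothetical): pre-upload the hepto binary during CI warmup,
# before any node exists; the 'nodes' guard above makes the set_fact a no-op here
- hosts: localhost
  tasks:
    - name: Upload the build artifact ahead of deployment
      ansible.builtin.include_tasks: ./ciupload.yaml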
+---
+- hosts: localhost
+  tasks:
+    - name: Cleanup cloud deployment
+      include_role:
+        name: cloud
+        tasks_from: cleanup.yaml
 ---
+# This play merely creates nodes and/or probes them, for inclusion
+# when deploying or using the cluster
 - hosts: localhost
   roles:
     - cloud
-- import_playbook: deploy.yaml
 ---
+# This play will do nothing if no cloud deployment is specified
+- import_playbook: cloud.yaml
+# If this is a CI deployment, upload hepto to a cloud URL
+- hosts: localhost
+  tasks:
+    - when: "lookup('env', 'CI_PIPELINE_ID') != ''"
+      include_tasks: ./ciupload.yaml
+# Deploy the nodes, either explicitly declared or deployed to cloud
 - hosts: nodes
   roles:
     - hepto
-- ansible.builtin.import_playbook: bootstrap.yaml
+# Bootstrap the cluster
+- import_playbook: bootstrap.yaml
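The CI gate works because lookup('env', ...) returns an empty string when the variable is unset, so the upload play is skipped entirely on local runs. A quick illustrative check of the gating, not part of the commit:

- hosts: localhost
  tasks:
    - name: Report whether the CI upload play would run
      ansible.builtin.debug:
        msg: "CI upload {{ 'enabled' if lookup('env', 'CI_PIPELINE_ID') != '' else 'skipped' }}"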
- name: "Get servers information"
community.general.scaleway_server_info:
api_token: "{{ scaleway_token }}"
region: "{{ scaleway_region }}"
register: raw_servers
- name: "Index servers information"
ansible.builtin.set_fact:
servers: "{{ raw_servers['scaleway_server_info']
| map(attribute='name')
| zip(raw_servers['scaleway_server_info'])
| community.general.dict }}"
- name: "Delete nodes" - name: "Delete nodes"
community.general.scaleway_compute: community.general.scaleway_compute:
api_token: "{{ scaleway_token }}" api_token: "{{ scaleway_token }}"
@@ -20,14 +7,28 @@
     project: "{{ scaleway_project }}"
     region: "{{ scaleway_region }}"
     state: absent
-  register: deleted
+  register: servers
   with_dict: "{{ nodes }}"
+- name: Debug
+  debug:
+    msg: "{{ servers }}"
+# The Scaleway module does not offer to delete volumes when deleting nodes,
+# so we loop and try to delete all unattached volumes
+- name: "Get volume info"
+  community.general.scaleway_volume_info:
+    api_token: "{{ scaleway_token }}"
+    region: "{{ scaleway_region }}"
+  register: volumes
 - name: "Delete volumes"
   community.general.scaleway_volume:
     api_token: "{{ scaleway_token }}"
-    name: "{{ servers[item.invocation.module_args.name].volumes[0].name }}"
+    name: "{{ item.name }}"
     project: "{{ scaleway_project }}"
     region: "{{ scaleway_region }}"
     state: absent
-  with_dict: "{{ deleted.results }}"
+  with_items: "{{ volumes.scaleway_volume_info }}"
+  when: item.state == "available"
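The when clause skips attached volumes item by item; the same effect could be had by filtering the list inside the loop expression with Jinja's selectattr. A sketch of that alternative, not what the commit does:

- name: "Delete volumes"
  community.general.scaleway_volume:
    api_token: "{{ scaleway_token }}"
    name: "{{ item.name }}"
    project: "{{ scaleway_project }}"
    region: "{{ scaleway_region }}"
    state: absent
  with_items: "{{ volumes.scaleway_volume_info | selectattr('state', 'equalto', 'available') | list }}"

The commit's version keeps the skipped items visible in the play output, which is arguably easier to audit.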
@@ -24,10 +24,15 @@
   add_host:
     name: "{{ item.item.key }}"
     groups: "{{ ['nodes'] + (nodes[item.item.key]|d([])) }}"
+    # Hcloud does not return the node ip; it is however always ::1 in the allocated prefix
     ansible_host: "{{ item.hcloud_server.ipv6 | ansible.utils.ipaddr('net') | ansible.utils.ipaddr('1') | ansible.utils.ipaddr('address') }}"
     ansible_user: root
+    # Hcloud provides a generic ip for the v6 gateway
     node_gw: "fe80::1"
+    # Use the ::2 address inside the allocated prefix for hepto
     node_ip: "{{ item.hcloud_server.ipv6 | ansible.utils.ipaddr('net') | ansible.utils.ipaddr('2') }}"
+    # Hcloud overrides the default interface to eth0 on every OS
+    node_iface: eth0
     ansible_ssh_extra_args: "-o StrictHostKeyChecking=no"
   with_items: "{{ servers.results }}"
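For reference, the ipaddr chain resolves as follows, assuming Hcloud hands out a /64 such as 2001:db8:1234::/64 (an illustrative value, not taken from the commit):

# ipaddr('net')     -> 2001:db8:1234::/64   (the allocated prefix itself)
# ipaddr('1')       -> 2001:db8:1234::1/64  (first host inside the prefix)
# ipaddr('address') -> 2001:db8:1234::1     (prefix length stripped)
- name: Show the resolved node address for an example prefix
  ansible.builtin.debug:
    msg: "{{ '2001:db8:1234::/64' | ansible.utils.ipaddr('net') | ansible.utils.ipaddr('1') | ansible.utils.ipaddr('address') }}"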
@@ -8,37 +8,40 @@
     region: "{{ scaleway_region }}"
     enable_ipv6: true
     state: running
-  register: created
   with_dict: "{{ nodes }}"
-- name: "Get servers information"
-  community.general.scaleway_server_info:
-    api_token: "{{ scaleway_token }}"
-    region: "{{ scaleway_region }}"
-  register: raw_servers
-- name: "Index servers information"
-  ansible.builtin.set_fact:
-    servers: "{{ raw_servers['scaleway_server_info']
-      | map(attribute='name')
-      | zip(raw_servers['scaleway_server_info'])
-      | community.general.dict }}"
+# We start the nodes again so we get their IP address, which is
+# not yet available at creation time
+- name: "Probe nodes"
+  community.general.scaleway_compute:
+    api_token: "{{ scaleway_token }}"
+    name: "{{ node_prefix }}-{{ item.key }}"
+    commercial_type: "{{ scaleway_type }}"
+    image: "{{ scaleway_image }}"
+    project: "{{ scaleway_project }}"
+    region: "{{ scaleway_region }}"
+    state: running
+  register: servers
+  with_dict: "{{ nodes }}"
 - name: Wait for nodes to be ready
   ansible.builtin.wait_for:
     port: 22
-    host: "{{ servers[item.invocation.module_args.name].ipv6.address }}"
+    host: "{{ item.msg.ipv6.address }}"
     delay: 2
-  with_items: "{{ created.results }}"
+  with_items: "{{ servers.results }}"
 - name: "Add nodes to inventory"
   add_host:
     name: "{{ item.item.key }}"
     groups: "{{ ['nodes'] + (nodes[item.item.key]|d([])) }}"
-    ansible_host: "{{ servers[item.invocation.module_args.name].ipv6.address }}"
+    ansible_host: "{{ item.msg.ipv6.address }}"
     ansible_user: root
-    node_gw: "{{ servers[item.invocation.module_args.name].ipv6.gateway }}"
-    node_ip: "{{ servers[item.invocation.module_args.name].ipv6.address | ansible.utils.ipmath(1) }}/{{ servers[item.invocation.module_args.name].ipv6.netmask }}"
+    node_gw: "{{ item.msg.ipv6.gateway }}"
+    # We use the next (usually ::2) available ip for hepto
+    node_ip: "{{ item.msg.ipv6.address | ansible.utils.ipmath(1) }}/{{ item.msg.ipv6.netmask }}"
+    # This is specific to scaleway
+    node_iface: ens2
     ansible_ssh_extra_args: "-o StrictHostKeyChecking=no"
-  with_items: "{{ created.results }}"
+  with_items: "{{ servers.results }}"
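Here ansible.utils.ipmath(1) simply adds one to the address, so a node living at ::1 hands ::2 to hepto. An illustrative example with a made-up address:

- name: Show the hepto address derived from an example node address
  ansible.builtin.debug:
    msg: "{{ '2001:db8:5678::1' | ansible.utils.ipmath(1) }}"  # -> 2001:db8:5678::2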
@@ -11,7 +11,7 @@
   stat:
     path: "{{ hepto_bin }}"
   register: hepto_exists
 - name: Download hepto binary for amd64
   get_url:
     url: "{{ hepto_url }}"
...
 ---
-#- ansible.builtin.import_playbook: cloud.yaml
+# Import the cloud playbook to populate inventory
+- import_playbook: cloud.yaml
-- hosts: mastere
-  tasks:
+# Do the actual testing from master
+- hosts: master
+  tasks:
   - name: Deploy podinfo
     ansible.builtin.shell: |
       helm repo add --force-update podinfo https://stefanprodan.github.io/podinfo
       helm repo update
+      # Podinfo default repository does not expose ipv6, switch to docker.io
       helm upgrade --install podinfo podinfo/podinfo --set image.repository=docker.io/stefanprodan/podinfo
     environment:
       KUBECONFIG: "{{ kubeconfig }}"
@@ -18,16 +20,10 @@
     environment:
       KUBECONFIG: "{{ kubeconfig }}"
+  # This is run from master for now, running from localhost is too complex
   - name: Try and access the public URL
     ansible.builtin.get_url:
       url: "http://[{{ external_ips | first }}]:9898"
       dest: /tmp
     retries: 100
     delay: 30
-- hosts: localhost
-  tasks:
-  - name: Cleanup cloud deployment
-    include_role:
-      name: cloud
-      tasks_from: cleanup.yaml