initial commit from IBM Cloud example

Signed-off-by: Vincent Batts <vbatts@hashbangbash.com>
Vincent Batts 2019-06-03 10:42:48 +02:00
commit 4e76f510b3
Signed by: vbatts
GPG key ID: 10937E57733F1362
32 changed files with 1925 additions and 0 deletions


@@ -0,0 +1,75 @@
#!/bin/bash
# set -x
echo "Build environment variables:"
echo "REGISTRY_URL=${REGISTRY_URL}"
echo "REGISTRY_NAMESPACE=${REGISTRY_NAMESPACE}"
echo "IMAGE_NAME=${IMAGE_NAME}"
echo "BUILD_NUMBER=${BUILD_NUMBER}"
echo "ARCHIVE_DIR=${ARCHIVE_DIR}"
# also run 'env' command to find all available env variables
# or learn more about the available environment variables at:
# https://cloud.ibm.com/docs/services/ContinuousDelivery/pipeline_deploy_var.html#deliverypipeline_environment
# To review or change build options use:
# ibmcloud cr build --help
echo "Checking registry namespace: ${REGISTRY_NAMESPACE}"
NS=$( ibmcloud cr namespaces | grep "${REGISTRY_NAMESPACE}" ||: )
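# Note: grep matches substrings, so an existing namespace that merely contains ${REGISTRY_NAMESPACE} would also count as a match here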
if [ -z "${NS}" ]; then
echo -e "Registry namespace ${REGISTRY_NAMESPACE} not found, creating it."
ibmcloud cr namespace-add ${REGISTRY_NAMESPACE}
echo -e "Registry namespace ${REGISTRY_NAMESPACE} created."
else
echo -e "Registry namespace ${REGISTRY_NAMESPACE} found."
fi
echo -e "Existing images in registry"
ibmcloud cr images
echo "=========================================================="
echo -e "BUILDING CONTAINER IMAGE: ${IMAGE_NAME}:${BUILD_NUMBER}"
set -x
ibmcloud cr build -t ${REGISTRY_URL}/${REGISTRY_NAMESPACE}/${IMAGE_NAME}:${BUILD_NUMBER} .
set +x
ibmcloud cr image-inspect ${REGISTRY_URL}/${REGISTRY_NAMESPACE}/${IMAGE_NAME}:${BUILD_NUMBER}
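# Expose the fully qualified image URL to later pipeline stages (e.g. validation jobs) via PIPELINE_IMAGE_URL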
export PIPELINE_IMAGE_URL="$REGISTRY_URL/$REGISTRY_NAMESPACE/$IMAGE_NAME:$BUILD_NUMBER"
ibmcloud cr images
echo "=========================================================="
echo "COPYING ARTIFACTS needed for deployment and testing (in particular build.properties)"
echo "Checking archive dir presence"
mkdir -p $ARCHIVE_DIR
# Persist env variables into a properties file (build.properties) so that all pipeline stages
# that consume this build as input and are configured with 'build.properties' as their
# environment properties file can reuse these env variables in their job shell scripts.
# CHART information from build.properties is used in Helm Chart deployment to set the release name
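# The chart name is taken as the basename of the first directory found under chart/ (GNU find prints it and quits)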
CHART_NAME=$(find chart/. -maxdepth 2 -type d -name '[^.]?*' -printf %f -quit)
echo "CHART_NAME=${CHART_NAME}" >> $ARCHIVE_DIR/build.properties
# IMAGE information from build.properties is used in Helm Chart deployment to set the image repository and tag
echo "IMAGE_NAME=${IMAGE_NAME}" >> $ARCHIVE_DIR/build.properties
echo "BUILD_NUMBER=${BUILD_NUMBER}" >> $ARCHIVE_DIR/build.properties
# REGISTRY information from build.properties is used in Helm Chart deployment to generate cluster secret
echo "REGISTRY_URL=${REGISTRY_URL}" >> $ARCHIVE_DIR/build.properties
echo "REGISTRY_NAMESPACE=${REGISTRY_NAMESPACE}" >> $ARCHIVE_DIR/build.properties
echo "File 'build.properties' created for passing env variables to subsequent pipeline jobs:"
cat $ARCHIVE_DIR/build.properties
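# Note: a subsequent stage configured with 'build.properties' as its environment properties file
# gets these values injected automatically; a plain shell job could also load them manually with:
# source ${ARCHIVE_DIR}/build.properties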
echo "Copy pipeline scripts along with the build"
# Copy scripts (incl. deploy scripts)
if [ -d ./scripts/ ]; then
if [ ! -d $ARCHIVE_DIR/scripts/ ]; then # no need to copy if working in ./ already
cp -r ./scripts/ $ARCHIVE_DIR/
fi
fi
echo "Copy Helm chart along with the build"
if [ ! -d $ARCHIVE_DIR/chart/ ]; then # no need to copy if working in ./ already
cp -r ./chart/ $ARCHIVE_DIR/
fi


@@ -0,0 +1,136 @@
#!/bin/bash
# set -x
# View build properties
cat build.properties
echo "Check cluster availability"
IP_ADDR=$(ibmcloud cs workers ${PIPELINE_KUBERNETES_CLUSTER_NAME} | grep normal | head -n 1 | awk '{ print $2 }')
if [ -z "$IP_ADDR" ]; then
echo "$PIPELINE_KUBERNETES_CLUSTER_NAME not created or workers not ready"
exit 1
fi
echo "Configuring cluster namespace"
if kubectl get namespace ${CLUSTER_NAMESPACE}; then
echo -e "Namespace ${CLUSTER_NAMESPACE} found."
else
kubectl create namespace ${CLUSTER_NAMESPACE}
echo -e "Namespace ${CLUSTER_NAMESPACE} created."
fi
echo "Configuring cluster role binding"
if kubectl get clusterrolebinding kube-system:default; then
echo -e "Cluster role binding found."
else
kubectl create clusterrolebinding kube-system:default --clusterrole=cluster-admin --serviceaccount=kube-system:default
echo -e "Cluster role binding created."
fi
echo "Configuring Tiller (Helm's server component)"
helm init --upgrade
kubectl rollout status -w deployment/tiller-deploy --namespace=kube-system
while [ "$(helm version | grep "Tiller")" != "" ]; do
echo "Waiting for server..."
sleep 10
done
helm version
echo "CHART_NAME: $CHART_NAME"
echo "DEFINE RELEASE by prefixing image (app) name with namespace if not 'default' as Helm needs unique release names across namespaces"
if [[ "${CLUSTER_NAMESPACE}" != "default" ]]; then
RELEASE_NAME="${CLUSTER_NAMESPACE}-${IMAGE_NAME}"
else
RELEASE_NAME=${IMAGE_NAME}
fi
echo "RELEASE_NAME: $RELEASE_NAME"
echo "CHECKING CHART (lint)"
helm lint chart/${CHART_NAME}
IMAGE_REPOSITORY=${REGISTRY_URL}/${REGISTRY_NAMESPACE}/${IMAGE_NAME}
# Using 'upgrade --install' for rolling updates. Note that subsequent updates will occur in the same namespace the release is currently deployed in, ignoring an explicit '--namespace' argument.
echo -e "Dry run into: ${PIPELINE_KUBERNETES_CLUSTER_NAME}/${CLUSTER_NAMESPACE}."
helm upgrade --install --debug --dry-run ${RELEASE_NAME} ./chart/${CHART_NAME} --namespace ${CLUSTER_NAMESPACE} --set image.repository=${IMAGE_REPOSITORY},image.tag=${BUILD_NUMBER}
echo -e "Deploying into: ${PIPELINE_KUBERNETES_CLUSTER_NAME}/${CLUSTER_NAMESPACE}."
helm upgrade --install ${RELEASE_NAME} ./chart/${CHART_NAME} --namespace ${CLUSTER_NAMESPACE} --set image.repository=${IMAGE_REPOSITORY},image.tag=${BUILD_NUMBER}
echo -e "CHECKING deployment status of release ${RELEASE_NAME} with image tag: ${BUILD_NUMBER}"
echo ""
for ITERATION in {1..30}
do
DATA=$( kubectl get pods --namespace ${CLUSTER_NAMESPACE} -a -l release=${RELEASE_NAME} -o json )
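# A pod counts as not ready if any container running the freshly built image reports ready==false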
NOT_READY=$( echo $DATA | jq '.items[].status.containerStatuses[] | select(.image=="'"${IMAGE_REPOSITORY}:${BUILD_NUMBER}"'") | select(.ready==false) ' )
if [[ -z "$NOT_READY" ]]; then
echo -e "All pods are ready:"
echo $DATA | jq '.items[].status.containerStatuses[] | select(.image=="'"${IMAGE_REPOSITORY}:${BUILD_NUMBER}"'") | select(.ready==true) '
break # deployment succeeded
fi
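# Capture the 'waiting' reason (if any) so fatal failures can abort the polling loop early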
REASON=$(echo $DATA | jq '.items[].status.containerStatuses[] | select(.image=="'"${IMAGE_REPOSITORY}:${BUILD_NUMBER}"'") | .state.waiting.reason')
echo -e "${ITERATION} : Deployment still pending..."
echo -e "NOT_READY:${NOT_READY}"
echo -e "REASON: ${REASON}"
if [[ ${REASON} == *ErrImagePull* ]] || [[ ${REASON} == *ImagePullBackOff* ]]; then
echo "Detected ErrImagePull or ImagePullBackOff failure. "
echo "Please check proper authenticating to from cluster to image registry (e.g. image pull secret)"
break; # no need to wait longer, error is fatal
elif [[ ${REASON} == *CrashLoopBackOff* ]]; then
echo "Detected CrashLoopBackOff failure. "
echo "Application is unable to start, check the application startup logs"
break; # no need to wait longer, error is fatal
fi
sleep 5
done
if [[ -n "$NOT_READY" ]]; then
echo ""
echo "=========================================================="
echo "DEPLOYMENT FAILED"
echo "Deployed Services:"
kubectl describe services ${RELEASE_NAME}-${CHART_NAME} --namespace ${CLUSTER_NAMESPACE}
echo ""
echo "Deployed Pods:"
kubectl describe pods --selector app=${CHART_NAME} --namespace ${CLUSTER_NAMESPACE}
echo ""
echo "Application Logs"
kubectl logs --selector app=${CHART_NAME} --namespace ${CLUSTER_NAMESPACE}
echo "=========================================================="
PREVIOUS_RELEASE=$( helm history ${RELEASE_NAME} | grep SUPERSEDED | sort -r -n | awk '{print $1}' | head -n 1 )
echo -e "Could rollback to previous release: ${PREVIOUS_RELEASE} using command:"
echo -e "helm rollback ${RELEASE_NAME} ${PREVIOUS_RELEASE}"
# helm rollback ${RELEASE_NAME} ${PREVIOUS_RELEASE}
# echo -e "History for release:${RELEASE_NAME}"
# helm history ${RELEASE_NAME}
# echo "Deployed Services:"
# kubectl describe services ${RELEASE_NAME}-${CHART_NAME} --namespace ${CLUSTER_NAMESPACE}
# echo ""
# echo "Deployed Pods:"
# kubectl describe pods --selector app=${CHART_NAME} --namespace ${CLUSTER_NAMESPACE}
exit 1
fi
echo ""
echo "=========================================================="
echo "DEPLOYMENT SUCCEEDED"
echo ""
echo -e "Status for release:${RELEASE_NAME}"
helm status ${RELEASE_NAME}
echo ""
echo -e "History for release:${RELEASE_NAME}"
helm history ${RELEASE_NAME}
# echo ""
# echo "Deployed Services:"
# kubectl describe services ${RELEASE_NAME}-${CHART_NAME} --namespace ${CLUSTER_NAMESPACE}
# echo ""
# echo "Deployed Pods:"
# kubectl describe pods --selector app=${CHART_NAME} --namespace ${CLUSTER_NAMESPACE}
echo "=========================================================="
IP_ADDR=$(ibmcloud cs workers ${PIPELINE_KUBERNETES_CLUSTER_NAME} | grep normal | head -n 1 | awk '{ print $2 }')
PORT=$(kubectl get services --namespace ${CLUSTER_NAMESPACE} | grep ${RELEASE_NAME} | sed 's/[^:]*:\([0-9]*\).*/\1/g')
echo -e "View the application health at: http://${IP_ADDR}:${PORT}/health"