internal/controller/gitlabjobrunner_controller.go (new file, 432 lines)
@@ -0,0 +1,432 @@
/*
Copyright 2025.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package controller

import (
    "context"
    "fmt"
    "time"

    batchv1 "k8s.io/api/batch/v1"
    corev1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/api/errors"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/apimachinery/pkg/types"
    ctrl "sigs.k8s.io/controller-runtime"
    "sigs.k8s.io/controller-runtime/pkg/client"
    "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
    logf "sigs.k8s.io/controller-runtime/pkg/log"

    batchv1alpha1 "github.com/rml/gitlab-job-runner/api/v1alpha1"
)

// GitlabJobRunnerReconciler reconciles a GitlabJobRunner object
type GitlabJobRunnerReconciler struct {
    client.Client
    Scheme *runtime.Scheme
}

const (
    finalizerName = "batch.rml.ru/finalizer"
    defaultImage  = "bitnami/git:latest"
    defaultBranch = "main"
)

// +kubebuilder:rbac:groups=batch.rml.ru,resources=gitlabjobrunners,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=batch.rml.ru,resources=gitlabjobrunners/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=batch.rml.ru,resources=gitlabjobrunners/finalizers,verbs=update
// +kubebuilder:rbac:groups=batch,resources=jobs,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=batch,resources=cronjobs,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=core,resources=secrets,verbs=get;list;watch

// Reconcile is part of the main kubernetes reconciliation loop which aims to
// move the current state of the cluster closer to the desired state.
func (r *GitlabJobRunnerReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
    log := logf.FromContext(ctx)

    // Fetch the GitlabJobRunner instance
    runner := &batchv1alpha1.GitlabJobRunner{}
    if err := r.Get(ctx, req.NamespacedName, runner); err != nil {
        if errors.IsNotFound(err) {
            return ctrl.Result{}, nil
        }
        log.Error(err, "unable to fetch GitlabJobRunner")
        return ctrl.Result{}, err
    }

    // Handle finalizer
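    // The finalizer guarantees cleanupResources runs before the object is
    // actually removed: while DeletionTimestamp is unset we ensure the
    // finalizer is present; once deletion starts we clean up the owned
    // Jobs/CronJobs and then drop the finalizer so deletion can proceed.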
    if runner.ObjectMeta.DeletionTimestamp.IsZero() {
        if !controllerutil.ContainsFinalizer(runner, finalizerName) {
            controllerutil.AddFinalizer(runner, finalizerName)
            if err := r.Update(ctx, runner); err != nil {
                return ctrl.Result{}, err
            }
        }
    } else {
        if controllerutil.ContainsFinalizer(runner, finalizerName) {
            if err := r.cleanupResources(ctx, runner); err != nil {
                return ctrl.Result{}, err
            }
            controllerutil.RemoveFinalizer(runner, finalizerName)
            if err := r.Update(ctx, runner); err != nil {
                return ctrl.Result{}, err
            }
        }
        return ctrl.Result{}, nil
    }

    // Verify the GitLab secret exists
    secret := &corev1.Secret{}
    if err := r.Get(ctx, types.NamespacedName{
        Name:      runner.Spec.GitlabSecretRef,
        Namespace: runner.Namespace,
    }, secret); err != nil {
        log.Error(err, "unable to fetch GitLab secret")
        r.updateStatus(ctx, runner, "Degraded", metav1.ConditionFalse, "SecretNotFound", "GitLab secret not found")
        // Return a nil error: returning an error alongside RequeueAfter would
        // make controller-runtime ignore the fixed delay and requeue with
        // backoff instead.
        return ctrl.Result{RequeueAfter: time.Minute}, nil
    }

    // Determine if this should be a Job or CronJob
    if runner.Spec.Schedule != "" {
        return r.reconcileCronJob(ctx, runner)
    }
    return r.reconcileJob(ctx, runner)
}
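
// When Reconcile returns a non-nil error, controller-runtime discards the
// returned Result and requeues with exponential backoff; an explicit
// RequeueAfter only takes effect alongside a nil error, which is why the
// secret-not-found branch above returns nil.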

// reconcileJob handles one-time Job creation
func (r *GitlabJobRunnerReconciler) reconcileJob(ctx context.Context, runner *batchv1alpha1.GitlabJobRunner) (ctrl.Result, error) {
    log := logf.FromContext(ctx)

    jobName := runner.Name
    job := &batchv1.Job{}
    err := r.Get(ctx, types.NamespacedName{Name: jobName, Namespace: runner.Namespace}, job)

    if err != nil && errors.IsNotFound(err) {
        job = r.constructJob(runner)
        if err := controllerutil.SetControllerReference(runner, job, r.Scheme); err != nil {
            return ctrl.Result{}, err
        }
        if err := r.Create(ctx, job); err != nil {
            log.Error(err, "unable to create Job")
            r.updateStatus(ctx, runner, "Degraded", metav1.ConditionFalse, "JobCreationFailed", err.Error())
            return ctrl.Result{}, err
        }
        log.Info("created Job", "job", jobName)
        r.updateStatus(ctx, runner, "Progressing", metav1.ConditionTrue, "JobCreated", "Job created successfully")
        return ctrl.Result{}, nil
    } else if err != nil {
        return ctrl.Result{}, err
    }

    // Job exists, update status based on its state
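    // Caveat: job.Status.Failed counts failed pods, so it can be non-zero
    // while the Job is still retrying within its BackoffLimit; checking the
    // JobFailed condition would distinguish "retrying" from "gave up".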
    if job.Status.Succeeded > 0 {
        r.updateStatus(ctx, runner, "Available", metav1.ConditionTrue, "JobCompleted", "Job completed successfully")
    } else if job.Status.Failed > 0 {
        r.updateStatus(ctx, runner, "Degraded", metav1.ConditionFalse, "JobFailed", "Job failed")
    } else {
        // Record the active Job name before updateStatus persists the
        // status, so both changes land in a single update.
        runner.Status.ActiveJobName = jobName
        r.updateStatus(ctx, runner, "Progressing", metav1.ConditionTrue, "JobRunning", "Job is running")
    }

    return ctrl.Result{}, nil
}

// reconcileCronJob handles CronJob creation and management
func (r *GitlabJobRunnerReconciler) reconcileCronJob(ctx context.Context, runner *batchv1alpha1.GitlabJobRunner) (ctrl.Result, error) {
    log := logf.FromContext(ctx)

    cronJobName := runner.Name
    cronJob := &batchv1.CronJob{}
    err := r.Get(ctx, types.NamespacedName{Name: cronJobName, Namespace: runner.Namespace}, cronJob)

    if err != nil && errors.IsNotFound(err) {
        cronJob = r.constructCronJob(runner)
        if err := controllerutil.SetControllerReference(runner, cronJob, r.Scheme); err != nil {
            return ctrl.Result{}, err
        }
        if err := r.Create(ctx, cronJob); err != nil {
            log.Error(err, "unable to create CronJob")
            r.updateStatus(ctx, runner, "Degraded", metav1.ConditionFalse, "CronJobCreationFailed", err.Error())
            return ctrl.Result{}, err
        }
        log.Info("created CronJob", "cronjob", cronJobName)
        r.updateStatus(ctx, runner, "Available", metav1.ConditionTrue, "CronJobCreated", "CronJob created successfully")
        return ctrl.Result{}, nil
    } else if err != nil {
        return ctrl.Result{}, err
    }

    // CronJob exists, check if an update is needed
    needsUpdate := false
    if cronJob.Spec.Schedule != runner.Spec.Schedule {
        cronJob.Spec.Schedule = runner.Spec.Schedule
        needsUpdate = true
    }
    // A nil Suspend must also be treated as out of sync; otherwise a runner
    // with Suspend=true could never suspend an existing CronJob whose field
    // is unset.
    if cronJob.Spec.Suspend == nil || *cronJob.Spec.Suspend != runner.Spec.Suspend {
        cronJob.Spec.Suspend = &runner.Spec.Suspend
        needsUpdate = true
    }

    if needsUpdate {
        if err := r.Update(ctx, cronJob); err != nil {
            log.Error(err, "unable to update CronJob")
            return ctrl.Result{}, err
        }
        log.Info("updated CronJob", "cronjob", cronJobName)
    }

    // Mirror the last schedule time into the runner status; updateStatus
    // below persists it together with the condition in a single update.
    if cronJob.Status.LastScheduleTime != nil {
        runner.Status.LastScheduleTime = cronJob.Status.LastScheduleTime
    }

    r.updateStatus(ctx, runner, "Available", metav1.ConditionTrue, "CronJobActive", "CronJob is active")
    return ctrl.Result{}, nil
}

// constructJob creates a Job specification
func (r *GitlabJobRunnerReconciler) constructJob(runner *batchv1alpha1.GitlabJobRunner) *batchv1.Job {
    image := runner.Spec.Image
    if image == "" {
        image = defaultImage
    }

    branch := runner.Spec.Branch
    if branch == "" {
        branch = defaultBranch
    }

    backoffLimit := int32(3)               // retry failed pods up to three times
    ttlSecondsAfterFinished := int32(3600) // garbage-collect the Job an hour after it finishes

    job := &batchv1.Job{
        ObjectMeta: metav1.ObjectMeta{
            Name:      runner.Name,
            Namespace: runner.Namespace,
            Labels: map[string]string{
                "app.kubernetes.io/name":       "gitlabjobrunner",
                "app.kubernetes.io/instance":   runner.Name,
                "app.kubernetes.io/managed-by": "gitlabjobrunner-controller",
            },
        },
        Spec: batchv1.JobSpec{
            BackoffLimit:            &backoffLimit,
            TTLSecondsAfterFinished: &ttlSecondsAfterFinished,
            Template: corev1.PodTemplateSpec{
                Spec: r.constructPodSpec(runner, image, branch),
            },
        },
    }

    return job
}

// constructCronJob creates a CronJob specification
func (r *GitlabJobRunnerReconciler) constructCronJob(runner *batchv1alpha1.GitlabJobRunner) *batchv1.CronJob {
    image := runner.Spec.Image
    if image == "" {
        image = defaultImage
    }

    branch := runner.Spec.Branch
    if branch == "" {
        branch = defaultBranch
    }

    // Default to Forbid so scheduled runs never overlap; honor an explicit
    // policy from the spec when one is set.
    concurrencyPolicy := batchv1.ForbidConcurrent
    switch runner.Spec.ConcurrencyPolicy {
    case "Allow":
        concurrencyPolicy = batchv1.AllowConcurrent
    case "Replace":
        concurrencyPolicy = batchv1.ReplaceConcurrent
    }

    successfulJobsHistoryLimit := int32(3)
    failedJobsHistoryLimit := int32(1)

    cronJob := &batchv1.CronJob{
        ObjectMeta: metav1.ObjectMeta{
            Name:      runner.Name,
            Namespace: runner.Namespace,
            Labels: map[string]string{
                "app.kubernetes.io/name":       "gitlabjobrunner",
                "app.kubernetes.io/instance":   runner.Name,
                "app.kubernetes.io/managed-by": "gitlabjobrunner-controller",
            },
        },
        Spec: batchv1.CronJobSpec{
            Schedule:                   runner.Spec.Schedule,
            ConcurrencyPolicy:          concurrencyPolicy,
            Suspend:                    &runner.Spec.Suspend,
            SuccessfulJobsHistoryLimit: &successfulJobsHistoryLimit,
            FailedJobsHistoryLimit:     &failedJobsHistoryLimit,
            JobTemplate: batchv1.JobTemplateSpec{
                Spec: batchv1.JobSpec{
                    Template: corev1.PodTemplateSpec{
                        Spec: r.constructPodSpec(runner, image, branch),
                    },
                },
            },
        },
    }

    return cronJob
}

// constructPodSpec creates the pod specification for Job/CronJob
func (r *GitlabJobRunnerReconciler) constructPodSpec(runner *batchv1alpha1.GitlabJobRunner, image, branch string) corev1.PodSpec {
    serviceAccount := runner.Spec.ServiceAccountName
    if serviceAccount == "" {
        serviceAccount = "default"
    }

    return corev1.PodSpec{
        ServiceAccountName: serviceAccount,
        RestartPolicy:      corev1.RestartPolicyOnFailure,
        InitContainers: []corev1.Container{
            {
                Name:  "git-clone",
                Image: image,
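                // Clone over HTTPS, authenticating with an oauth2 token
                // embedded in the URL (https://oauth2:<token>@host/path);
                // the sed call strips the scheme from GITLAB_URL so the
                // host can follow the credentials.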
                Command: []string{
                    "sh",
                    "-c",
                    fmt.Sprintf(`
git clone -b %s --depth 1 https://oauth2:$GITLAB_TOKEN@$(echo $GITLAB_URL | sed 's|https://||')/%s /workspace
`, branch, runner.Spec.RepositoryURL),
                },
                Env: []corev1.EnvVar{
                    {
                        Name: "GITLAB_URL",
                        ValueFrom: &corev1.EnvVarSource{
                            SecretKeyRef: &corev1.SecretKeySelector{
                                LocalObjectReference: corev1.LocalObjectReference{
                                    Name: runner.Spec.GitlabSecretRef,
                                },
                                Key: "GITLAB_URL",
                            },
                        },
                    },
                    {
                        Name: "GITLAB_TOKEN",
                        ValueFrom: &corev1.EnvVarSource{
                            SecretKeyRef: &corev1.SecretKeySelector{
                                LocalObjectReference: corev1.LocalObjectReference{
                                    Name: runner.Spec.GitlabSecretRef,
                                },
                                Key: "GITLAB_TOKEN",
                            },
                        },
                    },
                },
                VolumeMounts: []corev1.VolumeMount{
                    {
                        Name:      "workspace",
                        MountPath: "/workspace",
                    },
                },
            },
        },
        Containers: []corev1.Container{
            {
                Name:  "executor",
                Image: "bitnami/kubectl:latest",
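                // Run the script that the init container checked out; the
                // path is interpreted relative to the repository root.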
                Command: []string{
                    "sh",
                    "-c",
                    fmt.Sprintf("cd /workspace && sh %s", runner.Spec.ScriptPath),
                },
                VolumeMounts: []corev1.VolumeMount{
                    {
                        Name:      "workspace",
                        MountPath: "/workspace",
                    },
                },
            },
        },
        Volumes: []corev1.Volume{
            {
                Name: "workspace",
                VolumeSource: corev1.VolumeSource{
                    EmptyDir: &corev1.EmptyDirVolumeSource{},
                },
            },
        },
    }
}

// updateStatus updates the status conditions
func (r *GitlabJobRunnerReconciler) updateStatus(ctx context.Context, runner *batchv1alpha1.GitlabJobRunner, condType string, status metav1.ConditionStatus, reason, message string) {
    condition := metav1.Condition{
        Type:               condType,
        Status:             status,
        ObservedGeneration: runner.Generation,
        LastTransitionTime: metav1.Now(),
        Reason:             reason,
        Message:            message,
    }

    // Remove any existing condition of the same type
    for i, c := range runner.Status.Conditions {
        if c.Type == condType {
            runner.Status.Conditions = append(runner.Status.Conditions[:i], runner.Status.Conditions[i+1:]...)
            break
        }
    }

    runner.Status.Conditions = append(runner.Status.Conditions, condition)
    if err := r.Status().Update(ctx, runner); err != nil {
        logf.FromContext(ctx).Error(err, "unable to update GitlabJobRunner status")
    }
}
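
// The remove-then-append above is essentially what
// k8s.io/apimachinery/pkg/api/meta.SetStatusCondition does, except that the
// upstream helper also preserves LastTransitionTime when the condition's
// status has not actually changed.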

// cleanupResources handles cleanup when a GitlabJobRunner is deleted.
// The Job/CronJob carry owner references to the runner, so the garbage
// collector would eventually remove them anyway; deleting them here just
// makes teardown immediate.
func (r *GitlabJobRunnerReconciler) cleanupResources(ctx context.Context, runner *batchv1alpha1.GitlabJobRunner) error {
    log := logf.FromContext(ctx)

    // Delete the associated Job if it exists
    job := &batchv1.Job{}
    if err := r.Get(ctx, types.NamespacedName{Name: runner.Name, Namespace: runner.Namespace}, job); err == nil {
        if err := r.Delete(ctx, job); err != nil {
            log.Error(err, "unable to delete Job")
            return err
        }
    }

    // Delete the associated CronJob if it exists
    cronJob := &batchv1.CronJob{}
    if err := r.Get(ctx, types.NamespacedName{Name: runner.Name, Namespace: runner.Namespace}, cronJob); err == nil {
        if err := r.Delete(ctx, cronJob); err != nil {
            log.Error(err, "unable to delete CronJob")
            return err
        }
    }

    return nil
}

// SetupWithManager sets up the controller with the Manager.
func (r *GitlabJobRunnerReconciler) SetupWithManager(mgr ctrl.Manager) error {
    return ctrl.NewControllerManagedBy(mgr).
        For(&batchv1alpha1.GitlabJobRunner{}).
        Owns(&batchv1.Job{}).     // requeue the owning runner when its Jobs change
        Owns(&batchv1.CronJob{}). // requeue the owning runner when its CronJobs change
        Named("gitlabjobrunner").
        Complete(r)
}

internal/controller/gitlabjobrunner_controller_test.go (new file, 84 lines)
@@ -0,0 +1,84 @@
/*
Copyright 2025.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package controller

import (
    "context"

    . "github.com/onsi/ginkgo/v2"
    . "github.com/onsi/gomega"
    "k8s.io/apimachinery/pkg/api/errors"
    "k8s.io/apimachinery/pkg/types"
    "sigs.k8s.io/controller-runtime/pkg/reconcile"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

    batchv1alpha1 "github.com/rml/gitlab-job-runner/api/v1alpha1"
)
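
// These specs run against the envtest API server bootstrapped in
// suite_test.go; k8sClient is the shared client created there.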

var _ = Describe("GitlabJobRunner Controller", func() {
    Context("When reconciling a resource", func() {
        const resourceName = "test-resource"

        ctx := context.Background()

        typeNamespacedName := types.NamespacedName{
            Name:      resourceName,
            Namespace: "default", // TODO(user): Modify as needed
        }
        gitlabjobrunner := &batchv1alpha1.GitlabJobRunner{}

        BeforeEach(func() {
            By("creating the custom resource for the Kind GitlabJobRunner")
            err := k8sClient.Get(ctx, typeNamespacedName, gitlabjobrunner)
            if err != nil && errors.IsNotFound(err) {
                resource := &batchv1alpha1.GitlabJobRunner{
                    ObjectMeta: metav1.ObjectMeta{
                        Name:      resourceName,
                        Namespace: "default",
                    },
                    // TODO(user): Specify other spec details if needed.
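                    // Note: if the CRD marks spec fields (for example the
                    // repository URL or secret reference) as required, they
                    // must be populated here for Create to succeed.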
                }
                Expect(k8sClient.Create(ctx, resource)).To(Succeed())
            }
        })

        AfterEach(func() {
            // TODO(user): Cleanup logic after each test, like removing the resource instance.
            resource := &batchv1alpha1.GitlabJobRunner{}
            err := k8sClient.Get(ctx, typeNamespacedName, resource)
            Expect(err).NotTo(HaveOccurred())

            By("Cleanup the specific resource instance GitlabJobRunner")
            Expect(k8sClient.Delete(ctx, resource)).To(Succeed())
        })

        It("should successfully reconcile the resource", func() {
            By("Reconciling the created resource")
            controllerReconciler := &GitlabJobRunnerReconciler{
                Client: k8sClient,
                Scheme: k8sClient.Scheme(),
            }

            _, err := controllerReconciler.Reconcile(ctx, reconcile.Request{
                NamespacedName: typeNamespacedName,
            })
            Expect(err).NotTo(HaveOccurred())
            // TODO(user): Add more specific assertions depending on your controller's reconciliation logic.
            // Example: If you expect a certain status condition after reconciliation, verify it here.
        })
    })
})

internal/controller/suite_test.go (new file, 116 lines)
@@ -0,0 +1,116 @@
/*
Copyright 2025.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package controller

import (
    "context"
    "os"
    "path/filepath"
    "testing"

    . "github.com/onsi/ginkgo/v2"
    . "github.com/onsi/gomega"

    "k8s.io/client-go/kubernetes/scheme"
    "k8s.io/client-go/rest"
    "sigs.k8s.io/controller-runtime/pkg/client"
    "sigs.k8s.io/controller-runtime/pkg/envtest"
    logf "sigs.k8s.io/controller-runtime/pkg/log"
    "sigs.k8s.io/controller-runtime/pkg/log/zap"

    batchv1alpha1 "github.com/rml/gitlab-job-runner/api/v1alpha1"
    // +kubebuilder:scaffold:imports
)

// These tests use Ginkgo (BDD-style Go testing framework). Refer to
// http://onsi.github.io/ginkgo/ to learn more about Ginkgo.

var (
    ctx       context.Context
    cancel    context.CancelFunc
    testEnv   *envtest.Environment
    cfg       *rest.Config
    k8sClient client.Client
)

func TestControllers(t *testing.T) {
    RegisterFailHandler(Fail)

    RunSpecs(t, "Controller Suite")
}

var _ = BeforeSuite(func() {
    logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true)))

    ctx, cancel = context.WithCancel(context.TODO())

    var err error
    err = batchv1alpha1.AddToScheme(scheme.Scheme)
    Expect(err).NotTo(HaveOccurred())

    // +kubebuilder:scaffold:scheme

    By("bootstrapping test environment")
    testEnv = &envtest.Environment{
        CRDDirectoryPaths:     []string{filepath.Join("..", "..", "config", "crd", "bases")},
        ErrorIfCRDPathMissing: true,
    }
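    // ErrorIfCRDPathMissing makes the suite fail fast when the generated CRDs
    // are not present under config/crd/bases (typically produced by the
    // project's `make manifests` target).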

    // Retrieve the first found binary directory to allow running tests from IDEs
    if getFirstFoundEnvTestBinaryDir() != "" {
        testEnv.BinaryAssetsDirectory = getFirstFoundEnvTestBinaryDir()
    }

    // cfg is defined in this file globally.
    cfg, err = testEnv.Start()
    Expect(err).NotTo(HaveOccurred())
    Expect(cfg).NotTo(BeNil())

    k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme})
    Expect(err).NotTo(HaveOccurred())
    Expect(k8sClient).NotTo(BeNil())
})

var _ = AfterSuite(func() {
    By("tearing down the test environment")
    cancel()
    err := testEnv.Stop()
    Expect(err).NotTo(HaveOccurred())
})

// getFirstFoundEnvTestBinaryDir locates the first binary in the specified path.
// ENVTEST-based tests depend on specific binaries, usually located in paths set by
// controller-runtime. When running tests directly (e.g., via an IDE) without using
// Makefile targets, the 'BinaryAssetsDirectory' must be explicitly configured.
//
// This function streamlines the process by finding the required binaries, similar to
// setting the 'KUBEBUILDER_ASSETS' environment variable. To ensure the binaries are
// properly set up, run 'make setup-envtest' beforehand.
func getFirstFoundEnvTestBinaryDir() string {
    basePath := filepath.Join("..", "..", "bin", "k8s")
    entries, err := os.ReadDir(basePath)
    if err != nil {
        logf.Log.Error(err, "Failed to read directory", "path", basePath)
        return ""
    }
    for _, entry := range entries {
        if entry.IsDir() {
            return filepath.Join(basePath, entry.Name())
        }
    }
    return ""
}