diff --git a/pom.xml b/pom.xml
index 8b416bd9..9ded82fe 100644
--- a/pom.xml
+++ b/pom.xml
@@ -63,6 +63,7 @@
1.9.21.2
2.25.0
2.12.0
+ 0.1.0-SNAPSHOT
@@ -71,6 +72,11 @@
GitHub Apache Maven Packages
https://maven.pkg.github.com/skodjob/opendatahub-crds
+
+ test-frame
+ GitHub Apache Maven Packages
+ https://maven.pkg.github.com/skodjob/test-frame
+
@@ -93,6 +99,21 @@
+
+ io.skodjob
+ test-frame-common
+ ${test-frame.version}
+
+
+ io.skodjob
+ test-frame-kubernetes
+ ${test-frame.version}
+
+
+ io.skodjob
+ test-frame-openshift
+ ${test-frame.version}
+
io.fabric8
openshift-client
@@ -253,6 +274,9 @@
junit.jupiter.extensions.autodetection.enabled = true
+
+ oc
+
diff --git a/settings.xml b/settings.xml
index dbe1ec00..b5d80e68 100644
--- a/settings.xml
+++ b/settings.xml
@@ -5,6 +5,11 @@
x-access-token
${env.GITHUB_TOKEN}
+
+ test-frame
+ x-access-token
+ ${env.GITHUB_TOKEN}
+
test-metadata-generator
x-access-token
diff --git a/src/main/java/io/odh/test/Environment.java b/src/main/java/io/odh/test/Environment.java
index 1719a65f..6dc39fae 100644
--- a/src/main/java/io/odh/test/Environment.java
+++ b/src/main/java/io/odh/test/Environment.java
@@ -38,8 +38,6 @@ public class Environment {
public static final String USER_PATH = System.getProperty("user.dir");
private static final String CONFIG_FILE_PATH_ENV = "ENV_FILE";
- private static final String TOKEN_ENV = "KUBE_TOKEN";
- private static final String URL_ENV = "KUBE_URL";
private static final String PRODUCT_ENV = "PRODUCT";
private static final String LOG_DIR_ENV = "LOG_DIR";
@@ -74,9 +72,6 @@ public class Environment {
* Set values
*/
public static final String PRODUCT = getOrDefault(PRODUCT_ENV, PRODUCT_ODH);
- public static final String RUN_USER = getOrDefault("USER", null);
- public static final String KUBE_TOKEN = getOrDefault(TOKEN_ENV, null);
- public static final String KUBE_URL = getOrDefault(URL_ENV, null);
//Install
public static final boolean SKIP_INSTALL_OPERATOR_DEPS = getOrDefault(SKIP_INSTALL_OPERATOR_DEPS_ENV, Boolean::valueOf, false);
diff --git a/src/main/java/io/odh/test/TestUtils.java b/src/main/java/io/odh/test/TestUtils.java
index 1a09b8b9..83ca92b0 100644
--- a/src/main/java/io/odh/test/TestUtils.java
+++ b/src/main/java/io/odh/test/TestUtils.java
@@ -7,7 +7,15 @@
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.exc.InvalidFormatException;
import com.fasterxml.jackson.dataformat.yaml.YAMLFactory;
-import io.odh.test.framework.WaitException;
+import io.fabric8.kubernetes.api.model.EndpointSubset;
+import io.fabric8.kubernetes.api.model.Endpoints;
+import io.fabric8.kubernetes.client.KubernetesClientException;
+import io.fabric8.kubernetes.client.dsl.Resource;
+import io.fabric8.openshift.api.model.operatorhub.v1alpha1.InstallPlan;
+import io.opendatahub.datasciencecluster.v1.datascienceclusterstatus.Conditions;
+import io.skodjob.testframe.resources.KubeResourceManager;
+import io.skodjob.testframe.utils.KubeUtils;
+import io.skodjob.testframe.wait.Wait;
import org.junit.jupiter.api.TestInfo;
import org.junit.jupiter.api.extension.ExtensionContext;
import org.slf4j.Logger;
@@ -15,8 +23,6 @@
import java.io.IOException;
import java.io.InputStream;
-import java.io.PrintWriter;
-import java.io.StringWriter;
import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
@@ -24,15 +30,13 @@
import java.nio.file.Path;
import java.nio.file.Paths;
import java.time.Duration;
+import java.util.List;
+import java.util.NoSuchElementException;
import java.util.concurrent.Callable;
-import java.util.concurrent.CompletableFuture;
-import java.util.concurrent.Executor;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
-import java.util.function.BooleanSupplier;
import static io.odh.test.TestConstants.GLOBAL_POLL_INTERVAL_SHORT;
import static io.odh.test.TestConstants.GLOBAL_TIMEOUT;
@@ -56,79 +60,6 @@ private TestUtils() {
// All static methods
}
- /**
- * Poll the given {@code ready} function every {@code pollIntervalMs} milliseconds until it returns true,
- * or throw a WaitException if it doesn't return true within {@code timeoutMs} milliseconds.
- *
- * @return The remaining time left until timeout occurs
- * (helpful if you have several calls which need to share a common timeout),
- */
- public static long waitFor(String description, long pollIntervalMs, long timeoutMs, BooleanSupplier ready) {
- return waitFor(description, pollIntervalMs, timeoutMs, ready, () -> { });
- }
-
- public static long waitFor(String description, long pollIntervalMs, long timeoutMs, BooleanSupplier ready, Runnable onTimeout) {
- LOGGER.debug("Waiting for {}", description);
- long deadline = System.currentTimeMillis() + timeoutMs;
-
- String exceptionMessage = null;
- String previousExceptionMessage = null;
-
- // in case we are polling every 1s, we want to print exception after x tries, not on the first try
- // for minutes poll interval will 2 be enough
- int exceptionAppearanceCount = Duration.ofMillis(pollIntervalMs).toMinutes() > 0 ? 2 : Math.max((int) (timeoutMs / pollIntervalMs) / 4, 2);
- int exceptionCount = 0;
- int newExceptionAppearance = 0;
-
- StringWriter stackTraceError = new StringWriter();
-
- while (true) {
- boolean result;
- try {
- result = ready.getAsBoolean();
- } catch (Exception e) {
- exceptionMessage = e.getMessage();
-
- if (++exceptionCount == exceptionAppearanceCount && exceptionMessage != null && exceptionMessage.equals(previousExceptionMessage)) {
- LOGGER.error("While waiting for {} exception occurred: {}", description, exceptionMessage);
- // log the stacktrace
- e.printStackTrace(new PrintWriter(stackTraceError));
- } else if (exceptionMessage != null && !exceptionMessage.equals(previousExceptionMessage) && ++newExceptionAppearance == 2) {
- previousExceptionMessage = exceptionMessage;
- }
-
- result = false;
- }
- long timeLeft = deadline - System.currentTimeMillis();
- if (result) {
- return timeLeft;
- }
- if (timeLeft <= 0) {
- if (exceptionCount > 1) {
- LOGGER.error("Exception waiting for {}, {}", description, exceptionMessage);
-
- if (!stackTraceError.toString().isEmpty()) {
- // printing handled stacktrace
- LOGGER.error(stackTraceError.toString());
- }
- }
- onTimeout.run();
- WaitException waitException = new WaitException("Timeout after " + timeoutMs + " ms waiting for " + description);
- waitException.printStackTrace();
- throw waitException;
- }
- long sleepTime = Math.min(pollIntervalMs, timeLeft);
- if (LOGGER.isTraceEnabled()) {
- LOGGER.trace("{} not ready, will try again in {} ms ({}ms till timeout)", description, sleepTime, timeLeft);
- }
- try {
- Thread.sleep(sleepTime);
- } catch (InterruptedException e) {
- return deadline - System.currentTimeMillis();
- }
- }
- }
-
/**
* Polls the given HTTP {@code url} until it gives != 503 status code
*/
@@ -141,7 +72,7 @@ public static void waitForServiceNotUnavailable(String url) {
}
public static void waitForServiceNotUnavailable(HttpClient httpClient, String url) {
- TestUtils.waitFor("service to be not unavailable", GLOBAL_POLL_INTERVAL_SHORT, GLOBAL_TIMEOUT, () -> {
+ Wait.until("service to be not unavailable", GLOBAL_POLL_INTERVAL_SHORT, GLOBAL_TIMEOUT, () -> {
HttpRequest request = HttpRequest.newBuilder()
.uri(URI.create(url))
.GET()
@@ -167,42 +98,6 @@ public Thread newThread(Runnable r) {
}
});
- public static CompletableFuture asyncWaitFor(String description, long pollIntervalMs, long timeoutMs, BooleanSupplier ready) {
- LOGGER.info("Waiting for {}", description);
- long deadline = System.currentTimeMillis() + timeoutMs;
- CompletableFuture future = new CompletableFuture<>();
- Executor delayed = CompletableFuture.delayedExecutor(pollIntervalMs, TimeUnit.MILLISECONDS, EXECUTOR);
- Runnable r = new Runnable() {
- @Override
- public void run() {
- boolean result;
- try {
- result = ready.getAsBoolean();
- } catch (Exception e) {
- future.completeExceptionally(e);
- return;
- }
- long timeLeft = deadline - System.currentTimeMillis();
- if (!future.isDone()) {
- if (!result) {
- if (timeLeft >= 0) {
- if (LOGGER.isTraceEnabled()) {
- LOGGER.trace("{} not ready, will try again ({}ms till timeout)", description, timeLeft);
- }
- delayed.execute(this);
- } else {
- future.completeExceptionally(new TimeoutException(String.format("Waiting for %s timeout %s exceeded", description, timeoutMs)));
- }
- } else {
- future.complete(null);
- }
- }
- }
- };
- r.run();
- return future;
- }
-
public static InputStream getFileFromResourceAsStream(String fileName) {
// The class loader that loaded the class
@@ -272,4 +167,72 @@ public static T runUntilPass(int retry, Callable fn) {
}
throw new IllegalStateException(String.format("Command wasn't pass in %s attempts", retry));
}
+
+ public static io.opendatahub.datasciencecluster.v1.datascienceclusterstatus.Conditions getDscConditionByType(List conditions, String type) {
+ return conditions.stream().filter(c -> c.getType().equals(type)).findFirst().orElse(null);
+ }
+
+ public static org.kubeflow.v1.notebookstatus.Conditions getNotebookConditionByType(List conditions, String type) {
+ return conditions.stream().filter(c -> c.getType().equals(type)).findFirst().orElse(null);
+ }
+
+ public static io.kserve.serving.v1beta1.inferenceservicestatus.Conditions getInferenceServiceConditionByType(List conditions, String type) {
+ return conditions.stream().filter(c -> c.getType().equals(type)).findFirst().orElse(null);
+ }
+
+ public static void clearOdhRemainingResources() {
+ KubeResourceManager.getKubeClient().getClient().apiextensions().v1().customResourceDefinitions().list().getItems()
+ .stream().filter(crd -> crd.getMetadata().getName().contains("opendatahub.io")).toList()
+ .forEach(crd -> {
+ LOGGER.info("Deleting CRD {}", crd.getMetadata().getName());
+ KubeResourceManager.getKubeClient().getClient().resource(crd).delete();
+ });
+ KubeResourceManager.getKubeClient().getClient().namespaces().withName("opendatahub").delete();
+ }
+
+ /**
+ * TODO - this should be removed when https://github.com/opendatahub-io/opendatahub-operator/issues/765 will be resolved
+ */
+ public static void deleteDefaultDSCI() {
+ LOGGER.info("Clearing DSCI ...");
+ KubeResourceManager.getKubeCmdClient().exec(false, true, Long.valueOf(GLOBAL_TIMEOUT).intValue(), "delete", "dsci", "--all");
+ }
+
+ public static void waitForInstallPlan(String namespace, String csvName) {
+ Wait.until(String.format("Install plan with new version: %s:%s", namespace, csvName),
+ GLOBAL_POLL_INTERVAL_SHORT, GLOBAL_TIMEOUT, () -> {
+ try {
+ InstallPlan ip = KubeUtils.getNonApprovedInstallPlan(namespace, csvName);
+ LOGGER.debug("Found InstallPlan {} - {}", ip.getMetadata().getName(), ip.getSpec().getClusterServiceVersionNames());
+ return true;
+ } catch (NoSuchElementException ex) {
+ LOGGER.debug("No new install plan available. Checking again ...");
+ return false;
+ }
+ }, () -> { });
+ }
+
+ public static void waitForEndpoints(String name, Resource endpoints) {
+ Wait.until("%s service endpoints to come up".formatted(name), GLOBAL_POLL_INTERVAL_SHORT, GLOBAL_TIMEOUT, () -> {
+ try {
+ Endpoints endpointset = endpoints.get();
+ if (endpointset == null) {
+ return false;
+ }
+ List subsets = endpointset.getSubsets();
+ if (subsets.isEmpty()) {
+ return false;
+ }
+ for (EndpointSubset subset : subsets) {
+ if (!subset.getAddresses().isEmpty()) { return true; }
+ }
+ } catch (KubernetesClientException e) {
+ if (e.getCode() == 404) {
+ return false;
+ }
+ throw e;
+ }
+ return false;
+ });
+ }
}
diff --git a/src/main/java/io/odh/test/framework/WaitException.java b/src/main/java/io/odh/test/framework/WaitException.java
deleted file mode 100644
index bead013c..00000000
--- a/src/main/java/io/odh/test/framework/WaitException.java
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * Copyright Skodjob authors.
- * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
- */
-package io.odh.test.framework;
-
-public class WaitException extends RuntimeException {
- public WaitException(String message) {
- super(message);
- }
-
- public WaitException(Throwable cause) {
- super(cause);
- }
-}
diff --git a/src/main/java/io/odh/test/framework/listeners/OdhResourceCleaner.java b/src/main/java/io/odh/test/framework/listeners/OdhResourceCleaner.java
deleted file mode 100644
index e1f35486..00000000
--- a/src/main/java/io/odh/test/framework/listeners/OdhResourceCleaner.java
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Copyright Skodjob authors.
- * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
- */
-package io.odh.test.framework.listeners;
-
-import io.odh.test.Environment;
-import io.odh.test.platform.KubeUtils;
-import org.junit.jupiter.api.extension.AfterAllCallback;
-import org.junit.jupiter.api.extension.ExtensionContext;
-
-public class OdhResourceCleaner implements AfterAllCallback {
-
- @Override
- public void afterAll(ExtensionContext extensionContext) {
- if (!Environment.SKIP_INSTALL_OPERATOR && !Environment.SKIP_DEPLOY_DSCI_DSC) {
- KubeUtils.clearOdhRemainingResources();
- }
- }
-}
diff --git a/src/main/java/io/odh/test/framework/listeners/ResourceManagerContextHandler.java b/src/main/java/io/odh/test/framework/listeners/ResourceManagerContextHandler.java
deleted file mode 100644
index c3ccf9c7..00000000
--- a/src/main/java/io/odh/test/framework/listeners/ResourceManagerContextHandler.java
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Copyright Skodjob authors.
- * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
- */
-package io.odh.test.framework.listeners;
-
-import io.odh.test.framework.manager.ResourceManager;
-import org.junit.jupiter.api.extension.AfterAllCallback;
-import org.junit.jupiter.api.extension.AfterEachCallback;
-import org.junit.jupiter.api.extension.BeforeAllCallback;
-import org.junit.jupiter.api.extension.BeforeEachCallback;
-import org.junit.jupiter.api.extension.ExtensionContext;
-
-/**
- * jUnit5 specific class which listening on test callbacks
- */
-public class ResourceManagerContextHandler implements BeforeAllCallback, BeforeEachCallback, AfterAllCallback, AfterEachCallback {
-
- @Override
- public void beforeAll(ExtensionContext extensionContext) throws Exception {
- ResourceManager.getInstance().switchToClassResourceStack();
- }
-
- @Override
- public void beforeEach(ExtensionContext extensionContext) throws Exception {
- ResourceManager.getInstance().switchToTestResourceStack();
- }
-
- @Override
- public void afterAll(ExtensionContext extensionContext) throws Exception {
- ResourceManager.getInstance().switchToClassResourceStack();
- }
-
- @Override
- public void afterEach(ExtensionContext extensionContext) throws Exception {
- ResourceManager.getInstance().switchToTestResourceStack();
- }
-}
diff --git a/src/main/java/io/odh/test/framework/listeners/ResourceManagerDeleteHandler.java b/src/main/java/io/odh/test/framework/listeners/ResourceManagerDeleteHandler.java
deleted file mode 100644
index 57a896f8..00000000
--- a/src/main/java/io/odh/test/framework/listeners/ResourceManagerDeleteHandler.java
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * Copyright Skodjob authors.
- * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
- */
-package io.odh.test.framework.listeners;
-
-import io.odh.test.framework.manager.ResourceManager;
-import org.junit.jupiter.api.extension.AfterAllCallback;
-import org.junit.jupiter.api.extension.AfterEachCallback;
-import org.junit.jupiter.api.extension.ExtensionContext;
-
-/**
- * jUnit5 specific class which listening on test callbacks
- */
-public class ResourceManagerDeleteHandler implements AfterAllCallback, AfterEachCallback {
- @Override
- public void afterAll(ExtensionContext extensionContext) throws Exception {
- ResourceManager.getInstance().switchToClassResourceStack();
- ResourceManager.getInstance().deleteResources();
- }
-
- @Override
- public void afterEach(ExtensionContext extensionContext) throws Exception {
- ResourceManager.getInstance().switchToTestResourceStack();
- ResourceManager.getInstance().deleteResources();
- }
-}
diff --git a/src/main/java/io/odh/test/framework/listeners/TestVisualSeparator.java b/src/main/java/io/odh/test/framework/listeners/TestVisualSeparator.java
deleted file mode 100644
index b946ff0a..00000000
--- a/src/main/java/io/odh/test/framework/listeners/TestVisualSeparator.java
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Copyright Skodjob authors.
- * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
- */
-package io.odh.test.framework.listeners;
-
-import io.odh.test.framework.ExtensionContextParameterResolver;
-import io.odh.test.LoggerUtils;
-import org.junit.jupiter.api.AfterEach;
-import org.junit.jupiter.api.BeforeEach;
-import org.junit.jupiter.api.extension.ExtendWith;
-import org.junit.jupiter.api.extension.ExtensionContext;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-@ExtendWith(ExtensionContextParameterResolver.class)
-public interface TestVisualSeparator {
- Logger LOGGER = LoggerFactory.getLogger(TestVisualSeparator.class);
-
- @BeforeEach
- default void beforeEachTest(ExtensionContext testContext) {
- LoggerUtils.logSeparator();
- LOGGER.info(String.format("%s.%s-STARTED", testContext.getRequiredTestClass().getName(),
- testContext.getDisplayName().replace("()", "")));
- }
-
- @AfterEach
- default void afterEachTest(ExtensionContext testContext) {
- LOGGER.info(String.format("%s.%s-FINISHED", testContext.getRequiredTestClass().getName(),
- testContext.getDisplayName().replace("()", "")));
- LoggerUtils.logSeparator();
- }
-}
diff --git a/src/main/java/io/odh/test/framework/logs/LogCollector.java b/src/main/java/io/odh/test/framework/logs/LogCollector.java
index 09b797a6..ec49b498 100644
--- a/src/main/java/io/odh/test/framework/logs/LogCollector.java
+++ b/src/main/java/io/odh/test/framework/logs/LogCollector.java
@@ -10,9 +10,10 @@
import io.odh.test.OdhConstants;
import io.odh.test.TestConstants;
import io.odh.test.TestUtils;
-import io.odh.test.framework.manager.ResourceManager;
-import io.odh.test.platform.KubeClient;
-import io.odh.test.platform.cmdClient.KubeCmdClient;
+import io.skodjob.testframe.clients.KubeClient;
+import io.skodjob.testframe.clients.cmdClient.KubeCmdClient;
+import io.skodjob.testframe.resources.KubeResourceManager;
+import io.skodjob.testframe.utils.KubeUtils;
import org.junit.jupiter.api.extension.ExtensionContext;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -31,12 +32,12 @@ public static void saveKubernetesState(ExtensionContext extensionContext, Throwa
Path logPath = TestUtils.getLogPath(Environment.LOG_DIR.resolve("failedTest").toString(), extensionContext);
Files.createDirectories(logPath);
try {
- ResourceManager.addNamespaceForLogCollect(OdhConstants.BUNDLE_OPERATOR_NAMESPACE);
- ResourceManager.addNamespaceForLogCollect(OdhConstants.OLM_OPERATOR_NAMESPACE);
- ResourceManager.addNamespaceForLogCollect(OdhConstants.CONTROLLERS_NAMESPACE);
- ResourceManager.addNamespaceForLogCollect(OdhConstants.MONITORING_NAMESPACE);
- ResourceManager.addNamespaceForLogCollect(OdhConstants.ISTIO_SYSTEM_NAMESPACE);
- ResourceManager.addNamespaceForLogCollect(OdhConstants.KNATIVE_SERVING_NAMESPACE);
+ KubeUtils.labelNamespace(OdhConstants.BUNDLE_OPERATOR_NAMESPACE, TestConstants.LOG_COLLECT_LABEL, "true");
+ KubeUtils.labelNamespace(OdhConstants.OLM_OPERATOR_NAMESPACE, TestConstants.LOG_COLLECT_LABEL, "true");
+ KubeUtils.labelNamespace(OdhConstants.CONTROLLERS_NAMESPACE, TestConstants.LOG_COLLECT_LABEL, "true");
+ KubeUtils.labelNamespace(OdhConstants.MONITORING_NAMESPACE, TestConstants.LOG_COLLECT_LABEL, "true");
+ KubeUtils.labelNamespace(OdhConstants.ISTIO_SYSTEM_NAMESPACE, TestConstants.LOG_COLLECT_LABEL, "true");
+ KubeUtils.labelNamespace(OdhConstants.KNATIVE_SERVING_NAMESPACE, TestConstants.LOG_COLLECT_LABEL, "true");
} catch (Exception ignored) {
LOGGER.warn("Cannot label namespaces for collect logs");
}
@@ -59,7 +60,7 @@ private static void writeLogsFromPods(Path logpath, Pod pod) {
try {
LOGGER.debug("Get logs from pod {}/{} container {}", pod.getMetadata().getNamespace(), pod.getMetadata().getName(), container.getName());
Files.writeString(logpath.resolve(pod.getMetadata().getNamespace()).resolve(pod.getMetadata().getName() + "-" + container.getName() + ".log"),
- ResourceManager.getKubeClient().getLogsFromContainer(pod.getMetadata().getNamespace(), pod.getMetadata().getName(), container.getName()));
+ KubeResourceManager.getKubeClient().getLogsFromContainer(pod.getMetadata().getNamespace(), pod.getMetadata().getName(), container.getName()));
} catch (Exception e) {
LOGGER.warn("Cannot get logs for pod {}/{}", pod.getMetadata().getNamespace(), pod.getMetadata().getName());
}
@@ -75,7 +76,7 @@ private static void writePodsDescription(Path logpath, Pod pod) {
try {
LOGGER.debug("Get description of pod {}/{}", pod.getMetadata().getNamespace(), pod.getMetadata().getName());
Files.writeString(logpath.resolve(pod.getMetadata().getNamespace()).resolve(pod.getMetadata().getName() + ".describe.log"),
- ResourceManager.getKubeCmdClient().namespace(pod.getMetadata().getNamespace()).describe(pod.getKind(), pod.getMetadata().getName()));
+ KubeResourceManager.getKubeCmdClient().namespace(pod.getMetadata().getNamespace()).describe(pod.getKind(), pod.getMetadata().getName()));
} catch (Exception e) {
LOGGER.warn("Cannot get description of pod {}/{}", pod.getMetadata().getNamespace(), pod.getMetadata().getName());
}
@@ -90,7 +91,7 @@ private static void writeDeployments(Path logpath, Deployment deployment) {
try {
LOGGER.debug("Get deployment {}/{}", deployment.getMetadata().getNamespace(), deployment.getMetadata().getName());
Files.writeString(logpath.resolve(deployment.getMetadata().getNamespace()).resolve("deployment-" + deployment.getMetadata().getName() + ".yaml"),
- ResourceManager.getKubeCmdClient().exec(false, false, "get", "deployment", deployment.getMetadata().getName(),
+ KubeResourceManager.getKubeCmdClient().exec(false, false, "get", "deployment", deployment.getMetadata().getName(),
"-n", deployment.getMetadata().getNamespace(), "-o", "yaml").out());
} catch (Exception e) {
LOGGER.warn("Cannot get deployment of pod {}/{}", deployment.getMetadata().getNamespace(), deployment.getMetadata().getName());
@@ -98,8 +99,8 @@ private static void writeDeployments(Path logpath, Deployment deployment) {
}
private static void saveClusterState(Path logpath) throws IOException {
- KubeClient kube = ResourceManager.getKubeClient();
- KubeCmdClient> cmdClient = ResourceManager.getKubeCmdClient();
+ KubeClient kube = KubeResourceManager.getKubeClient();
+ KubeCmdClient> cmdClient = KubeResourceManager.getKubeCmdClient();
// Collecting cluster wide resources and CRs
Files.writeString(logpath.resolve("describe-cluster-nodes.log"), cmdClient.exec(false, false, "describe", "nodes").out());
@@ -114,7 +115,7 @@ private static void saveClusterState(Path logpath) throws IOException {
kube.getClient().namespaces().withLabel(TestConstants.LOG_COLLECT_LABEL).list().getItems().forEach(ns -> {
LOGGER.debug("Listing pods in {}", ns.getMetadata().getName());
- kube.listPods(ns.getMetadata().getName()).forEach(pod -> {
+ kube.getClient().pods().inNamespace(ns.getMetadata().getName()).list().getItems().forEach(pod -> {
writeLogsFromPods(logpath, pod);
writePodsDescription(logpath, pod);
});
diff --git a/src/main/java/io/odh/test/framework/manager/ResourceCondition.java b/src/main/java/io/odh/test/framework/manager/ResourceCondition.java
deleted file mode 100644
index 9fa6d7a8..00000000
--- a/src/main/java/io/odh/test/framework/manager/ResourceCondition.java
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Copyright Skodjob authors.
- * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
- */
-
-package io.odh.test.framework.manager;
-
-import io.fabric8.kubernetes.api.model.HasMetadata;
-
-import java.util.Objects;
-import java.util.function.Predicate;
-
-public class ResourceCondition {
- private final Predicate predicate;
- private final String conditionName;
-
- public ResourceCondition(Predicate predicate, String conditionName) {
- this.predicate = predicate;
- this.conditionName = conditionName;
- }
-
- public String getConditionName() {
- return conditionName;
- }
-
- public Predicate getPredicate() {
- return predicate;
- }
-
- public static ResourceCondition readiness(ResourceType type) {
- return new ResourceCondition<>(type::waitForReadiness, "readiness");
- }
-
- public static ResourceCondition deletion() {
- return new ResourceCondition<>(Objects::isNull, "deletion");
- }
-}
diff --git a/src/main/java/io/odh/test/framework/manager/ResourceItem.java b/src/main/java/io/odh/test/framework/manager/ResourceItem.java
deleted file mode 100644
index c4eafae3..00000000
--- a/src/main/java/io/odh/test/framework/manager/ResourceItem.java
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Copyright Skodjob authors.
- * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
- */
-package io.odh.test.framework.manager;
-
-import io.fabric8.kubernetes.api.model.HasMetadata;
-
-public final class ResourceItem {
-
- ThrowableRunner throwableRunner;
- T resource;
-
- public ResourceItem(ThrowableRunner throwableRunner, T resource) {
- this.throwableRunner = throwableRunner;
- this.resource = resource;
- }
-
- public ResourceItem(ThrowableRunner throwableRunner) {
- this.throwableRunner = throwableRunner;
- }
-
- public ThrowableRunner getThrowableRunner() {
- return throwableRunner;
- }
- public T getResource() {
- return resource;
- }
-}
diff --git a/src/main/java/io/odh/test/framework/manager/ResourceManager.java b/src/main/java/io/odh/test/framework/manager/ResourceManager.java
deleted file mode 100644
index b74444a4..00000000
--- a/src/main/java/io/odh/test/framework/manager/ResourceManager.java
+++ /dev/null
@@ -1,273 +0,0 @@
-/*
- * Copyright Skodjob authors.
- * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
- */
-package io.odh.test.framework.manager;
-
-import io.fabric8.kubernetes.api.model.HasMetadata;
-import io.fabric8.kubernetes.api.model.Namespace;
-import io.fabric8.kubernetes.api.model.admissionregistration.v1.ValidatingWebhookConfiguration;
-import io.fabric8.kubernetes.api.model.apiextensions.v1.CustomResourceDefinition;
-import io.fabric8.kubernetes.api.model.apps.Deployment;
-import io.fabric8.kubernetes.api.model.rbac.ClusterRole;
-import io.fabric8.kubernetes.api.model.rbac.ClusterRoleBinding;
-import io.odh.test.TestConstants;
-import io.odh.test.TestUtils;
-import io.odh.test.framework.manager.resources.DataScienceClusterResource;
-import io.odh.test.framework.manager.resources.DataScienceInitializationResource;
-import io.odh.test.framework.manager.resources.InferenceServiceResource;
-import io.odh.test.framework.manager.resources.NamespaceResource;
-import io.odh.test.framework.manager.resources.NotebookResource;
-import io.odh.test.framework.manager.resources.OperatorGroupResource;
-import io.odh.test.framework.manager.resources.SubscriptionResource;
-import io.odh.test.platform.KubeClient;
-import io.odh.test.platform.cmdClient.KubeCmdClient;
-import io.odh.test.platform.cmdClient.Oc;
-import io.odh.test.utils.DeploymentUtils;
-import io.opendatahub.datasciencecluster.v1.DataScienceCluster;
-import io.opendatahub.dscinitialization.v1.DSCInitialization;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.List;
-import java.util.Stack;
-
-import static org.junit.jupiter.api.Assertions.assertNotNull;
-import static org.junit.jupiter.api.Assertions.assertTrue;
-
-public class ResourceManager {
- private static final Logger LOGGER = LoggerFactory.getLogger(ResourceManager.class);
-
- private static ResourceManager instance;
- private static KubeClient client;
- private static KubeCmdClient> kubeCmdClient;
-
- static final Stack> CLASS_RESOURCE_STACK = new Stack<>();
- static final Stack> METHOD_RESOURCE_STACK = new Stack<>();
-
- static Stack> resourceStackPointer = CLASS_RESOURCE_STACK;
-
- static List defaultNamespacesForLogCollect = Arrays.asList(
- "openshift-marketplace",
- "openshift-operators"
- );
-
- public static synchronized ResourceManager getInstance() {
- if (instance == null) {
- instance = new ResourceManager();
- client = new KubeClient(TestConstants.DEFAULT_NAMESPACE);
- kubeCmdClient = new Oc(client.getKubeconfigPath());
- for (String ns : defaultNamespacesForLogCollect) {
- addNamespaceForLogCollect(ns);
- }
- }
- return instance;
- }
-
- public static KubeClient getKubeClient() {
- return client;
- }
-
- public static KubeCmdClient> getKubeCmdClient() {
- return kubeCmdClient;
- }
-
- private final ResourceType>[] resourceTypes = new ResourceType[]{
- new NamespaceResource(),
- new SubscriptionResource(),
- new OperatorGroupResource(),
- new DataScienceClusterResource(),
- new DataScienceInitializationResource(),
- new NotebookResource(),
- new InferenceServiceResource(),
- };
-
- public final void switchToTestResourceStack() {
- resourceStackPointer = METHOD_RESOURCE_STACK;
- }
-
- public final void switchToClassResourceStack() {
- resourceStackPointer = CLASS_RESOURCE_STACK;
- }
-
- public static void addNamespaceForLogCollect(String namespace) {
- NamespaceResource.labelNamespace(namespace, TestConstants.LOG_COLLECT_LABEL, "true");
- }
-
- public final void pushToStack(ResourceItem> item) {
- resourceStackPointer.push(item);
- }
-
- @SafeVarargs
- public final void createResourceWithoutWait(T... resources) {
- createResource(false, resources);
- }
-
- @SafeVarargs
- public final void createResourceWithWait(T... resources) {
- createResource(true, resources);
- }
-
- @SafeVarargs
- private void createResource(boolean waitReady, T... resources) {
- for (T resource : resources) {
- ResourceType type = findResourceType(resource);
-
- synchronized (this) {
- resourceStackPointer.push(
- new ResourceItem<>(
- () -> deleteResource(resource),
- resource
- ));
- }
-
- if (resource.getMetadata().getNamespace() == null) {
- LOGGER.info("Creating/Updating {} {}",
- resource.getKind(), resource.getMetadata().getName());
- } else {
- LOGGER.info("Creating/Updating {} {}/{}",
- resource.getKind(), resource.getMetadata().getNamespace(), resource.getMetadata().getName());
- addNamespaceForLogCollect(resource.getMetadata().getNamespace());
- }
-
- if (type == null) {
- if (resource instanceof Deployment deployment) {
- if (client.getClient().apps().deployments().resource(deployment).get() != null) {
- client.getClient().apps().deployments().resource(deployment).update();
- } else {
- client.getClient().apps().deployments().resource(deployment).create();
- }
- if (waitReady) {
- DeploymentUtils.waitForDeploymentReady(resource.getMetadata().getNamespace(), resource.getMetadata().getName());
- }
- } else {
- if (client.getClient().resource(resource).get() != null) {
- client.getClient().resource(resource).update();
- } else {
- client.getClient().resource(resource).create();
- }
- }
- } else {
- if (type.get(resource.getMetadata().getNamespace(), resource.getMetadata().getName()) != null) {
- type.update(resource);
- } else {
- type.create(resource);
- }
-
- if (waitReady) {
- assertTrue(waitResourceCondition(resource, ResourceCondition.readiness(type)),
- String.format("Timed out waiting for %s %s/%s to be ready", resource.getKind(), resource.getMetadata().getNamespace(), resource.getMetadata().getName()));
- }
- }
- }
- }
-
- @SafeVarargs
- public final void deleteResource(T... resources) {
- for (T resource : resources) {
- ResourceType type = findResourceType(resource);
- if (type == null) {
- LOGGER.info("Deleting of {} {}",
- resource.getKind(), resource.getMetadata().getName());
- if (resource instanceof Deployment) {
- Deployment deployment = (Deployment) resource;
- client.getClient().apps().deployments().resource(deployment).delete();
- DeploymentUtils.waitForDeploymentDeletion(resource.getMetadata().getNamespace(), resource.getMetadata().getName());
- } else {
- client.getClient().resource(resource).delete();
- }
- } else {
- if (resource.getMetadata().getNamespace() == null) {
- LOGGER.info("Deleting of {} {}",
- resource.getKind(), resource.getMetadata().getName());
- } else {
- LOGGER.info("Deleting of {} {}/{}",
- resource.getKind(), resource.getMetadata().getNamespace(), resource.getMetadata().getName());
- }
-
- try {
- type.delete(resource);
- assertTrue(waitResourceCondition(resource, ResourceCondition.deletion()),
- String.format("Timed out deleting %s %s/%s", resource.getKind(), resource.getMetadata().getNamespace(), resource.getMetadata().getName()));
- } catch (Exception e) {
- if (resource.getMetadata().getNamespace() == null) {
- LOGGER.error("Failed to delete {} {}", resource.getKind(), resource.getMetadata().getName(), e);
- } else {
- LOGGER.error("Failed to delete {} {}/{}", resource.getKind(), resource.getMetadata().getNamespace(), resource.getMetadata().getName(), e);
- }
- }
- }
- }
- }
-
- @SafeVarargs
- public final void updateResource(T... resources) {
- for (T resource : resources) {
- ResourceType type = findResourceType(resource);
- if (type != null) {
- type.update(resource);
- } else {
- client.getClient().resource(resource).update();
- }
- }
- }
-
- public final boolean waitResourceCondition(T resource, ResourceCondition condition) {
- assertNotNull(resource);
- assertNotNull(resource.getMetadata());
- assertNotNull(resource.getMetadata().getName());
-
- // cluster role binding and custom resource definition does not need namespace...
- if (!(resource instanceof ClusterRoleBinding || resource instanceof CustomResourceDefinition
- || resource instanceof ClusterRole || resource instanceof ValidatingWebhookConfiguration
- || resource instanceof DataScienceCluster || resource instanceof Namespace
- || resource instanceof DSCInitialization)) {
- assertNotNull(resource.getMetadata().getNamespace());
- }
-
- ResourceType type = findResourceType(resource);
- assertNotNull(type);
- boolean[] resourceReady = new boolean[1];
-
- TestUtils.waitFor("resource condition: " + condition.getConditionName() + " to be fulfilled for resource " + resource.getKind() + ":" + resource.getMetadata().getName(),
- TestConstants.GLOBAL_POLL_INTERVAL_MEDIUM, TestConstants.GLOBAL_TIMEOUT,
- () -> {
- T res = type.get(resource.getMetadata().getNamespace(), resource.getMetadata().getName());
- resourceReady[0] = condition.getPredicate().test(res);
- if (!resourceReady[0]) {
- type.delete(res);
- }
- return resourceReady[0];
- });
-
- return resourceReady[0];
- }
-
- public void deleteResources() {
- if (!resourceStackPointer.isEmpty()) {
- LOGGER.info(String.join("", Collections.nCopies(76, "#")));
-
- while (!resourceStackPointer.empty()) {
- try {
- ResourceItem> resourceItem = resourceStackPointer.pop();
- resourceItem.getThrowableRunner().run();
- } catch (Exception e) {
- e.printStackTrace();
- }
- }
- LOGGER.info(String.join("", Collections.nCopies(76, "#")));
- }
- }
-
- private ResourceType findResourceType(T resource) {
- // other no conflicting types
- for (ResourceType> type : resourceTypes) {
- if (type.getKind().equals(resource.getKind())) {
- return (ResourceType) type;
- }
- }
- return null;
- }
-}
diff --git a/src/main/java/io/odh/test/framework/manager/ResourceType.java b/src/main/java/io/odh/test/framework/manager/ResourceType.java
deleted file mode 100644
index 86b50133..00000000
--- a/src/main/java/io/odh/test/framework/manager/ResourceType.java
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * Copyright Skodjob authors.
- * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
- */
-package io.odh.test.framework.manager;
-
-import io.fabric8.kubernetes.api.model.HasMetadata;
-
-/**
- * Providing contract for all resources which must implement REST API methods for create, update (refresh) and so on.
- * @param type for all our resources for instance KafkaResource, KafkaConnectResource, OlmResource, ServiceResource etc.
- */
-public interface ResourceType {
- String getKind();
-
- /**
- * Retrieve resource using Kubernetes API
- * @return specific resource with T type.
- */
- T get(String namespace, String name);
-
- /**
- * Creates specific resource based on T type using Kubernetes API
- */
- void create(T resource);
-
- /**
- * Delete specific resource based on T type using Kubernetes API
- */
- void delete(T resource);
-
- /**
- * Update specific resource based on T type using Kubernetes API
- */
- void update(T resource);
-
- /**
- * Check if this resource is marked as ready or not with wait.
- *
- * @return true if ready.
- */
- boolean waitForReadiness(T resource);
-}
diff --git a/src/main/java/io/odh/test/framework/manager/ThrowableRunner.java b/src/main/java/io/odh/test/framework/manager/ThrowableRunner.java
deleted file mode 100644
index 9cce54f7..00000000
--- a/src/main/java/io/odh/test/framework/manager/ThrowableRunner.java
+++ /dev/null
@@ -1,10 +0,0 @@
-/*
- * Copyright Skodjob authors.
- * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
- */
-package io.odh.test.framework.manager;
-
-@FunctionalInterface
-public interface ThrowableRunner {
- void run() throws Exception;
-}
diff --git a/src/main/java/io/odh/test/framework/manager/requirements/AuthorinoOperator.java b/src/main/java/io/odh/test/framework/manager/requirements/AuthorinoOperator.java
index ca6f94b4..6af90b76 100644
--- a/src/main/java/io/odh/test/framework/manager/requirements/AuthorinoOperator.java
+++ b/src/main/java/io/odh/test/framework/manager/requirements/AuthorinoOperator.java
@@ -9,9 +9,9 @@
import io.fabric8.openshift.api.model.operatorhub.v1alpha1.SubscriptionBuilder;
import io.odh.test.OdhAnnotationsLabels;
import io.odh.test.TestConstants;
-import io.odh.test.framework.manager.ResourceItem;
-import io.odh.test.framework.manager.ResourceManager;
import io.odh.test.utils.PodUtils;
+import io.skodjob.testframe.resources.KubeResourceManager;
+import io.skodjob.testframe.resources.ResourceItem;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -41,8 +41,8 @@ public static void deployOperator() {
.endSpec()
.build();
- ResourceManager.getInstance().createResourceWithWait(subscription);
- ResourceManager.getInstance().pushToStack(new ResourceItem<>(() -> deleteOperator(subscription), null));
+ KubeResourceManager.getInstance().createOrUpdateResourceWithWait(subscription);
+ KubeResourceManager.getInstance().pushToStack(new ResourceItem<>(() -> deleteOperator(subscription), null));
isOperatorReady();
}
@@ -52,6 +52,6 @@ public static void isOperatorReady() {
}
public static void deleteOperator(Subscription subscription) {
- ResourceManager.getKubeClient().delete(Collections.singletonList(subscription));
+ KubeResourceManager.getKubeClient().delete(Collections.singletonList(subscription));
}
}
diff --git a/src/main/java/io/odh/test/framework/manager/requirements/PipelinesOperator.java b/src/main/java/io/odh/test/framework/manager/requirements/PipelinesOperator.java
index 1d5ccc6d..df5c943a 100644
--- a/src/main/java/io/odh/test/framework/manager/requirements/PipelinesOperator.java
+++ b/src/main/java/io/odh/test/framework/manager/requirements/PipelinesOperator.java
@@ -9,9 +9,9 @@
import io.fabric8.openshift.api.model.operatorhub.v1alpha1.SubscriptionBuilder;
import io.odh.test.OdhAnnotationsLabels;
import io.odh.test.TestConstants;
-import io.odh.test.framework.manager.ResourceItem;
-import io.odh.test.framework.manager.ResourceManager;
import io.odh.test.utils.PodUtils;
+import io.skodjob.testframe.resources.KubeResourceManager;
+import io.skodjob.testframe.resources.ResourceItem;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -42,8 +42,8 @@ public static void deployOperator() {
.endSpec()
.build();
- ResourceManager.getInstance().createResourceWithWait(subscription);
- ResourceManager.getInstance().pushToStack(new ResourceItem<>(() -> deleteOperator(subscription), null));
+ KubeResourceManager.getInstance().createOrUpdateResourceWithWait(subscription);
+ KubeResourceManager.getInstance().pushToStack(new ResourceItem<>(() -> deleteOperator(subscription), null));
isOperatorReady();
}
@@ -53,6 +53,6 @@ public static void isOperatorReady() {
}
public static void deleteOperator(Subscription subscription) {
- ResourceManager.getKubeClient().delete(Collections.singletonList(subscription));
+ KubeResourceManager.getKubeClient().delete(Collections.singletonList(subscription));
}
}
diff --git a/src/main/java/io/odh/test/framework/manager/requirements/ServerlessOperator.java b/src/main/java/io/odh/test/framework/manager/requirements/ServerlessOperator.java
index 0abe35f1..bd1dd4ba 100644
--- a/src/main/java/io/odh/test/framework/manager/requirements/ServerlessOperator.java
+++ b/src/main/java/io/odh/test/framework/manager/requirements/ServerlessOperator.java
@@ -12,10 +12,9 @@
import io.fabric8.openshift.api.model.operatorhub.v1alpha1.SubscriptionBuilder;
import io.odh.test.OdhAnnotationsLabels;
import io.odh.test.TestConstants;
-import io.odh.test.framework.manager.ResourceItem;
-import io.odh.test.framework.manager.ResourceManager;
-import io.odh.test.framework.manager.resources.OperatorGroupResource;
import io.odh.test.utils.PodUtils;
+import io.skodjob.testframe.resources.KubeResourceManager;
+import io.skodjob.testframe.resources.ResourceItem;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -27,6 +26,7 @@ public class ServerlessOperator {
public static final String SUBSCRIPTION_NAME = "serverless-operator";
public static final String OPERATOR_NAME = "serverless-operator";
public static final String OPERATOR_NAMESPACE = "openshift-serverless";
+
public static void deployOperator() {
// Create ns for the operator
Namespace ns = new NamespaceBuilder()
@@ -35,9 +35,10 @@ public static void deployOperator() {
.withLabels(Collections.singletonMap(OdhAnnotationsLabels.APP_LABEL_KEY, OdhAnnotationsLabels.APP_LABEL_VALUE))
.endMetadata()
.build();
- ResourceManager.getInstance().createResourceWithoutWait(ns);
+ KubeResourceManager.getInstance().createOrUpdateResourceWithWait(ns);
//Create operator group for the operator
- if (OperatorGroupResource.operatorGroupClient().inNamespace(OPERATOR_NAMESPACE).list().getItems().isEmpty()) {
+ if (KubeResourceManager.getKubeClient().getOpenShiftClient().operatorHub().operatorGroups()
+ .inNamespace(OPERATOR_NAMESPACE).list().getItems().isEmpty()) {
OperatorGroupBuilder operatorGroup = new OperatorGroupBuilder()
.editOrNewMetadata()
.withName("odh-group")
@@ -45,7 +46,7 @@ public static void deployOperator() {
.withLabels(Collections.singletonMap(OdhAnnotationsLabels.APP_LABEL_KEY, OdhAnnotationsLabels.APP_LABEL_VALUE))
.endMetadata();
- ResourceManager.getInstance().createResourceWithWait(operatorGroup.build());
+ KubeResourceManager.getInstance().createResourceWithWait(operatorGroup.build());
} else {
LOGGER.info("OperatorGroup is already exists.");
}
@@ -67,8 +68,8 @@ public static void deployOperator() {
.endSpec()
.build();
- ResourceManager.getInstance().createResourceWithWait(subscription);
- ResourceManager.getInstance().pushToStack(new ResourceItem<>(() -> deleteOperator(ns), null));
+ KubeResourceManager.getInstance().createOrUpdateResourceWithWait(subscription);
+ KubeResourceManager.getInstance().pushToStack(new ResourceItem<>(() -> deleteOperator(ns), null));
isOperatorReady();
}
@@ -80,6 +81,6 @@ public static void isOperatorReady() {
}
public static void deleteOperator(Namespace namespace) {
- ResourceManager.getKubeClient().delete(Collections.singletonList(namespace));
+ KubeResourceManager.getKubeClient().delete(Collections.singletonList(namespace));
}
}
diff --git a/src/main/java/io/odh/test/framework/manager/requirements/ServiceMeshOperator.java b/src/main/java/io/odh/test/framework/manager/requirements/ServiceMeshOperator.java
index c393990e..58eb93f3 100644
--- a/src/main/java/io/odh/test/framework/manager/requirements/ServiceMeshOperator.java
+++ b/src/main/java/io/odh/test/framework/manager/requirements/ServiceMeshOperator.java
@@ -9,9 +9,9 @@
import io.fabric8.openshift.api.model.operatorhub.v1alpha1.SubscriptionBuilder;
import io.odh.test.OdhAnnotationsLabels;
import io.odh.test.TestConstants;
-import io.odh.test.framework.manager.ResourceItem;
-import io.odh.test.framework.manager.ResourceManager;
import io.odh.test.utils.PodUtils;
+import io.skodjob.testframe.resources.KubeResourceManager;
+import io.skodjob.testframe.resources.ResourceItem;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -43,8 +43,8 @@ public static void deployOperator() {
.endSpec()
.build();
- ResourceManager.getInstance().createResourceWithWait(subscription);
- ResourceManager.getInstance().pushToStack(new ResourceItem<>(() -> deleteOperator(subscription), null));
+ KubeResourceManager.getInstance().createOrUpdateResourceWithWait(subscription);
+ KubeResourceManager.getInstance().pushToStack(new ResourceItem<>(() -> deleteOperator(subscription), null));
isOperatorReady();
}
@@ -54,6 +54,6 @@ public static void isOperatorReady() {
}
public static void deleteOperator(Subscription subscription) {
- ResourceManager.getKubeClient().delete(Collections.singletonList(subscription));
+ KubeResourceManager.getKubeClient().delete(Collections.singletonList(subscription));
}
}
diff --git a/src/main/java/io/odh/test/framework/manager/resources/DataScienceClusterResource.java b/src/main/java/io/odh/test/framework/manager/resources/DataScienceClusterResource.java
index e0a13a6b..8b3e76e8 100644
--- a/src/main/java/io/odh/test/framework/manager/resources/DataScienceClusterResource.java
+++ b/src/main/java/io/odh/test/framework/manager/resources/DataScienceClusterResource.java
@@ -8,62 +8,80 @@
import io.fabric8.kubernetes.api.model.events.v1.Event;
import io.fabric8.kubernetes.client.dsl.EventingAPIGroupDSL;
import io.fabric8.kubernetes.client.dsl.MixedOperation;
+import io.fabric8.kubernetes.client.dsl.NonNamespaceOperation;
import io.fabric8.kubernetes.client.dsl.Resource;
import io.odh.test.OdhConstants;
import io.odh.test.TestConstants;
import io.odh.test.TestUtils;
-import io.odh.test.framework.manager.ResourceManager;
-import io.odh.test.framework.manager.ResourceType;
-import io.odh.test.platform.KubeUtils;
import io.odh.test.utils.PodUtils;
import io.opendatahub.datasciencecluster.v1.DataScienceCluster;
+import io.skodjob.testframe.interfaces.ResourceType;
+import io.skodjob.testframe.resources.KubeResourceManager;
+import io.skodjob.testframe.wait.Wait;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.List;
import java.util.Objects;
+import java.util.function.Consumer;
public class DataScienceClusterResource implements ResourceType {
private static final Logger LOGGER = LoggerFactory.getLogger(DataScienceClusterResource.class);
+
+ @Override
+ public NonNamespaceOperation, ?, ?> getClient() {
+ return dataScienceCLusterClient();
+ }
+
@Override
public String getKind() {
return "DataScienceCluster";
}
- @Override
- public DataScienceCluster get(String namespace, String name) {
+ public DataScienceCluster get(String name) {
return dataScienceCLusterClient().withName(name).get();
}
@Override
public void create(DataScienceCluster resource) {
- dataScienceCLusterClient().resource(resource).create();
+ if (get(resource.getMetadata().getName()) == null) {
+ dataScienceCLusterClient().resource(resource).create();
+ } else {
+ update(resource);
+ }
}
@Override
- public void delete(DataScienceCluster resource) {
- dataScienceCLusterClient().withName(resource.getMetadata().getName()).delete();
+ public void update(DataScienceCluster resource) {
+ dataScienceCLusterClient().resource(resource).update();
}
@Override
- public void update(DataScienceCluster resource) {
- dataScienceCLusterClient().resource(resource).update();
+ public void delete(String s) {
+ dataScienceCLusterClient().withName(s).delete();
+ }
+
+ @Override
+ public void replace(String s, Consumer editor) {
+ DataScienceCluster toBeUpdated = dataScienceCLusterClient().withName(s).get();
+ editor.accept(toBeUpdated);
+ update(toBeUpdated);
}
@Override
public boolean waitForReadiness(DataScienceCluster resource) {
String message = String.format("DataScienceCluster %s readiness", resource.getMetadata().getName());
- TestUtils.waitFor(message, TestConstants.GLOBAL_POLL_INTERVAL_SHORT, TestConstants.GLOBAL_TIMEOUT, () -> {
+ Wait.until(message, TestConstants.GLOBAL_POLL_INTERVAL_SHORT, TestConstants.GLOBAL_TIMEOUT, () -> {
boolean dscReady;
DataScienceCluster dsc = dataScienceCLusterClient().withName(resource.getMetadata().getName()).get();
- String dashboardStatus = KubeUtils.getDscConditionByType(dsc.getStatus().getConditions(), "dashboardReady").getStatus();
+ String dashboardStatus = TestUtils.getDscConditionByType(dsc.getStatus().getConditions(), "dashboardReady").getStatus();
LOGGER.debug("DataScienceCluster {} Dashboard status: {}", resource.getMetadata().getName(), dashboardStatus);
dscReady = dashboardStatus.equals("True");
- String workbenchesStatus = KubeUtils.getDscConditionByType(dsc.getStatus().getConditions(), "workbenchesReady").getStatus();
+ String workbenchesStatus = TestUtils.getDscConditionByType(dsc.getStatus().getConditions(), "workbenchesReady").getStatus();
LOGGER.debug("DataScienceCluster {} Workbenches status: {}", resource.getMetadata().getName(), workbenchesStatus);
dscReady = dscReady && workbenchesStatus.equals("True");
@@ -114,7 +132,8 @@ public boolean waitForReadiness(DataScienceCluster resource) {
// https://github.com/red-hat-data-services/rhods-operator/blob/rhoai-2.8/controllers/datasciencecluster/datasciencecluster_controller.go#L257
// Wait for standard Kubernetes condition types (status for the whole DSC)
- record ConditionExpectation(String conditionType, String expectedStatus) { }
+ record ConditionExpectation(String conditionType, String expectedStatus) {
+ }
List conditionExpectations = List.of(
new ConditionExpectation("Available", "True"),
new ConditionExpectation("Progressing", "False"),
@@ -124,18 +143,18 @@ record ConditionExpectation(String conditionType, String expectedStatus) { }
for (ConditionExpectation conditionExpectation : conditionExpectations) {
String conditionType = conditionExpectation.conditionType;
String expectedStatus = conditionExpectation.expectedStatus;
- String conditionStatus = KubeUtils.getDscConditionByType(dsc.getStatus().getConditions(), conditionType).getStatus();
+ String conditionStatus = TestUtils.getDscConditionByType(dsc.getStatus().getConditions(), conditionType).getStatus();
LOGGER.debug("DataScienceCluster {} {} status: {}", resource.getMetadata().getName(), conditionType, conditionStatus);
dscReady = dscReady && Objects.equals(conditionStatus, expectedStatus);
}
// Wait for ReconcileComplete condition (for the whole DSC)
- String reconcileStatus = KubeUtils.getDscConditionByType(dsc.getStatus().getConditions(), "ReconcileComplete").getStatus();
+ String reconcileStatus = TestUtils.getDscConditionByType(dsc.getStatus().getConditions(), "ReconcileComplete").getStatus();
LOGGER.debug("DataScienceCluster {} ReconcileComplete status: {}", resource.getMetadata().getName(), reconcileStatus);
dscReady = dscReady && reconcileStatus.equals("True");
// Wait for DataScienceClusterCreationSuccessful event
- EventingAPIGroupDSL eventsClient = ResourceManager.getKubeClient().getClient().events();
+ EventingAPIGroupDSL eventsClient = KubeResourceManager.getKubeClient().getClient().events();
List resourceEvents = eventsClient.v1().events().inAnyNamespace().withNewFilter()
.withField("regarding.name", resource.getMetadata().getName())
.withField("regarding.uid", resource.getMetadata().getUid())
@@ -146,20 +165,25 @@ record ConditionExpectation(String conditionType, String expectedStatus) { }
dscReady = dscReady && hasCreationSuccessfulEvent;
return dscReady;
- }, () -> { });
+ }, () -> {
+ });
String namespace = OdhConstants.CONTROLLERS_NAMESPACE;
LOGGER.info("Waiting for pods readiness in {}", namespace);
PodUtils.waitForPodsReady(namespace, true, () -> {
- ResourceManager.getKubeCmdClient().namespace(namespace).exec(false, "get", "pods");
- ResourceManager.getKubeCmdClient().namespace(namespace).exec(false, "get", "events");
+ KubeResourceManager.getKubeCmdClient().namespace(namespace).exec(false, "get", "pods");
+ KubeResourceManager.getKubeCmdClient().namespace(namespace).exec(false, "get", "events");
});
return true;
}
- public static MixedOperation, Resource> dataScienceCLusterClient() {
- return ResourceManager.getKubeClient().getClient().resources(DataScienceCluster.class);
+ @Override
+ public boolean waitForDeletion(DataScienceCluster dataScienceCluster) {
+ return get(dataScienceCluster.getMetadata().getName()) == null;
}
+ public static MixedOperation, Resource> dataScienceCLusterClient() {
+ return KubeResourceManager.getKubeClient().getClient().resources(DataScienceCluster.class);
+ }
}
diff --git a/src/main/java/io/odh/test/framework/manager/resources/DataScienceInitializationResource.java b/src/main/java/io/odh/test/framework/manager/resources/DataScienceInitializationResource.java
index ce3e5910..2d69fb93 100644
--- a/src/main/java/io/odh/test/framework/manager/resources/DataScienceInitializationResource.java
+++ b/src/main/java/io/odh/test/framework/manager/resources/DataScienceInitializationResource.java
@@ -6,28 +6,36 @@
import io.fabric8.kubernetes.api.model.KubernetesResourceList;
import io.fabric8.kubernetes.client.dsl.MixedOperation;
+import io.fabric8.kubernetes.client.dsl.NonNamespaceOperation;
import io.fabric8.kubernetes.client.dsl.Resource;
import io.odh.test.TestConstants;
import io.odh.test.TestUtils;
-import io.odh.test.framework.manager.ResourceManager;
-import io.odh.test.framework.manager.ResourceType;
import io.opendatahub.dscinitialization.v1.DSCInitialization;
+import io.skodjob.testframe.interfaces.ResourceType;
+import io.skodjob.testframe.resources.KubeResourceManager;
+import io.skodjob.testframe.wait.Wait;
+
+import java.util.function.Consumer;
public class DataScienceInitializationResource implements ResourceType {
+ @Override
+ public NonNamespaceOperation, ?, ?> getClient() {
+ return dsciClient();
+ }
+
@Override
public String getKind() {
return "DSCInitialization";
}
- @Override
- public DSCInitialization get(String namespace, String name) {
+ public DSCInitialization get(String name) {
return dsciClient().withName(name).get();
}
@Override
public void create(DSCInitialization resource) {
- if (get("", resource.getMetadata().getName()) == null) {
+ if (get(resource.getMetadata().getName()) == null) {
TestUtils.runUntilPass(5, () -> dsciClient().resource(resource).create());
} else {
TestUtils.runUntilPass(5, () -> dsciClient().resource(resource).update());
@@ -35,19 +43,26 @@ public void create(DSCInitialization resource) {
}
@Override
- public void delete(DSCInitialization resource) {
- dsciClient().withName(resource.getMetadata().getName()).delete();
+ public void update(DSCInitialization resource) {
+ TestUtils.runUntilPass(5, () -> dsciClient().resource(resource).update());
}
@Override
- public void update(DSCInitialization resource) {
- TestUtils.runUntilPass(5, () -> dsciClient().resource(resource).update());
+ public void delete(String s) {
+ dsciClient().withName(s).delete();
+ }
+
+ @Override
+ public void replace(String s, Consumer editor) {
+ DSCInitialization toBeUpdated = dsciClient().withName(s).get();
+ editor.accept(toBeUpdated);
+ update(toBeUpdated);
}
@Override
public boolean waitForReadiness(DSCInitialization resource) {
String message = String.format("DSCInitialization %s readiness", resource.getMetadata().getName());
- TestUtils.waitFor(message, TestConstants.GLOBAL_POLL_INTERVAL_SHORT, TestConstants.GLOBAL_TIMEOUT, () -> {
+ Wait.until(message, TestConstants.GLOBAL_POLL_INTERVAL_SHORT, TestConstants.GLOBAL_TIMEOUT, () -> {
boolean dsciReady;
DSCInitialization dsci = dsciClient().withName(resource.getMetadata().getName()).get();
@@ -60,8 +75,13 @@ public boolean waitForReadiness(DSCInitialization resource) {
return true;
}
+ @Override
+ public boolean waitForDeletion(DSCInitialization dscInitialization) {
+ return get(dscInitialization.getMetadata().getName()) == null;
+ }
+
public static MixedOperation, Resource> dsciClient() {
- return ResourceManager.getKubeClient().getClient().resources(DSCInitialization.class);
+ return KubeResourceManager.getKubeClient().getClient().resources(DSCInitialization.class);
}
}
diff --git a/src/main/java/io/odh/test/framework/manager/resources/InferenceServiceResource.java b/src/main/java/io/odh/test/framework/manager/resources/InferenceServiceResource.java
index 671739cd..a064bc5f 100644
--- a/src/main/java/io/odh/test/framework/manager/resources/InferenceServiceResource.java
+++ b/src/main/java/io/odh/test/framework/manager/resources/InferenceServiceResource.java
@@ -10,22 +10,24 @@
import io.kserve.serving.v1beta1.InferenceService;
import io.odh.test.TestConstants;
import io.odh.test.TestUtils;
-import io.odh.test.framework.manager.ResourceManager;
-import io.odh.test.framework.manager.ResourceType;
-import io.odh.test.platform.KubeUtils;
import io.odh.test.utils.PodUtils;
+import io.skodjob.testframe.interfaces.NamespacedResourceType;
+import io.skodjob.testframe.resources.KubeResourceManager;
+import io.skodjob.testframe.wait.Wait;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-public class InferenceServiceResource implements ResourceType {
+import java.util.function.Consumer;
+
+public class InferenceServiceResource implements NamespacedResourceType {
private static final Logger LOGGER = LoggerFactory.getLogger(InferenceServiceResource.class);
+
@Override
public String getKind() {
return "InferenceService";
}
- @Override
public InferenceService get(String namespace, String name) {
return inferenceServiceClient().inNamespace(namespace).withName(name).get();
}
@@ -36,28 +38,35 @@ public void create(InferenceService resource) {
}
@Override
- public void delete(InferenceService resource) {
- inferenceServiceClient().inNamespace(resource.getMetadata().getNamespace()).withName(resource.getMetadata().getName()).delete();
+ public void update(InferenceService resource) {
+ inferenceServiceClient().resource(resource).update();
}
@Override
- public void update(InferenceService resource) {
- inferenceServiceClient().resource(resource).update();
+ public void delete(String resource) {
+ inferenceServiceClient().withName(resource).delete();
+ }
+
+ @Override
+ public void replace(String s, Consumer editor) {
+ InferenceService toBeUpdated = inferenceServiceClient().withName(s).get();
+ editor.accept(toBeUpdated);
+ update(toBeUpdated);
}
@Override
public boolean waitForReadiness(InferenceService resource) {
String message = String.format("InferenceService %s readiness", resource.getMetadata().getName());
- TestUtils.waitFor(message, TestConstants.GLOBAL_POLL_INTERVAL_SHORT, TestConstants.GLOBAL_TIMEOUT, () -> {
+ Wait.until(message, TestConstants.GLOBAL_POLL_INTERVAL_SHORT, TestConstants.GLOBAL_TIMEOUT, () -> {
boolean isReady;
InferenceService inferenceService = get(resource.getMetadata().getNamespace(), resource.getMetadata().getName());
- String predictorReadyStatus = KubeUtils.getInferenceServiceConditionByType(inferenceService.getStatus().getConditions(), "PredictorReady").getStatus();
+ String predictorReadyStatus = TestUtils.getInferenceServiceConditionByType(inferenceService.getStatus().getConditions(), "PredictorReady").getStatus();
LOGGER.debug("InferenceService {} PredictorReady status: {}", resource.getMetadata().getName(), predictorReadyStatus);
isReady = predictorReadyStatus.equals("True");
- String readyStatus = KubeUtils.getInferenceServiceConditionByType(inferenceService.getStatus().getConditions(), "Ready").getStatus();
+ String readyStatus = TestUtils.getInferenceServiceConditionByType(inferenceService.getStatus().getConditions(), "Ready").getStatus();
LOGGER.debug("InferenceService {} Ready status: {}", resource.getMetadata().getName(), readyStatus);
isReady = isReady && readyStatus.equals("True");
@@ -67,15 +76,46 @@ public boolean waitForReadiness(InferenceService resource) {
String namespace = resource.getMetadata().getNamespace();
LOGGER.info("Waiting for pods readiness in {}", namespace);
PodUtils.waitForPodsReady(namespace, true, () -> {
- ResourceManager.getKubeCmdClient().namespace(namespace).exec(false, "get", "pods");
- ResourceManager.getKubeCmdClient().namespace(namespace).exec(false, "get", "events");
+ KubeResourceManager.getKubeCmdClient().namespace(namespace).exec(false, "get", "pods");
+ KubeResourceManager.getKubeCmdClient().namespace(namespace).exec(false, "get", "events");
});
return true;
}
+ @Override
+ public boolean waitForDeletion(InferenceService inferenceService) {
+ return get(inferenceService.getMetadata().getNamespace(), inferenceService.getMetadata().getName()) == null;
+ }
+
public static MixedOperation, Resource> inferenceServiceClient() {
- return ResourceManager.getKubeClient().getClient().resources(InferenceService.class);
+ return KubeResourceManager.getKubeClient().getClient().resources(InferenceService.class);
}
+ @Override
+ public MixedOperation, ?, ?> getClient() {
+ return inferenceServiceClient();
+ }
+
+ @Override
+ public void createInNamespace(String namespace, InferenceService inferenceService) {
+ inferenceServiceClient().inNamespace(namespace).resource(inferenceService).create();
+ }
+
+ @Override
+ public void updateInNamespace(String namespace, InferenceService inferenceService) {
+ inferenceServiceClient().inNamespace(namespace).resource(inferenceService).update();
+ }
+
+ @Override
+ public void deleteFromNamespace(String namespace, String resource) {
+ inferenceServiceClient().inNamespace(namespace).withName(resource).delete();
+ }
+
+ @Override
+ public void replaceInNamespace(String namespace, String s, Consumer editor) {
+ InferenceService toBeUpdated = inferenceServiceClient().inNamespace(namespace).withName(s).get();
+ editor.accept(toBeUpdated);
+ update(toBeUpdated);
+ }
}
diff --git a/src/main/java/io/odh/test/framework/manager/resources/NamespaceResource.java b/src/main/java/io/odh/test/framework/manager/resources/NamespaceResource.java
deleted file mode 100644
index badef7d9..00000000
--- a/src/main/java/io/odh/test/framework/manager/resources/NamespaceResource.java
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * Copyright Skodjob authors.
- * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
- */
-package io.odh.test.framework.manager.resources;
-
-import io.fabric8.kubernetes.api.model.Namespace;
-import io.fabric8.kubernetes.api.model.NamespaceBuilder;
-import io.odh.test.TestConstants;
-import io.odh.test.TestUtils;
-import io.odh.test.framework.manager.ResourceManager;
-import io.odh.test.framework.manager.ResourceType;
-
-public class NamespaceResource implements ResourceType {
-
- @Override
- public String getKind() {
- return "Namespace";
- }
-
- @Override
- public Namespace get(String namespace, String name) {
- return ResourceManager.getKubeClient().getClient().namespaces().withName(name).get();
- }
-
- @Override
- public void create(Namespace resource) {
- if (get("", resource.getMetadata().getName()) != null) {
- ResourceManager.getKubeClient().getClient().resource(resource).update();
- } else {
- ResourceManager.getKubeClient().getClient().resource(resource).create();
- }
- }
-
- @Override
- public void delete(Namespace resource) {
- ResourceManager.getKubeClient().getClient().namespaces().withName(resource.getMetadata().getName()).delete();
- }
-
- @Override
- public void update(Namespace resource) {
- ResourceManager.getKubeClient().getClient().resource(resource).update();
- }
-
- @Override
- public boolean waitForReadiness(Namespace resource) {
- return resource != null;
- }
-
- public static void labelNamespace(String namespace, String key, String value) {
- if (ResourceManager.getKubeClient().namespaceExists(namespace)) {
- TestUtils.waitFor(String.format("Namespace %s has label: %s", namespace, key), TestConstants.GLOBAL_POLL_INTERVAL_1_SEC, TestConstants.GLOBAL_STABILITY_TIME * 1000, () -> {
- try {
- ResourceManager.getKubeClient().getClient().namespaces().withName(namespace).edit(n ->
- new NamespaceBuilder(n)
- .editMetadata()
- .addToLabels(key, value)
- .endMetadata()
- .build());
- } catch (Exception ex) {
- return false;
- }
- Namespace n = ResourceManager.getKubeClient().getClient().namespaces().withName(namespace).get();
- if (n != null) {
- return n.getMetadata().getLabels().get(key) != null;
- }
- return false;
- });
- }
- }
-}
diff --git a/src/main/java/io/odh/test/framework/manager/resources/NotebookResource.java b/src/main/java/io/odh/test/framework/manager/resources/NotebookResource.java
index a478aa57..a9938082 100644
--- a/src/main/java/io/odh/test/framework/manager/resources/NotebookResource.java
+++ b/src/main/java/io/odh/test/framework/manager/resources/NotebookResource.java
@@ -11,19 +11,20 @@
import io.odh.test.Environment;
import io.odh.test.OdhConstants;
import io.odh.test.TestUtils;
-import io.odh.test.framework.manager.ResourceManager;
-import io.odh.test.framework.manager.ResourceType;
+import io.skodjob.testframe.interfaces.NamespacedResourceType;
+import io.skodjob.testframe.resources.KubeResourceManager;
import org.kubeflow.v1.Notebook;
import java.io.IOException;
import java.io.InputStream;
import java.util.Map;
import java.util.Objects;
+import java.util.function.Consumer;
import org.apache.commons.io.IOUtils;
-public class NotebookResource implements ResourceType {
+public class NotebookResource implements NamespacedResourceType {
private static final String REGISTRY_PATH = "image-registry.openshift-image-registry.svc:5000";
public static final String JUPYTER_MINIMAL_IMAGE = "jupyter-minimal-notebook";
@@ -45,7 +46,6 @@ public String getKind() {
return "Notebook";
}
- @Override
public Notebook get(String namespace, String name) {
return notebookClient().inNamespace(namespace).withName(name).get();
}
@@ -56,13 +56,20 @@ public void create(Notebook resource) {
}
@Override
- public void delete(Notebook resource) {
- notebookClient().inNamespace(resource.getMetadata().getNamespace()).withName(resource.getMetadata().getName()).delete();
+ public void update(Notebook resource) {
+ notebookClient().inNamespace(resource.getMetadata().getNamespace()).resource(resource).update();
}
@Override
- public void update(Notebook resource) {
- notebookClient().inNamespace(resource.getMetadata().getNamespace()).resource(resource).update();
+ public void delete(String s) {
+ notebookClient().withName(s).delete();
+ }
+
+ @Override
+ public void replace(String resource, Consumer editor) {
+ Notebook toBeUpdated = notebookClient().withName(resource).get();
+ editor.accept(toBeUpdated);
+ update(toBeUpdated);
}
@Override
@@ -70,8 +77,13 @@ public boolean waitForReadiness(Notebook resource) {
return resource != null;
}
+ @Override
+ public boolean waitForDeletion(Notebook notebook) {
+ return get(notebook.getMetadata().getNamespace(), notebook.getMetadata().getName()) == null;
+ }
+
public static MixedOperation, Resource> notebookClient() {
- return ResourceManager.getKubeClient().getClient().resources(Notebook.class);
+ return KubeResourceManager.getKubeClient().getClient().resources(Notebook.class);
}
public static Notebook loadDefaultNotebook(String namespace, String name, String image) throws IOException {
@@ -79,10 +91,10 @@ public static Notebook loadDefaultNotebook(String namespace, String name, String
String notebookString = IOUtils.toString(is, "UTF-8");
notebookString = notebookString.replace("my-project", namespace).replace("my-workbench", name);
// Set new Route url
- String routeHost = ResourceManager.getKubeClient().getClient().adapt(OpenShiftClient.class).routes().inNamespace(OdhConstants.CONTROLLERS_NAMESPACE).withName(OdhConstants.DASHBOARD_ROUTE_NAME).get().getSpec().getHost();
+ String routeHost = KubeResourceManager.getKubeClient().getClient().adapt(OpenShiftClient.class).routes().inNamespace(OdhConstants.CONTROLLERS_NAMESPACE).withName(OdhConstants.DASHBOARD_ROUTE_NAME).get().getSpec().getHost();
notebookString = notebookString.replace("odh_dashboard_route", "https://" + routeHost);
// Set correct username
- String username = ResourceManager.getKubeCmdClient().getUsername().strip();
+ String username = KubeResourceManager.getKubeCmdClient().getUsername().strip();
notebookString = notebookString.replace("odh_user", username);
// Replace image
notebookString = notebookString.replace("notebook_image_placeholder", image);
@@ -97,4 +109,31 @@ public static String getNotebookImage(String imageName, String imageTag) {
return REGISTRY_PATH + "/" + OdhConstants.CONTROLLERS_NAMESPACE + "/" + RHOAI_IMAGES_MAP.get(imageName) + ":" + imageTag;
}
}
+
+ @Override
+ public MixedOperation<Notebook, KubernetesResourceList<Notebook>, Resource<Notebook>> getClient() {
+ return notebookClient();
+ }
+
+ @Override
+ public void createInNamespace(String namespace, Notebook notebook) {
+ notebookClient().inNamespace(namespace).resource(notebook).create();
+ }
+
+ @Override
+ public void updateInNamespace(String namespace, Notebook notebook) {
+ notebookClient().inNamespace(namespace).resource(notebook).update();
+ }
+
+ @Override
+ public void deleteFromNamespace(String namespace, String resource) {
+ notebookClient().inNamespace(namespace).withName(resource).delete();
+ }
+
+ @Override
+ public void replaceInNamespace(String namespace, String resource, Consumer<Notebook> editor) {
+ Notebook toBeUpdated = notebookClient().inNamespace(namespace).withName(resource).get();
+ editor.accept(toBeUpdated);
+ update(toBeUpdated);
+ }
}
diff --git a/src/main/java/io/odh/test/framework/manager/resources/OperatorGroupResource.java b/src/main/java/io/odh/test/framework/manager/resources/OperatorGroupResource.java
deleted file mode 100644
index 78d05a4d..00000000
--- a/src/main/java/io/odh/test/framework/manager/resources/OperatorGroupResource.java
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Copyright Skodjob authors.
- * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
- */
-package io.odh.test.framework.manager.resources;
-
-import io.fabric8.kubernetes.client.dsl.MixedOperation;
-import io.fabric8.kubernetes.client.dsl.Resource;
-import io.fabric8.openshift.api.model.operatorhub.v1.OperatorGroup;
-import io.fabric8.openshift.api.model.operatorhub.v1.OperatorGroupList;
-import io.fabric8.openshift.client.OpenShiftClient;
-import io.odh.test.TestConstants;
-import io.odh.test.framework.manager.ResourceManager;
-import io.odh.test.framework.manager.ResourceType;
-
-public class OperatorGroupResource implements ResourceType {
- @Override
- public String getKind() {
- return TestConstants.OPERATOR_GROUP;
- }
-
- @Override
- public OperatorGroup get(String namespace, String name) {
- return operatorGroupClient().inNamespace(namespace).withName(name).get();
- }
-
- @Override
- public void create(OperatorGroup resource) {
- operatorGroupClient().inNamespace(resource.getMetadata().getNamespace()).resource(resource).create();
- }
-
- @Override
- public void delete(OperatorGroup resource) {
- operatorGroupClient().inNamespace(resource.getMetadata().getNamespace()).withName(resource.getMetadata().getName()).delete();
- }
-
- @Override
- public void update(OperatorGroup resource) {
- operatorGroupClient().inNamespace(resource.getMetadata().getNamespace()).resource(resource).update();
- }
-
- @Override
- public boolean waitForReadiness(OperatorGroup resource) {
- return resource != null;
- }
-
- public static MixedOperation> operatorGroupClient() {
- return ResourceManager.getKubeClient().getClient().adapt(OpenShiftClient.class).operatorHub().operatorGroups();
- }
-}
diff --git a/src/main/java/io/odh/test/framework/manager/resources/SubscriptionResource.java b/src/main/java/io/odh/test/framework/manager/resources/SubscriptionResource.java
deleted file mode 100644
index eadb6ee0..00000000
--- a/src/main/java/io/odh/test/framework/manager/resources/SubscriptionResource.java
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Copyright Skodjob authors.
- * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
- */
-package io.odh.test.framework.manager.resources;
-
-import io.fabric8.kubernetes.api.model.DeletionPropagation;
-import io.fabric8.kubernetes.client.dsl.MixedOperation;
-import io.fabric8.kubernetes.client.dsl.Resource;
-import io.fabric8.openshift.api.model.operatorhub.v1alpha1.Subscription;
-import io.fabric8.openshift.api.model.operatorhub.v1alpha1.SubscriptionList;
-import io.fabric8.openshift.client.OpenShiftClient;
-import io.odh.test.TestConstants;
-import io.odh.test.framework.manager.ResourceManager;
-import io.odh.test.framework.manager.ResourceType;
-
-public class SubscriptionResource implements ResourceType {
-
- @Override
- public String getKind() {
- return TestConstants.SUBSCRIPTION;
- }
-
- @Override
- public Subscription get(String namespace, String name) {
- return subscriptionClient().inNamespace(namespace).withName(name).get();
- }
-
- @Override
- public void create(Subscription resource) {
- subscriptionClient().inNamespace(resource.getMetadata().getNamespace()).resource(resource).create();
- }
-
- @Override
- public void delete(Subscription resource) {
- subscriptionClient().inNamespace(resource.getMetadata().getNamespace())
- .withName(resource.getMetadata().getName()).withPropagationPolicy(DeletionPropagation.FOREGROUND).delete();
- }
-
- @Override
- public void update(Subscription resource) {
- subscriptionClient().inNamespace(resource.getMetadata().getNamespace()).resource(resource).update();
- }
-
- @Override
- public boolean waitForReadiness(Subscription resource) {
- return resource != null;
- }
-
- public static MixedOperation> subscriptionClient() {
- return ResourceManager.getKubeClient().getClient().adapt(OpenShiftClient.class).operatorHub().subscriptions();
- }
-}
diff --git a/src/main/java/io/odh/test/install/BundleInstall.java b/src/main/java/io/odh/test/install/BundleInstall.java
index 17bea942..7c876c41 100644
--- a/src/main/java/io/odh/test/install/BundleInstall.java
+++ b/src/main/java/io/odh/test/install/BundleInstall.java
@@ -10,9 +10,8 @@
import io.odh.test.Environment;
import io.odh.test.TestConstants;
import io.odh.test.TestUtils;
-import io.odh.test.framework.manager.ResourceItem;
-import io.odh.test.framework.manager.ResourceManager;
-import io.odh.test.platform.KubeUtils;
+import io.skodjob.testframe.resources.KubeResourceManager;
+import io.skodjob.testframe.resources.ResourceItem;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -41,7 +40,7 @@ public BundleInstall(String installFilePath) throws IOException {
installFile = new File(installFilePath);
is = new FileInputStream(installFilePath);
}
- resources = ResourceManager.getKubeClient().readResourcesFromYaml(is);
+ resources = KubeResourceManager.getKubeClient().readResourcesFromFile(is);
}
public BundleInstall() throws IOException {
@@ -89,17 +88,18 @@ private void modifyOperatorImage() {
public void create() {
modifyOperatorImage();
- ResourceManager.getInstance().createResourceWithWait(resources.toArray(new HasMetadata[0]));
- ResourceManager.getInstance().pushToStack(new ResourceItem<>(KubeUtils::deleteDefaultDSCI, null));
+ KubeResourceManager.getInstance().pushToStack(new ResourceItem<>(TestUtils::clearOdhRemainingResources, null));
+ KubeResourceManager.getInstance().createOrUpdateResourceWithWait(resources.toArray(new HasMetadata[0]));
+ KubeResourceManager.getInstance().pushToStack(new ResourceItem<>(TestUtils::deleteDefaultDSCI, null));
}
public void createWithoutResourceManager() {
modifyOperatorImage();
- ResourceManager.getKubeClient().create(resources, r -> r);
+ KubeResourceManager.getKubeClient().createOrUpdate(resources, r -> r);
}
public void deleteWithoutResourceManager() {
- KubeUtils.deleteDefaultDSCI();
- ResourceManager.getKubeClient().delete(resources);
+ TestUtils.deleteDefaultDSCI();
+ KubeResourceManager.getKubeClient().delete(resources);
}
}
diff --git a/src/main/java/io/odh/test/install/OlmInstall.java b/src/main/java/io/odh/test/install/OlmInstall.java
index 1dfa3cc3..51e513cf 100644
--- a/src/main/java/io/odh/test/install/OlmInstall.java
+++ b/src/main/java/io/odh/test/install/OlmInstall.java
@@ -13,11 +13,10 @@
import io.odh.test.Environment;
import io.odh.test.OdhConstants;
import io.odh.test.TestConstants;
-import io.odh.test.framework.manager.ResourceItem;
-import io.odh.test.framework.manager.ResourceManager;
-import io.odh.test.framework.manager.resources.OperatorGroupResource;
-import io.odh.test.platform.KubeUtils;
+import io.odh.test.TestUtils;
import io.odh.test.utils.DeploymentUtils;
+import io.skodjob.testframe.resources.KubeResourceManager;
+import io.skodjob.testframe.resources.ResourceItem;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -44,7 +43,7 @@ public void create() {
createNamespace();
// Create operator group and subscription
createOperatorGroup();
- ResourceManager.getInstance().pushToStack(new ResourceItem<>(this::deleteCSV));
+ KubeResourceManager.getInstance().pushToStack(new ResourceItem<>(this::deleteCSV));
createAndModifySubscription();
// Wait for operator creation
@@ -54,7 +53,7 @@ public void create() {
public void createManual() {
createNamespace();
createOperatorGroup();
- ResourceManager.getInstance().pushToStack(new ResourceItem<>(this::deleteCSV));
+ KubeResourceManager.getInstance().pushToStack(new ResourceItem<>(this::deleteCSV));
createAndModifySubscription();
}
@@ -68,14 +67,15 @@ private void createNamespace() {
.withName(namespace)
.endMetadata()
.build();
- ResourceManager.getInstance().createResourceWithoutWait(ns);
+ KubeResourceManager.getInstance().createResourceWithoutWait(ns);
}
/**
* Creates OperatorGroup in specific namespace
*/
private void createOperatorGroup() {
- if (OperatorGroupResource.operatorGroupClient().inNamespace(namespace).list().getItems().isEmpty()) {
+ if (KubeResourceManager.getKubeClient().getOpenShiftClient().operatorHub().operatorGroups()
+ .inNamespace(namespace).list().getItems().isEmpty()) {
OperatorGroupBuilder operatorGroup = new OperatorGroupBuilder()
.editOrNewMetadata()
.withName("odh-group")
@@ -83,7 +83,7 @@ private void createOperatorGroup() {
.withLabels(Collections.singletonMap("app", "odh"))
.endMetadata();
- ResourceManager.getInstance().createResourceWithWait(operatorGroup.build());
+ KubeResourceManager.getInstance().createResourceWithWait(operatorGroup.build());
} else {
LOGGER.info("OperatorGroup is already exists.");
}
@@ -96,12 +96,13 @@ private void createOperatorGroup() {
private void createAndModifySubscription() {
Subscription subscription = prepareSubscription();
- ResourceManager.getInstance().createResourceWithWait(subscription);
- ResourceManager.getInstance().pushToStack(new ResourceItem<>(KubeUtils::deleteDefaultDSCI, null));
+ KubeResourceManager.getInstance().pushToStack(new ResourceItem<>(TestUtils::clearOdhRemainingResources, null));
+ KubeResourceManager.getInstance().createOrUpdateResourceWithWait(subscription);
+ KubeResourceManager.getInstance().pushToStack(new ResourceItem<>(TestUtils::deleteDefaultDSCI, null));
}
public void updateSubscription() {
Subscription subscription = prepareSubscription();
- ResourceManager.getInstance().updateResource(subscription);
+ KubeResourceManager.getInstance().updateResource(subscription);
}
public Subscription prepareSubscription() {
@@ -125,21 +126,21 @@ public Subscription prepareSubscription() {
}
public void deleteCSV() {
- ResourceManager.getKubeClient().getClient().adapt(OpenShiftClient.class).operatorHub().clusterServiceVersions().inNamespace(namespace)
+ KubeResourceManager.getKubeClient().getClient().adapt(OpenShiftClient.class).operatorHub().clusterServiceVersions().inNamespace(namespace)
.list().getItems().stream().filter(csv -> csv.getMetadata().getName().contains(olmAppBundlePrefix)).toList()
.forEach(csv -> {
LOGGER.info("Deleting CSV {}", csv.getMetadata().getName());
- ResourceManager.getKubeClient().getClient().adapt(OpenShiftClient.class).operatorHub().clusterServiceVersions().resource(csv).delete();
+ KubeResourceManager.getKubeClient().getOpenShiftClient().operatorHub().clusterServiceVersions().resource(csv).delete();
});
deleteInstallPlans();
}
public void deleteInstallPlans() {
- ResourceManager.getKubeClient().getClient().adapt(OpenShiftClient.class).operatorHub().installPlans().inNamespace(namespace)
+ KubeResourceManager.getKubeClient().getClient().adapt(OpenShiftClient.class).operatorHub().installPlans().inNamespace(namespace)
.list().getItems().stream().filter(ip -> ip.getSpec().getClusterServiceVersionNames().stream().toList().toString().contains(olmAppBundlePrefix)).toList()
.forEach(ip -> {
LOGGER.info("Deleting InstallPlan {}", ip.getMetadata().getName());
- ResourceManager.getKubeClient().getClient().adapt(OpenShiftClient.class).operatorHub().installPlans().resource(ip).delete();
+ KubeResourceManager.getKubeClient().getOpenShiftClient().operatorHub().installPlans().resource(ip).delete();
});
}
diff --git a/src/main/java/io/odh/test/platform/KFPv1Client.java b/src/main/java/io/odh/test/platform/KFPv1Client.java
index b1217ff8..81676bbb 100644
--- a/src/main/java/io/odh/test/platform/KFPv1Client.java
+++ b/src/main/java/io/odh/test/platform/KFPv1Client.java
@@ -9,8 +9,8 @@
import com.fasterxml.jackson.databind.DeserializationFeature;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.PropertyNamingStrategies;
-import io.odh.test.TestUtils;
import io.odh.test.platform.httpClient.MultipartFormDataBodyPublisher;
+import io.skodjob.testframe.wait.Wait;
import lombok.SneakyThrows;
import org.hamcrest.Matchers;
import org.junit.jupiter.api.Assertions;
@@ -123,7 +123,7 @@ public PipelineRun waitForPipelineRun(String pipelineRunId) {
.build();
 AtomicReference<PipelineRun> run = new AtomicReference<>();
- TestUtils.waitFor("pipelineRun to complete", 5000, 10 * 60 * 1000, () -> {
+ Wait.until("pipelineRun to complete", 5000, 10 * 60 * 1000, () -> {
HttpResponse reply = null;
try {
reply = httpClient.send(request, HttpResponse.BodyHandlers.ofString());
diff --git a/src/main/java/io/odh/test/platform/KFPv2Client.java b/src/main/java/io/odh/test/platform/KFPv2Client.java
index 42290fc2..e5e07ced 100644
--- a/src/main/java/io/odh/test/platform/KFPv2Client.java
+++ b/src/main/java/io/odh/test/platform/KFPv2Client.java
@@ -9,8 +9,8 @@
import com.fasterxml.jackson.databind.DeserializationFeature;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.PropertyNamingStrategies;
-import io.odh.test.TestUtils;
import io.odh.test.platform.httpClient.MultipartFormDataBodyPublisher;
+import io.skodjob.testframe.wait.Wait;
import lombok.SneakyThrows;
import org.hamcrest.Matchers;
import org.junit.jupiter.api.Assertions;
@@ -146,7 +146,7 @@ public PipelineRun waitForPipelineRun(String pipelineRunId) {
.build();
 AtomicReference<PipelineRun> run = new AtomicReference<>();
- TestUtils.waitFor("pipelineRun to complete", 5000, 10 * 60 * 1000, () -> {
+ Wait.until("pipelineRun to complete", 5000, 10 * 60 * 1000, () -> {
try {
HttpResponse reply = httpClient.send(request, HttpResponse.BodyHandlers.ofString());
Assertions.assertEquals(reply.statusCode(), 200, reply.body());
diff --git a/src/main/java/io/odh/test/platform/KubeClient.java b/src/main/java/io/odh/test/platform/KubeClient.java
deleted file mode 100644
index 0260feaa..00000000
--- a/src/main/java/io/odh/test/platform/KubeClient.java
+++ /dev/null
@@ -1,453 +0,0 @@
-/*
- * Copyright Skodjob authors.
- * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
- */
-package io.odh.test.platform;
-
-import io.fabric8.kubernetes.api.model.ConfigMap;
-import io.fabric8.kubernetes.api.model.HasMetadata;
-import io.fabric8.kubernetes.api.model.KubernetesResourceList;
-import io.fabric8.kubernetes.api.model.LabelSelector;
-import io.fabric8.kubernetes.api.model.Namespace;
-import io.fabric8.kubernetes.api.model.Node;
-import io.fabric8.kubernetes.api.model.Pod;
-import io.fabric8.kubernetes.api.model.apps.Deployment;
-import io.fabric8.kubernetes.api.model.apps.StatefulSet;
-import io.fabric8.kubernetes.api.model.batch.v1.Job;
-import io.fabric8.kubernetes.api.model.batch.v1.JobList;
-import io.fabric8.kubernetes.api.model.batch.v1.JobStatus;
-import io.fabric8.kubernetes.client.Config;
-import io.fabric8.kubernetes.client.ConfigBuilder;
-import io.fabric8.kubernetes.client.KubernetesClient;
-import io.fabric8.kubernetes.client.KubernetesClientBuilder;
-import io.fabric8.kubernetes.client.dsl.MixedOperation;
-import io.fabric8.kubernetes.client.dsl.Resource;
-import io.fabric8.kubernetes.client.dsl.RollableScalableResource;
-import io.fabric8.openshift.api.model.operatorhub.v1alpha1.InstallPlan;
-import io.fabric8.openshift.api.model.operatorhub.v1alpha1.InstallPlanBuilder;
-import io.fabric8.openshift.client.OpenShiftClient;
-import io.odh.test.Environment;
-import io.odh.test.TestConstants;
-import io.odh.test.TestUtils;
-import io.odh.test.platform.executor.Exec;
-import io.opendatahub.v1alpha.OdhDashboardConfig;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.util.Arrays;
-import java.util.List;
-import java.util.function.Function;
-import java.util.stream.Collectors;
-
-public class KubeClient {
- protected final KubernetesClient client;
- protected String namespace;
- private String kubeconfigPath;
-
- private static final Logger LOGGER = LoggerFactory.getLogger(KubeClient.class);
-
- public KubeClient(String namespace) {
- LOGGER.debug("Creating client in namespace: {}", namespace);
- Config config = getConfig();
-
- this.client = new KubernetesClientBuilder()
- .withConfig(config)
- .build()
- .adapt(OpenShiftClient.class);
- this.namespace = namespace;
- }
-
- public KubeClient(Config config, String namespace) {
- this.client = new KubernetesClientBuilder()
- .withConfig(config)
- .build()
- .adapt(OpenShiftClient.class);
- this.namespace = namespace;
- }
-
- public KubeClient(KubernetesClient client, String namespace) {
- LOGGER.debug("Creating client in namespace: {}", namespace);
- this.client = client;
- this.namespace = namespace;
- }
-
- // ============================
- // ---------> CLIENT <---------
- // ============================
-
- public KubernetesClient getClient() {
- return client;
- }
-
- // ===============================
- // ---------> NAMESPACE <---------
- // ===============================
-
- public KubeClient inNamespace(String namespace) {
- LOGGER.debug("Using namespace: {}", namespace);
- this.namespace = namespace;
- return this;
- }
-
- private Config getConfig() {
- if (Environment.KUBE_URL != null
- && Environment.KUBE_TOKEN != null) {
- Exec.exec(Arrays.asList("oc", "login", "--token", Environment.KUBE_TOKEN,
- "--insecure-skip-tls-verify",
- "--kubeconfig", Environment.USER_PATH + "/test.kubeconfig",
- Environment.KUBE_URL));
- kubeconfigPath = Environment.USER_PATH + "/test.kubeconfig";
- return new ConfigBuilder()
- .withOauthToken(Environment.KUBE_TOKEN)
- .withMasterUrl(Environment.KUBE_URL)
- .withDisableHostnameVerification(true)
- .withTrustCerts(true)
- .build();
- } else {
- return Config.autoConfigure(System.getenv()
- .getOrDefault("KUBE_CONTEXT", null));
- }
- }
-
- public Namespace getNamespace(String namespace) {
- return client.namespaces().withName(namespace).get();
- }
-
- public boolean namespaceExists(String namespace) {
- return client.namespaces().list().getItems().stream().map(n -> n.getMetadata().getName())
- .toList().contains(namespace);
- }
-
- public String getKubeconfigPath() {
- return kubeconfigPath;
- }
-
- // ==================================================
- // ---------> Create/read multi-resources <---------
- // ==================================================
- public void create(String namespace, InputStream is, Function modifier) throws IOException {
- try (is) {
- client.load(is).get().forEach(i -> {
- HasMetadata h = modifier.apply(i);
- if (h != null) {
- if (client.resource(h).inNamespace(namespace).get() == null) {
- LOGGER.debug("Creating {} {}/{}", h.getKind(), namespace, h.getMetadata().getName());
- client.resource(h).inNamespace(namespace).create();
- } else {
- LOGGER.debug("Updating {} {}/{}", h.getKind(), namespace, h.getMetadata().getName());
- client.resource(h).inNamespace(namespace).update();
- }
- }
- });
- }
- }
-
- public void create(InputStream is, Function modifier) throws IOException {
- try (is) {
- client.load(is).get().forEach(i -> {
- HasMetadata h = modifier.apply(i);
- if (h != null) {
- if (client.resource(h).get() == null) {
- LOGGER.debug("Creating {} {}/{}", h.getKind(), h.getMetadata().getNamespace(), h.getMetadata().getName());
- client.resource(h).create();
- } else {
- LOGGER.debug("Updating {} {}/{}", h.getKind(), h.getMetadata().getNamespace(), h.getMetadata().getName());
- client.resource(h).update();
- }
- }
- });
- }
- }
-
- public void create(String namespace, List resources, Function modifier) {
- resources.forEach(i -> {
- HasMetadata h = modifier.apply(i);
- if (h != null) {
- if (client.resource(h).inNamespace(namespace).get() == null) {
- LOGGER.debug("Creating {} {}/{}", h.getKind(), namespace, h.getMetadata().getName());
- client.resource(h).inNamespace(namespace).create();
- } else {
- LOGGER.debug("Updating {} {}/{}", h.getKind(), namespace, h.getMetadata().getName());
- client.resource(h).inNamespace(namespace).update();
- }
- }
- });
- }
-
- public void create(List resources, Function modifier) {
- resources.forEach(i -> {
- HasMetadata h = modifier.apply(i);
- if (h != null) {
- if (client.resource(h).get() == null) {
- LOGGER.debug("Creating {} {}/{}", h.getKind(), h.getMetadata().getNamespace(), h.getMetadata().getName());
- client.resource(h).create();
- } else {
- LOGGER.debug("Updating {} {}/{}", h.getKind(), h.getMetadata().getNamespace(), h.getMetadata().getName());
- client.resource(h).update();
- }
- }
- });
- }
-
- public void delete(List resources) {
- resources.forEach(h -> {
- if (h != null) {
- if (client.resource(h).get() != null) {
- LOGGER.debug("Deleting {} {}/{}", h.getKind(), h.getMetadata().getNamespace(), h.getMetadata().getName());
- client.resource(h).delete();
- }
- }
- });
- }
-
- public void delete(List resources, String namespace) {
- resources.forEach(h -> {
- if (h != null) {
- if (client.resource(h).inNamespace(namespace).get() != null) {
- LOGGER.debug("Deleting {} {}/{}", h.getKind(), namespace, h.getMetadata().getName());
- client.resource(h).inNamespace(namespace).delete();
- }
- }
- });
- }
-
- public List readResourcesFromYaml(InputStream is) throws IOException {
- try (is) {
- return client.load(is).items();
- }
- }
-
- /**
- * Gets namespace status
- */
- public boolean getNamespaceStatus(String namespaceName) {
- return client.namespaces().withName(namespaceName).isReady();
- }
-
- // ================================
- // ---------> CONFIG MAP <---------
- // ================================
- public ConfigMap getConfigMap(String namespaceName, String configMapName) {
- return client.configMaps().inNamespace(namespaceName).withName(configMapName).get();
- }
-
- public ConfigMap getConfigMap(String configMapName) {
- return getConfigMap(namespace, configMapName);
- }
-
-
- public boolean getConfigMapStatus(String namespace, String configMapName) {
- return client.configMaps().inNamespace(namespace).withName(configMapName).isReady();
- }
-
- // =========================
- // ---------> POD <---------
- // =========================
- public List listPods() {
- return client.pods().inNamespace(namespace).list().getItems();
- }
-
- public List listPods(String namespaceName) {
- return client.pods().inNamespace(namespaceName).list().getItems();
- }
-
- public List listPods(String namespaceName, LabelSelector selector) {
- return client.pods().inNamespace(namespaceName).withLabelSelector(selector).list().getItems();
- }
-
- /**
- * Returns list of pods by prefix in pod name
- *
- * @param namespaceName Namespace name
- * @param podNamePrefix prefix with which the name should begin
- * @return List of pods
- */
- public List listPodsByPrefixInName(String namespaceName, String podNamePrefix) {
- return listPods(namespaceName)
- .stream().filter(p -> p.getMetadata().getName().startsWith(podNamePrefix))
- .collect(Collectors.toList());
- }
-
- /**
- * Gets pod
- */
- public Pod getPod(String namespaceName, String name) {
- return client.pods().inNamespace(namespaceName).withName(name).get();
- }
-
- public Pod getPod(String name) {
- return getPod(namespace, name);
- }
-
- public String getLogsFromPod(String namespaceName, String podName) {
- return client.pods().inNamespace(namespaceName).withName(podName).getLog();
- }
-
- public String getLogsFromContainer(String namespaceName, String podName, String containerName) {
- return client.pods().inNamespace(namespaceName).withName(podName).inContainer(containerName).getLog();
- }
-
- // ==================================
- // ---------> STATEFUL SET <---------
- // ==================================
-
- /**
- * Gets stateful set
- */
- public StatefulSet getStatefulSet(String namespaceName, String statefulSetName) {
- return client.apps().statefulSets().inNamespace(namespaceName).withName(statefulSetName).get();
- }
-
- public StatefulSet getStatefulSet(String statefulSetName) {
- return getStatefulSet(namespace, statefulSetName);
- }
-
- /**
- * Gets stateful set
- */
- public RollableScalableResource statefulSet(String namespaceName, String statefulSetName) {
- return client.apps().statefulSets().inNamespace(namespaceName).withName(statefulSetName);
- }
-
- public RollableScalableResource statefulSet(String statefulSetName) {
- return statefulSet(namespace, statefulSetName);
- }
- // ================================
- // ---------> DEPLOYMENT <---------
- // ================================
-
- /**
- * Gets deployment
- */
-
- public Deployment getDeployment(String namespaceName, String deploymentName) {
- return client.apps().deployments().inNamespace(namespaceName).withName(deploymentName).get();
- }
-
- public Deployment getDeployment(String deploymentName) {
- return client.apps().deployments().inNamespace(namespace).withName(deploymentName).get();
- }
-
- public Deployment getDeploymentFromAnyNamespaces(String deploymentName) {
- return client.apps().deployments().inAnyNamespace().list().getItems().stream().filter(
- deployment -> deployment.getMetadata().getName().equals(deploymentName))
- .findFirst()
- .orElseThrow();
- }
-
- /**
- * Gets deployment status
- */
- public LabelSelector getDeploymentSelectors(String namespaceName, String deploymentName) {
- return client.apps().deployments().inNamespace(namespaceName).withName(deploymentName).get().getSpec().getSelector();
- }
-
- // ==========================
- // ---------> NODE <---------
- // ==========================
-
- public String getNodeAddress() {
- return listNodes().get(0).getStatus().getAddresses().get(0).getAddress();
- }
-
- public List listNodes() {
- return client.nodes().list().getItems();
- }
-
- public List listWorkerNodes() {
- return listNodes().stream().filter(node -> node.getMetadata().getLabels().containsKey("node-role.kubernetes.io/worker")).collect(Collectors.toList());
- }
-
- public List listMasterNodes() {
- return listNodes().stream().filter(node -> node.getMetadata().getLabels().containsKey("node-role.kubernetes.io/master")).collect(Collectors.toList());
- }
-
- // =========================
- // ---------> JOB <---------
- // =========================
-
- public boolean jobExists(String jobName) {
- return client.batch().v1().jobs().inNamespace(namespace).list().getItems().stream().anyMatch(j -> j.getMetadata().getName().startsWith(jobName));
- }
-
- public Job getJob(String jobName) {
- return client.batch().v1().jobs().inNamespace(namespace).withName(jobName).get();
- }
-
- public boolean checkSucceededJobStatus(String namespace, String jobName) {
- return checkSucceededJobStatus(namespace, jobName, 1);
- }
-
- public boolean checkSucceededJobStatus(String namespaceName, String jobName, int expectedSucceededPods) {
- return getJobStatus(namespaceName, jobName).getSucceeded().equals(expectedSucceededPods);
- }
-
- public boolean checkFailedJobStatus(String namespaceName, String jobName, int expectedFailedPods) {
- return getJobStatus(namespaceName, jobName).getFailed().equals(expectedFailedPods);
- }
-
- // Pods Statuses: 0 Running / 0 Succeeded / 1 Failed
- public JobStatus getJobStatus(String namespaceName, String jobName) {
- return client.batch().v1().jobs().inNamespace(namespaceName).withName(jobName).get().getStatus();
- }
-
- public JobStatus getJobStatus(String jobName) {
- return getJobStatus(namespace, jobName);
- }
-
- public JobList getJobList() {
- return client.batch().v1().jobs().inNamespace(namespace).list();
- }
-
- public List listJobs(String namespace, String namePrefix) {
- return client.batch().v1().jobs().inNamespace(namespace).list().getItems().stream()
- .filter(job -> job.getMetadata().getName().startsWith(namePrefix)).collect(Collectors.toList());
- }
-
- public String getDeploymentNameByPrefix(String namespace, String namePrefix) {
- List prefixDeployments = client.apps().deployments().inNamespace(namespace).list().getItems().stream().filter(
- rs -> rs.getMetadata().getName().startsWith(namePrefix)).toList();
-
- if (!prefixDeployments.isEmpty()) {
- return prefixDeployments.get(0).getMetadata().getName();
- } else {
- return null;
- }
- }
-
- public InstallPlan getInstallPlan(String namespaceName, String installPlanName) {
- return client.adapt(OpenShiftClient.class).operatorHub().installPlans().inNamespace(namespaceName).withName(installPlanName).get();
- }
-
- public void approveInstallPlan(String namespaceName, String installPlanName) throws InterruptedException {
- LOGGER.debug("Approving InstallPlan {}", installPlanName);
- TestUtils.waitFor("InstallPlan approval", TestConstants.GLOBAL_POLL_INTERVAL_SHORT, 15_000, () -> {
- try {
- InstallPlan installPlan = new InstallPlanBuilder(this.getInstallPlan(namespaceName, installPlanName))
- .editSpec()
- .withApproved()
- .endSpec()
- .build();
-
- client.adapt(OpenShiftClient.class).operatorHub().installPlans().inNamespace(namespaceName).withName(installPlanName).patch(installPlan);
- return true;
- } catch (Exception ex) {
- LOGGER.error(String.valueOf(ex));
- return false;
- }
- });
- }
-
- public InstallPlan getNonApprovedInstallPlan(String namespaceName, String csvPrefix) {
- return client.adapt(OpenShiftClient.class).operatorHub().installPlans()
- .inNamespace(namespaceName).list().getItems().stream()
- .filter(installPlan -> !installPlan.getSpec().getApproved() && installPlan.getSpec().getClusterServiceVersionNames().toString().contains(csvPrefix))
- .findFirst().get();
- }
-
- public MixedOperation, Resource> dashboardConfigClient() {
- return client.resources(OdhDashboardConfig.class);
- }
-}
diff --git a/src/main/java/io/odh/test/platform/KubeClusterException.java b/src/main/java/io/odh/test/platform/KubeClusterException.java
deleted file mode 100644
index e7d89c1a..00000000
--- a/src/main/java/io/odh/test/platform/KubeClusterException.java
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * Copyright Skodjob authors.
- * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
- */
-package io.odh.test.platform;
-
-
-import io.odh.test.platform.executor.ExecResult;
-
-public class KubeClusterException extends RuntimeException {
- public final ExecResult result;
-
- public KubeClusterException(ExecResult result, String s) {
- super(s);
- this.result = result;
- }
-
- public KubeClusterException(Throwable cause) {
- super(cause);
- this.result = null;
- }
-
- public static class NotFound extends KubeClusterException {
-
- public NotFound(ExecResult result, String s) {
- super(result, s);
- }
- }
-
- public static class AlreadyExists extends KubeClusterException {
-
- public AlreadyExists(ExecResult result, String s) {
- super(result, s);
- }
- }
-
- public static class InvalidResource extends KubeClusterException {
-
- public InvalidResource(ExecResult result, String s) {
- super(result, s);
- }
- }
-}
diff --git a/src/main/java/io/odh/test/platform/KubeUtils.java b/src/main/java/io/odh/test/platform/KubeUtils.java
deleted file mode 100644
index 8d0bd151..00000000
--- a/src/main/java/io/odh/test/platform/KubeUtils.java
+++ /dev/null
@@ -1,95 +0,0 @@
-/*
- * Copyright Skodjob authors.
- * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
- */
-package io.odh.test.platform;
-
-import io.fabric8.kubernetes.api.model.EndpointSubset;
-import io.fabric8.kubernetes.api.model.Endpoints;
-import io.fabric8.kubernetes.client.KubernetesClientException;
-import io.fabric8.kubernetes.client.dsl.Resource;
-import io.fabric8.openshift.api.model.operatorhub.v1alpha1.InstallPlan;
-import io.odh.test.TestConstants;
-import io.odh.test.TestUtils;
-import io.odh.test.framework.manager.ResourceManager;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.List;
-import java.util.NoSuchElementException;
-
-public class KubeUtils {
-
- static final Logger LOGGER = LoggerFactory.getLogger(KubeUtils.class);
-
- public static io.opendatahub.datasciencecluster.v1.datascienceclusterstatus.Conditions getDscConditionByType(List conditions, String type) {
- return conditions.stream().filter(c -> c.getType().equals(type)).findFirst().orElseGet(null);
- }
-
- public static org.kubeflow.v1.notebookstatus.Conditions getNotebookConditionByType(List conditions, String type) {
- return conditions.stream().filter(c -> c.getType().equals(type)).findFirst().orElseGet(null);
- }
-
- public static io.kserve.serving.v1beta1.inferenceservicestatus.Conditions getInferenceServiceConditionByType(List conditions, String type) {
- return conditions.stream().filter(c -> c.getType().equals(type)).findFirst().orElseGet(null);
- }
-
- public static void clearOdhRemainingResources() {
- ResourceManager.getKubeClient().getClient().apiextensions().v1().customResourceDefinitions().list().getItems()
- .stream().filter(crd -> crd.getMetadata().getName().contains("opendatahub.io")).toList()
- .forEach(crd -> {
- LOGGER.info("Deleting CRD {}", crd.getMetadata().getName());
- ResourceManager.getKubeClient().getClient().resource(crd).delete();
- });
- ResourceManager.getKubeClient().getClient().namespaces().withName("opendatahub").delete();
- }
-
- /**
- * TODO - this should be removed when https://github.com/opendatahub-io/opendatahub-operator/issues/765 will be resolved
- */
- public static void deleteDefaultDSCI() {
- LOGGER.info("Clearing DSCI ...");
- ResourceManager.getKubeCmdClient().exec(false, true, Long.valueOf(TestConstants.GLOBAL_TIMEOUT).intValue(), "delete", "dsci", "--all");
- }
-
- public static void waitForInstallPlan(String namespace, String csvName) {
- TestUtils.waitFor(String.format("Install plan with new version: %s:%s", namespace, csvName),
- TestConstants.GLOBAL_POLL_INTERVAL_SHORT, TestConstants.GLOBAL_TIMEOUT, () -> {
- try {
- InstallPlan ip = ResourceManager.getKubeClient().getNonApprovedInstallPlan(namespace, csvName);
- LOGGER.debug("Found InstallPlan {} - {}", ip.getMetadata().getName(), ip.getSpec().getClusterServiceVersionNames());
- return true;
- } catch (NoSuchElementException ex) {
- LOGGER.debug("No new install plan available. Checking again ...");
- return false;
- }
- }, () -> { });
- }
-
- private KubeUtils() {
- }
-
- public static void waitForEndpoints(String name, Resource endpoints) {
- TestUtils.waitFor("%s service endpoints to come up".formatted(name), TestConstants.GLOBAL_POLL_INTERVAL_SHORT, TestConstants.GLOBAL_TIMEOUT, () -> {
- try {
- Endpoints endpointset = endpoints.get();
- if (endpointset == null) {
- return false;
- }
- List subsets = endpointset.getSubsets();
- if (subsets.isEmpty()) {
- return false;
- }
- for (EndpointSubset subset : subsets) {
- return !subset.getAddresses().isEmpty();
- }
- } catch (KubernetesClientException e) {
- if (e.getCode() == 404) {
- return false;
- }
- throw e;
- }
- return false;
- });
- }
-}
diff --git a/src/main/java/io/odh/test/platform/RayClient.java b/src/main/java/io/odh/test/platform/RayClient.java
index c01758be..53821131 100644
--- a/src/main/java/io/odh/test/platform/RayClient.java
+++ b/src/main/java/io/odh/test/platform/RayClient.java
@@ -10,7 +10,7 @@
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.PropertyNamingStrategies;
import io.odh.test.TestConstants;
-import io.odh.test.TestUtils;
+import io.skodjob.testframe.wait.Wait;
import lombok.SneakyThrows;
import java.io.IOException;
@@ -67,7 +67,7 @@ public void waitForJob(String jobId) {
.GET()
.build();
- TestUtils.waitFor("ray job to finish executing", TestConstants.GLOBAL_POLL_INTERVAL_SHORT, TestConstants.GLOBAL_TIMEOUT, () -> {
+ Wait.until("ray job to finish executing", TestConstants.GLOBAL_POLL_INTERVAL_SHORT, TestConstants.GLOBAL_TIMEOUT, () -> {
HttpResponse result;
try {
result = httpClient.send(request, HttpResponse.BodyHandlers.ofString());
diff --git a/src/main/java/io/odh/test/platform/cmdClient/BaseCmdKubeClient.java b/src/main/java/io/odh/test/platform/cmdClient/BaseCmdKubeClient.java
deleted file mode 100644
index 9dd9ee4b..00000000
--- a/src/main/java/io/odh/test/platform/cmdClient/BaseCmdKubeClient.java
+++ /dev/null
@@ -1,406 +0,0 @@
-/*
- * Copyright Skodjob authors.
- * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
- */
-package io.odh.test.platform.cmdClient;
-
-import io.odh.test.platform.KubeClusterException;
-import io.odh.test.platform.executor.Exec;
-import io.odh.test.platform.executor.ExecResult;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.File;
-import java.nio.file.NoSuchFileException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Comparator;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.function.Consumer;
-import java.util.stream.Collectors;
-
-import static java.lang.String.join;
-import static java.util.Arrays.asList;
-
-public abstract class BaseCmdKubeClient> implements KubeCmdClient {
-
- private static final Logger LOGGER = LoggerFactory.getLogger(BaseCmdKubeClient.class);
-
- private static final String CREATE = "create";
- private static final String APPLY = "apply";
- private static final String DELETE = "delete";
- private static final String REPLACE = "replace";
- private static final String PROCESS = "process";
-
- public static final String STATEFUL_SET = "statefulset";
- public static final String CM = "cm";
-
- protected String config;
-
- String namespace = defaultNamespace();
-
- protected BaseCmdKubeClient(String config) {
- this.config = config;
- }
-
- @Override
- public abstract String cmd();
-
- @Override
- @SuppressWarnings("unchecked")
- public K deleteByName(String resourceType, String resourceName) {
- Exec.exec(namespacedCommand(DELETE, resourceType, resourceName));
- return (K) this;
- }
-
- protected static class Context implements AutoCloseable {
- @Override
- public void close() {
- }
- }
-
- private static final Context NOOP = new Context();
-
- protected Context defaultContext() {
- return NOOP;
- }
-
- // Admin context is not implemented now, because it's not needed
- // In case it will be needed in the future, we should change the kubeconfig and apply it for both oc and kubectl
- protected Context adminContext() {
- return defaultContext();
- }
-
- protected List namespacedCommand(String... rest) {
- return command(asList(rest), true);
- }
-
- @Override
- public String get(String resource, String resourceName) {
- return Exec.exec(namespacedCommand("get", resource, resourceName, "-o", "yaml")).out();
- }
-
- @Override
- public String getEvents() {
- return Exec.exec(namespacedCommand("get", "events")).out();
- }
-
- @Override
- @SuppressWarnings("unchecked")
- public K create(File... files) {
- try (Context context = defaultContext()) {
- Map execResults = execRecursive(CREATE, files, Comparator.comparing(File::getName).reversed());
- for (Map.Entry entry : execResults.entrySet()) {
- if (!entry.getValue().exitStatus()) {
- LOGGER.warn("Failed to create {}!", entry.getKey().getAbsolutePath());
- LOGGER.debug(entry.getValue().err());
- }
- }
- return (K) this;
- }
- }
-
- @Override
- @SuppressWarnings("unchecked")
- public K apply(File... files) {
- try (Context context = defaultContext()) {
- Map execResults = execRecursive(APPLY, files, Comparator.comparing(File::getName).reversed());
- for (Map.Entry entry : execResults.entrySet()) {
- if (!entry.getValue().exitStatus()) {
- LOGGER.warn("Failed to apply {}!", entry.getKey().getAbsolutePath());
- LOGGER.debug(entry.getValue().err());
- }
- }
- return (K) this;
- }
- }
-
- @Override
- @SuppressWarnings("unchecked")
- public K delete(File... files) {
- try (Context context = defaultContext()) {
- Map execResults = execRecursive(DELETE, files, Comparator.comparing(File::getName).reversed());
- for (Map.Entry entry : execResults.entrySet()) {
- if (!entry.getValue().exitStatus()) {
- LOGGER.warn("Failed to delete {}!", entry.getKey().getAbsolutePath());
- LOGGER.debug(entry.getValue().err());
- }
- }
- return (K) this;
- }
- }
-
- private Map execRecursive(String subcommand, File[] files, Comparator cmp) {
- Map execResults = new HashMap<>(25);
- for (File f : files) {
- if (f.isFile()) {
- if (f.getName().endsWith(".yaml")) {
- execResults.put(f, Exec.exec(null, namespacedCommand(subcommand, "-f", f.getAbsolutePath()), 0, false, false));
- }
- } else if (f.isDirectory()) {
- File[] children = f.listFiles();
- if (children != null) {
- Arrays.sort(children, cmp);
- execResults.putAll(execRecursive(subcommand, children, cmp));
- }
- } else if (!f.exists()) {
- throw new RuntimeException(new NoSuchFileException(f.getPath()));
- }
- }
- return execResults;
- }
-
- @Override
- @SuppressWarnings("unchecked")
- public K replace(File... files) {
- try (Context context = defaultContext()) {
- Map execResults = execRecursive(REPLACE, files, Comparator.comparing(File::getName));
- for (Map.Entry entry : execResults.entrySet()) {
- if (!entry.getValue().exitStatus()) {
- LOGGER.warn("Failed to replace {}!", entry.getKey().getAbsolutePath());
- LOGGER.debug(entry.getValue().err());
- }
- }
- return (K) this;
- }
- }
-
- @Override
- @SuppressWarnings("unchecked")
- public K applyContentInNamespace(String yamlContent) {
- try (Context context = defaultContext()) {
- Exec.exec(yamlContent, namespacedCommand(APPLY, "-f", "-"));
- return (K) this;
- }
- }
-
- @Override
- @SuppressWarnings("unchecked")
- public K deleteContentInNamespace(String yamlContent) {
- try (Context context = defaultContext()) {
- Exec.exec(yamlContent, namespacedCommand(DELETE, "-f", "-"), 0, true, false);
- return (K) this;
- }
- }
-
- @Override
- @SuppressWarnings("unchecked")
- public K applyContent(String yamlContent) {
- try (Context context = defaultContext()) {
- Exec.exec(yamlContent, command(Arrays.asList(APPLY, "-f", "-"), false), 0, true, true);
- return (K) this;
- }
- }
-
- @Override
- @SuppressWarnings("unchecked")
- public K deleteContent(String yamlContent) {
- try (Context context = defaultContext()) {
- Exec.exec(yamlContent, command(Arrays.asList(DELETE, "-f", "-"), false), 0, true, false);
- return (K) this;
- }
- }
-
- @Override
- @SuppressWarnings("unchecked")
- public K createNamespace(String name) {
- try (Context context = adminContext()) {
- Exec.exec(namespacedCommand(CREATE, "namespace", name));
- }
- return (K) this;
- }
-
- @Override
- @SuppressWarnings("unchecked")
- public K deleteNamespace(String name) {
- try (Context context = adminContext()) {
- Exec.exec(null, namespacedCommand(DELETE, "namespace", name), 0, true, false);
- }
- return (K) this;
- }
-
- @Override
- @SuppressWarnings("unchecked")
- public K scaleByName(String kind, String name, int replicas) {
- try (Context context = defaultContext()) {
- Exec.exec(null, namespacedCommand("scale", kind, name, "--replicas", Integer.toString(replicas)));
- return (K) this;
- }
- }
-
- @Override
- public ExecResult execInPod(String pod, String... command) {
- List cmd = namespacedCommand("exec", pod, "--");
- cmd.addAll(asList(command));
- return Exec.exec(cmd);
- }
-
- @Override
- public ExecResult execInPodContainer(String pod, String container, String... command) {
- return execInPodContainer(true, pod, container, command);
- }
-
- @Override
- public ExecResult execInPodContainer(boolean logToOutput, String pod, String container, String... command) {
- List cmd = namespacedCommand("exec", pod, "-c", container, "--");
- cmd.addAll(asList(command));
- return Exec.exec(null, cmd, 0, logToOutput);
- }
-
- @Override
- public ExecResult exec(String... command) {
- return exec(true, command);
- }
-
- @Override
- public ExecResult exec(boolean throwError, String... command) {
- return exec(throwError, true, command);
- }
-
- @Override
- public ExecResult exec(boolean throwError, boolean logToOutput, String... command) {
- List cmd = command(asList(command), false);
- return Exec.exec(null, cmd, 0, logToOutput, throwError);
- }
-
- @Override
- public ExecResult exec(boolean throwError, boolean logToOutput, int timeout, String... command) {
- List cmd = command(asList(command), false);
- return Exec.exec(null, cmd, timeout, logToOutput, throwError);
- }
-
- @Override
- public ExecResult execInCurrentNamespace(String... commands) {
- return Exec.exec(namespacedCommand(commands));
- }
-
- @Override
- public ExecResult execInCurrentNamespace(boolean logToOutput, String... commands) {
- return Exec.exec(null, namespacedCommand(commands), 0, logToOutput);
- }
-
- enum ExType {
- BREAK,
- CONTINUE,
- THROW
- }
-
- @Override
- public String toString() {
- return cmd();
- }
-
- @Override
- public List list(String resourceType) {
- return Arrays.stream(Exec.exec(namespacedCommand("get", resourceType, "-o", "jsonpath={range .items[*]}{.metadata.name} ")).out().trim().split(" +"))
- .filter(s -> !s.trim().isEmpty()).collect(Collectors.toList());
- }
-
- @Override
- public String getResourceAsJson(String resourceType, String resourceName) {
- return Exec.exec(namespacedCommand("get", resourceType, resourceName, "-o", "json")).out();
- }
-
- @Override
- public String getResourceAsYaml(String resourceType, String resourceName) {
- return Exec.exec(namespacedCommand("get", resourceType, resourceName, "-o", "yaml")).out();
- }
-
- @Override
- public String getResourcesAsYaml(String resourceType) {
- return Exec.exec(namespacedCommand("get", resourceType, "-o", "yaml")).out();
- }
-
- @Override
- public void createResourceAndApply(String template, Map params) {
- List cmd = namespacedCommand("process", template, "-l", "app=" + template, "-o", "yaml");
- for (Map.Entry entry : params.entrySet()) {
- cmd.add("-p");
- cmd.add(entry.getKey() + "=" + entry.getValue());
- }
-
- String yaml = Exec.exec(cmd).out();
- applyContentInNamespace(yaml);
- }
-
- @Override
- public String describe(String resourceType, String resourceName) {
- return Exec.exec(namespacedCommand("describe", resourceType, resourceName)).out();
- }
-
- @Override
- public String logs(String pod, String container) {
- String[] args;
- if (container != null) {
- args = new String[]{"logs", pod, "-c", container};
- } else {
- args = new String[]{"logs", pod};
- }
- return Exec.exec(namespacedCommand(args)).out();
- }
-
- @Override
- public String searchInLog(String resourceType, String resourceName, long sinceSeconds, String... grepPattern) {
- try {
- return Exec.exec("bash", "-c", join(" ", namespacedCommand("logs", resourceType + "/" + resourceName, "--since=" + sinceSeconds + "s",
- "|", "grep", " -e " + join(" -e ", grepPattern), "-B", "1"))).out();
- } catch (KubeClusterException e) {
- if (e.result != null && e.result.returnCode() == 1) {
- LOGGER.info("{} not found", Arrays.stream(grepPattern).toList());
- } else {
- LOGGER.error("Caught exception while searching {} in logs", Arrays.stream(grepPattern).toList());
- }
- }
- return "";
- }
-
- @Override
- public String searchInLog(String resourceType, String resourceName, String resourceContainer, long sinceSeconds, String... grepPattern) {
- try {
- return Exec.exec("bash", "-c", join(" ", namespacedCommand("logs", resourceType + "/" + resourceName, "-c " + resourceContainer, "--since=" + sinceSeconds + "s",
- "|", "grep", " -e " + join(" -e ", grepPattern), "-B", "1"))).out();
- } catch (KubeClusterException e) {
- if (e.result != null && e.result.exitStatus()) {
- LOGGER.info("{} not found", Arrays.stream(grepPattern).toList());
- } else {
- LOGGER.error("Caught exception while searching {} in logs", Arrays.stream(grepPattern).toList());
- }
- }
- return "";
- }
-
- @Override
- public List listResourcesByLabel(String resourceType, String label) {
- return asList(Exec.exec(namespacedCommand("get", resourceType, "-l", label, "-o", "jsonpath={range .items[*]}{.metadata.name} ")).out().split("\\s+"));
- }
-
- private List command(List rest, boolean namespaced) {
- List result = new ArrayList<>();
- result.add(cmd());
- if (config != null) {
- result.add("--kubeconfig");
- result.add(config);
- }
- if (namespaced) {
- result.add("--namespace");
- result.add(namespace);
- }
- result.addAll(rest);
- return result;
- }
-
- @Override
- @SuppressWarnings("unchecked")
- public K process(Map parameters, String file, Consumer c) {
- List command = command(asList(PROCESS, "-f", file), false);
- command.addAll(parameters.entrySet().stream().map(e -> e.getKey() + "=" + e.getValue()).toList());
- ExecResult exec = Exec.builder()
- .throwErrors(true)
- .withCommand(command)
- .exec();
- c.accept(exec.out());
- return (K) this;
- }
-}
diff --git a/src/main/java/io/odh/test/platform/cmdClient/KubeCmdClient.java b/src/main/java/io/odh/test/platform/cmdClient/KubeCmdClient.java
deleted file mode 100644
index 6cc552e9..00000000
--- a/src/main/java/io/odh/test/platform/cmdClient/KubeCmdClient.java
+++ /dev/null
@@ -1,218 +0,0 @@
-/*
- * Copyright Skodjob authors.
- * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
- */
-package io.odh.test.platform.cmdClient;
-
-
-import io.odh.test.platform.executor.ExecResult;
-
-import java.io.File;
-import java.util.List;
-import java.util.Map;
-import java.util.function.Consumer;
-
-import static java.util.Arrays.asList;
-import static java.util.stream.Collectors.toList;
-
-/**
- * Abstraction for a kubernetes client.
- *
- * @param The subtype of KubeClient, for fluency.
- */
-public interface KubeCmdClient> {
-
- String defaultNamespace();
-
- String defaultOlmNamespace();
-
- /**
- * Deletes the resources by resource name.
- */
- K deleteByName(String resourceType, String resourceName);
-
- KubeCmdClient namespace(String namespace);
-
- /**
- * Returns namespace for cluster
- */
- String namespace();
-
- /**
- * Creates the resources in the given files.
- */
- K create(File... files);
-
- /**
- * Creates the resources in the given files.
- */
- K apply(File... files);
-
- /**
- * Deletes the resources in the given files.
- */
- K delete(File... files);
-
- default K create(String... files) {
- return create(asList(files).stream().map(File::new).collect(toList()).toArray(new File[0]));
- }
-
- default K apply(String... files) {
- return apply(asList(files).stream().map(File::new).collect(toList()).toArray(new File[0]));
- }
-
- default K delete(String... files) {
- return delete(asList(files).stream().map(File::new).collect(toList()).toArray(new File[0]));
- }
-
- /**
- * Replaces the resources in the given files.
- */
- K replace(File... files);
-
- K applyContentInNamespace(String yamlContent);
-
- K deleteContentInNamespace(String yamlContent);
-
- K applyContent(String yamlContent);
-
- K deleteContent(String yamlContent);
-
- K createNamespace(String name);
-
- K deleteNamespace(String name);
-
- /**
- * Scale resource using the scale subresource
- *
- * @param kind Kind of the resource which should be scaled
- * @param name Name of the resource which should be scaled
- * @param replicas Number of replicas to which the resource should be scaled
- * @return This kube client
- */
- K scaleByName(String kind, String name, int replicas);
-
- /**
- * Execute the given {@code command} in the given {@code pod}.
- *
- * @param pod The pod
- * @param command The command
- * @return The process result.
- */
- ExecResult execInPod(String pod, String... command);
-
- ExecResult execInCurrentNamespace(String... commands);
-
- ExecResult execInCurrentNamespace(boolean logToOutput, String... commands);
-
- /**
- * Execute the given {@code command} in the given {@code container} which is deployed in {@code pod}.
- *
- * @param pod The pod
- * @param container The container
- * @param command The command
- * @return The process result.
- */
- ExecResult execInPodContainer(String pod, String container, String... command);
-
- ExecResult execInPodContainer(boolean logToOutput, String pod, String container, String... command);
-
- /**
- * Execute the given {@code command}.
- *
- * @param command The command
- * @return The process result.
- */
- ExecResult exec(String... command);
-
- /**
- * Execute the given {@code command}. You can specify if potential failure will thrown the exception or not.
- *
- * @param throwError parameter which control thrown exception in case of failure
- * @param command The command
- * @return The process result.
- */
- ExecResult exec(boolean throwError, String... command);
-
- /**
- * Execute the given {@code command}. You can specify if potential failure will thrown the exception or not.
- *
- * @param throwError parameter which control thrown exception in case of failure
- * @param command The command
- * @param logToOutput determines if we want to print whole output of command
- * @return The process result.
- */
- ExecResult exec(boolean throwError, boolean logToOutput, String... command);
-
- /**
- * Execute the given {@code command}. You can specify if potential failure will thrown the exception or not.
- *
- * @param throwError parameter which control thrown exception in case of failure
- * @param command The command
- * @param timeout tiemout in ms
- * @param logToOutput determines if we want to print whole output of command
- * @return The process result.
- */
- ExecResult exec(boolean throwError, boolean logToOutput, int timeout, String... command);
-
- /**
- * Get the content of the given {@code resource} with the given {@code name} as YAML.
- *
- * @param resource The type of resource (e.g. "cm").
- * @param resourceName The name of the resource.
- * @return The resource YAML.
- */
- String get(String resource, String resourceName);
-
- /**
- * Get a list of events in a given namespace
- *
- * @return List of events
- */
- String getEvents();
-
- List list(String resourceType);
-
- String getResourceAsYaml(String resourceType, String resourceName);
-
- String getResourcesAsYaml(String resourceType);
-
- void createResourceAndApply(String template, Map params);
-
- String describe(String resourceType, String resourceName);
-
- default String logs(String pod) {
- return logs(pod, null);
- }
-
- String logs(String pod, String container);
-
- /**
- * @param resourceType The type of resource
- * @param resourceName The name of resource
- * @param sinceSeconds Return logs newer than a relative duration like 5s, 2m, or 3h.
- * @param grepPattern Grep patterns for search
- * @return Grep result as string
- */
- String searchInLog(String resourceType, String resourceName, long sinceSeconds, String... grepPattern);
-
- /**
- * @param resourceType The type of resource
- * @param resourceName The name of resource
- * @param resourceContainer The name of resource container
- * @param sinceSeconds Return logs newer than a relative duration like 5s, 2m, or 3h.
- * @param grepPattern Grep patterns for search
- * @return Grep result as string
- */
- String searchInLog(String resourceType, String resourceName, String resourceContainer, long sinceSeconds, String... grepPattern);
-
- String getResourceAsJson(String resourceType, String resourceName);
-
- List listResourcesByLabel(String resourceType, String label);
-
- String cmd();
-
- K process(Map domain, String file, Consumer c);
-
- String getUsername();
-}
diff --git a/src/main/java/io/odh/test/platform/cmdClient/Kubectl.java b/src/main/java/io/odh/test/platform/cmdClient/Kubectl.java
deleted file mode 100644
index 2f75e480..00000000
--- a/src/main/java/io/odh/test/platform/cmdClient/Kubectl.java
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * Copyright Skodjob authors.
- * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
- */
-package io.odh.test.platform.cmdClient;
-
-/**
- * A {@link KubeCmdClient} wrapping {@code kubectl}.
- */
-public class Kubectl extends BaseCmdKubeClient {
-
- public static final String KUBECTL = "kubectl";
-
- public Kubectl() {
- this(null);
- }
-
- public Kubectl(String config) {
- super(config);
- }
-
- private Kubectl(String futureNamespace, String config) {
- super(config);
- namespace = futureNamespace;
- }
-
- @Override
- public Kubectl namespace(String namespace) {
- return new Kubectl(namespace, config);
- }
-
- @Override
- public String namespace() {
- return namespace;
- }
-
- @Override
- public String defaultNamespace() {
- return "default";
- }
-
- @Override
- public String defaultOlmNamespace() {
- return "operators";
- }
-
- @Override
- public String cmd() {
- return KUBECTL;
- }
-
- @Override
- public String getUsername() {
- // TODO - implement this!
- return null;
- }
-}
diff --git a/src/main/java/io/odh/test/platform/cmdClient/Oc.java b/src/main/java/io/odh/test/platform/cmdClient/Oc.java
deleted file mode 100644
index 593edad1..00000000
--- a/src/main/java/io/odh/test/platform/cmdClient/Oc.java
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
- * Copyright Skodjob authors.
- * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
- */
-package io.odh.test.platform.cmdClient;
-
-import io.odh.test.platform.executor.Exec;
-
-import java.util.List;
-import java.util.Map;
-
-/**
- * A {@link KubeCmdClient} implementation wrapping {@code oc}.
- */
-public class Oc extends BaseCmdKubeClient {
-
- private static final String OC = "oc";
-
- public Oc() {
- this(null);
- }
-
- public Oc(String config) {
- super(config);
- }
-
- private Oc(String futureNamespace, String config) {
- super(config);
- namespace = futureNamespace;
- }
-
- @Override
- public String defaultNamespace() {
- return "myproject";
- }
-
- @Override
- public String defaultOlmNamespace() {
- return "openshift-marketplace";
- }
-
- @Override
- public Oc namespace(String namespace) {
- return new Oc(namespace, config);
- }
-
- @Override
- public String namespace() {
- return namespace;
- }
-
- @Override
- public Oc createNamespace(String name) {
- try (Context context = defaultContext()) {
- Exec.exec(cmd(), "new-project", name);
- }
- return this;
- }
-
- public Oc newApp(String template, Map params) {
- List cmd = namespacedCommand("new-app", template);
- for (Map.Entry entry : params.entrySet()) {
- cmd.add("-p");
- cmd.add(entry.getKey() + "=" + entry.getValue());
- }
-
- Exec.exec(cmd);
- return this;
- }
-
- @Override
- public String cmd() {
- return OC;
- }
-
- @Override
- public String getUsername() {
- return Exec.exec(cmd(), "whoami").out();
- }
-}
diff --git a/src/main/java/io/odh/test/platform/executor/Exec.java b/src/main/java/io/odh/test/platform/executor/Exec.java
deleted file mode 100644
index 04d6bd5e..00000000
--- a/src/main/java/io/odh/test/platform/executor/Exec.java
+++ /dev/null
@@ -1,424 +0,0 @@
-/*
- * Copyright Skodjob authors.
- * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
- */
-package io.odh.test.platform.executor;
-
-import io.fabric8.kubernetes.api.model.EnvVar;
-import io.odh.test.platform.KubeClusterException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.File;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-import java.nio.charset.Charset;
-import java.nio.charset.StandardCharsets;
-import java.nio.file.Files;
-import java.nio.file.Path;
-import java.nio.file.Paths;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.List;
-import java.util.Scanner;
-import java.util.Set;
-import java.util.concurrent.CompletableFuture;
-import java.util.concurrent.CompletionException;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.Future;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
-
-import static java.lang.String.join;
-
-public class Exec {
-
- private static final Logger LOGGER = LoggerFactory.getLogger(Exec.class);
-
- private static final Pattern ERROR_PATTERN = Pattern.compile("Error from server \\(([a-zA-Z0-9]+)\\):");
- private static final Pattern INVALID_PATTERN = Pattern.compile("The ([a-zA-Z0-9]+) \"([a-z0-9.-]+)\" is invalid:");
- private static final Pattern PATH_SPLITTER = Pattern.compile(System.getProperty("path.separator"));
- private static final int MAXIMUM_EXEC_LOG_CHARACTER_SIZE = 2000;
- private static final Object LOCK = new Object();
-
- public Process process;
- private String stdOut;
- private String stdErr;
- private StreamGobbler stdOutReader;
- private StreamGobbler stdErrReader;
- private Path logPath;
- private final boolean appendLineSeparator;
-
- public Exec() {
- this.appendLineSeparator = true;
- }
-
- public Exec(Path logPath) {
- this.appendLineSeparator = true;
- this.logPath = logPath;
- }
-
- public Exec(boolean appendLineSeparator) {
- this.appendLineSeparator = appendLineSeparator;
- }
-
- public static ExecBuilder builder() {
- return new ExecBuilder();
- }
-
-
- /**
- * Getter for stdOutput
- *
- * @return string stdOut
- */
- public String out() {
- return stdOut;
- }
-
- /**
- * Getter for stdErrorOutput
- *
- * @return string stdErr
- */
- public String err() {
- return stdErr;
- }
-
- public boolean isRunning() {
- return process.isAlive();
- }
-
- public int getRetCode() {
- LOGGER.info("Process: {}", process);
- if (isRunning()) {
- return -1;
- } else {
- return process.exitValue();
- }
- }
-
- /**
- * Method executes external command
- *
- * @param command arguments for command
- * @return execution results
- */
- public static ExecResult exec(String... command) {
- return exec(Arrays.asList(command));
- }
-
- /**
- * Method executes external command
- *
- * @param command arguments for command
- * @return execution results
- */
- public static ExecResult exec(boolean logToOutput, String... command) {
- List commands = new ArrayList<>(Arrays.asList(command));
- return exec(null, commands, 0, logToOutput);
- }
-
- /**
- * Method executes external command
- *
- * @param command arguments for command
- * @return execution results
- */
- public static ExecResult exec(List command) {
- return exec(null, command, 0, false);
- }
-
- /**
- * Method executes external command
- *
- * @param command arguments for command
- * @return execution results
- */
- public static ExecResult exec(String input, List command) {
- return exec(input, command, 0, false);
- }
-
- /**
- * Method executes external command
- *
- * @param command arguments for command
- * @param timeout timeout for execution
- * @param logToOutput log output or not
- * @return execution results
- */
- public static ExecResult exec(String input, List command, int timeout, boolean logToOutput) {
- return exec(input, command, Collections.emptySet(), timeout, logToOutput, true);
- }
-
- /**
- * @param input input
- * @param command command
- * @param timeout timeout for command
- * @param logToOutput log to output
- * @param throwErrors throw error if exec fail
- * @return results
- */
- public static ExecResult exec(String input, List command, int timeout, boolean logToOutput, boolean throwErrors) {
- return exec(input, command, Collections.emptySet(), timeout, logToOutput, throwErrors);
- }
-
-
- /**
- * Method executes external command
- *
- * @param command arguments for command
- * @param envVars
- * @param timeout timeout for execution
- * @param logToOutput log output or not
- * @param throwErrors look for errors in output and throws exception if true
- * @return execution results
- */
- public static ExecResult exec(String input, List command, Set envVars, int timeout, boolean logToOutput, boolean throwErrors) {
- int ret = 1;
- ExecResult execResult;
- try {
- Exec executor = new Exec();
- LOGGER.info("Command: {}", String.join(" ", command));
- ret = executor.execute(input, command, envVars, timeout);
- synchronized (LOCK) {
- if (logToOutput) {
- LOGGER.info("RETURN code: {}", ret);
- if (!executor.out().isEmpty()) {
- LOGGER.info("======STDOUT START=======");
- LOGGER.info("{}", cutExecutorLog(executor.out()));
- LOGGER.info("======STDOUT END======");
- }
- if (!executor.err().isEmpty()) {
- LOGGER.info("======STDERR START=======");
- LOGGER.info("{}", cutExecutorLog(executor.err()));
- LOGGER.info("======STDERR END======");
- }
- }
- }
-
- execResult = new ExecResult(ret, executor.out(), executor.err());
-
- if (throwErrors && ret != 0) {
- String msg = "`" + join(" ", command) + "` got status code " + ret + " and stderr:\n------\n" + executor.stdErr + "\n------\nand stdout:\n------\n" + executor.stdOut + "\n------";
-
- Matcher matcher = ERROR_PATTERN.matcher(executor.err());
- KubeClusterException t = null;
-
- if (matcher.find()) {
- switch (matcher.group(1)) {
- case "NotFound":
- t = new KubeClusterException.NotFound(execResult, msg);
- break;
- case "AlreadyExists":
- t = new KubeClusterException.AlreadyExists(execResult, msg);
- break;
- default:
- break;
- }
- }
- matcher = INVALID_PATTERN.matcher(executor.err());
- if (matcher.find()) {
- t = new KubeClusterException.InvalidResource(execResult, msg);
- }
- if (t == null) {
- t = new KubeClusterException(execResult, msg);
- }
- throw t;
- }
- return new ExecResult(ret, executor.out(), executor.err());
-
- } catch (IOException | ExecutionException e) {
- throw new KubeClusterException(e);
- } catch (InterruptedException e) {
- Thread.currentThread().interrupt();
- throw new KubeClusterException(e);
- }
- }
-
- /**
- * Method executes external command
- *
- * @param commands arguments for command
- * @param envVars
- * @param timeoutMs timeout in ms for kill
- * @return returns ecode of execution
- * @throws IOException
- * @throws InterruptedException
- * @throws ExecutionException
- */
- public int execute(String input, List commands, Set envVars, long timeoutMs) throws IOException, InterruptedException, ExecutionException {
- LOGGER.trace("Running command - " + join(" ", commands.toArray(new String[0])));
- ProcessBuilder builder = new ProcessBuilder();
- builder.command(commands);
- if (envVars != null) {
- envVars.forEach(e -> {
- builder.environment().put(e.getName(), e.getValue());
- });
- }
- builder.directory(new File(System.getProperty("user.dir")));
- process = builder.start();
- try (OutputStream outputStream = process.getOutputStream()) {
- if (input != null) {
- LOGGER.trace("With stdin {}", input);
- outputStream.write(input.getBytes(Charset.defaultCharset()));
- }
- }
-
- Future output = readStdOutput();
- Future error = readStdError();
-
- int retCode = 1;
- if (timeoutMs > 0) {
- if (process.waitFor(timeoutMs, TimeUnit.MILLISECONDS)) {
- retCode = process.exitValue();
- } else {
- process.destroyForcibly();
- }
- } else {
- retCode = process.waitFor();
- }
-
- try {
- stdOut = output.get(500, TimeUnit.MILLISECONDS);
- } catch (TimeoutException ex) {
- output.cancel(true);
- stdOut = stdOutReader.getData();
- }
-
- try {
- stdErr = error.get(500, TimeUnit.MILLISECONDS);
- } catch (TimeoutException ex) {
- error.cancel(true);
- stdErr = stdErrReader.getData();
- }
- storeOutputsToFile();
-
- return retCode;
- }
-
- /**
- * Method kills process
- */
- public void stop() {
- process.destroyForcibly();
- stdOut = stdOutReader.getData();
- stdErr = stdErrReader.getData();
- }
-
- /**
- * Get standard output of execution
- *
- * @return future string output
- */
- private Future readStdOutput() {
- stdOutReader = new StreamGobbler(process.getInputStream());
- return stdOutReader.read();
- }
-
- /**
- * Get standard error output of execution
- *
- * @return future string error output
- */
- private Future readStdError() {
- stdErrReader = new StreamGobbler(process.getErrorStream());
- return stdErrReader.read();
- }
-
- /**
- * Get stdOut and stdErr and store it into files
- */
- private void storeOutputsToFile() {
- if (logPath != null) {
- try {
- Files.createDirectories(logPath);
- Files.write(Paths.get(logPath.toString(), "stdOutput.log"), stdOut.getBytes(Charset.defaultCharset()));
- Files.write(Paths.get(logPath.toString(), "stdError.log"), stdErr.getBytes(Charset.defaultCharset()));
- } catch (Exception ex) {
- LOGGER.warn("Cannot save output of execution: " + ex.getMessage());
- }
- }
- }
-
- /**
- * Check if command is executable
- *
- * @param cmd command
- * @return true.false
- */
- public static boolean isExecutableOnPath(String cmd) {
- for (String dir : PATH_SPLITTER.split(System.getenv("PATH"))) {
- if (new File(dir, cmd).canExecute()) {
- return true;
- }
- }
- return false;
- }
-
- /**
- * This method check the size of executor output log and cut it if it's too long.
- *
- * @param log executor log
- * @return updated log if size is too big
- */
- public static String cutExecutorLog(String log) {
- if (log.length() > MAXIMUM_EXEC_LOG_CHARACTER_SIZE) {
- LOGGER.warn("Executor log is too long. Going to strip it and print only first {} characters", MAXIMUM_EXEC_LOG_CHARACTER_SIZE);
- return log.substring(0, MAXIMUM_EXEC_LOG_CHARACTER_SIZE);
- }
- return log;
- }
-
- /**
- * Class represent async reader
- */
- class StreamGobbler {
- private final InputStream is;
- private final StringBuffer data = new StringBuffer();
-
- /**
- * Constructor of StreamGobbler
- *
- * @param is input stream for reading
- */
- StreamGobbler(InputStream is) {
- this.is = is;
- }
-
- /**
- * Return data from stream sync
- *
- * @return string of data
- */
- public String getData() {
- return data.toString();
- }
-
- /**
- * read method
- *
- * @return return future string of output
- */
- public Future read() {
- return CompletableFuture.supplyAsync(() -> {
- try (Scanner scanner = new Scanner(is, StandardCharsets.UTF_8.name())) {
- while (scanner.hasNextLine()) {
- data.append(scanner.nextLine());
- if (appendLineSeparator) {
- data.append(System.getProperty("line.separator"));
- }
- }
- scanner.close();
- return data.toString();
- } catch (Exception e) {
- throw new CompletionException(e);
- }
- }, runnable -> new Thread(runnable).start());
- }
- }
-}
diff --git a/src/main/java/io/odh/test/platform/executor/ExecBuilder.java b/src/main/java/io/odh/test/platform/executor/ExecBuilder.java
deleted file mode 100644
index 31f72c51..00000000
--- a/src/main/java/io/odh/test/platform/executor/ExecBuilder.java
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * Copyright Skodjob authors.
- * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
- */
-package io.odh.test.platform.executor;
-
-import io.fabric8.kubernetes.api.model.EnvVar;
-
-import java.util.Arrays;
-import java.util.List;
-import java.util.Set;
-
-public class ExecBuilder {
-
- private String input;
- private List command;
- private Set envVars;
- private int timeout;
- private boolean logToOutput;
- private boolean throwErrors;
-
- public ExecBuilder withCommand(List command) {
- this.command = command;
- return this;
- }
-
- public ExecBuilder withCommand(String... cmd) {
- this.command = Arrays.asList(cmd);
- return this;
- }
-
- public ExecBuilder withEnvVars(Set envVars) {
- this.envVars = envVars;
- return this;
- }
-
- public ExecBuilder withInput(String input) {
- this.input = input;
- return this;
- }
-
- public ExecBuilder logToOutput(boolean logToOutput) {
- this.logToOutput = logToOutput;
- return this;
- }
-
- public ExecBuilder throwErrors(boolean throwErrors) {
- this.throwErrors = throwErrors;
- return this;
- }
-
- public ExecBuilder timeout(int timeout) {
- this.timeout = timeout;
- return this;
- }
-
- public ExecResult exec() {
- return Exec.exec(input, command, envVars, timeout, logToOutput, throwErrors);
- }
-}
diff --git a/src/main/java/io/odh/test/platform/executor/ExecResult.java b/src/main/java/io/odh/test/platform/executor/ExecResult.java
deleted file mode 100644
index bf501a0a..00000000
--- a/src/main/java/io/odh/test/platform/executor/ExecResult.java
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Copyright Skodjob authors.
- * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
- */
-package io.odh.test.platform.executor;
-
-import java.io.Serializable;
-
-public class ExecResult implements Serializable {
-
- private static final long serialVersionUID = 1L;
-
- private final int returnCode;
- private final String stdOut;
- private final String stdErr;
-
- ExecResult(int returnCode, String stdOut, String stdErr) {
- this.returnCode = returnCode;
- this.stdOut = stdOut;
- this.stdErr = stdErr;
- }
-
- public boolean exitStatus() {
- return returnCode == 0;
- }
-
- public int returnCode() {
- return returnCode;
- }
-
- public String out() {
- return stdOut;
- }
-
- public String err() {
- return stdErr;
- }
-
- @Override
- public String toString() {
- final StringBuilder sb = new StringBuilder("ExecResult{");
- sb.append("returnCode=").append(returnCode);
- sb.append(", stdOut='").append(stdOut).append('\'');
- sb.append(", stdErr='").append(stdErr).append('\'');
- sb.append('}');
- return sb.toString();
- }
-}
diff --git a/src/main/java/io/odh/test/utils/CsvUtils.java b/src/main/java/io/odh/test/utils/CsvUtils.java
index a28f6522..b1b55ef0 100644
--- a/src/main/java/io/odh/test/utils/CsvUtils.java
+++ b/src/main/java/io/odh/test/utils/CsvUtils.java
@@ -8,7 +8,7 @@
import io.fabric8.openshift.api.model.operatorhub.v1alpha1.ClusterServiceVersion;
import io.fabric8.openshift.client.OpenShiftClient;
import io.odh.test.OdhConstants;
-import io.odh.test.framework.manager.ResourceManager;
+import io.skodjob.testframe.resources.KubeResourceManager;
import org.hamcrest.Matchers;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -27,7 +27,7 @@ public class CsvUtils {
public static @Nullable String getOperatorVersionFromCsv() {
String name = OdhConstants.OLM_OPERATOR_NAME;
String namespace = OdhConstants.OLM_OPERATOR_NAMESPACE;
- OpenShiftClient client = (OpenShiftClient) ResourceManager.getKubeClient().getClient();
+ OpenShiftClient client = KubeResourceManager.getKubeClient().getOpenShiftClient();
List csvs = client.resources(ClusterServiceVersion.class)
.inNamespace(namespace)
.withLabel("operators.coreos.com/" + name + "." + namespace, "")
diff --git a/src/main/java/io/odh/test/utils/DeploymentUtils.java b/src/main/java/io/odh/test/utils/DeploymentUtils.java
index 26e28918..1e0a388c 100644
--- a/src/main/java/io/odh/test/utils/DeploymentUtils.java
+++ b/src/main/java/io/odh/test/utils/DeploymentUtils.java
@@ -10,8 +10,8 @@
import io.fabric8.kubernetes.api.model.apps.Deployment;
import io.fabric8.kubernetes.api.model.apps.DeploymentCondition;
import io.odh.test.TestConstants;
-import io.odh.test.TestUtils;
-import io.odh.test.framework.manager.ResourceManager;
+import io.skodjob.testframe.resources.KubeResourceManager;
+import io.skodjob.testframe.wait.Wait;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -46,10 +46,10 @@ public static void logCurrentDeploymentStatus(Deployment deployment, String name
log.add("\tMessage: " + deploymentCondition.getMessage() + "\n");
}
- if (!ResourceManager.getKubeClient().listPodsByPrefixInName(namespaceName, name).isEmpty()) {
+ if (!KubeResourceManager.getKubeClient().listPodsByPrefixInName(namespaceName, name).isEmpty()) {
log.add("\nPods with conditions and messages:\n\n");
- for (Pod pod : ResourceManager.getKubeClient().listPodsByPrefixInName(namespaceName, name)) {
+ for (Pod pod : KubeResourceManager.getKubeClient().listPodsByPrefixInName(namespaceName, name)) {
log.add(pod.getMetadata().getName() + ":");
for (PodCondition podCondition : pod.getStatus().getConditions()) {
if (podCondition.getMessage() != null) {
@@ -69,10 +69,11 @@ public static void logCurrentDeploymentStatus(Deployment deployment, String name
public static boolean waitForDeploymentReady(String namespaceName, String deploymentName) {
LOGGER.info("Waiting for Deployment: {}/{} to be ready", namespaceName, deploymentName);
- TestUtils.waitFor("readiness of Deployment: " + namespaceName + "/" + deploymentName,
+ Wait.until("readiness of Deployment: " + namespaceName + "/" + deploymentName,
TestConstants.GLOBAL_POLL_INTERVAL_SHORT, READINESS_TIMEOUT,
- () -> ResourceManager.getKubeClient().getClient().apps().deployments().inNamespace(namespaceName).withName(deploymentName).isReady(),
- () -> DeploymentUtils.logCurrentDeploymentStatus(ResourceManager.getKubeClient().getDeployment(namespaceName, deploymentName), namespaceName));
+ () -> KubeResourceManager.getKubeClient().getClient().apps().deployments().inNamespace(namespaceName).withName(deploymentName).isReady(),
+ () -> DeploymentUtils.logCurrentDeploymentStatus(KubeResourceManager.getKubeClient().getClient().apps()
+ .deployments().inNamespace(namespaceName).withName(deploymentName).get(), namespaceName));
LOGGER.info("Deployment: {}/{} is ready", namespaceName, deploymentName);
return true;
@@ -85,13 +86,14 @@ public static boolean waitForDeploymentReady(String namespaceName, String deploy
*/
public static void waitForDeploymentDeletion(String namespaceName, String name) {
LOGGER.debug("Waiting for Deployment: {}/{} deletion", namespaceName, name);
- TestUtils.waitFor("deletion of Deployment: " + namespaceName + "/" + name, TestConstants.GLOBAL_POLL_INTERVAL_MEDIUM, DELETION_TIMEOUT,
+ Wait.until("deletion of Deployment: " + namespaceName + "/" + name, TestConstants.GLOBAL_POLL_INTERVAL_MEDIUM, DELETION_TIMEOUT,
() -> {
- if (ResourceManager.getKubeClient().getDeployment(namespaceName, name) == null) {
+ if (KubeResourceManager.getKubeClient().getClient().apps()
+ .deployments().inNamespace(namespaceName).withName(name).get() == null) {
return true;
} else {
LOGGER.warn("Deployment: {}/{} is not deleted yet! Triggering force delete by cmd client!", namespaceName, name);
- ResourceManager.getKubeClient().getClient().apps().deployments().inNamespace(namespaceName).withName(name).delete();
+ KubeResourceManager.getKubeClient().getClient().apps().deployments().inNamespace(namespaceName).withName(name).delete();
return false;
}
});
@@ -104,7 +106,8 @@ public static void waitForDeploymentDeletion(String namespaceName, String name)
* @return A map of pod name to resource version for Pods in the given Deployment.
*/
public static Map depSnapshot(String namespaceName, String name) {
- Deployment deployment = ResourceManager.getKubeClient().getDeployment(namespaceName, name);
+ Deployment deployment = KubeResourceManager.getKubeClient().getClient().apps()
+ .deployments().inNamespace(namespaceName).withName(name).get();
LabelSelector selector = deployment.getSpec().getSelector();
return PodUtils.podSnapshot(namespaceName, selector);
}
@@ -118,7 +121,8 @@ public static Map depSnapshot(String namespaceName, String name)
*/
public static boolean depHasRolled(String namespaceName, String name, Map snapshot) {
LOGGER.debug("Existing snapshot: {}/{}", namespaceName, new TreeMap<>(snapshot));
- Map map = PodUtils.podSnapshot(namespaceName, ResourceManager.getKubeClient().getDeployment(namespaceName, name).getSpec().getSelector());
+ Map map = PodUtils.podSnapshot(namespaceName, KubeResourceManager.getKubeClient().getClient().apps()
+ .deployments().inNamespace(namespaceName).withName(name).get().getSpec().getSelector());
LOGGER.debug("Current snapshot: {}/{}", namespaceName, new TreeMap<>(map));
int current = map.size();
map.keySet().retainAll(snapshot.keySet());
@@ -133,7 +137,7 @@ public static boolean depHasRolled(String namespaceName, String name, Map waitTillDepHasRolled(String namespaceName, String deploymentName, Map snapshot) {
LOGGER.info("Waiting for Deployment: {}/{} rolling update", namespaceName, deploymentName);
- TestUtils.waitFor("rolling update of Deployment " + namespaceName + "/" + deploymentName,
+ Wait.until("rolling update of Deployment " + namespaceName + "/" + deploymentName,
TestConstants.GLOBAL_POLL_INTERVAL_MEDIUM, TestConstants.GLOBAL_TIMEOUT,
() -> depHasRolled(namespaceName, deploymentName, snapshot));
diff --git a/src/main/java/io/odh/test/utils/NamespaceUtils.java b/src/main/java/io/odh/test/utils/NamespaceUtils.java
index 6ed2c8e0..b8378491 100644
--- a/src/main/java/io/odh/test/utils/NamespaceUtils.java
+++ b/src/main/java/io/odh/test/utils/NamespaceUtils.java
@@ -5,8 +5,8 @@
package io.odh.test.utils;
import io.odh.test.TestConstants;
-import io.odh.test.TestUtils;
-import io.odh.test.framework.manager.ResourceManager;
+import io.skodjob.testframe.resources.KubeResourceManager;
+import io.skodjob.testframe.wait.Wait;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -22,16 +22,16 @@ private NamespaceUtils() { }
public static void waitForNamespaceReadiness(String name) {
LOGGER.info("Waiting for Namespace: {} readiness", name);
- TestUtils.waitFor("Namespace: " + name, TestConstants.GLOBAL_POLL_INTERVAL_SHORT, DELETION_TIMEOUT,
- () -> ResourceManager.getKubeClient().getNamespace(name) != null);
+ Wait.until("Namespace: " + name, TestConstants.GLOBAL_POLL_INTERVAL_SHORT, DELETION_TIMEOUT,
+ () -> KubeResourceManager.getKubeClient().getClient().namespaces().withName(name).get() != null);
LOGGER.info("Namespace: {} is ready", name);
}
public static void waitForNamespaceDeletion(String name) {
LOGGER.info("Waiting for Namespace: {} deletion", name);
- TestUtils.waitFor("Namespace: " + name, TestConstants.GLOBAL_POLL_INTERVAL_SHORT, DELETION_TIMEOUT,
- () -> ResourceManager.getKubeClient().getNamespace(name) == null);
+ Wait.until("Namespace: " + name, TestConstants.GLOBAL_POLL_INTERVAL_SHORT, DELETION_TIMEOUT,
+ () -> KubeResourceManager.getKubeClient().getClient().namespaces().withName(name).get() == null);
LOGGER.info("Namespace: {} was deleted", name);
}
}
diff --git a/src/main/java/io/odh/test/utils/PodUtils.java b/src/main/java/io/odh/test/utils/PodUtils.java
index 11dd1e2f..1b294f12 100644
--- a/src/main/java/io/odh/test/utils/PodUtils.java
+++ b/src/main/java/io/odh/test/utils/PodUtils.java
@@ -9,8 +9,8 @@
import io.fabric8.kubernetes.api.model.Pod;
import io.fabric8.kubernetes.client.readiness.Readiness;
import io.odh.test.TestConstants;
-import io.odh.test.TestUtils;
-import io.odh.test.framework.manager.ResourceManager;
+import io.skodjob.testframe.resources.KubeResourceManager;
+import io.skodjob.testframe.wait.Wait;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -25,13 +25,14 @@ public class PodUtils {
private static final long DELETION_TIMEOUT = Duration.ofMinutes(5).toMillis();
private static final long READINESS_TIMEOUT = Duration.ofMinutes(10).toMillis();
- private PodUtils() { }
+ private PodUtils() {
+ }
public static void waitForPodsReady(String namespaceName, boolean containers, Runnable onTimeout) {
- TestUtils.waitFor("readiness of all Pods matching in namespace " + namespaceName,
+ Wait.until("readiness of all Pods matching in namespace " + namespaceName,
TestConstants.GLOBAL_POLL_INTERVAL_MEDIUM, READINESS_TIMEOUT,
() -> {
- List pods = ResourceManager.getKubeClient().listPods(namespaceName);
+ List pods = KubeResourceManager.getKubeClient().getClient().pods().inNamespace(namespaceName).list().getItems();
if (pods.isEmpty()) {
LOGGER.debug("Expected Pods are not ready!");
return false;
@@ -58,51 +59,55 @@ public static void waitForPodsReady(String namespaceName, boolean containers, Ru
}
public static void waitForPodsReady(String namespaceName, LabelSelector selector, int expectPods, boolean containers, Runnable onTimeout) {
- TestUtils.waitFor("readiness of all Pods matching: " + selector,
+ Wait.until("readiness of all Pods matching: " + selector,
TestConstants.GLOBAL_POLL_INTERVAL_MEDIUM, READINESS_TIMEOUT,
- () -> {
- List pods = ResourceManager.getKubeClient().listPods(namespaceName, selector);
- if (pods.isEmpty() && expectPods == 0) {
- LOGGER.debug("Expected Pods are ready");
- return true;
- }
- if (pods.isEmpty()) {
- LOGGER.debug("Pods matching: {}/{} are not ready", namespaceName, selector);
- return false;
- }
- if (pods.size() != expectPods) {
- LOGGER.debug("Expected Pods: {}/{} are not ready", namespaceName, selector);
- return false;
- }
- for (Pod pod : pods) {
- if (!(Readiness.isPodReady(pod) || Readiness.isPodSucceeded(pod))) {
- LOGGER.debug("Pod not ready: {}/{}", namespaceName, pod.getMetadata().getName());
+ () -> {
+ List pods = KubeResourceManager.getKubeClient().getClient().pods()
+ .inNamespace(namespaceName).withLabelSelector(selector).list().getItems();
+ if (pods.isEmpty() && expectPods == 0) {
+ LOGGER.debug("Expected Pods are ready");
+ return true;
+ }
+ if (pods.isEmpty()) {
+ LOGGER.debug("Pods matching: {}/{} are not ready", namespaceName, selector);
+ return false;
+ }
+ if (pods.size() != expectPods) {
+ LOGGER.debug("Expected Pods: {}/{} are not ready", namespaceName, selector);
return false;
- } else {
- if (containers) {
- for (ContainerStatus cs : pod.getStatus().getContainerStatuses()) {
- if (!(Boolean.TRUE.equals(cs.getReady())
- || cs.getState().getTerminated().getReason().equals("Completed"))) {
- LOGGER.debug("Container: {} of Pod: {}/{} not ready", namespaceName, pod.getMetadata().getName(), cs.getName());
- return false;
+ }
+ for (Pod pod : pods) {
+ if (!(Readiness.isPodReady(pod) || Readiness.isPodSucceeded(pod))) {
+ LOGGER.debug("Pod not ready: {}/{}", namespaceName, pod.getMetadata().getName());
+ return false;
+ } else {
+ if (containers) {
+ for (ContainerStatus cs : pod.getStatus().getContainerStatuses()) {
+ if (!(Boolean.TRUE.equals(cs.getReady())
+ || cs.getState().getTerminated().getReason().equals("Completed"))) {
+ LOGGER.debug("Container: {} of Pod: {}/{} not ready", namespaceName, pod.getMetadata().getName(), cs.getName());
+ return false;
+ }
}
}
}
}
- }
- LOGGER.info("Pods matching: {}/{} are ready", namespaceName, selector);
- return true;
- }, onTimeout);
+ LOGGER.info("Pods matching: {}/{} are ready", namespaceName, selector);
+ return true;
+ }, onTimeout);
}
public static void waitForPodsReadyWithRestart(String namespace, LabelSelector selector, int expectedPods, boolean containers) {
try {
- waitForPodsReady(namespace, selector, expectedPods, containers, () -> { });
+ waitForPodsReady(namespace, selector, expectedPods, containers, () -> {
+ });
} catch (Exception ex) {
LOGGER.warn("Pods not ready trying to restart");
- ResourceManager.getKubeClient().listPods(namespace, selector).forEach(p ->
- ResourceManager.getKubeClient().getClient().resource(p).delete());
- waitForPodsReady(namespace, selector, expectedPods, containers, () -> { });
+ KubeResourceManager.getKubeClient().getClient().pods()
+ .inNamespace(namespace).withLabelSelector(selector).list().getItems().forEach(p ->
+ KubeResourceManager.getKubeClient().getClient().resource(p).delete());
+ waitForPodsReady(namespace, selector, expectedPods, containers, () -> {
+ });
}
}
@@ -111,7 +116,8 @@ public static void waitForPodsReadyWithRestart(String namespace, LabelSelector s
* matching the given {@code selector}.
*/
public static Map podSnapshot(String namespaceName, LabelSelector selector) {
- List pods = ResourceManager.getKubeClient().listPods(namespaceName, selector);
+ List pods = KubeResourceManager.getKubeClient().getClient().pods()
+ .inNamespace(namespaceName).withLabelSelector(selector).list().getItems();
return pods.stream()
.collect(
Collectors.toMap(pod -> pod.getMetadata().getName(),
@@ -122,9 +128,10 @@ public static void verifyThatPodsAreStable(String namespaceName, LabelSelector l
int[] stabilityCounter = {0};
String phase = "Running";
- TestUtils.waitFor(String.format("Pods in namespace '%s' with LabelSelector %s stability in phase %s", namespaceName, labelSelector, phase), TestConstants.GLOBAL_POLL_INTERVAL_SHORT, TestConstants.GLOBAL_TIMEOUT,
+ Wait.until(String.format("Pods in namespace '%s' with LabelSelector %s stability in phase %s", namespaceName, labelSelector, phase), TestConstants.GLOBAL_POLL_INTERVAL_SHORT, TestConstants.GLOBAL_TIMEOUT,
() -> {
- List existingPod = ResourceManager.getKubeClient().listPods(namespaceName, labelSelector);
+ List existingPod = KubeResourceManager.getKubeClient().getClient().pods()
+ .inNamespace(namespaceName).withLabelSelector(labelSelector).list().getItems();
LOGGER.debug("Working with the following pods: {}", existingPod.stream().map(p -> p.getMetadata().getName()).toList());
for (Pod pod : existingPod) {
diff --git a/src/main/java/io/odh/test/utils/UpgradeUtils.java b/src/main/java/io/odh/test/utils/UpgradeUtils.java
index a50fe809..23446bbc 100644
--- a/src/main/java/io/odh/test/utils/UpgradeUtils.java
+++ b/src/main/java/io/odh/test/utils/UpgradeUtils.java
@@ -5,7 +5,7 @@
package io.odh.test.utils;
import io.odh.test.TestConstants;
-import io.odh.test.framework.manager.ResourceManager;
+import io.skodjob.testframe.resources.KubeResourceManager;
import java.util.Date;
@@ -16,7 +16,7 @@ public class UpgradeUtils {
public static void deploymentLogIsErrorEmpty(String namespace, String deploymentName, Date sinceTimestamp) {
// Check that operator doesn't contain errors in logs since sec
- String operatorLog = ResourceManager.getKubeClient().getClient().apps().deployments()
+ String operatorLog = KubeResourceManager.getKubeClient().getClient().apps().deployments()
.inNamespace(namespace).withName(deploymentName).sinceTime(TestConstants.TIMESTAMP_DATE_FORMAT.format(sinceTimestamp)).getLog();
assertThat(operatorLog, logHasNoUnexpectedErrors());
@@ -24,7 +24,7 @@ public static void deploymentLogIsErrorEmpty(String namespace, String deployment
public static void deploymentLogIsErrorEmpty(String namespace, String deploymentName) {
// Check that operator doesn't contain errors in logs
- String operatorLog = ResourceManager.getKubeClient().getClient().apps().deployments()
+ String operatorLog = KubeResourceManager.getKubeClient().getClient().apps().deployments()
.inNamespace(namespace).withName(deploymentName).getLog();
assertThat(operatorLog, logHasNoUnexpectedErrors());
diff --git a/src/test/java/io/odh/test/e2e/Abstract.java b/src/test/java/io/odh/test/e2e/Abstract.java
index 29238286..fadb21a4 100644
--- a/src/test/java/io/odh/test/e2e/Abstract.java
+++ b/src/test/java/io/odh/test/e2e/Abstract.java
@@ -5,14 +5,21 @@
package io.odh.test.e2e;
import io.odh.test.Environment;
-import io.odh.test.framework.listeners.ResourceManagerContextHandler;
-import io.odh.test.framework.listeners.TestVisualSeparator;
-import io.odh.test.framework.manager.ResourceManager;
import io.odh.test.framework.listeners.TestExceptionCallbackListener;
import io.odh.test.framework.manager.requirements.AuthorinoOperator;
import io.odh.test.framework.manager.requirements.PipelinesOperator;
import io.odh.test.framework.manager.requirements.ServerlessOperator;
import io.odh.test.framework.manager.requirements.ServiceMeshOperator;
+import io.odh.test.framework.manager.resources.DataScienceClusterResource;
+import io.odh.test.framework.manager.resources.DataScienceInitializationResource;
+import io.odh.test.framework.manager.resources.InferenceServiceResource;
+import io.odh.test.framework.manager.resources.NotebookResource;
+import io.skodjob.testframe.annotations.ResourceManager;
+import io.skodjob.testframe.annotations.TestVisualSeparator;
+import io.skodjob.testframe.resources.KubeResourceManager;
+import io.skodjob.testframe.resources.NamespaceResource;
+import io.skodjob.testframe.resources.OperatorGroupResource;
+import io.skodjob.testframe.resources.SubscriptionResource;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.TestInstance;
import org.junit.jupiter.api.extension.ExtendWith;
@@ -20,14 +27,23 @@
import org.slf4j.LoggerFactory;
@ExtendWith(TestExceptionCallbackListener.class)
-@ExtendWith(ResourceManagerContextHandler.class)
+@ResourceManager(cleanResources = false)
+@TestVisualSeparator
@TestInstance(TestInstance.Lifecycle.PER_CLASS)
-public abstract class Abstract implements TestVisualSeparator {
+public abstract class Abstract {
private static final Logger LOGGER = LoggerFactory.getLogger(Abstract.class);
static {
- ResourceManager.getInstance();
+ KubeResourceManager.getInstance().setResourceTypes(
+ new NamespaceResource(),
+ new SubscriptionResource(),
+ new OperatorGroupResource(),
+ new DataScienceClusterResource(),
+ new DataScienceInitializationResource(),
+ new NotebookResource(),
+ new InferenceServiceResource()
+ );
}
@BeforeAll
diff --git a/src/test/java/io/odh/test/e2e/continuous/DataScienceClusterST.java b/src/test/java/io/odh/test/e2e/continuous/DataScienceClusterST.java
index 51c8a8e4..c89b79cb 100644
--- a/src/test/java/io/odh/test/e2e/continuous/DataScienceClusterST.java
+++ b/src/test/java/io/odh/test/e2e/continuous/DataScienceClusterST.java
@@ -10,11 +10,10 @@
import io.odh.test.Environment;
import io.odh.test.OdhConstants;
import io.odh.test.TestSuite;
+import io.odh.test.TestUtils;
import io.odh.test.e2e.Abstract;
-import io.odh.test.framework.manager.ResourceManager;
import io.odh.test.framework.manager.resources.DataScienceClusterResource;
import io.odh.test.install.InstallTypes;
-import io.odh.test.platform.KubeUtils;
import io.odh.test.utils.CsvUtils;
import io.opendatahub.datasciencecluster.v1.DataScienceCluster;
import io.opendatahub.datasciencecluster.v1.datascienceclusterspec.components.Codeflare;
@@ -26,6 +25,8 @@
import io.opendatahub.datasciencecluster.v1.datascienceclusterspec.components.Ray;
import io.opendatahub.datasciencecluster.v1.datascienceclusterspec.components.Workbenches;
import io.opendatahub.v1alpha.OdhDashboardConfig;
+import io.skodjob.testframe.annotations.ResourceManager;
+import io.skodjob.testframe.resources.KubeResourceManager;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Tag;
import org.junit.jupiter.api.Test;
@@ -38,6 +39,7 @@
import static org.junit.jupiter.api.Assertions.assertTrue;
@Tag(TestSuite.CONTINUOUS)
+@ResourceManager(cleanResources = false)
public class DataScienceClusterST extends Abstract {
private static final String DS_CLUSTER_NAME = "default";
@@ -48,7 +50,7 @@ public class DataScienceClusterST extends Abstract {
@BeforeAll
void init() {
dataScienceProjectCli = DataScienceClusterResource.dataScienceCLusterClient();
- dashboardConfigCli = ResourceManager.getKubeClient().dashboardConfigClient();
+ dashboardConfigCli = KubeResourceManager.getKubeClient().getClient().resources(OdhDashboardConfig.class);
}
@Test
@@ -79,13 +81,13 @@ void checkDataScienceClusterStatus() {
assertEquals("Ready", cluster.getStatus().getPhase());
assertNull(cluster.getStatus().getErrorMessage());
- assertEquals("True", KubeUtils.getDscConditionByType(cluster.getStatus().getConditions(), "dashboardReady").getStatus());
- assertEquals("True", KubeUtils.getDscConditionByType(cluster.getStatus().getConditions(), "workbenchesReady").getStatus());
- assertEquals("True", KubeUtils.getDscConditionByType(cluster.getStatus().getConditions(), "data-science-pipelines-operatorReady").getStatus());
- assertEquals("True", KubeUtils.getDscConditionByType(cluster.getStatus().getConditions(), "kserveReady").getStatus());
- assertEquals("True", KubeUtils.getDscConditionByType(cluster.getStatus().getConditions(), "codeflareReady").getStatus());
- assertEquals("True", KubeUtils.getDscConditionByType(cluster.getStatus().getConditions(), "model-meshReady").getStatus());
- assertEquals("True", KubeUtils.getDscConditionByType(cluster.getStatus().getConditions(), "kueueReady").getStatus());
+ assertEquals("True", TestUtils.getDscConditionByType(cluster.getStatus().getConditions(), "dashboardReady").getStatus());
+ assertEquals("True", TestUtils.getDscConditionByType(cluster.getStatus().getConditions(), "workbenchesReady").getStatus());
+ assertEquals("True", TestUtils.getDscConditionByType(cluster.getStatus().getConditions(), "data-science-pipelines-operatorReady").getStatus());
+ assertEquals("True", TestUtils.getDscConditionByType(cluster.getStatus().getConditions(), "kserveReady").getStatus());
+ assertEquals("True", TestUtils.getDscConditionByType(cluster.getStatus().getConditions(), "codeflareReady").getStatus());
+ assertEquals("True", TestUtils.getDscConditionByType(cluster.getStatus().getConditions(), "model-meshReady").getStatus());
+ assertEquals("True", TestUtils.getDscConditionByType(cluster.getStatus().getConditions(), "kueueReady").getStatus());
}
@Test
diff --git a/src/test/java/io/odh/test/e2e/continuous/DataScienceProjectST.java b/src/test/java/io/odh/test/e2e/continuous/DataScienceProjectST.java
index 98e01b63..d23456dd 100644
--- a/src/test/java/io/odh/test/e2e/continuous/DataScienceProjectST.java
+++ b/src/test/java/io/odh/test/e2e/continuous/DataScienceProjectST.java
@@ -8,10 +8,11 @@
import io.fabric8.kubernetes.client.dsl.MixedOperation;
import io.fabric8.kubernetes.client.dsl.Resource;
import io.odh.test.TestSuite;
+import io.odh.test.TestUtils;
import io.odh.test.e2e.Abstract;
-import io.odh.test.framework.manager.ResourceManager;
import io.odh.test.framework.manager.resources.NotebookResource;
-import io.odh.test.platform.KubeUtils;
+import io.skodjob.testframe.annotations.ResourceManager;
+import io.skodjob.testframe.resources.KubeResourceManager;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Tag;
import org.junit.jupiter.params.ParameterizedTest;
@@ -24,9 +25,10 @@
import java.util.stream.Stream;
import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
@Tag(TestSuite.CONTINUOUS)
+@ResourceManager(cleanResources = false)
public class DataScienceProjectST extends Abstract {
static final Logger LOGGER = LoggerFactory.getLogger(DataScienceProjectST.class);
@@ -48,10 +50,11 @@ void init() {
@ParameterizedTest(name = "checkDataScienceProjects-{0}")
@MethodSource("getDsProjects")
void checkDataScienceProjects(String dsProjectName) {
- assertTrue(ResourceManager.getKubeClient().namespaceExists(dsProjectName));
+ assertNotNull(KubeResourceManager.getKubeClient().getClient().namespaces().withName(dsProjectName).get());
assertEquals("true",
- ResourceManager.getKubeClient().getNamespace(dsProjectName).getMetadata().getLabels().getOrDefault("opendatahub.io/dashboard", "false"));
+ KubeResourceManager.getKubeClient().getClient().namespaces().withName(dsProjectName).get()
+ .getMetadata().getLabels().getOrDefault("opendatahub.io/dashboard", "false"));
notebookCli.inNamespace(dsProjectName).list().getItems().forEach(notebook -> {
LOGGER.info("Found notebook {} in datascience project {}", notebook.getMetadata().getName(), dsProjectName);
@@ -60,8 +63,8 @@ void checkDataScienceProjects(String dsProjectName) {
assertEquals("true",
notebook.getMetadata().getLabels().getOrDefault("opendatahub.io/odh-managed", "false"));
- assertEquals("True", KubeUtils.getNotebookConditionByType(notebook.getStatus().getConditions(), "ContainersReady").getStatus());
- assertEquals("True", KubeUtils.getNotebookConditionByType(notebook.getStatus().getConditions(), "Ready").getStatus());
+ assertEquals("True", TestUtils.getNotebookConditionByType(notebook.getStatus().getConditions(), "ContainersReady").getStatus());
+ assertEquals("True", TestUtils.getNotebookConditionByType(notebook.getStatus().getConditions(), "Ready").getStatus());
});
}
}
diff --git a/src/test/java/io/odh/test/e2e/standard/DataScienceClusterST.java b/src/test/java/io/odh/test/e2e/standard/DataScienceClusterST.java
index 4e995201..250d4f89 100644
--- a/src/test/java/io/odh/test/e2e/standard/DataScienceClusterST.java
+++ b/src/test/java/io/odh/test/e2e/standard/DataScienceClusterST.java
@@ -6,7 +6,6 @@
import io.odh.test.Environment;
import io.odh.test.TestSuite;
-import io.odh.test.framework.manager.ResourceManager;
import io.odh.test.framework.manager.resources.DataScienceClusterResource;
import io.odh.test.install.InstallTypes;
import io.odh.test.utils.CsvUtils;
@@ -27,6 +26,7 @@
import io.skodjob.annotations.SuiteDoc;
import io.skodjob.annotations.TestDoc;
import io.skodjob.annotations.TestTag;
+import io.skodjob.testframe.resources.KubeResourceManager;
import org.junit.jupiter.api.Tag;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.condition.DisabledIfEnvironmentVariable;
@@ -80,8 +80,8 @@ void createDataScienceCluster() {
// Create DSC
DataScienceCluster c = DscUtils.getBasicDSC(DS_PROJECT_NAME);
- ResourceManager.getInstance().createResourceWithWait(dsci);
- ResourceManager.getInstance().createResourceWithWait(c);
+ KubeResourceManager.getInstance().createResourceWithWait(dsci);
+ KubeResourceManager.getInstance().createResourceWithWait(c);
DataScienceCluster cluster = DataScienceClusterResource.dataScienceCLusterClient().withName(DS_PROJECT_NAME).get();
diff --git a/src/test/java/io/odh/test/e2e/standard/DistributedST.java b/src/test/java/io/odh/test/e2e/standard/DistributedST.java
index 51e26258..38458e93 100644
--- a/src/test/java/io/odh/test/e2e/standard/DistributedST.java
+++ b/src/test/java/io/odh/test/e2e/standard/DistributedST.java
@@ -23,9 +23,7 @@
import io.odh.test.Environment;
import io.odh.test.OdhAnnotationsLabels;
import io.odh.test.TestUtils;
-import io.odh.test.framework.manager.ResourceManager;
import io.odh.test.install.InstallTypes;
-import io.odh.test.platform.KubeUtils;
import io.odh.test.platform.RayClient;
import io.odh.test.platform.TlsUtils;
import io.odh.test.utils.CsvUtils;
@@ -39,6 +37,7 @@
import io.skodjob.annotations.Step;
import io.skodjob.annotations.SuiteDoc;
import io.skodjob.annotations.TestDoc;
+import io.skodjob.testframe.resources.KubeResourceManager;
import io.x_k8s.kueue.v1beta1.ClusterQueue;
import io.x_k8s.kueue.v1beta1.ClusterQueueBuilder;
import io.x_k8s.kueue.v1beta1.LocalQueue;
@@ -90,7 +89,7 @@ public class DistributedST extends StandardAbstract {
&& c.getStatus().getConditions().stream()
.anyMatch(crdc -> crdc.getType().equals("Established") && crdc.getStatus().equals("True"));
- private final OpenShiftClient kubeClient = (OpenShiftClient) ResourceManager.getKubeClient().getClient();
+ private final OpenShiftClient kubeClient = KubeResourceManager.getKubeClient().getOpenShiftClient();
@BeforeAll
static void deployDataScienceCluster() {
@@ -104,8 +103,8 @@ static void deployDataScienceCluster() {
// Create DSC
DataScienceCluster dsc = DscUtils.getBasicDSC(DS_PROJECT_NAME);
- ResourceManager.getInstance().createResourceWithWait(dsci);
- ResourceManager.getInstance().createResourceWithWait(dsc);
+ KubeResourceManager.getInstance().createResourceWithWait(dsci);
+ KubeResourceManager.getInstance().createResourceWithWait(dsc);
}
@TestDoc(
@@ -132,7 +131,7 @@ void testDistributedWorkloadWithAppWrapper() throws Exception {
.addToLabels(OdhAnnotationsLabels.LABEL_DASHBOARD, "true")
.endMetadata()
.build();
- ResourceManager.getInstance().createResourceWithWait(ns);
+ KubeResourceManager.getInstance().createResourceWithWait(ns);
});
Allure.step("Wait for AppWrapper CRD to be created", () -> {
@@ -144,13 +143,13 @@ void testDistributedWorkloadWithAppWrapper() throws Exception {
Allure.step("Create AppWrapper from yaml file", () -> {
AppWrapper koranteng = kubeClient.resources(AppWrapper.class).load(this.getClass().getResource("/codeflare/koranteng.yaml")).item();
- ResourceManager.getInstance().createResourceWithWait(koranteng);
+ KubeResourceManager.getInstance().createResourceWithWait(koranteng);
});
});
Allure.step("Wait for Ray API endpoint");
Resource endpoints = kubeClient.endpoints().inNamespace(projectName).withName("koranteng-head-svc");
- KubeUtils.waitForEndpoints("ray", endpoints);
+ TestUtils.waitForEndpoints("ray", endpoints);
Allure.step("Determine API route");
Route route = kubeClient.routes().inNamespace(projectName).withName("ray-dashboard-koranteng").get();
@@ -219,7 +218,7 @@ void testDistributedWorkloadWithKueue() throws Exception {
.withGrantMethod("auto")
.withAccessTokenInactivityTimeoutSeconds(300)
.build();
- ResourceManager.getInstance().createResourceWithoutWait(client);
+ KubeResourceManager.getInstance().createResourceWithoutWait(client);
OAuthAccessToken token = new OAuthAccessTokenBuilder()
.withNewMetadata()
@@ -232,7 +231,7 @@ void testDistributedWorkloadWithKueue() throws Exception {
.withUserName(user.getMetadata().getName())
.withUserUID(user.getMetadata().getUid())
.build();
- ResourceManager.getInstance().createResourceWithWait(token);
+ KubeResourceManager.getInstance().createResourceWithWait(token);
return privateToken;
});
@@ -245,7 +244,7 @@ void testDistributedWorkloadWithKueue() throws Exception {
.addToLabels(OdhAnnotationsLabels.LABEL_DASHBOARD, "true")
.endMetadata()
.build();
- ResourceManager.getInstance().createResourceWithWait(ns);
+ KubeResourceManager.getInstance().createResourceWithWait(ns);
});
Allure.step("Create flavor", () -> {
@@ -254,7 +253,7 @@ void testDistributedWorkloadWithKueue() throws Exception {
.withName(defaultFlavor)
.endMetadata()
.build();
- ResourceManager.getInstance().createResourceWithWait(flavor);
+ KubeResourceManager.getInstance().createResourceWithWait(flavor);
});
Allure.step("Create Cluster Queue", () -> {
@@ -287,7 +286,7 @@ void testDistributedWorkloadWithKueue() throws Exception {
.endResourceGroup()
.endSpec()
.build();
- ResourceManager.getInstance().createResourceWithWait(clusterQueue);
+ KubeResourceManager.getInstance().createResourceWithWait(clusterQueue);
});
Allure.step("Create Local Queue", () -> {
@@ -301,18 +300,18 @@ void testDistributedWorkloadWithKueue() throws Exception {
.withClusterQueue(clusterQueueName)
.endSpec()
.build();
- ResourceManager.getInstance().createResourceWithWait(localQueue);
+ KubeResourceManager.getInstance().createResourceWithWait(localQueue);
});
Allure.step("Create RayServer from yaml file", () -> {
RayCluster koranteng = kubeClient.resources(RayCluster.class).load(this.getClass().getResource("/codeflare/koranteng_ray2.yaml")).item();
- ResourceManager.getInstance().createResourceWithWait(koranteng);
+ KubeResourceManager.getInstance().createResourceWithWait(koranteng);
});
});
Allure.step("Wait for Ray API endpoint");
Resource endpoints = kubeClient.endpoints().inNamespace(projectName).withName("koranteng-head-svc");
- KubeUtils.waitForEndpoints("ray", endpoints);
+ TestUtils.waitForEndpoints("ray", endpoints);
Allure.step("Determine API route");
Route route = kubeClient.routes().inNamespace(projectName).withName("ray-dashboard-koranteng").get();
diff --git a/src/test/java/io/odh/test/e2e/standard/ModelServingST.java b/src/test/java/io/odh/test/e2e/standard/ModelServingST.java
index 3d61ca83..88b9ea19 100644
--- a/src/test/java/io/odh/test/e2e/standard/ModelServingST.java
+++ b/src/test/java/io/odh/test/e2e/standard/ModelServingST.java
@@ -28,7 +28,6 @@
import io.odh.test.OdhAnnotationsLabels;
import io.odh.test.OdhConstants;
import io.odh.test.TestUtils;
-import io.odh.test.framework.manager.ResourceManager;
import io.odh.test.utils.DscUtils;
import io.odh.test.utils.PodUtils;
import io.opendatahub.datasciencecluster.v1.DataScienceCluster;
@@ -38,6 +37,7 @@
import io.skodjob.annotations.Step;
import io.skodjob.annotations.SuiteDoc;
import io.skodjob.annotations.TestDoc;
+import io.skodjob.testframe.resources.KubeResourceManager;
import lombok.SneakyThrows;
import org.hamcrest.Matchers;
import org.junit.jupiter.api.Assertions;
@@ -90,7 +90,7 @@ public class ModelServingST extends StandardAbstract {
private static final String DS_PROJECT_NAME = "test-model-serving";
- private final OpenShiftClient kubeClient = (OpenShiftClient) ResourceManager.getKubeClient().getClient();
+ private final OpenShiftClient kubeClient = (OpenShiftClient) KubeResourceManager.getKubeClient().getClient();
@BeforeAll
static void deployDataScienceCluster() {
@@ -104,8 +104,8 @@ static void deployDataScienceCluster() {
// Create DSC
DataScienceCluster dsc = DscUtils.getBasicDSC(DS_PROJECT_NAME);
- ResourceManager.getInstance().createResourceWithWait(dsci);
- ResourceManager.getInstance().createResourceWithWait(dsc);
+ KubeResourceManager.getInstance().createResourceWithWait(dsci);
+ KubeResourceManager.getInstance().createResourceWithWait(dsc);
}
@TestDoc(
@@ -142,7 +142,7 @@ void testMultiModelServerInference() {
.addToLabels(OdhAnnotationsLabels.ANNO_MODEL_MESH, "true")
.endMetadata()
.build();
- ResourceManager.getInstance().createResourceWithWait(ns);
+ KubeResourceManager.getInstance().createResourceWithWait(ns);
// secret must exist for ServingRuntime to start, even though it contains no useful information
Secret storageConfig = new SecretBuilder()
@@ -153,7 +153,7 @@ void testMultiModelServerInference() {
.withType("Opaque")
.addToStringData("aws-connection-no-such-connection", "{}")
.build();
- ResourceManager.getInstance().createResourceWithWait(storageConfig);
+ KubeResourceManager.getInstance().createResourceWithWait(storageConfig);
// create serving runtime
ServingRuntime servingRuntime = processModelServerTemplate("ovms");
@@ -182,7 +182,7 @@ void testMultiModelServerInference() {
.addToVolumes(new VolumesBuilder().withName("shm").withEmptyDir(new EmptyDirBuilder().withMedium("Memory").withSizeLimit(new IntOrString("2Gi")).build()).build())
.endSpec()
.build();
- ResourceManager.getInstance().createResourceWithWait(servingRuntimeInstance);
+ KubeResourceManager.getInstance().createResourceWithWait(servingRuntimeInstance);
// create inference service
InferenceService inferenceService = new InferenceServiceBuilder()
@@ -206,13 +206,13 @@ void testMultiModelServerInference() {
.endInferenceservicespecPredictor()
.endSpec()
.build();
- ResourceManager.getInstance().createResourceWithWait(inferenceService);
+ KubeResourceManager.getInstance().createResourceWithWait(inferenceService);
String namespace = "knative-serving";
LOGGER.info("Waiting for pods readiness in {}", namespace);
PodUtils.waitForPodsReady(namespace, true, () -> {
- ResourceManager.getKubeCmdClient().namespace(namespace).exec(false, "get", "pods");
- ResourceManager.getKubeCmdClient().namespace(namespace).exec(false, "get", "events");
+ KubeResourceManager.getKubeCmdClient().namespace(namespace).exec(false, "get", "pods");
+ KubeResourceManager.getKubeCmdClient().namespace(namespace).exec(false, "get", "events");
});
Route route = kubeClient.routes().inNamespace(projectName).withName(modelName).get();
diff --git a/src/test/java/io/odh/test/e2e/standard/NotebookST.java b/src/test/java/io/odh/test/e2e/standard/NotebookST.java
index 1588d4c2..0aa752d6 100644
--- a/src/test/java/io/odh/test/e2e/standard/NotebookST.java
+++ b/src/test/java/io/odh/test/e2e/standard/NotebookST.java
@@ -13,7 +13,6 @@
import io.fabric8.kubernetes.api.model.Quantity;
import io.odh.test.Environment;
import io.odh.test.OdhAnnotationsLabels;
-import io.odh.test.framework.manager.ResourceManager;
import io.odh.test.framework.manager.resources.NotebookResource;
import io.odh.test.utils.DscUtils;
import io.odh.test.utils.PodUtils;
@@ -42,6 +41,7 @@
import io.skodjob.annotations.Step;
import io.skodjob.annotations.SuiteDoc;
import io.skodjob.annotations.TestDoc;
+import io.skodjob.testframe.resources.KubeResourceManager;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
import org.kubeflow.v1.Notebook;
@@ -95,7 +95,7 @@ void testCreateSimpleNotebook() throws IOException {
.addToAnnotations(OdhAnnotationsLabels.ANNO_SERVICE_MESH, "false")
.endMetadata()
.build();
- ResourceManager.getInstance().createResourceWithoutWait(ns);
+ KubeResourceManager.getInstance().createResourceWithoutWait(ns);
PersistentVolumeClaim pvc = new PersistentVolumeClaimBuilder()
.withNewMetadata()
@@ -111,11 +111,11 @@ void testCreateSimpleNotebook() throws IOException {
.withVolumeMode("Filesystem")
.endSpec()
.build();
- ResourceManager.getInstance().createResourceWithoutWait(pvc);
+ KubeResourceManager.getInstance().createResourceWithoutWait(pvc);
String notebookImage = NotebookResource.getNotebookImage(NotebookResource.JUPYTER_MINIMAL_IMAGE, NotebookResource.JUPYTER_MINIMAL_2023_2_TAG);
Notebook notebook = new NotebookBuilder(NotebookResource.loadDefaultNotebook(NTB_NAMESPACE, NTB_NAME, notebookImage)).build();
- ResourceManager.getInstance().createResourceWithoutWait(notebook);
+ KubeResourceManager.getInstance().createResourceWithoutWait(notebook);
LabelSelector lblSelector = new LabelSelectorBuilder()
.withMatchLabels(Map.of("app", NTB_NAME))
@@ -169,7 +169,7 @@ void deployDataScienceCluster() {
.endSpec()
.build();
// Deploy DSCI,DSC
- ResourceManager.getInstance().createResourceWithWait(dsci);
- ResourceManager.getInstance().createResourceWithWait(dsc);
+ KubeResourceManager.getInstance().createResourceWithWait(dsci);
+ KubeResourceManager.getInstance().createResourceWithWait(dsc);
}
}
diff --git a/src/test/java/io/odh/test/e2e/standard/PipelineServerST.java b/src/test/java/io/odh/test/e2e/standard/PipelineServerST.java
index eb1a71f3..338298a1 100644
--- a/src/test/java/io/odh/test/e2e/standard/PipelineServerST.java
+++ b/src/test/java/io/odh/test/e2e/standard/PipelineServerST.java
@@ -21,9 +21,8 @@
import io.fabric8.openshift.client.OpenShiftClient;
import io.odh.test.Environment;
import io.odh.test.OdhAnnotationsLabels;
-import io.odh.test.framework.manager.ResourceManager;
+import io.odh.test.TestUtils;
import io.odh.test.platform.KFPv1Client;
-import io.odh.test.platform.KubeUtils;
import io.odh.test.utils.DscUtils;
import io.opendatahub.datasciencecluster.v1.DataScienceCluster;
import io.opendatahub.datasciencepipelinesapplications.v1alpha1.DataSciencePipelinesApplication;
@@ -37,6 +36,7 @@
import io.skodjob.annotations.Step;
import io.skodjob.annotations.SuiteDoc;
import io.skodjob.annotations.TestDoc;
+import io.skodjob.testframe.resources.KubeResourceManager;
import org.hamcrest.Matchers;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.BeforeAll;
@@ -70,8 +70,8 @@ public class PipelineServerST extends StandardAbstract {
private static final String DS_PROJECT_NAME = "test-pipelines";
- private final ResourceManager resourceManager = ResourceManager.getInstance();
- private final KubernetesClient client = ResourceManager.getKubeClient().getClient();
+ private final KubeResourceManager resourceManager = KubeResourceManager.getInstance();
+ private final KubernetesClient client = KubeResourceManager.getKubeClient().getClient();
@BeforeAll
void deployDataScienceCluster() {
@@ -85,8 +85,8 @@ void deployDataScienceCluster() {
// Create DSC
DataScienceCluster dsc = DscUtils.getBasicDSC(DS_PROJECT_NAME);
- ResourceManager.getInstance().createResourceWithWait(dsci);
- ResourceManager.getInstance().createResourceWithWait(dsc);
+ KubeResourceManager.getInstance().createResourceWithWait(dsci);
+ KubeResourceManager.getInstance().createResourceWithWait(dsc);
}
@Issue("RHODS-5133")
@@ -129,7 +129,7 @@ void testUserCanCreateRunAndDeleteADSPipelineFromDSProject() throws IOException
.addToAnnotations(OdhAnnotationsLabels.ANNO_SERVICE_MESH, "false")
.endMetadata()
.build();
- ResourceManager.getInstance().createResourceWithWait(ns);
+ KubeResourceManager.getInstance().createResourceWithWait(ns);
// create minio secret
Secret secret = new SecretBuilder()
@@ -143,7 +143,7 @@ void testUserCanCreateRunAndDeleteADSPipelineFromDSProject() throws IOException
.addToStringData("AWS_SECRET_ACCESS_KEY", "gimmeAccessPlz")
.withType("Opaque")
.build();
- ResourceManager.getInstance().createResourceWithWait(secret);
+ KubeResourceManager.getInstance().createResourceWithWait(secret);
// configure pipeline server (with minio, not AWS bucket)
DataSciencePipelinesApplication dspa = new DataSciencePipelinesApplicationBuilder()
@@ -203,11 +203,11 @@ void testUserCanCreateRunAndDeleteADSPipelineFromDSProject() throws IOException
.endScheduledWorkflow()
.endSpec()
.build();
- ResourceManager.getInstance().createResourceWithWait(dspa);
+ KubeResourceManager.getInstance().createResourceWithWait(dspa);
// wait for pipeline api server to come up
Resource endpoints = client.endpoints().inNamespace(prjTitle).withName("ds-pipeline-pipelines-definition");
- KubeUtils.waitForEndpoints("pipelines", endpoints);
+ TestUtils.waitForEndpoints("pipelines", endpoints);
// connect to the api server we just created, route not available unless I enable oauth
Resource route = ocClient.routes()
diff --git a/src/test/java/io/odh/test/e2e/standard/PipelineV2ServerST.java b/src/test/java/io/odh/test/e2e/standard/PipelineV2ServerST.java
index 70ce1afa..06e69fa8 100644
--- a/src/test/java/io/odh/test/e2e/standard/PipelineV2ServerST.java
+++ b/src/test/java/io/odh/test/e2e/standard/PipelineV2ServerST.java
@@ -21,9 +21,8 @@
import io.fabric8.openshift.client.OpenShiftClient;
import io.odh.test.Environment;
import io.odh.test.OdhAnnotationsLabels;
-import io.odh.test.framework.manager.ResourceManager;
+import io.odh.test.TestUtils;
import io.odh.test.platform.KFPv2Client;
-import io.odh.test.platform.KubeUtils;
import io.odh.test.utils.DscUtils;
import io.opendatahub.datasciencecluster.v1.DataScienceCluster;
import io.opendatahub.datasciencecluster.v1.DataScienceClusterBuilder;
@@ -56,6 +55,7 @@
import io.skodjob.annotations.Step;
import io.skodjob.annotations.SuiteDoc;
import io.skodjob.annotations.TestDoc;
+import io.skodjob.testframe.resources.KubeResourceManager;
import org.hamcrest.Matchers;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.BeforeAll;
@@ -90,7 +90,7 @@ public class PipelineV2ServerST extends StandardAbstract {
private static final String DS_PROJECT_NAME = "test-pipelines";
- private final KubernetesClient client = ResourceManager.getKubeClient().getClient();
+ private final KubernetesClient client = KubeResourceManager.getKubeClient().getClient();
@BeforeAll
void deployDataScienceCluster() {
@@ -154,8 +154,8 @@ void deployDataScienceCluster() {
.endSpec()
.build();
- ResourceManager.getInstance().createResourceWithWait(dsci);
- ResourceManager.getInstance().createResourceWithWait(dsc);
+ KubeResourceManager.getInstance().createResourceWithWait(dsci);
+ KubeResourceManager.getInstance().createResourceWithWait(dsc);
}
/// ODS-2206 - Verify user can create and run a data science pipeline in DS Project
@@ -198,7 +198,7 @@ void testUserCanOperateDSv2PipelineFromDSProject() throws IOException {
.addToAnnotations(OdhAnnotationsLabels.ANNO_SERVICE_MESH, "false")
.endMetadata()
.build();
- ResourceManager.getInstance().createResourceWithWait(ns);
+ KubeResourceManager.getInstance().createResourceWithWait(ns);
Allure.step("Create Minio secret");
Secret secret = new SecretBuilder()
@@ -212,7 +212,7 @@ void testUserCanOperateDSv2PipelineFromDSProject() throws IOException {
.addToStringData("AWS_SECRET_ACCESS_KEY", "gimmeAccessPlz")
.withType("Opaque")
.build();
- ResourceManager.getInstance().createResourceWithWait(secret);
+ KubeResourceManager.getInstance().createResourceWithWait(secret);
Allure.step("Create DataSciencePipelinesApplication instance with build-in Minio enabled");
DataSciencePipelinesApplication dspa = new DataSciencePipelinesApplicationBuilder()
@@ -277,12 +277,12 @@ void testUserCanOperateDSv2PipelineFromDSProject() throws IOException {
.endScheduledWorkflow()
.endSpec()
.build();
- ResourceManager.getInstance().createResourceWithWait(dspa);
+ KubeResourceManager.getInstance().createResourceWithWait(dspa);
});
Allure.step("Wait for Pipeline API server to come up");
Resource endpoints = client.endpoints().inNamespace(prjTitle).withName("ds-pipeline-pipelines-definition");
- KubeUtils.waitForEndpoints("pipelines", endpoints);
+ TestUtils.waitForEndpoints("pipelines", endpoints);
Allure.step("Connect to the API server");
Resource route = ocClient.routes()
diff --git a/src/test/java/io/odh/test/e2e/standard/StandardAbstract.java b/src/test/java/io/odh/test/e2e/standard/StandardAbstract.java
index a9f85349..dcaabec4 100644
--- a/src/test/java/io/odh/test/e2e/standard/StandardAbstract.java
+++ b/src/test/java/io/odh/test/e2e/standard/StandardAbstract.java
@@ -7,14 +7,12 @@
import io.odh.test.Environment;
import io.odh.test.TestSuite;
import io.odh.test.e2e.Abstract;
-import io.odh.test.framework.listeners.OdhResourceCleaner;
-import io.odh.test.framework.listeners.ResourceManagerDeleteHandler;
import io.odh.test.install.BundleInstall;
import io.odh.test.install.InstallTypes;
import io.odh.test.install.OlmInstall;
+import io.skodjob.testframe.annotations.ResourceManager;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Tag;
-import org.junit.jupiter.api.extension.ExtendWith;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -24,8 +22,7 @@
import static org.junit.jupiter.api.Assertions.fail;
@Tag(TestSuite.STANDARD)
-@ExtendWith(OdhResourceCleaner.class)
-@ExtendWith(ResourceManagerDeleteHandler.class)
+@ResourceManager
public abstract class StandardAbstract extends Abstract {
private static final Logger LOGGER = LoggerFactory.getLogger(StandardAbstract.class);
diff --git a/src/test/java/io/odh/test/e2e/standard/UninstallST.java b/src/test/java/io/odh/test/e2e/standard/UninstallST.java
index 2d9d036a..e22a996f 100644
--- a/src/test/java/io/odh/test/e2e/standard/UninstallST.java
+++ b/src/test/java/io/odh/test/e2e/standard/UninstallST.java
@@ -10,8 +10,6 @@
import io.fabric8.kubernetes.api.model.ConfigMapBuilder;
import io.odh.test.Environment;
import io.odh.test.OdhConstants;
-import io.odh.test.TestUtils;
-import io.odh.test.framework.manager.ResourceManager;
import io.odh.test.utils.DscUtils;
import io.odh.test.utils.NamespaceUtils;
import io.opendatahub.datasciencecluster.v1.DataScienceCluster;
@@ -21,6 +19,8 @@
import io.skodjob.annotations.Step;
import io.skodjob.annotations.SuiteDoc;
import io.skodjob.annotations.TestDoc;
+import io.skodjob.testframe.resources.KubeResourceManager;
+import io.skodjob.testframe.wait.Wait;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
@@ -73,7 +73,7 @@ public class UninstallST extends StandardAbstract {
)
@Test
void testUninstallSimpleScenario() {
- if (ResourceManager.getKubeCmdClient().namespace(OdhConstants.OLM_OPERATOR_NAMESPACE).list(
+ if (KubeResourceManager.getKubeCmdClient().namespace(OdhConstants.OLM_OPERATOR_NAMESPACE).list(
"configmap").contains(DELETE_CONFIG_MAP_NAME)) {
Assertions.fail(
String.format("The ConfigMap '%s' is present on the cluster before the uninstall test started!",
@@ -87,19 +87,19 @@ void testUninstallSimpleScenario() {
.withLabels(Map.ofEntries(Map.entry(DELETE_ANNOTATION, "true")))
.endMetadata()
.build();
- ResourceManager.getInstance().createResourceWithWait(cm);
+ KubeResourceManager.getInstance().createResourceWithWait(cm);
// Now the product should start to uninstall, let's wait a bit and check the result.
- TestUtils.waitFor(String.format("the '%s' namespace to be removed as operator is being uninstalled",
+ Wait.until(String.format("the '%s' namespace to be removed as operator is being uninstalled",
OdhConstants.CONTROLLERS_NAMESPACE), 2000, 120_000,
- () -> !ResourceManager.getKubeClient().namespaceExists(OdhConstants.CONTROLLERS_NAMESPACE));
+ () -> !KubeResourceManager.getKubeClient().namespaceExists(OdhConstants.CONTROLLERS_NAMESPACE));
// Operator itself should delete the CSV, Subscription and InstallPlan
- Assertions.assertTrue(ResourceManager.getKubeCmdClient().namespace(OdhConstants.OLM_OPERATOR_NAMESPACE).list(
+ Assertions.assertTrue(KubeResourceManager.getKubeCmdClient().namespace(OdhConstants.OLM_OPERATOR_NAMESPACE).list(
"subscriptions").isEmpty(), "The operator Subscription is still present!");
- Assertions.assertTrue(ResourceManager.getKubeCmdClient().namespace(OdhConstants.OLM_OPERATOR_NAMESPACE).list(
+ Assertions.assertTrue(KubeResourceManager.getKubeCmdClient().namespace(OdhConstants.OLM_OPERATOR_NAMESPACE).list(
"installplan").isEmpty(), "The operator InstallPlan is still present!");
- Assertions.assertFalse(ResourceManager.getKubeCmdClient().namespace(OdhConstants.OLM_OPERATOR_NAMESPACE).list(
+ Assertions.assertFalse(KubeResourceManager.getKubeCmdClient().namespace(OdhConstants.OLM_OPERATOR_NAMESPACE).list(
"csv").stream().anyMatch(s -> s.toString().contains(OdhConstants.OLM_OPERATOR_NAME)),
"The operator CSV is still present!");
@@ -117,14 +117,14 @@ void testUninstallSimpleScenario() {
// as it is included in `openshift-operators` which is common namespaces for multiple other operators.
} else {
// Check that all other expected resources have been deleted
- Assertions.assertFalse(ResourceManager.getKubeClient().namespaceExists(OdhConstants.MONITORING_NAMESPACE),
+ Assertions.assertFalse(KubeResourceManager.getKubeClient().namespaceExists(OdhConstants.MONITORING_NAMESPACE),
String.format("Namespace '%s' hasn't been removed by the operator uninstall operation!",
OdhConstants.MONITORING_NAMESPACE));
- Assertions.assertFalse(ResourceManager.getKubeClient().namespaceExists(OdhConstants.NOTEBOOKS_NAMESPACE),
+ Assertions.assertFalse(KubeResourceManager.getKubeClient().namespaceExists(OdhConstants.NOTEBOOKS_NAMESPACE),
String.format("Namespace '%s' hasn't been removed by the operator uninstall operation!",
OdhConstants.NOTEBOOKS_NAMESPACE));
- ResourceManager.getKubeCmdClient().deleteNamespace(OdhConstants.OLM_OPERATOR_NAMESPACE);
+ KubeResourceManager.getKubeCmdClient().deleteNamespace(OdhConstants.OLM_OPERATOR_NAMESPACE);
NamespaceUtils.waitForNamespaceDeletion(OdhConstants.OLM_OPERATOR_NAMESPACE);
}
}
@@ -141,8 +141,8 @@ void deployDataScienceCluster() {
DataScienceCluster dsc = DscUtils.getBasicDSC(DS_PROJECT_NAME);
// Deploy DSCI,DSC
- ResourceManager.getInstance().createResourceWithWait(dsci);
- ResourceManager.getInstance().createResourceWithWait(dsc);
+ KubeResourceManager.getInstance().createResourceWithWait(dsci);
+ KubeResourceManager.getInstance().createResourceWithWait(dsc);
}
static boolean isOdhTested() {
diff --git a/src/test/java/io/odh/test/e2e/upgrade/BundleUpgradeST.java b/src/test/java/io/odh/test/e2e/upgrade/BundleUpgradeST.java
index db108e3a..f0b489b2 100644
--- a/src/test/java/io/odh/test/e2e/upgrade/BundleUpgradeST.java
+++ b/src/test/java/io/odh/test/e2e/upgrade/BundleUpgradeST.java
@@ -9,7 +9,6 @@
import io.odh.test.Environment;
import io.odh.test.OdhConstants;
import io.odh.test.TestSuite;
-import io.odh.test.framework.manager.ResourceManager;
import io.odh.test.install.BundleInstall;
import io.odh.test.utils.DeploymentUtils;
import io.odh.test.utils.PodUtils;
@@ -20,6 +19,7 @@
import io.skodjob.annotations.SuiteDoc;
import io.skodjob.annotations.TestDoc;
import io.skodjob.annotations.TestTag;
+import io.skodjob.testframe.resources.KubeResourceManager;
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.Tag;
import org.junit.jupiter.api.Test;
@@ -104,7 +104,8 @@ void testUpgradeBundle() throws IOException {
DeploymentUtils.waitTillDepHasRolled(baseBundle.getNamespace(), baseBundle.getDeploymentName(), operatorSnapshot);
- LabelSelector labelSelector = ResourceManager.getKubeClient().getDeployment(OdhConstants.CONTROLLERS_NAMESPACE, OdhConstants.DASHBOARD_CONTROLLER).getSpec().getSelector();
+ LabelSelector labelSelector = KubeResourceManager.getKubeClient().getClient().apps().deployments()
+ .inNamespace(OdhConstants.CONTROLLERS_NAMESPACE).withName(OdhConstants.DASHBOARD_CONTROLLER).get().getSpec().getSelector();
PodUtils.verifyThatPodsAreStable(OdhConstants.CONTROLLERS_NAMESPACE, labelSelector);
Date operatorLogCheckTimestamp = new Date();
diff --git a/src/test/java/io/odh/test/e2e/upgrade/OlmUpgradeST.java b/src/test/java/io/odh/test/e2e/upgrade/OlmUpgradeST.java
index e04f7e64..341b451e 100644
--- a/src/test/java/io/odh/test/e2e/upgrade/OlmUpgradeST.java
+++ b/src/test/java/io/odh/test/e2e/upgrade/OlmUpgradeST.java
@@ -10,9 +10,8 @@
import io.odh.test.Environment;
import io.odh.test.OdhConstants;
import io.odh.test.TestSuite;
-import io.odh.test.framework.manager.ResourceManager;
+import io.odh.test.TestUtils;
import io.odh.test.install.OlmInstall;
-import io.odh.test.platform.KubeUtils;
import io.odh.test.utils.DeploymentUtils;
import io.odh.test.utils.PodUtils;
import io.odh.test.utils.UpgradeUtils;
@@ -22,6 +21,8 @@
import io.skodjob.annotations.SuiteDoc;
import io.skodjob.annotations.TestDoc;
import io.skodjob.annotations.TestTag;
+import io.skodjob.testframe.resources.KubeResourceManager;
+import io.skodjob.testframe.utils.KubeUtils;
import org.junit.jupiter.api.Tag;
import org.junit.jupiter.api.Test;
import org.slf4j.Logger;
@@ -78,9 +79,9 @@ void testUpgradeOlm() throws IOException, InterruptedException {
olmInstall.createManual();
// Approve install plan created for older version
- KubeUtils.waitForInstallPlan(olmInstall.getNamespace(), olmInstall.getOperatorName() + "." + startingVersion);
- InstallPlan ip = ResourceManager.getKubeClient().getNonApprovedInstallPlan(olmInstall.getNamespace(), olmInstall.getOperatorName() + "." + startingVersion);
- ResourceManager.getKubeClient().approveInstallPlan(olmInstall.getNamespace(), ip.getMetadata().getName());
+ TestUtils.waitForInstallPlan(olmInstall.getNamespace(), olmInstall.getOperatorName() + "." + startingVersion);
+ InstallPlan ip = KubeUtils.getNonApprovedInstallPlan(olmInstall.getNamespace(), olmInstall.getOperatorName() + "." + startingVersion);
+ KubeUtils.approveInstallPlan(olmInstall.getNamespace(), ip.getMetadata().getName());
// Wait for old version readiness
DeploymentUtils.waitForDeploymentReady(olmInstall.getNamespace(), olmInstall.getDeploymentName());
@@ -99,14 +100,15 @@ void testUpgradeOlm() throws IOException, InterruptedException {
LOGGER.info("Upgrade to next available version in OLM catalog");
// Approve upgrade to newer version
- KubeUtils.waitForInstallPlan(olmInstall.getNamespace(), olmInstall.getCsvName());
- ip = ResourceManager.getKubeClient().getNonApprovedInstallPlan(olmInstall.getNamespace(), olmInstall.getCsvName());
- ResourceManager.getKubeClient().approveInstallPlan(olmInstall.getNamespace(), ip.getMetadata().getName());
+ TestUtils.waitForInstallPlan(olmInstall.getNamespace(), olmInstall.getCsvName());
+ ip = KubeUtils.getNonApprovedInstallPlan(olmInstall.getNamespace(), olmInstall.getCsvName());
+ KubeUtils.approveInstallPlan(olmInstall.getNamespace(), ip.getMetadata().getName());
// Wait for operator RU
DeploymentUtils.waitTillDepHasRolled(olmInstall.getNamespace(), olmInstall.getDeploymentName(), operatorSnapshot);
// Wait for pod stability for Dashboard
- LabelSelector labelSelector = ResourceManager.getKubeClient().getDeployment(OdhConstants.CONTROLLERS_NAMESPACE, OdhConstants.DASHBOARD_CONTROLLER).getSpec().getSelector();
+ LabelSelector labelSelector = KubeResourceManager.getKubeClient().getClient()
+ .apps().deployments().inNamespace(OdhConstants.CONTROLLERS_NAMESPACE).withName(OdhConstants.DASHBOARD_CONTROLLER).get().getSpec().getSelector();
PodUtils.verifyThatPodsAreStable(OdhConstants.CONTROLLERS_NAMESPACE, labelSelector);
Date operatorLogCheckTimestamp = new Date();
diff --git a/src/test/java/io/odh/test/e2e/upgrade/UpgradeAbstract.java b/src/test/java/io/odh/test/e2e/upgrade/UpgradeAbstract.java
index 9c28d9f0..acee52cc 100644
--- a/src/test/java/io/odh/test/e2e/upgrade/UpgradeAbstract.java
+++ b/src/test/java/io/odh/test/e2e/upgrade/UpgradeAbstract.java
@@ -12,9 +12,6 @@
import io.odh.test.OdhAnnotationsLabels;
import io.odh.test.TestSuite;
import io.odh.test.e2e.Abstract;
-import io.odh.test.framework.listeners.OdhResourceCleaner;
-import io.odh.test.framework.listeners.ResourceManagerDeleteHandler;
-import io.odh.test.framework.manager.ResourceManager;
import io.odh.test.framework.manager.resources.NotebookResource;
import io.odh.test.utils.DscUtils;
import io.opendatahub.datasciencecluster.v1.DataScienceCluster;
@@ -37,16 +34,16 @@
import io.opendatahub.datasciencecluster.v1.datascienceclusterspec.components.Workbenches;
import io.opendatahub.datasciencecluster.v1.datascienceclusterspec.components.WorkbenchesBuilder;
import io.opendatahub.dscinitialization.v1.DSCInitialization;
+import io.skodjob.testframe.annotations.ResourceManager;
+import io.skodjob.testframe.resources.KubeResourceManager;
import org.junit.jupiter.api.Tag;
-import org.junit.jupiter.api.extension.ExtendWith;
import org.kubeflow.v1.Notebook;
import org.kubeflow.v1.NotebookBuilder;
import java.io.IOException;
@Tag(TestSuite.UPGRADE)
-@ExtendWith(OdhResourceCleaner.class)
-@ExtendWith(ResourceManagerDeleteHandler.class)
+@ResourceManager
public abstract class UpgradeAbstract extends Abstract {
protected void deployDsc(String name) {
@@ -88,8 +85,8 @@ protected void deployDsc(String name) {
.endSpec()
.build();
// Deploy DSC
- ResourceManager.getInstance().createResourceWithWait(dsci);
- ResourceManager.getInstance().createResourceWithWait(dsc);
+ KubeResourceManager.getInstance().createResourceWithWait(dsci);
+ KubeResourceManager.getInstance().createResourceWithWait(dsc);
}
public void deployNotebook(String namespace, String name) throws IOException {
Namespace ns = new NamespaceBuilder()
@@ -99,7 +96,7 @@ public void deployNotebook(String namespace, String name) throws IOException {
.addToAnnotations(OdhAnnotationsLabels.ANNO_SERVICE_MESH, "false")
.endMetadata()
.build();
- ResourceManager.getInstance().createResourceWithoutWait(ns);
+ KubeResourceManager.getInstance().createResourceWithoutWait(ns);
PersistentVolumeClaim pvc = new PersistentVolumeClaimBuilder()
.withNewMetadata()
@@ -115,11 +112,11 @@ public void deployNotebook(String namespace, String name) throws IOException {
.withVolumeMode("Filesystem")
.endSpec()
.build();
- ResourceManager.getInstance().createResourceWithoutWait(pvc);
+ KubeResourceManager.getInstance().createResourceWithoutWait(pvc);
String notebookImage = NotebookResource.getNotebookImage(NotebookResource.JUPYTER_MINIMAL_IMAGE, NotebookResource.JUPYTER_MINIMAL_2023_2_TAG);
Notebook notebook = new NotebookBuilder(NotebookResource.loadDefaultNotebook(namespace, name, notebookImage)).build();
- ResourceManager.getInstance().createResourceWithoutWait(notebook);
+ KubeResourceManager.getInstance().createResourceWithoutWait(notebook);
}
}
diff --git a/src/test/java/io/odh/test/unit/CsvUtilsTests.java b/src/test/java/io/odh/test/unit/CsvUtilsTests.java
index 2b4ac8a0..a2c97e3a 100644
--- a/src/test/java/io/odh/test/unit/CsvUtilsTests.java
+++ b/src/test/java/io/odh/test/unit/CsvUtilsTests.java
@@ -7,8 +7,8 @@
import io.odh.test.TestSuite;
import io.odh.test.framework.ExtensionContextParameterResolver;
-import io.odh.test.framework.listeners.TestVisualSeparator;
import io.odh.test.utils.CsvUtils;
+import io.skodjob.testframe.annotations.TestVisualSeparator;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Tag;
import org.junit.jupiter.api.extension.ExtendWith;
@@ -16,8 +16,9 @@
import org.junit.jupiter.params.provider.CsvSource;
@Tag(TestSuite.UNIT)
+@TestVisualSeparator
@ExtendWith(ExtensionContextParameterResolver.class)
-public class CsvUtilsTests implements TestVisualSeparator {
+public class CsvUtilsTests {
@ParameterizedTest(name = "[{index}] Version.fromString({0}) == {1}")
@CsvSource({
"2, 2.0.0",
diff --git a/src/test/java/io/odh/test/unit/MultipartFormDataBodyPublisherTests.java b/src/test/java/io/odh/test/unit/MultipartFormDataBodyPublisherTests.java
index bd41a7d3..5c68d4be 100644
--- a/src/test/java/io/odh/test/unit/MultipartFormDataBodyPublisherTests.java
+++ b/src/test/java/io/odh/test/unit/MultipartFormDataBodyPublisherTests.java
@@ -8,8 +8,8 @@
import com.sun.net.httpserver.HttpServer;
import io.odh.test.TestSuite;
import io.odh.test.framework.ExtensionContextParameterResolver;
-import io.odh.test.framework.listeners.TestVisualSeparator;
import io.odh.test.platform.httpClient.MultipartFormDataBodyPublisher;
+import io.skodjob.testframe.annotations.TestVisualSeparator;
import lombok.SneakyThrows;
import org.hamcrest.Matchers;
import org.junit.jupiter.api.Assertions;
@@ -37,7 +37,8 @@
@Tag(TestSuite.UNIT)
@ExtendWith(ExtensionContextParameterResolver.class)
-public class MultipartFormDataBodyPublisherTests implements TestVisualSeparator {
+@TestVisualSeparator
+public class MultipartFormDataBodyPublisherTests {
@Test
public void testStringPart() throws IOException {
MultipartFormDataBodyPublisher publisher = new MultipartFormDataBodyPublisher()
diff --git a/src/test/java/io/odh/test/unit/UnitTests.java b/src/test/java/io/odh/test/unit/UnitTests.java
index ad1719cf..70520001 100644
--- a/src/test/java/io/odh/test/unit/UnitTests.java
+++ b/src/test/java/io/odh/test/unit/UnitTests.java
@@ -12,7 +12,6 @@
import io.fabric8.kubernetes.client.server.mock.KubernetesMockServer;
import io.odh.test.TestSuite;
import io.odh.test.framework.ExtensionContextParameterResolver;
-import io.odh.test.framework.listeners.TestVisualSeparator;
import io.opendatahub.datasciencecluster.v1.DataScienceCluster;
import io.opendatahub.datasciencecluster.v1.DataScienceClusterBuilder;
import io.opendatahub.datasciencecluster.v1.datascienceclusterspec.components.Codeflare;
@@ -27,6 +26,7 @@
import io.opendatahub.dscinitialization.v1.dscinitializationspec.servicemesh.ControlPlane;
import io.opendatahub.v1alpha.OdhDashboardConfig;
import io.opendatahub.v1alpha.OdhDashboardConfigBuilder;
+import io.skodjob.testframe.annotations.TestVisualSeparator;
import org.junit.jupiter.api.Tag;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
@@ -37,8 +37,9 @@
@Tag(TestSuite.UNIT)
@ExtendWith(ExtensionContextParameterResolver.class)
+@TestVisualSeparator
@EnableKubernetesMockClient(crud = true)
-public class UnitTests implements TestVisualSeparator {
+public class UnitTests {
private KubernetesClient kubernetesClient;