A REST API that can dynamically manage and connect data sources, including JDBC and MongoDB. The classes below are shown as one listing for brevity; in a real project each public class goes in its own file.

package com.example.datasource;

import com.mongodb.client.MongoClient;
import com.mongodb.client.MongoClients;
import com.mongodb.client.MongoIterable;
import com.zaxxer.hikari.HikariConfig;
import com.zaxxer.hikari.HikariDataSource;
import org.springframework.http.ResponseEntity;
import org.springframework.web.bind.annotation.PostMapping;
import org.springframework.web.bind.annotation.RequestBody;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RestController;

import java.sql.Connection;
import java.sql.DatabaseMetaData;
import java.sql.ResultSet;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

/**
 * Generic interface for managing data store clients/pools.
 * @param <R> Resource type (e.g. HikariDataSource, MongoClient)
 */
public interface DataStoreManager<R> {
    R getOrCreate(DataStoreConfig config);
    void removeAndClose(DataStoreConfig config);
}

/** Supported data store types. */
public enum DataStoreType {
    JDBC,
    MONGO
}

/**
 * Unified configuration for any data store.
 */
public class DataStoreConfig {
    private DataStoreType type;
    private String host;
    private int port;
    private String database;
    private String username;
    private String password;

    // JDBC-specific
    private Integer maxPoolSize;
    private Long leakDetectionThresholdMs;

    // Getters and setters omitted for brevity
    // ...
}

/**
 * JDBC-specific implementation of DataStoreManager.
 */
public class JdbcDataStoreManager implements DataStoreManager<HikariDataSource> {
    private final ConcurrentMap<String, HikariDataSource> pools = new ConcurrentHashMap<>();

    @Override
    public HikariDataSource getOrCreate(DataStoreConfig cfg) {
        String key = String.format("%s:%d/%s|%s", cfg.getHost(), cfg.getPort(), cfg.getDatabase(), cfg.getUsername());
        return pools.computeIfAbsent(key, k -> createPool(cfg));
    }

    @Override
    public void removeAndClose(DataStoreConfig cfg) {
        String key = String.format("%s:%d/%s|%s", cfg.getHost(), cfg.getPort(), cfg.getDatabase(), cfg.getUsername());
        HikariDataSource ds = pools.remove(key);
        if (ds != null) ds.close();
    }

    private HikariDataSource createPool(DataStoreConfig cfg) {
        HikariConfig hc = new HikariConfig();
        // Oracle thin-driver URL format; adjust the prefix for other JDBC databases
        hc.setJdbcUrl(String.format("jdbc:oracle:thin:@%s:%d/%s", cfg.getHost(), cfg.getPort(), cfg.getDatabase()));
        hc.setUsername(cfg.getUsername());
        hc.setPassword(cfg.getPassword());
        if (cfg.getMaxPoolSize() != null) hc.setMaximumPoolSize(cfg.getMaxPoolSize());
        if (cfg.getLeakDetectionThresholdMs() != null) hc.setLeakDetectionThreshold(cfg.getLeakDetectionThresholdMs());
        return new HikariDataSource(hc);
    }
}

/**
 * MongoDB-specific implementation of DataStoreManager.
 */
public class MongoDataStoreManager implements DataStoreManager<MongoClient> {
    private final ConcurrentMap<String, MongoClient> clients = new ConcurrentHashMap<>();

    @Override
    public MongoClient getOrCreate(DataStoreConfig cfg) {
        String key = String.format("%s:%d/%s|%s", cfg.getHost(), cfg.getPort(), cfg.getDatabase(),
                cfg.getUsername() != null ? cfg.getUsername() : "");
        return clients.computeIfAbsent(key, k -> createClient(cfg));
    }

    @Override
    public void removeAndClose(DataStoreConfig cfg) {
        String key = String.format("%s:%d/%s|%s", cfg.getHost(), cfg.getPort(), cfg.getDatabase(),
                cfg.getUsername() != null ? cfg.getUsername() : "");
        MongoClient client = clients.remove(key);
        if (client != null) client.close();
    }

    private MongoClient createClient(DataStoreConfig cfg) {
        String uri;
        if (cfg.getUsername() != null && cfg.getPassword() != null) {
            uri = String.format("mongodb://%s:%s@%s:%d/%s", cfg.getUsername(), cfg.getPassword(), cfg.getHost(), cfg.getPort(), cfg.getDatabase());
        } else {
            uri = String.format("mongodb://%s:%d/%s", cfg.getHost(), cfg.getPort(), cfg.getDatabase());
        }
        return MongoClients.create(uri);
    }
}

/**
 * Factory for obtaining the correct DataStoreManager implementation.
 */
public class DataStoreManagerFactory {
    private static final JdbcDataStoreManager JDBC_MANAGER = new JdbcDataStoreManager();
    private static final MongoDataStoreManager MONGO_MANAGER = new MongoDataStoreManager();

    @SuppressWarnings("unchecked")
    public static <R> DataStoreManager<R> getManager(DataStoreConfig cfg) {
        switch (cfg.getType()) {
            case JDBC:  return (DataStoreManager<R>) JDBC_MANAGER;
            case MONGO: return (DataStoreManager<R>) MONGO_MANAGER;
            default:    throw new IllegalArgumentException("Unsupported type: " + cfg.getType());
        }
    }
}

/**
 * Unified controller fetching schema for both JDBC and MongoDB based on config.type.
 */
@RestController
@RequestMapping("/api/schema")
public class SchemaController {
    @PostMapping
    public ResponseEntity<List<String>> fetchSchema(@RequestBody DataStoreConfig cfg) {
        DataStoreType type = cfg.getType();
        DataStoreManager<?> mgr = DataStoreManagerFactory.getManager(cfg);
        List<String> names = new ArrayList<>();

        try {
            if (type == DataStoreType.JDBC) {
                HikariDataSource ds = (HikariDataSource) mgr.getOrCreate(cfg);
                try (Connection conn = ds.getConnection()) {
                    DatabaseMetaData meta = conn.getMetaData();
                    // Oracle stores schema names in upper case, so the username doubles as the schema
                    try (ResultSet rs = meta.getTables(null, cfg.getUsername().toUpperCase(), null, new String[]{"TABLE"})) {
                        while (rs.next()) names.add(rs.getString("TABLE_NAME"));
                    }
                }
            } else if (type == DataStoreType.MONGO) {
                MongoClient client = (MongoClient) mgr.getOrCreate(cfg);
                MongoIterable<String> cols = client.getDatabase(cfg.getDatabase()).listCollectionNames();
                for (String name : cols) names.add(name);
            } else {
                return ResponseEntity.badRequest().build();
            }
        } catch (Exception e) {
            return ResponseEntity.badRequest().build();
        }

        return ResponseEntity.ok(names);
    }
}
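
To exercise the endpoint, here is a minimal client sketch using Python's requests library; the host, port, credentials, and database names are placeholders, not values from the service itself:

import requests

# Fetch Oracle table names through the JDBC branch (all connection values are placeholders)
jdbc_cfg = {
    "type": "JDBC",
    "host": "db.example.com",
    "port": 1521,
    "database": "ORCLPDB1",
    "username": "scott",
    "password": "tiger",
}
print(requests.post("http://localhost:8080/api/schema", json=jdbc_cfg).json())

# Fetch MongoDB collection names through the MONGO branch
mongo_cfg = {
    "type": "MONGO",
    "host": "mongo.example.com",
    "port": 27017,
    "database": "appdb",
}
print(requests.post("http://localhost:8080/api/schema", json=mongo_cfg).json())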

Q: Create a Spring Boot REST controller to accept an uploaded CSV file

import com.opencsv.CSVReader;
import com.opencsv.CSVReaderBuilder;
import com.opencsv.exceptions.CsvException;
import org.springframework.web.bind.annotation.*;
import org.springframework.web.multipart.MultipartFile;

import java.io.IOException;
import java.io.InputStreamReader;
import java.util.ArrayList;
import java.util.List;

@RestController
public class CsvController {

    @PostMapping("/upload")
    public List<String[]> handleFileUpload(@RequestParam("file") MultipartFile file) {
        List<String[]> rows = new ArrayList<>();

        if (file.isEmpty()) {
            return rows;
        }

        try (CSVReader csvReader = new CSVReaderBuilder(new InputStreamReader(file.getInputStream())).build()) {
            rows = csvReader.readAll();
        } catch (IOException | CsvException e) {
            e.printStackTrace();
        }

        return rows;
    }
}

This controller uses Spring’s @PostMapping annotation to map the /upload endpoint to the handleFileUpload method. The method takes a MultipartFile object as a parameter, which represents the uploaded file.

The method checks if the file is empty and returns an empty list if it is. Otherwise, it reads the contents of the file using a CSVReader object from the opencsv library, which is capable of parsing CSV files. The resulting rows are returned as a list of string arrays.

Note that you’ll need to include the opencsv dependency in your pom.xml file for this code to work:

<dependency>
    <groupId>com.opencsv</groupId>
    <artifactId>opencsv</artifactId>
    <version>5.5.2</version>
</dependency>
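
To try the endpoint, a quick client call can be sketched with Python's requests library; the host, port, and file name are placeholders:

import requests

# Upload a local CSV file to the /upload endpoint
with open("data.csv", "rb") as f:
    response = requests.post(
        "http://localhost:8080/upload",
        files={"file": ("data.csv", f, "text/csv")},
    )

# The controller returns the parsed rows as a JSON array of string arrays
print(response.json())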

Question

Use Locust to load test a REST service with following info:

  • need login first to get secCode, userCode and sessionId, share them to all users
  • post request to /v1/prt/getList with above info
  • post to /v1/prt/getAba with above info plus countryCode

Answer

Sure! Here’s an example script using Locust to load test a REST service with the given information:

from locust import HttpUser, task, between

class MyUser(HttpUser):
    wait_time = between(1, 5)

    def on_start(self):
        # Log in once per simulated user to get secCode, userCode, sessionId, and token
        response = self.client.post("/login", {"username": "my_username", "password": "my_password"})
        body = response.json()
        self.secCode = body["secCode"]
        self.userCode = body["userCode"]
        self.sessionId = body["sessionId"]
        self.token = body["token"]

    @task
    def get_list(self):
        # make a POST request to /v1/prt/getList with secCode, userCode, sessionId, and XBA-Token header
        headers = {"XBA-Token": self.token}
        data = {"secCode": self.secCode, "userCode": self.userCode, "sessionId": self.sessionId}
        self.client.post("/v1/prt/getList", headers=headers, data=data)

    @task
    def get_aba(self):
        # make a POST request to /v1/prt/getAba with secCode, userCode, sessionId, countryCode, and XBA-Token header
        headers = {"XBA-Token": self.token}
        data = {"secCode": self.secCode, "userCode": self.userCode, "sessionId": self.sessionId, "countryCode": "US"}
        self.client.post("/v1/prt/getAba", headers=headers, data=data)

This script defines a MyUser class that inherits from HttpUser and waits between 1 and 5 seconds between tasks. The on_start method logs in to the REST service and saves the secCode, userCode, sessionId, and token for later requests. The get_list task makes a POST request to /v1/prt/getList with the saved login information, and the get_aba task makes a POST request to /v1/prt/getAba with the saved login information plus a hard-coded countryCode of "US"; each request also sends the saved token in an XBA-Token header. Replace "my_username" and "my_password" with real credentials, and "US" with a valid country code for your service. One caveat: on_start runs once per simulated user, so each user performs its own login. To share a single login across all users, as the question asks, log in once at test start and let every user read the shared values, as in the sketch below.
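
A minimal sketch of that shared-login variant, assuming the same /login response shape; the test_start event fires once per Locust process, and environment.host must be set (for example via the --host flag):

import requests
from locust import HttpUser, task, between, events

# Login info shared by all simulated users in this Locust process
shared = {}

@events.test_start.add_listener
def do_shared_login(environment, **kwargs):
    # Log in once and publish the codes to every user
    response = requests.post(
        environment.host + "/login",
        data={"username": "my_username", "password": "my_password"},
    )
    shared.update(response.json())

class SharedUser(HttpUser):
    wait_time = between(1, 5)

    @task
    def get_list(self):
        self.client.post(
            "/v1/prt/getList",
            headers={"XBA-Token": shared["token"]},
            data={
                "secCode": shared["secCode"],
                "userCode": shared["userCode"],
                "sessionId": shared["sessionId"],
            },
        )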


PlantUML activity diagram that has a lot of rules

Question

Create a PlantUML activity diagram for the following flow:

  • Upload CSV file
  • Check if the user is already enabled. If yes, stop.
  • If no, check if user count(SSO id) > 1 based on bCode and userId. If yes, mark user has issue1 and stop.
  • If no, check if user count(SSO id) = 0 based on bCode and userId. If yes, mark user ready and stop.
  • If no, check if user count(SSO id) > 1 based on email. If yes, mark user has issue2 and stop.
  • If no, check if user count(bCode and userId) > 1 based on email. If yes, mark user has issue3 and stop.
  • If no, check if user count(bCode and userId) = 1 and count(SSO id) = 1 based on email. If yes, mark user already enabled and insert user to friendly and stop.
  • If no, stop

Answer

@startuml
start
:Upload CSV file;
if (Is user already enabled?) then (yes)
  stop
else (no)
  if (User count(SSO id) > 1 based on bCode and userId?) then (yes)
    :Mark user has issue1;
    stop
  else (no)
    if (User count(SSO id) = 0 based on bCode and userId?) then (yes)
      :Mark user ready;
      stop
    else (no)
      if (User count(SSO id) > 1 based on email?) then (yes)
        :Mark user has issue2;
        stop
      else (no)
        if (User count(bCode and userId) > 1 based on email?) then (yes)
          :Mark user has issue3;
          stop
        else (no)
          if (User count(bCode and userId) = 1 and count(SSO id) = 1 based on email?) then (yes)
            :Mark user already enabled;
            :Insert user to friendly;
            stop
          else (no)
            stop
          endif
        endif
      endif
    endif
  endif
endif
@enduml

The diagram starts with the “Upload CSV file” activity and then walks through a series of checks to determine the user’s status. If the user is already enabled, the flow stops immediately. Otherwise it evaluates the conditions in order, such as the number of users with the same SSO ID or email address; if an issue condition is met, the user is marked with the corresponding issue and the flow stops. Depending on which check matches, the user may instead be marked ready, or marked already enabled and inserted into the friendly list. Every branch ends in a stop node.


PlantUML sequence diagram and component diagram

Question

Create a PlantUML sequence diagram with the following flow:

  • ABC UI -> ABC service -> SU service(TMX)
  • ABC service -> EFD
  • Option flow: Load Test -> ABC service
  • Option flow: e2e Test -> ABC UI

Answer

Here is the PlantUML sequence diagram with the flow:

@startuml
title ABC Sequence Diagram

actor User
User -> ABC_UI: Send Request
ABC_UI -> ABC_Service: Send Request
ABC_Service -> SU_Service: Send Request (TMX)
SU_Service -> ABC_Service: Send Response
ABC_Service -> EFD: Send Data
alt Load Test
    User -> ABC_Service: Send Load Test
    ABC_Service -> ABC_Service: Process Load Test
else e2e Test
    User -> ABC_UI: Send e2e Test
    ABC_UI -> ABC_Service: Send Request
    ABC_Service -> ABC_UI: Send Response
end

@enduml

Explanation:

  • The diagram starts with the User sending a request to the ABC_UI.
  • ABC_UI then sends the request to ABC_Service.
  • ABC_Service sends the request to SU_Service(TMX) and waits for a response.
  • SU_Service(TMX) processes the request and sends the response back to ABC_Service.
  • ABC_Service sends the data to EFD.
  • The diagram has two optional flows, one for Load Test and another for e2e Test:
    • If it is Load Test, the User sends a Load Test request to ABC_Service, and ABC_Service processes it.
    • If it is e2e Test, the User sends an e2e Test request to ABC_UI, and ABC_UI sends the request to ABC_Service, which sends a response back to ABC_UI.

Here is the PlantUML component diagram with the above information:

@startuml
title ABC Component Diagram

package "User Interface" {
  [ABC UI]
}

package "ABC Service" {
  [ABC Service]
}

package "SU Service" {
  [SU Service (TMX)]
}

package "External Service" {
  [EFD]
}

database "Data Storage" {
  [Database]
}

[ABC UI] --> [ABC Service]
[ABC Service] --> [SU Service (TMX)]
[ABC Service] --> [EFD]
[ABC Service] --> [Database]

@enduml

Explanation:

  • The component diagram shows the components involved in the ABC system, including the ABC UI, ABC Service, SU Service (TMX), EFD, and Database.
  • The ABC UI communicates with the ABC Service, which in turn communicates with the SU Service (TMX) and the EFD.
  • The ABC Service also interacts with the Database for data storage.

Bash script to convert OpenShift Secrets to HashiCorp Vault

Create a bash script to convert the OpenShift secret my-secrets to HashiCorp Vault, with the following requirements:

  • Use a temp file to save all the secrets before putting them into Vault.
  • Add a flag to not run the Vault command.

Here’s an example bash script for converting OpenShift secrets to HashiCorp Vault. It uses a temp file to store the secrets before putting them into Vault, and has a flag to skip the Vault command if desired. Please note that this is just an example and you may need to modify it to suit your specific needs.

#!/bin/bash

# Set the name of the secrets to be converted
SECRETS_NAME=my-secrets

# Set the path to the temp file where the secrets will be stored
TEMP_FILE=/tmp/secrets.txt

# Vault server address and token (the vault CLI reads these from the environment)
export VAULT_ADDR=http://localhost:8200
export VAULT_TOKEN=mytoken

# Parse command-line arguments ("-:" lets getopts route long options like --no-vault to the "-" case)
RUN_VAULT_COMMAND=true
while getopts ":n:t:f:h-:" opt; do
  case ${opt} in
    n ) SECRETS_NAME=$OPTARG;;
    t ) VAULT_TOKEN=$OPTARG;;
    f ) TEMP_FILE=$OPTARG;;
    h ) echo "Usage: $0 [-n SECRETS_NAME] [-t VAULT_TOKEN] [-f TEMP_FILE] [-h] [--no-vault]
  -n SECRETS_NAME: The name of the secrets to be converted (default: my-secrets)
  -t VAULT_TOKEN: The token to use to access the Vault server (default: mytoken)
  -f TEMP_FILE: The path to the temp file where the secrets will be stored (default: /tmp/secrets.txt)
  -h: Show this help message
  --no-vault: Do not run the Vault command"
        exit 0;;
    - ) case "${OPTARG}" in
          no-vault) RUN_VAULT_COMMAND=false;;
          *) echo "Invalid argument: --${OPTARG}" >&2; exit 1;;
        esac;;
    \? ) echo "Invalid option: -$OPTARG" >&2; exit 1;;
  esac
done

# Get the secrets from OpenShift and store them as key=value pairs in the temp file
# (to_entries preserves the keys; assumes secret values are single-line)
oc get secret "$SECRETS_NAME" -o json | jq -r '.data | to_entries[] | "\(.key)=\(.value | @base64d)"' > "$TEMP_FILE"

# If the --no-vault flag was provided, exit without running the Vault command
if [ "$RUN_VAULT_COMMAND" = false ]; then
  echo "Secrets stored in $TEMP_FILE"
  exit 0
fi

# Store all key=value pairs from the temp file under a single Vault path
mapfile -t PAIRS < "$TEMP_FILE"
vault kv put "secret/$SECRETS_NAME" "${PAIRS[@]}"

# Clean up the temp file (it holds plaintext secrets)
rm -f "$TEMP_FILE"

echo "Secrets converted and stored in Vault"

To use this script, save it as a bash file (e.g., convert-secrets.sh), make it executable (chmod +x convert-secrets.sh), and run it with the desired flags (e.g., ./convert-secrets.sh -n my-secrets -t mytoken -f /tmp/secrets.txt --no-vault).
