diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..00219ef
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,3 @@
+csi_pb2.py
+csi_pb2_grpc.py
+__pycache__/
diff --git a/.tool-versions b/.tool-versions
new file mode 100644
index 0000000..85cfd4b
--- /dev/null
+++ b/.tool-versions
@@ -0,0 +1 @@
+python 3.10.5
diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 0000000..d653c02
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,11 @@
+FROM python:3.10-slim-bullseye
+
+RUN apt-get update -yq && \
+    apt-get install -y --no-install-recommends glusterfs-client
+
+COPY requirements.txt /
+RUN pip install --upgrade pip && \
+    pip install -r /requirements.txt
+COPY *.py /
+RUN mkdir /csi
+CMD ["python3", "-u", "/main.py"]
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..d98a68a
--- /dev/null
+++ b/README.md
@@ -0,0 +1,6 @@
+# Intro
+
+This is an early alpha implementation of the CSI spec for directories on a Gluster volume.
+Each top-level directory on the volume is treated as a CSI volume.
+
+Currently tested on HashiCorp Nomad.
diff --git a/bin/build.sh b/bin/build.sh
new file mode 100755
index 0000000..b2565c3
--- /dev/null
+++ b/bin/build.sh
@@ -0,0 +1,3 @@
+#!/usr/bin/env bash
+
+docker build -t ahelberg/gluster-dir-csi .
diff --git a/bin/gen-protos.sh b/bin/gen-protos.sh
new file mode 100755
index 0000000..b484485
--- /dev/null
+++ b/bin/gen-protos.sh
@@ -0,0 +1,3 @@
+#!/usr/bin/env bash
+
+python -m grpc_tools.protoc -Icsi-spec/ --python_out=. --grpc_python_out=. csi-spec/csi.proto
diff --git a/bin/push.sh b/bin/push.sh
new file mode 100755
index 0000000..516d449
--- /dev/null
+++ b/bin/push.sh
@@ -0,0 +1,3 @@
+#!/usr/bin/env bash
+
+docker push ahelberg/gluster-dir-csi
diff --git a/gluster-dir-csi.nomad b/gluster-dir-csi.nomad
new file mode 100644
index 0000000..1c00525
--- /dev/null
+++ b/gluster-dir-csi.nomad
@@ -0,0 +1,55 @@
+job "gluster-dir-csi" {
+  type        = "system"
+  datacenters = ["dc1"]
+
+  group "csi" {
+
+    constraint {
+      operator = "distinct_hosts"
+      value    = "true"
+    }
+
+    # restart policy for failed plugin tasks
+    restart {
+      attempts = 3
+      delay    = "30s"
+      interval = "5m"
+      mode     = "fail"
+    }
+
+    # how to handle upgrades of plugin instances
+    update {
+      max_parallel     = 1
+      health_check     = "checks"
+      min_healthy_time = "10s"
+      healthy_deadline = "9m"
+      auto_revert      = true
+      canary           = 0
+      stagger          = "30s"
+    }
+
+    task "node" {
+      driver       = "docker"
+      kill_timeout = "120s"    # allow the plugin 2 min to gracefully shut down
+      kill_signal  = "SIGTERM" # use SIGTERM to shut down the nodes
+
+      csi_plugin {
+        id        = "gluster-dir"
+        type      = "monolith"
+        mount_dir = "/csi"
+      }
+
+      # container config
+      config {
+        image      = "ahelberg/gluster-dir-csi"
+        privileged = true
+      }
+
+      # resource config
+      resources {
+        cpu    = 1024
+        memory = 500
+      }
+    }
+  }
+}
diff --git a/main.py b/main.py
new file mode 100644
index 0000000..aebdd3a
--- /dev/null
+++ b/main.py
@@ -0,0 +1,129 @@
+from concurrent import futures
+
+import logging
+
+import os
+
+import grpc
+import csi_pb2
+import csi_pb2_grpc
+
+import subprocess
+
+csi_sock_full_path = "/csi/csi.sock"
+gluster_host = "10.0.114.218"
+gluster_volume = "vol_main"
+
+
+def unmount(path):
+    print("Unmounting", path)
+    return subprocess.run(["umount", path])
+
+def mount(volume, path):
+    print("Mounting", volume, "at", path)
+    cmd = ["mount", "-t", "glusterfs", "%s:/%s/%s" % (gluster_host, gluster_volume, volume), path]
+    p = subprocess.run(cmd)
+    if p.returncode == 0:
+        return True
+
+    # Exit code 32 means the mount failed; the target may hold a stale
+    # mount, so unmount and try once more.
+    if p.returncode == 32:
+        unmount(path)
+        p = subprocess.run(cmd)
+        if p.returncode == 0:
+            return True
+    return p
+
+
+class Identity(csi_pb2_grpc.IdentityServicer):
+
+    def GetPluginInfo(self, request, context):
+        return csi_pb2.GetPluginInfoResponse(name="gluster-dir-csi", vendor_version="1")
+
+    def Probe(self, request, context):
+        return csi_pb2.ProbeResponse()
+
+    def GetPluginCapabilities(self, request, context):
+        return csi_pb2.GetPluginCapabilitiesResponse(
+            capabilities=[
+                csi_pb2.PluginCapability(
+                    service=csi_pb2.PluginCapability.Service(type=csi_pb2.PluginCapability.Service.Type.CONTROLLER_SERVICE)
+                )
+            ])
+
+class Controller(csi_pb2_grpc.ControllerServicer):
+
+    def ControllerGetCapabilities(self, request, context):
+        return csi_pb2.ControllerGetCapabilitiesResponse(capabilities=[
+            csi_pb2.ControllerServiceCapability(rpc=csi_pb2.ControllerServiceCapability.RPC(type=csi_pb2.ControllerServiceCapability.RPC.Type.CREATE_DELETE_VOLUME)),
+        ])
+
+    def CreateVolume(self, request, context):
+        name = request.name
+        print("CreateVolume", name)
+        subprocess.run(["mkdir", "-p", "/mnt/main/%s" % name], check=True)
+        volume = csi_pb2.Volume(volume_id=name)
+        return csi_pb2.CreateVolumeResponse(volume=volume)
+
+class Node(csi_pb2_grpc.NodeServicer):
+
+    def NodeGetInfo(self, request, context):
+        node_id = os.getenv("HOSTNAME")
+        return csi_pb2.NodeGetInfoResponse(node_id=node_id)
+
+    def NodeGetCapabilities(self, request, context):
+        return csi_pb2.NodeGetCapabilitiesResponse(capabilities=[
+            csi_pb2.NodeServiceCapability(rpc=csi_pb2.NodeServiceCapability.RPC(type=csi_pb2.NodeServiceCapability.RPC.Type.STAGE_UNSTAGE_VOLUME))
+        ])
+
+    def NodePublishVolume(self, request, context):
+        volume_id = request.volume_id
+        path = request.target_path
+        print("NodePublishVolume", path)
+        subprocess.run(["mkdir", "-p", path], check=True)
+        res = mount(volume_id, path)
+        if res is True:
+            return csi_pb2.NodePublishVolumeResponse()
+        context.abort(grpc.StatusCode.INTERNAL, "mount failed with exit code %d" % res.returncode)
+
+    def NodeUnpublishVolume(self, request, context):
+        path = request.target_path
+        print("NodeUnpublishVolume", path)
+        unmount(path)
+        return csi_pb2.NodeUnpublishVolumeResponse()
+
+    def NodeStageVolume(self, request, context):
+        volume_id = request.volume_id
+        path = request.staging_target_path
+        print("NodeStageVolume", path)
+        res = mount(volume_id, path)
+        if res is True:
+            return csi_pb2.NodeStageVolumeResponse()
+        context.abort(grpc.StatusCode.INTERNAL, "mount failed with exit code %d" % res.returncode)
+
+    def NodeUnstageVolume(self, request, context):
+        path = request.staging_target_path
+        print("NodeUnstageVolume", path)
+        unmount(path)
+        return csi_pb2.NodeUnstageVolumeResponse()
+
+
+def serve():
+    server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
+    csi_pb2_grpc.add_IdentityServicer_to_server(Identity(), server)
+    csi_pb2_grpc.add_ControllerServicer_to_server(Controller(), server)
+    csi_pb2_grpc.add_NodeServicer_to_server(Node(), server)
+
+    print("About to start listening on", csi_sock_full_path)
+    server.add_insecure_port("unix://%s" % csi_sock_full_path)
+    server.start()
+    print("Waiting for termination")
+    server.wait_for_termination()
+
+if __name__ == '__main__':
+    subprocess.run(["mkdir", "-p", "/mnt/main"], check=True)
+    subprocess.run(["mount", "-t", "glusterfs", "%s:/%s" % (gluster_host, gluster_volume), "/mnt/main"], check=True)
+
+    logging.basicConfig()
+    serve()
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000..5c38c41
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,5 @@
+grpcio==1.47.0
+grpcio-tools==1.47.0
+protobuf==3.20.1
+googleapis-common-protos==1.53.0
+six==1.16.0
diff --git a/test_part1.sh b/test_part1.sh
new file mode 100755
index 0000000..7abb078
--- /dev/null
+++ b/test_part1.sh
@@ -0,0 +1,2 @@
+#!/usr/bin/env bash
+./bin/build.sh && docker run --rm -ti --privileged -v "$PWD/docker-csi:/csi" ahelberg/gluster-dir-csi
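
Note (outside the diff): the Controller advertises the CREATE_DELETE_VOLUME capability, but the diff implements only CreateVolume, so DeleteVolume calls will currently fail with UNIMPLEMENTED from the generated servicer. A minimal sketch of the missing handler, following the same directory-per-volume convention under /mnt/main that CreateVolume uses; the idempotency check and the use of shutil.rmtree are assumptions, not code from this change:

import os.path
import shutil

import csi_pb2
import csi_pb2_grpc

class Controller(csi_pb2_grpc.ControllerServicer):

    def DeleteVolume(self, request, context):
        # Assumption: volume_id maps to a directory name, mirroring CreateVolume.
        path = "/mnt/main/%s" % request.volume_id
        print("DeleteVolume", path)
        # CSI requires DeleteVolume to be idempotent: deleting a volume
        # that no longer exists must still return success.
        if os.path.isdir(path):
            shutil.rmtree(path)
        return csi_pb2.DeleteVolumeResponse()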
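
A quick smoke test of the running plugin, also outside the diff: test_part1.sh bind-mounts $PWD/docker-csi to /csi in the container, and main.py listens on /csi/csi.sock, so the socket should appear at docker-csi/csi.sock on the host. Assuming the csi_pb2*.py stubs have been generated with bin/gen-protos.sh, a hypothetical check from the host could look like this:

import os

import grpc
import csi_pb2
import csi_pb2_grpc

# Connect over the unix socket exposed by the container (Linux only).
channel = grpc.insecure_channel("unix://" + os.path.abspath("docker-csi/csi.sock"))
identity = csi_pb2_grpc.IdentityStub(channel)

# Should print name "gluster-dir-csi" and vendor_version "1".
print(identity.GetPluginInfo(csi_pb2.GetPluginInfoRequest()))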