Initial commit

André Helberg 2022-07-21 12:21:17 +02:00
parent 95f91cd87d
commit 0be4bce96f
13 changed files with 221 additions and 0 deletions

2
.gitignore vendored Normal file

@@ -0,0 +1,2 @@
csi_pb2.py
csi_pb2_grpc.py

1
.tool-versions Normal file

@@ -0,0 +1 @@
python 3.10.5

12
Dockerfile Normal file

@@ -0,0 +1,12 @@
FROM python:3.10-slim-bullseye

RUN apt-get update -yq && \
    apt-get install -y --no-install-recommends glusterfs-client

COPY requirements.txt /
RUN pip install --upgrade pip
RUN pip install -r /requirements.txt

COPY *.py /
RUN mkdir /csi
CMD python3 -u /main.py

6
README.md Normal file

@@ -0,0 +1,6 @@
# Intro

This is an early, alpha-quality implementation of the CSI spec for directories on a Gluster volume.
Each top-level directory on the Gluster volume is treated as a volume.

Currently tested on HashiCorp Nomad.
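
For context, a rough sketch of how a volume from this plugin might be created under Nomad. Only the plugin id "gluster-dir" comes from gluster-dir-csi.nomad in this commit; the volume id, capability values and file name are illustrative assumptions.

#!/usr/bin/env bash
# Write a minimal CSI volume spec; everything except plugin_id is a guess.
cat > my-volume.hcl <<'EOF'
id        = "my-volume"
name      = "my-volume"
type      = "csi"
plugin_id = "gluster-dir"

capability {
  access_mode     = "multi-node-multi-writer"
  attachment_mode = "file-system"
}
EOF
# CreateVolume on the controller boils down to "mkdir -p /mnt/main/my-volume".
nomad volume create my-volume.hcl

A job would then reference the volume with a volume block in its group and a volume_mount in its task.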

Binary file not shown.

Binary file not shown.

3
bin/build.sh Executable file

@@ -0,0 +1,3 @@
#!/usr/bin/env bash
docker build -t ahelberg/gluster-dir-csi .

3
bin/gen-protos.sh Executable file

@@ -0,0 +1,3 @@
#!/usr/bin/env bash
python -m grpc_tools.protoc -Icsi-spec/ --python_out=. --grpc_python_out=. csi-spec/csi.proto

3
bin/push.sh Executable file

@@ -0,0 +1,3 @@
#!/usr/bin/env bash
docker push ahelberg/gluster-dir-csi

58
gluster-dir-csi.nomad Normal file

@@ -0,0 +1,58 @@
job "gluster-dir-csi" {
type = "system"
datacenters = ["dc1"]
group "csi" {
constraint {
operator = "distinct_hosts"
value = "true"
}
# restart policy for failed portworx tasks
restart {
attempts = 3
delay = "30s"
interval = "5m"
mode = "fail"
}
# how to handle upgrades of portworx instances
update {
max_parallel = 1
health_check = "checks"
min_healthy_time = "10s"
healthy_deadline = "9m"
auto_revert = true
canary = 0
stagger = "30s"
}
task "node" {
driver = "docker"
kill_timeout = "120s" # allow portworx 2 min to gracefully shut down
kill_signal = "SIGTERM" # use SIGTERM to shut down the nodes
csi_plugin {
id = "gluster-dir"
type = "monolith"
mount_dir = "/csi"
}
# container config
config {
#image = "alpine"
#command = "tail"
#args = ["-f", "/dev/zero"]
image = "ahelberg/gluster-dir-csi"
privileged = true
}
# resource config
resources {
cpu = 1024
memory = 500
}
}
}
}
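
Deploying the plugin is then a matter of running the job above and waiting for it to report healthy nodes; a minimal sketch, assuming the Nomad CLI can reach the cluster:

#!/usr/bin/env bash
nomad job run gluster-dir-csi.nomad
nomad plugin status gluster-dir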

126
main.py Normal file

@@ -0,0 +1,126 @@
from concurrent import futures
import logging
import os
import subprocess

import grpc

import csi_pb2
import csi_pb2_grpc

#csi_sock_full_path = "/Users/andre/Projects/opensource/gluster-dir-csi/csi.sock"
csi_sock_full_path = "/csi/csi.sock"

gluster_host = "10.0.114.218"
gluster_volume = "vol_main"


def unmount(path):
    print("Unmounting", path)
    return subprocess.run(["umount", path])


def mount(volume, path):
    print("Mounting", volume, "at", path)
    p = subprocess.run(["mount", "-t", "glusterfs", "%s:/%s/%s" % (gluster_host, gluster_volume, volume), path])
    if p.returncode == 0:
        return True
    # Exit code 32 can mean the target is already mounted (e.g. a stale mount); treat it as success.
    if p.returncode == 32:
        return True
    return p


class Identity(csi_pb2_grpc.Identity):

    def GetPluginInfo(self, request, context):
        return csi_pb2.GetPluginInfoResponse(name='gluster-dir-csi', vendor_version="1")

    def Probe(self, request, context):
        return csi_pb2.ProbeResponse()

    def GetPluginCapabilities(self, request, context):
        return csi_pb2.GetPluginCapabilitiesResponse(
            capabilities=[
                csi_pb2.PluginCapability(
                    service=csi_pb2.PluginCapability.Service(type=csi_pb2.PluginCapability.Service.Type.CONTROLLER_SERVICE)
                )
            ])


class Controller(csi_pb2_grpc.Controller):

    def ControllerGetCapabilities(self, request, context):
        return csi_pb2.ControllerGetCapabilitiesResponse(capabilities=[
            csi_pb2.ControllerServiceCapability(rpc=csi_pb2.ControllerServiceCapability.RPC(type=csi_pb2.ControllerServiceCapability.RPC.Type.CREATE_DELETE_VOLUME)),
        ])

    def CreateVolume(self, request, context):
        name = request.name
        print("CreateVolume", name)
        # A volume is just a top-level directory on the gluster volume.
        subprocess.run(["mkdir", "-p", "/mnt/main/%s" % name])
        volume = csi_pb2.Volume(volume_id=name)
        return csi_pb2.CreateVolumeResponse(volume=volume)


class Node(csi_pb2_grpc.Node):

    def NodeGetInfo(self, request, context):
        node_id = os.getenv("HOSTNAME")
        return csi_pb2.NodeGetInfoResponse(node_id=node_id)

    def NodeGetCapabilities(self, request, context):
        return csi_pb2.NodeGetCapabilitiesResponse(capabilities=[
            csi_pb2.NodeServiceCapability(rpc=csi_pb2.NodeServiceCapability.RPC(type=csi_pb2.NodeServiceCapability.RPC.Type.STAGE_UNSTAGE_VOLUME))
        ])

    def NodePublishVolume(self, request, context):
        volume_id = request.volume_id
        path = request.target_path
        print("NodePublishVolume", path)
        subprocess.run(["mkdir", "-p", path], check=True)
        res = mount(volume_id, path)
        if res is True:
            return csi_pb2.NodePublishVolumeResponse()
        print(res)
        context.abort(grpc.StatusCode.INTERNAL, "Failed to mount volume %s at %s" % (volume_id, path))

    def NodeUnpublishVolume(self, request, context):
        path = request.target_path
        print("NodeUnpublishVolume", path)
        unmount(path)
        return csi_pb2.NodeUnpublishVolumeResponse()

    def NodeStageVolume(self, request, context):
        volume_id = request.volume_id
        path = request.staging_target_path
        print("NodeStageVolume", path)
        res = mount(volume_id, path)
        if res is True:
            return csi_pb2.NodeStageVolumeResponse()
        print(res)
        context.abort(grpc.StatusCode.INTERNAL, "Failed to stage volume %s at %s" % (volume_id, path))

    def NodeUnstageVolume(self, request, context):
        path = request.staging_target_path
        print("NodeUnstageVolume", path)
        unmount(path)
        return csi_pb2.NodeUnstageVolumeResponse()


def serve():
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
    csi_pb2_grpc.add_IdentityServicer_to_server(Identity(), server)
    csi_pb2_grpc.add_ControllerServicer_to_server(Controller(), server)
    csi_pb2_grpc.add_NodeServicer_to_server(Node(), server)
    print("About to start listening on", csi_sock_full_path)
    server.add_insecure_port("unix://%s" % csi_sock_full_path)
    server.start()
    print("Waiting for termination")
    server.wait_for_termination()


if __name__ == '__main__':
    # Mount the whole gluster volume once; per-volume directories live under /mnt/main.
    subprocess.run(["mkdir", "-p", "/mnt/main"])
    subprocess.run(["mount", "-t", "glusterfs", "%s:/%s" % (gluster_host, gluster_volume), "/mnt/main"], check=True)
    logging.basicConfig()
    serve()

5
requirements.txt Normal file

@@ -0,0 +1,5 @@
grpcio==1.47.0
grpcio-tools==1.47.0
protobuf==3.20.1
googleapis-common-protos==1.53.0
six==1.16.0

2
test_part1.sh Executable file

@@ -0,0 +1,2 @@
#!/usr/bin/env bash
./bin/build.sh && docker run --rm -ti --privileged -v "$PWD/docker-csi:/csi" ahelberg/gluster-dir-csi