Module tests.test_rwx
Functions
def test_encrypted_rwx_volume(client,
core_api,
statefulset,
storage_class,
crypto_secret,
pvc,
make_deployment_with_pvc)-
Expand source code
@pytest.mark.v2_volume_test # NOQA def test_encrypted_rwx_volume(client, core_api, statefulset, storage_class, crypto_secret, pvc, make_deployment_with_pvc): # NOQA """ Test creating encrypted rwx volume and use the secret in non longhorn-system namespace. 1. Create crypto secret in non longhorn-system namespace. 2. Create a storage class. 3. Create a deployment with a PVC and the pods should be able to running. """ namespace = 'default' # Create crypto secret secret = crypto_secret(namespace) create_crypto_secret(secret, namespace) # Create storage class storage_class['reclaimPolicy'] = 'Delete' storage_class['parameters']['csi.storage.k8s.io/provisioner-secret-name'] = 'longhorn-crypto' # NOQA storage_class['parameters']['csi.storage.k8s.io/provisioner-secret-namespace'] = namespace # NOQA storage_class['parameters']['csi.storage.k8s.io/node-publish-secret-name'] = 'longhorn-crypto' # NOQA storage_class['parameters']['csi.storage.k8s.io/node-publish-secret-namespace'] = namespace # NOQA storage_class['parameters']['csi.storage.k8s.io/node-stage-secret-name'] = 'longhorn-crypto' # NOQA storage_class['parameters']['csi.storage.k8s.io/node-stage-secret-namespace'] = namespace # NOQA storage_class['parameters']['encrypted'] = 'true' create_storage_class(storage_class) # Create deployment with PVC pvc_name = 'pvc-deployment-with-encrypted-rwx-volume' pvc['metadata']['name'] = pvc_name pvc['spec']['storageClassName'] = storage_class['metadata']['name'] pvc['spec']['accessModes'] = ['ReadWriteMany'] core_api.create_namespaced_persistent_volume_claim( body=pvc, namespace='default') deployment = make_deployment_with_pvc( 'pvc-deployment-with-encrypted-rwx-volume', pvc_name, replicas=3) apps_api = get_apps_api_client() create_and_wait_deployment(apps_api, deployment) # Clean up deployment and volume delete_and_wait_deployment(apps_api, deployment["metadata"]["name"]) delete_and_wait_pvc(core_api, pvc_name)Test creating encrypted rwx volume and use the secret in non longhorn-system namespace.
- Create crypto secret in non longhorn-system namespace.
- Create a storage class.
- Create a deployment with a PVC and the pods should be able to running.
def test_restore_rwo_volume_to_rwx(set_random_backupstore,
client,
core_api,
volume_name,
pvc,
csi_pv,
pod_make,
make_deployment_with_pvc)-
Expand source code
@pytest.mark.v2_volume_test # NOQA def test_restore_rwo_volume_to_rwx(set_random_backupstore, client, core_api, volume_name, pvc, csi_pv, pod_make, make_deployment_with_pvc): # NOQA """ Test restoring a rwo to a rwx volume. 1. Create a volume with 'accessMode' rwo. 2. Create a PV and a PVC with access mode 'readwriteonce' and attach to the volume. 3. Create a pod and attach to the PVC. 4. Write some data into the pod and compute md5sum. 5. Take a backup of the volume. 6. Restore the backup with 'accessMode' rwx. 7. Create PV and PVC and attach to 2 pods. 8. Verify the data. """ data_path = "/data/test" pod_name, pv_name, pvc_name, md5sum = \ prepare_pod_with_data_in_mb(client, core_api, csi_pv, pvc, pod_make, volume_name, data_size_in_mb=DATA_SIZE_IN_MB_1, data_path=data_path) snap = create_snapshot(client, volume_name) volume = client.by_id_volume(volume_name) volume.snapshotBackup(name=snap.name) wait_for_backup_completion(client, volume_name, snap.name) bv, b1 = find_backup(client, volume_name, snap.name) restore_volume_name = 'restored-rwx-volume' restore_pv_name = restore_volume_name + "-pv" restore_pvc_name = restore_volume_name + "-pvc" client.create_volume(name=restore_volume_name, size=str(1 * Gi), numberOfReplicas=3, fromBackup=b1.url, accessMode='rwx', dataEngine=DATA_ENGINE) wait_for_volume_creation(client, restore_volume_name) restore_volume = wait_for_volume_detached(client, restore_volume_name) create_pv_for_volume(client, core_api, restore_volume, restore_pv_name) create_pvc_for_volume(client, core_api, restore_volume, restore_pvc_name) deployment = make_deployment_with_pvc( 'deployment-multi-pods-test', restore_pvc_name, replicas=2) apps_api = get_apps_api_client() create_and_wait_deployment(apps_api, deployment) deployment_label_selector = \ "name=" + deployment["metadata"]["labels"]["name"] deployment_pod_list = \ core_api.list_namespaced_pod(namespace="default", label_selector=deployment_label_selector) pod_name_1 = deployment_pod_list.items[0].metadata.name pod_name_2 = deployment_pod_list.items[1].metadata.name md5sum_pod1 = get_pod_data_md5sum(core_api, pod_name_1, data_path) md5sum_pod2 = get_pod_data_md5sum(core_api, pod_name_2, data_path) assert md5sum == md5sum_pod1 == md5sum_pod2Test restoring a rwo to a rwx volume.
- Create a volume with 'accessMode' rwo.
- Create a PV and a PVC with access mode 'readwriteonce' and attach to the volume.
- Create a pod and attach to the PVC.
- Write some data into the pod and compute md5sum.
- Take a backup of the volume.
- Restore the backup with 'accessMode' rwx.
- Create PV and PVC and attach to 2 pods.
- Verify the data.
-
Expand source code
@pytest.mark.v2_volume_test # NOQA def test_rwx_delete_share_manager_pod(client, core_api, statefulset, storage_class): # NOQA """ Test moving of Share manager pod from one node to another. 1. Create a StatefulSet of 1 pod with VolumeClaimTemplate where accessMode is 'RWX'. 2. Wait for StatefulSet to come up healthy. 3. Write data and compute md5sum. 4. Delete the share manager pod. 5. The workload pod should still be functioning and the volume should remain attached. 6. Check the data md5sum in statefulSet. 7. Write more data to it and compute md5sum. 8. Check the data md5sum in share manager volume. """ create_storage_class(storage_class) statefulset_name = 'statefulset-delete-share-manager-pods-test' statefulset['metadata']['name'] = \ statefulset['spec']['selector']['matchLabels']['app'] = \ statefulset['spec']['serviceName'] = \ statefulset['spec']['template']['metadata']['labels']['app'] = \ statefulset_name statefulset['spec']['replicas'] = 1 statefulset['spec']['volumeClaimTemplates'][0]['spec']['storageClassName']\ = storage_class['metadata']['name'] statefulset['spec']['volumeClaimTemplates'][0]['spec']['accessModes'] \ = ['ReadWriteMany'] create_and_wait_statefulset(statefulset) pod_name = statefulset_name + '-' + '0' pvc_name = \ statefulset['spec']['volumeClaimTemplates'][0]['metadata']['name'] \ + '-' + statefulset_name + '-0' pv_name = get_volume_name(core_api, pvc_name) share_manager_name = 'share-manager-' + pv_name test_data = generate_random_data(VOLUME_RWTEST_SIZE) write_pod_volume_data(core_api, pod_name, test_data, filename='test1') delete_and_wait_pod(core_api, share_manager_name, namespace=LONGHORN_NAMESPACE) wait_for_pod_remount(core_api, pod_name) test_data_2 = generate_random_data(VOLUME_RWTEST_SIZE) write_pod_volume_data(core_api, pod_name, test_data_2, filename='test2') command1 = 'cat /export/' + pv_name + '/test1' share_manager_data_1 = exec_command_in_pod( core_api, command1, share_manager_name, LONGHORN_NAMESPACE) assert test_data == share_manager_data_1 command2 = 'cat /export/' + pv_name + '/test2' share_manager_data_2 = exec_command_in_pod( core_api, command2, share_manager_name, LONGHORN_NAMESPACE) assert test_data_2 == share_manager_data_2Test moving of Share manager pod from one node to another.
- Create a StatefulSet of 1 pod with VolumeClaimTemplate where accessMode is 'RWX'.
- Wait for StatefulSet to come up healthy.
- Write data and compute md5sum.
- Delete the share manager pod.
- The workload pod should still be functioning and the volume should remain attached.
- Check the data md5sum in statefulSet.
- Write more data to it and compute md5sum.
- Check the data md5sum in share manager volume.
def test_rwx_deployment_with_multi_pods(client, core_api, pvc, make_deployment_with_pvc, storage_class)-
Expand source code
@pytest.mark.v2_volume_test # NOQA def test_rwx_deployment_with_multi_pods(client, core_api, pvc, make_deployment_with_pvc, storage_class): # NOQA """ Test deployment of 2 pods with same PVC. 1. Create a volume with 'accessMode' rwx. 2. Create a PV and a PVC with access mode 'readwritemany' and attach to the volume. 3. Create a deployment of 2 pods with PVC created 4. Wait for 2 pods to come up healthy. 5. Write data in both pods and compute md5sum. 6. Check the data md5sum in the share manager pod. """ create_storage_class(storage_class) pvc_name = 'pvc-deployment-multi-pods-test' pvc['metadata']['name'] = pvc_name pvc['spec']['storageClassName'] = storage_class['metadata']['name'] pvc['spec']['accessModes'] = ['ReadWriteMany'] core_api.create_namespaced_persistent_volume_claim( body=pvc, namespace='default') deployment = make_deployment_with_pvc( 'deployment-multi-pods-test', pvc_name, replicas=2) apps_api = get_apps_api_client() create_and_wait_deployment(apps_api, deployment) pv_name = get_volume_name(core_api, pvc_name) share_manager_name = 'share-manager-' + pv_name deployment_label_selector = "name=" + \ deployment["metadata"]["labels"]["name"] deployment_pod_list = \ core_api.list_namespaced_pod(namespace="default", label_selector=deployment_label_selector) assert deployment_pod_list.items.__len__() == 2 pod_name_1 = deployment_pod_list.items[0].metadata.name test_data_1 = generate_random_data(VOLUME_RWTEST_SIZE) write_pod_volume_data(core_api, pod_name_1, test_data_1, filename='test1') pod_name_2 = deployment_pod_list.items[1].metadata.name command = 'cat /data/test1' pod_data_2 = exec_command_in_pod(core_api, command, pod_name_2, 'default') assert test_data_1 == pod_data_2 test_data_2 = generate_random_data(VOLUME_RWTEST_SIZE) write_pod_volume_data(core_api, pod_name_2, test_data_2, filename='test2') command = 'cat /export' + '/' + pv_name + '/' + 'test1' share_manager_data_1 = exec_command_in_pod( core_api, command, share_manager_name, LONGHORN_NAMESPACE) assert test_data_1 == share_manager_data_1 command = 'cat /export' + '/' + pv_name + '/' + 'test2' share_manager_data_2 = exec_command_in_pod( core_api, command, share_manager_name, LONGHORN_NAMESPACE) assert test_data_2 == share_manager_data_2Test deployment of 2 pods with same PVC.
- Create a volume with 'accessMode' rwx.
- Create a PV and a PVC with access mode 'readwritemany' and attach to the volume.
- Create a deployment of 2 pods with PVC created
- Wait for 2 pods to come up healthy.
- Write data in both pods and compute md5sum.
- Check the data md5sum in the share manager pod.
def test_rwx_multi_statefulset_with_same_pvc(client, core_api, pvc, statefulset, pod, storage_class)-
Expand source code
@pytest.mark.v2_volume_test # NOQA def test_rwx_multi_statefulset_with_same_pvc(client, core_api, pvc, statefulset, pod, storage_class): # NOQA """ Test writing of data into a volume from multiple pods using same PVC 1. Create a volume with 'accessMode' rwx. 2. Create a PV and a PVC with access mode 'readwritemany' and attach to the volume. 3. Deploy a StatefulSet of 2 pods with the existing PVC above created. 4. Wait for both pods to come up. 5. Create a pod with the existing PVC above created. 6. Wait for StatefulSet to come up healthy. 7. Write data all three pods and compute md5sum. 8. Check the data md5sum in the share manager pod. """ create_storage_class(storage_class) pvc_name = 'pvc-multi-pods-test' statefulset_name = 'statefulset-rwx-same-pvc-test' pod_name = 'pod-rwx-same-pvc-test' pvc['metadata']['name'] = pvc_name pvc['spec']['storageClassName'] = storage_class['metadata']['name'] pvc['spec']['accessModes'] = ['ReadWriteMany'] core_api.create_namespaced_persistent_volume_claim( body=pvc, namespace='default') statefulset['metadata']['name'] = \ statefulset['spec']['selector']['matchLabels']['app'] = \ statefulset['spec']['serviceName'] = \ statefulset['spec']['template']['metadata']['labels']['app'] = \ statefulset_name statefulset['spec']['template']['spec']['volumes'] = \ [create_pvc_spec(pvc_name)] del statefulset['spec']['volumeClaimTemplates'] create_and_wait_statefulset(statefulset) pv_name = get_volume_name(core_api, pvc_name) share_manager_name = 'share-manager-' + pv_name test_data = generate_random_data(VOLUME_RWTEST_SIZE) write_pod_volume_data(core_api, statefulset_name + '-0', test_data, filename='test1') assert test_data == read_volume_data(core_api, statefulset_name + '-1', filename='test1') pod['metadata']['name'] = pod_name pod['spec']['volumes'] = [create_pvc_spec(pvc_name)] create_and_wait_pod(core_api, pod) assert test_data == read_volume_data(core_api, pod_name, filename='test1') test_data_2 = generate_random_data(VOLUME_RWTEST_SIZE) write_pod_volume_data(core_api, pod_name, test_data_2, filename='test2') command1 = 'cat /export' + '/' + pv_name + '/' + 'test1' command2 = 'cat /export' + '/' + pv_name + '/' + 'test2' assert test_data == exec_command_in_pod( core_api, command1, share_manager_name, LONGHORN_NAMESPACE) assert test_data_2 == exec_command_in_pod( core_api, command2, share_manager_name, LONGHORN_NAMESPACE)Test writing of data into a volume from multiple pods using same PVC
- Create a volume with 'accessMode' rwx.
- Create a PV and a PVC with access mode 'readwritemany' and attach to the volume.
- Deploy a StatefulSet of 2 pods with the existing PVC above created.
- Wait for both pods to come up.
- Create a pod with the existing PVC above created.
- Wait for StatefulSet to come up healthy.
- Write data all three pods and compute md5sum.
- Check the data md5sum in the share manager pod.
def test_rwx_offline_expansion(client, core_api, pvc, make_deployment_with_pvc, storage_class)-
Expand source code
@pytest.mark.v2_volume_test def test_rwx_offline_expansion(client, core_api, pvc, make_deployment_with_pvc, storage_class): # NOQA """ Related issue : https://github.com/longhorn/longhorn/issues/2181 https://github.com/longhorn/longhorn/issues/2484 This test case does not cover the UI test mentioned in the related issue's test steps." Given - Create a rwx pvc using longhorn storage class of size 1 Gi. And - Attach it to a workload (deployment) and write some data. - Scale down the workload, wait volume detached - Share manager pod will terminate automatically - Expand the volume to 4 Gi, wait exoansion complete When - Scale up workload Then - Share manager pod created - Volume healthy And - 1.5 Gi of data is successfully written to the expanded volume """ create_storage_class(storage_class) pvc_name = 'pvc-deployment-rwx-expand-test' pvc['metadata']['name'] = pvc_name pvc['spec']['storageClassName'] = storage_class['metadata']['name'] pvc['spec']['accessModes'] = ['ReadWriteMany'] pvc['spec']['resources']['requests']['storage'] = str(1 * Gi) core_api.create_namespaced_persistent_volume_claim( body=pvc, namespace='default') deployment = make_deployment_with_pvc( 'deployment-rwx-expand-test', pvc_name, replicas=1) apps_api = get_apps_api_client() create_and_wait_deployment(apps_api, deployment) pv_name = get_volume_name(core_api, pvc_name) deployment_label_selector = "name=" + \ deployment["metadata"]["labels"]["name"] deployment_pod_list = \ core_api.list_namespaced_pod(namespace="default", label_selector=deployment_label_selector) pod_name = deployment_pod_list.items[0].metadata.name test_data = generate_random_data(VOLUME_RWTEST_SIZE) write_pod_volume_data(core_api, pod_name, test_data, filename='test') deployment['spec']['replicas'] = 0 apps_api.patch_namespaced_deployment(body=deployment, namespace='default', name=deployment["metadata"]["name"]) wait_for_volume_detached(client, pv_name) expand_and_wait_for_pvc(core_api, pvc, EXPANDED_VOLUME_SIZE*Gi) wait_for_volume_expansion(client, pv_name) wait_for_volume_detached(client, pv_name) deployment['spec']['replicas'] = 1 apps_api.patch_namespaced_deployment(body=deployment, namespace='default', name=deployment["metadata"]["name"]) wait_deployment_replica_ready(apps_api, deployment["metadata"]["name"], 1) wait_for_volume_healthy(client, pv_name) deployment_pod_list = \ core_api.list_namespaced_pod(namespace="default", label_selector=deployment_label_selector) pod_name = deployment_pod_list.items[0].metadata.name # check data written before expansion resp = read_volume_data(core_api, pod_name, "test") assert resp == test_data data_size_in_mb = 1536 write_pod_volume_random_data(core_api, pod_name, "/data/test2", data_size_in_mb) command = 'stat -c \"%s\" /data/test2' data_size_in_pod = exec_command_in_pod(core_api, command, pod_name, 'default') assert int(data_size_in_pod)/1024/1024 == data_size_in_mbRelated issue : https://github.com/longhorn/longhorn/issues/2181 https://github.com/longhorn/longhorn/issues/2484
This test case does not cover the UI test mentioned in the related issue's test steps."
Given - Create a rwx pvc using longhorn storage class of size 1 Gi.
And - Attach it to a workload (deployment) and write some data. - Scale down the workload, wait volume detached - Share manager pod will terminate automatically - Expand the volume to 4 Gi, wait exoansion complete
When - Scale up workload
Then - Share manager pod created - Volume healthy
And - 1.5 Gi of data is successfully written to the expanded volume
def test_rwx_online_expansion()-
Expand source code
@pytest.mark.skip(reason="TODO") def test_rwx_online_expansion(): # NOQA """ Related issue : https://github.com/longhorn/longhorn/issues/2181 https://github.com/longhorn/longhorn/issues/2484 This test case does not cover the UI test mentioned in the related issue's test steps." Given - Create a rwx pvc using longhorn storage class of size 1 Gi. And - Attach it to a workload (deployment) and write some data. When - Expand the volume to 5 Gi Then - Expansion of volume is completed. - Share manager pod not restart And - 1.5 Gi of data is successfully written to the expanded volume. """ passRelated issue : https://github.com/longhorn/longhorn/issues/2181 https://github.com/longhorn/longhorn/issues/2484
This test case does not cover the UI test mentioned in the related issue's test steps."
Given - Create a rwx pvc using longhorn storage class of size 1 Gi.
And - Attach it to a workload (deployment) and write some data.
When - Expand the volume to 5 Gi
Then - Expansion of volume is completed. - Share manager pod not restart
And - 1.5 Gi of data is successfully written to the expanded volume.
def test_rwx_parallel_writing(client, core_api, statefulset, pod, storage_class)-
Expand source code
@pytest.mark.v2_volume_test # NOQA def test_rwx_parallel_writing(client, core_api, statefulset, pod, storage_class): # NOQA """ Test parallel writing of data 1. Create a StatefulSet of 1 pod with VolumeClaimTemplate where accessMode is 'RWX'. 2. Wait for StatefulSet to come up healthy. 3. Create another statefulSet with same pvc which got created with first statefulSet. 4. Wait for statefulSet to come up healthy. 5. Start writing 800 MB data in first statefulSet `file 1` and start writing 500 MB data in second statefulSet `file 2`. 6. Compute md5sum. 7. Check the data md5sum in share manager pod volume """ create_storage_class(storage_class) statefulset_name = 'statefulset-rwx-parallel-writing-test' statefulset['metadata']['name'] = \ statefulset['spec']['selector']['matchLabels']['app'] = \ statefulset['spec']['serviceName'] = \ statefulset['spec']['template']['metadata']['labels']['app'] = \ statefulset_name statefulset['spec']['replicas'] = 1 statefulset['spec']['volumeClaimTemplates'][0]['spec']['storageClassName']\ = storage_class['metadata']['name'] statefulset['spec']['volumeClaimTemplates'][0]['spec']['accessModes'] \ = ['ReadWriteMany'] create_and_wait_statefulset(statefulset) statefulset_pod_name = statefulset_name + '-0' pvc_name = \ statefulset['spec']['volumeClaimTemplates'][0]['metadata']['name'] \ + '-' + statefulset_name + '-0' pv_name = get_volume_name(core_api, pvc_name) share_manager_name = 'share-manager-' + pv_name pod_name = 'pod-parallel-write-test' pod['metadata']['name'] = pod_name pod['spec']['volumes'] = [create_pvc_spec(pvc_name)] create_and_wait_pod(core_api, pod) with Pool(2) as p: p.map(write_data_into_pod, [statefulset_pod_name + ':/data/test1', pod_name + ':/data/test2']) md5sum1 = get_pod_data_md5sum(core_api, statefulset_pod_name, 'data/test1') md5sum2 = get_pod_data_md5sum(core_api, pod_name, 'data/test2') command1 = 'md5sum /export' + '/' + pv_name + '/' + 'test1' + \ " | awk '{print $1}'" share_manager_data1 = exec_command_in_pod(core_api, command1, share_manager_name, LONGHORN_NAMESPACE) assert md5sum1 == share_manager_data1 command2 = 'md5sum /export' + '/' + pv_name + '/' + 'test2' + \ " | awk '{print $1}'" share_manager_data2 = exec_command_in_pod(core_api, command2, share_manager_name, LONGHORN_NAMESPACE) assert md5sum2 == share_manager_data2Test parallel writing of data
- Create a StatefulSet of 1 pod with VolumeClaimTemplate where accessMode is 'RWX'.
- Wait for StatefulSet to come up healthy.
- Create another statefulSet with same pvc which got created with first statefulSet.
- Wait for statefulSet to come up healthy.
- Start writing 800 MB data in first statefulSet
file 1and start writing 500 MB data in second statefulSetfile 2. - Compute md5sum.
- Check the data md5sum in share manager pod volume
def test_rwx_statefulset_scale_down_up(client, core_api, statefulset, storage_class)-
Expand source code
@pytest.mark.v2_volume_test # NOQA def test_rwx_statefulset_scale_down_up(client, core_api, statefulset, storage_class): # NOQA """ Test Scaling up and down of pods attached to rwx volume. 1. Create a StatefulSet of 2 pods with VolumeClaimTemplate where accessMode is 'RWX'. 2. Wait for StatefulSet pods to come up healthy. 3. Write data and compute md5sum in the both pods. 4. Delete the pods. 5. Wait for the pods to be terminated. 6. Verify the share manager pods are no longer available and the volume is detached. 6. Recreate the pods 7. Wait for new pods to come up. 8. Check the data md5sum in new pods. """ create_storage_class(storage_class) statefulset_name = 'statefulset-rwx-scale-down-up-test' share_manager_name = [] statefulset['metadata']['name'] = \ statefulset['spec']['selector']['matchLabels']['app'] = \ statefulset['spec']['serviceName'] = \ statefulset['spec']['template']['metadata']['labels']['app'] = \ statefulset_name statefulset['spec']['volumeClaimTemplates'][0]['spec']['storageClassName']\ = storage_class['metadata']['name'] statefulset['spec']['volumeClaimTemplates'][0]['spec']['accessModes'] \ = ['ReadWriteMany'] create_and_wait_statefulset(statefulset) for i in range(2): pvc_name = \ statefulset['spec']['volumeClaimTemplates'][0]['metadata']['name']\ + '-' + statefulset_name + '-' + str(i) pv_name = get_volume_name(core_api, pvc_name) assert pv_name is not None share_manager_name.append('share-manager-' + pv_name) check_pod_existence(core_api, share_manager_name[i], namespace=LONGHORN_NAMESPACE) md5sum_pod = [] for i in range(2): test_pod_name = statefulset_name + '-' + str(i) test_data = generate_random_data(VOLUME_RWTEST_SIZE) write_pod_volume_data(core_api, test_pod_name, test_data) md5sum_pod.append(test_data) statefulset['spec']['replicas'] = replicas = 0 apps_api = get_apps_api_client() apps_api.patch_namespaced_stateful_set( name=statefulset_name, namespace='default', body={ 'spec': { 'replicas': replicas } }) for i in range(DEFAULT_STATEFULSET_TIMEOUT): s_set = apps_api.read_namespaced_stateful_set( name=statefulset['metadata']['name'], namespace='default') # s_set is none if statefulset is not yet created if s_set is not None and s_set.status.ready_replicas == replicas or \ (replicas == 0 and not s_set.status.ready_replicas): break time.sleep(DEFAULT_STATEFULSET_INTERVAL) found = True for i in range(RETRY_COUNTS): pods = core_api.list_namespaced_pod(namespace=LONGHORN_NAMESPACE) pod_names = [] for item in pods.items: pod_names.append(item.metadata.name) if share_manager_name[0] not in pod_names and \ share_manager_name[1] not in pod_names: found = False break time.sleep(RETRY_INTERVAL) assert not found statefulset['spec']['replicas'] = replicas = 2 apps_api = get_apps_api_client() apps_api.patch_namespaced_stateful_set( name=statefulset_name, namespace='default', body={ 'spec': { 'replicas': replicas } }) wait_statefulset(statefulset) for i in range(2): test_pod_name = statefulset_name + '-' + str(i) command = 'cat /data/test' pod_data = exec_command_in_pod(core_api, command, test_pod_name, 'default') assert pod_data == md5sum_pod[i]Test Scaling up and down of pods attached to rwx volume.
- Create a StatefulSet of 2 pods with VolumeClaimTemplate where accessMode is 'RWX'.
- Wait for StatefulSet pods to come up healthy.
- Write data and compute md5sum in the both pods.
- Delete the pods.
- Wait for the pods to be terminated.
- Verify the share manager pods are no longer available and the volume is detached.
- Recreate the pods
- Wait for new pods to come up.
- Check the data md5sum in new pods.
def test_rwx_volume_mount_options(client, core_api, storage_class, pvc, make_deployment_with_pvc)-
Expand source code
@pytest.mark.v2_volume_test # NOQA def test_rwx_volume_mount_options(client, core_api, storage_class, pvc, make_deployment_with_pvc): # NOQA """ Test creating rwx volume with custom mount options non longhorn-system namespace. 1. Create a storage class with nfsOptions parameter. 2. Create a deployment with a PVC and the pods should be able to run. 3. Check the mounts on the deployment pods. """ # Create storage class storage_class['reclaimPolicy'] = 'Delete' storage_class['parameters']['nfsOptions'] = 'vers=4.2,soft,noresvport,timeo=600,retrans=4' # NOQA create_storage_class(storage_class) # Create deployment with PVC pvc_name = 'pvc-deployment-with-custom-mount-options-volume' pvc['metadata']['name'] = pvc_name pvc['spec']['storageClassName'] = storage_class['metadata']['name'] pvc['spec']['accessModes'] = ['ReadWriteMany'] core_api.create_namespaced_persistent_volume_claim( body=pvc, namespace='default') deployment = make_deployment_with_pvc( 'deployment-with-custom-mount-options-volume', pvc_name, replicas=2) apps_api = get_apps_api_client() create_and_wait_deployment(apps_api, deployment) # Check mount options on deployment pods deployment_label_selector = "name=" + \ deployment["metadata"]["labels"]["name"] deployment_pod_list = \ core_api.list_namespaced_pod(namespace="default", label_selector=deployment_label_selector) assert deployment_pod_list.items.__len__() == 2 pod_name_1 = deployment_pod_list.items[0].metadata.name pod_name_2 = deployment_pod_list.items[1].metadata.name command = "cat /proc/mounts | grep 'nfs'" mount_options_1 = exec_command_in_pod(core_api, command, pod_name_1, 'default') mount_options_2 = exec_command_in_pod(core_api, command, pod_name_2, 'default') # print(f'mount_options_1={mount_options_1}') # print(f'mount_options_2={mount_options_2}') assert "vers=4.2" in mount_options_1 assert "vers=4.2" in mount_options_2 # Clean up deployment and volume delete_and_wait_deployment(apps_api, deployment["metadata"]["name"]) delete_and_wait_pvc(core_api, pvc_name)Test creating rwx volume with custom mount options non longhorn-system namespace.
- Create a storage class with nfsOptions parameter.
- Create a deployment with a PVC and the pods should be able to run.
- Check the mounts on the deployment pods.
def test_rwx_with_statefulset_multi_pods(client, core_api, statefulset, storage_class)-
Expand source code
@pytest.mark.v2_volume_test # NOQA def test_rwx_with_statefulset_multi_pods(client, core_api, statefulset, storage_class): # NOQA """ Test creation of share manager pod and rwx volumes from 2 pods. 1. Create a StatefulSet of 2 pods with VolumeClaimTemplate where accessMode is 'RWX'. 2. Wait for both pods to come up running. 3. Verify there are two share manager pods created in the longhorn namespace and they have the directory with the PV name in the path `/export` 4. Write data in both pods and compute md5sum. 5. Compare md5sum of the data with the data written the share manager. """ create_storage_class(storage_class) statefulset_name = 'statefulset-rwx-multi-pods-test' share_manager_name = [] volumes_name = [] statefulset['metadata']['name'] = \ statefulset['spec']['selector']['matchLabels']['app'] = \ statefulset['spec']['serviceName'] = \ statefulset['spec']['template']['metadata']['labels']['app'] = \ statefulset_name statefulset['spec']['volumeClaimTemplates'][0]['spec']['storageClassName']\ = storage_class['metadata']['name'] statefulset['spec']['volumeClaimTemplates'][0]['spec']['accessModes'] \ = ['ReadWriteMany'] create_and_wait_statefulset(statefulset) for i in range(2): pvc_name = \ statefulset['spec']['volumeClaimTemplates'][0]['metadata']['name']\ + '-' + statefulset_name + '-' + str(i) pv_name = get_volume_name(core_api, pvc_name) assert pv_name is not None volumes_name.append(pv_name) share_manager_name.append('share-manager-' + pv_name) check_pod_existence(core_api, share_manager_name[i], namespace=LONGHORN_NAMESPACE) command = "ls /export | grep -i 'pvc' | wc -l" assert exec_command_in_pod( core_api, command, share_manager_name[0], LONGHORN_NAMESPACE) == '1' assert exec_command_in_pod( core_api, command, share_manager_name[1], LONGHORN_NAMESPACE) == '1' md5sum_pod = [] for i in range(2): test_pod_name = statefulset_name + '-' + str(i) test_data = generate_random_data(VOLUME_RWTEST_SIZE) write_pod_volume_data(core_api, test_pod_name, test_data) md5sum_pod.append(test_data) for i in range(2): command = 'cat /export' + '/' + volumes_name[i] + '/' + 'test' pod_data = exec_command_in_pod( core_api, command, share_manager_name[i], LONGHORN_NAMESPACE) assert pod_data == md5sum_pod[i]Test creation of share manager pod and rwx volumes from 2 pods.
- Create a StatefulSet of 2 pods with VolumeClaimTemplate where accessMode is 'RWX'.
- Wait for both pods to come up running.
- Verify there are two share manager pods created in the longhorn
namespace and they have the directory with the PV name in the
path
/export - Write data in both pods and compute md5sum.
- Compare md5sum of the data with the data written the share manager.
def test_sm_pod_recreate_backoff(client, core_api, statefulset, storage_class)-
Expand source code
@pytest.mark.v2_volume_test # NOQA def test_sm_pod_recreate_backoff(client, core_api, statefulset, storage_class): # NOQA """ Related to https://github.com/longhorn/longhorn/issues/11939 Test that repeatedly recreating the share manager pod doesn't cause the longhorn-manager pod to fail because of nil pointer dereference. 1. Create a StatefulSet with RWX volume. 2. Get the share manager pod. 3. Find the longhorn-manager pod on the same node and store its restart count. 4. Repeatedly delete the share manager pod to simulate pod crashes and to cause longhorn-manager to recreate it. 5. Verify the longhorn-manager pod hasn't crashed by checking its restart count. """ create_storage_class(storage_class) statefulset_name = statefulset['metadata']['name'] statefulset['spec']['replicas'] = 1 statefulset['spec']['volumeClaimTemplates'][0]['spec']['storageClassName']\ = storage_class['metadata']['name'] statefulset['spec']['volumeClaimTemplates'][0]['spec']['accessModes'] \ = ['ReadWriteMany'] create_and_wait_statefulset(statefulset) pvc_name = \ statefulset['spec']['volumeClaimTemplates'][0]['metadata']['name'] \ + '-' + statefulset_name + '-0' pv_name = get_volume_name(core_api, pvc_name) share_manager_name = 'share-manager-' + pv_name share_manager_pod = core_api.read_namespaced_pod( name=share_manager_name, namespace=LONGHORN_NAMESPACE ) share_manager_node = share_manager_pod.spec.node_name longhorn_manager_pods = core_api.list_namespaced_pod( namespace=LONGHORN_NAMESPACE, label_selector="app=longhorn-manager" ) longhorn_manager_pod = None for lhm_pod in longhorn_manager_pods.items: if lhm_pod.spec.node_name == share_manager_node: longhorn_manager_pod = lhm_pod break assert longhorn_manager_pod is not None lhm_pod_uid = longhorn_manager_pod.metadata.uid lhm_pod_restart_count = \ longhorn_manager_pod.status.container_statuses[0].restart_count for i in range(10): for _ in range(RETRY_COUNTS): if check_pod_existence(core_api, share_manager_name, namespace=LONGHORN_NAMESPACE): break time.sleep(RETRY_INTERVAL) delete_and_wait_pod(core_api, share_manager_name, namespace=LONGHORN_NAMESPACE) longhorn_manager_pod = core_api.read_namespaced_pod( name=longhorn_manager_pod.metadata.name, namespace=LONGHORN_NAMESPACE ) assert longhorn_manager_pod.metadata.uid == lhm_pod_uid assert longhorn_manager_pod.status.container_statuses[0].restart_count \ == lhm_pod_restart_count assert longhorn_manager_pod.status.phase == "Running"Related to https://github.com/longhorn/longhorn/issues/11939
Test that repeatedly recreating the share manager pod doesn't cause the longhorn-manager pod to fail because of nil pointer dereference.
- Create a StatefulSet with RWX volume.
- Get the share manager pod.
- Find the longhorn-manager pod on the same node and store its restart count.
- Repeatedly delete the share manager pod to simulate pod crashes and to cause longhorn-manager to recreate it.
- Verify the longhorn-manager pod hasn't crashed by checking its restart count.
def write_data_into_pod(pod_name_and_data_path)-
Expand source code
def write_data_into_pod(pod_name_and_data_path): pod_info = pod_name_and_data_path.split(':') core_api = get_core_api_client() # NOQA write_pod_volume_random_data(core_api, pod_info[0], pod_info[1], DATA_SIZE_IN_MB_3)