diff --git a/CHANGELOG.md b/CHANGELOG.md
index d364ea66d..f98996031 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -25,6 +25,10 @@ The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/)
 -->
 ## [Unreleased](https://github.com/cyverse/atmosphere/compare/v36-6...HEAD) - YYYY-MM-DD
 
+### Changed
+  - Update and add Jetstream scripts
+    ([#735](https://github.com/cyverse/atmosphere/pull/735))
+
 ## [v36-6](https://github.com/cyverse/atmosphere/compare/v36-5...v36-6) - 2019-10-22
 
 ### Changed
diff --git a/scripts/application_to_provider.py b/scripts/application_to_provider.py
index 00dbefaa1..fd4f29bb5 100755
--- a/scripts/application_to_provider.py
+++ b/scripts/application_to_provider.py
@@ -17,7 +17,6 @@
 import core.models
 import service.driver
 from chromogenic.clean import mount_and_clean
-from rtwo.drivers.common import _connect_to_glance
 from atmosphere.settings import secrets
 
 description = """
@@ -214,23 +213,17 @@ def main(
     dprov_acct_driver = service.driver.get_account_driver(
         dprov, raise_exception=True
     )
-    if dst_glance_client_version:
-        dprov_keystone_client = dprov_acct_driver.image_manager.keystone
-        dprov_glance_client = _connect_to_glance(
-            dprov_keystone_client, version=dst_glance_client_version
-        )
-    else:
-        dprov_glance_client = dprov_acct_driver.image_manager.glance
+
+    dprov_glance_client = dprov_acct_driver.image_manager.glance
     dprov_atmo_admin_uname = dprov.admin.project_name()
     dprov_atmo_admin_uuid = dprov_acct_driver.get_project(
         dprov_atmo_admin_uname
     ).id
-    # Get application-specific metadata from Atmosphere(2) and resolve identifiers on destination provider
-
     # Get application owner UUID in destination provider
     app_creator_uname = app.created_by_identity.project_name()
+
     try:
         dprov_app_owner_uuid = dprov_acct_driver.get_project(
             app_creator_uname, raise_exception=True
@@ -325,15 +318,8 @@ def main(
     sprov_acct_driver = service.driver.get_account_driver(
         sprov, raise_exception=True
     )
-    if src_glance_client_version == 1:
-        sprov_keystone_client = service.driver.get_account_driver(
-            sprov, raise_exception=True
-        )
-        sprov_glance_client = _connect_to_glance(
-            sprov_keystone_client, version=src_glance_client_version
-        )
-    else:
-        sprov_glance_client = sprov_acct_driver.image_manager.glance
+
+    sprov_glance_client = sprov_acct_driver.image_manager.glance
 
     # Get source image metadata from Glance, and determine if image is AMI-based
     sprov_glance_image = sprov_glance_client.images.get(sprov_img_uuid)
@@ -425,19 +411,13 @@ def main(
         if ami else sprov_glance_image.container_format,
         disk_format="ami" if ami else sprov_glance_image.disk_format
     )
-    if dst_glance_client_version == 1:
-        metadata['is_public'] = False if app.private else True
-    elif dst_glance_client_version >= 2.5:
-        metadata['visibility'] = "shared" if app.private else "public"
-    else:
-        metadata['visibility'] = "private" if app.private else "public"
-    # Glance v1 client throws exception on line breaks
-    if dst_glance_client_version == 1:
-        app_description = app.description.replace('\r',
-                                                   '').replace('\n', ' -- ')
+    if app.private:
+        metadata['visibility'] = "shared"
     else:
-        app_description = app.description
+        metadata['visibility'] = "public"
+
+    app_description = app.description
     atmo_metadata = dict(
         tags=app_tags,
         application_name=app.name,
@@ -469,13 +449,8 @@ def main(
             del custom_metadata[key]
     atmo_metadata.update(custom_metadata)
-    # Set image metadata (this is always done)
-    if dst_glance_client_version == 1:
-        metadata['properties'] = atmo_metadata
-        dprov_glance_client.images.update(dprov_glance_image.id, **metadata)
-    else:
-        metadata.update(atmo_metadata)
-        dprov_glance_client.images.update(dprov_glance_image.id, **metadata)
+    metadata.update(atmo_metadata)
+    dprov_glance_client.images.update(dprov_glance_image.id, **metadata)
 
     logging.info(
         "Populated Glance image metadata: {0}".format(
@@ -494,6 +469,12 @@ def main(
             dprov_glance_client.image_members.create(
                 dprov_glance_image.id, add_member_uuid
             )
+
+            # Added to change member status to accepted
+            dprov_glance_client.image_members.update(
+                dprov_glance_image.id, add_member_uuid, 'accepted'
+            )
+
         else:
             dprov_img_prior_members.remove(add_member_uuid)
     for del_member_uuid in dprov_img_prior_members:
@@ -758,11 +739,7 @@ def migrate_image_data_glance(
     logging.debug("Attempting to upload image data to destination provider")
     with open(local_path, 'rb') as img_file:
         try:
-            # "Upload" method is different for Glance client v1, than for v2
-            if dst_glance_client_version == 1:
-                dst_glance_client.images.update(img_uuid, data=img_file)
-            else:
-                dst_glance_client.images.upload(img_uuid, img_file)
+            dst_glance_client.images.upload(img_uuid, img_file)
             if local_img_checksum == dst_glance_client.images.get(
                 img_uuid
             ).checksum:
@@ -818,13 +795,8 @@ def migrate_image_data_irods(
         irods_conn.get('username'), irods_conn.get('password'),
         irods_conn.get('host'), irods_conn.get('port'), dst_data_obj_path
     )
-    # Assumption that iRODS copy will always be correct+complete, not inspecting checksums afterward?
-    if dst_glance_client_version == 1:
-        dst_glance_client.images.update(img_uuid, location=dst_img_location)
-    else:
-        dst_glance_client.images.add_location(
-            img_uuid, dst_img_location, dict()
-        )
+
+    dst_glance_client.images.add_location(img_uuid, dst_img_location, dict())
     logging.info("Set image location in Glance")
     return True
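The member-status update added in the hunk above is needed because Glance v2 creates image members in 'pending' status, and a pending share does not show up in the member project's default image list. A minimal sketch of the v2 sharing flow the script now relies on, assuming admin credentials on the destination cloud (the auth values and UUIDs are placeholders, not taken from this repository):

    from keystoneauth1 import session
    from keystoneauth1.identity import v3
    import glanceclient

    # Placeholder credentials for the destination provider's admin account
    auth = v3.Password(
        auth_url='https://cloud.example.org:5000/v3',
        username='admin', password='********', project_name='admin',
        user_domain_id='default', project_domain_id='default'
    )
    glance = glanceclient.Client('2', session=session.Session(auth=auth))

    image_id = 'IMAGE-UUID'             # placeholder
    member_project_id = 'PROJECT-UUID'  # placeholder

    # Visibility must be 'shared' before members can be added (see the
    # metadata handling earlier in the diff)
    glance.images.update(image_id, visibility='shared')
    # Add the project as a member; Glance v2 leaves it in 'pending' status
    glance.image_members.create(image_id, member_project_id)
    # Flip the membership to 'accepted'; normally only the member project (or
    # an admin, as here) may change the status
    glance.image_members.update(image_id, member_project_id, 'accepted')

Accepting a membership is ordinarily done from the member project's side; doing it from the owner side, as the script now does, relies on the account driver running with the provider's admin credentials.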
diff --git a/scripts/end_date_volumes_invariant_13a.py b/scripts/end_date_volumes_invariant_13a.py
new file mode 100755
index 000000000..d4f2c2dad
--- /dev/null
+++ b/scripts/end_date_volumes_invariant_13a.py
@@ -0,0 +1,169 @@
+#!/usr/bin/env python
+import argparse
+import django
+django.setup()
+
+from core.models import Volume
+from django.db import connection
+from django.db.models.functions import Now
+
+
+def main():
+    '''
+    This script will end date volumes that come up for Invariant #13a on
+    https://tasmo.atmo.cloud and will be run via cron every ___________.
+    '''
+    # Dry run option
+    parser = argparse.ArgumentParser()
+    parser.add_argument(
+        "--dry-run",
+        action="store_true",
+        help="Do not actually end-date any volumes"
+    )
+    args = parser.parse_args()
+    if args.dry_run:
+        print 'DRY RUN -- No Volumes will be end-dated'
+
+    volumes_from_invariant_13a = []
+
+    # This query comes from here: https://tasmo.atmo.cloud/queries/64/source#87
+    query = '''WITH volumes_users_allocations AS
+    ( SELECT volume.id AS volume_id, volume.name AS volume_name, volume.description AS volume_description,
+    proj.name AS atmo_project_name, proj.description AS atmo_project_description, au.id AS user_id,
+    au.username, au.is_staff, au.is_superuser,
+    CASE
+        WHEN ins_src.provider_id = 4 THEN 'IU'
+        WHEN ins_src.provider_id = 5 THEN 'TACC'
+        ELSE 'UNKNOWN'
+    END AS src_provider, ins_src.identifier AS openstack_identifier, ins_src.start_date, ins_src.end_date,
+    string_agg(current_als.name, ',') AS current_allocation_sources
+    FROM volume
+    LEFT OUTER JOIN instance_source ins_src ON volume.instance_source_id = ins_src.id
+    LEFT OUTER JOIN project proj ON volume.project_id = proj.id
+    LEFT OUTER JOIN atmosphere_user au ON ins_src.created_by_id = au.id
+    LEFT OUTER JOIN user_allocation_source current_uals ON au.id = current_uals.user_id
+    LEFT OUTER JOIN allocation_source current_als ON current_uals.allocation_source_id = current_als.id
+    GROUP BY volume.id, proj.id, au.id, ins_src.id), user_allocation_source_deleted_events AS
+    ( SELECT DISTINCT event_table.name AS event_name, event_table.entity_id AS username,
+    event_table.payload :: json ->> 'allocation_source_name' AS allocation_source_name,
+    max(TIMESTAMP) AS last_event, min(TIMESTAMP) AS first_event
+    FROM event_table
+    WHERE event_table.name = 'user_allocation_source_deleted'
+    GROUP BY event_table.name, event_table.entity_id, event_table.payload :: json ->> 'allocation_source_name' ),
+    user_allocation_source_deleted_events_grouped AS
+    ( SELECT DISTINCT event_name, username, string_agg(DISTINCT allocation_source_name, ',') AS historic_allocation_sources,
+    max(last_event) AS last_event, min(first_event) AS first_event
+    FROM user_allocation_source_deleted_events
+    GROUP BY event_name, username ), users_with_no_allocation_sources AS
+    ( SELECT au.id AS user_id, au.username, au.is_staff, au.is_superuser
+    FROM atmosphere_user au
+    LEFT OUTER JOIN user_allocation_source uas ON au.id = uas.user_id
+    WHERE uas.id IS NULL ),
+    users_with_no_allocation_source_over_six_months AS
+    ( SELECT uwnas.user_id, uwnas.username, uwnas.is_staff, uwnas.is_superuser, uasdeg.last_event, uasdeg.historic_allocation_sources
+    FROM users_with_no_allocation_sources uwnas
+    LEFT OUTER JOIN user_allocation_source_deleted_events_grouped uasdeg ON uasdeg.username = uwnas.username
+    WHERE uasdeg.last_event IS NULL OR uasdeg.last_event < NOW() - INTERVAL '6 months' ),
+    active_volumes_for_users_with_no_allocation_source_over_six_months AS
+    ( SELECT * FROM volumes_users_allocations vua
+    LEFT JOIN users_with_no_allocation_source_over_six_months uwnasosm ON vua.user_id = uwnasosm.user_id
+    WHERE uwnasosm.user_id IS NOT NULL AND vua.end_date IS NULL AND vua.username <> 'atmoadmin' ),
+    instancesources_appversions_apps AS
+    ( SELECT DISTINCT isrc.identifier AS openstack_image_identifier, isrc.start_date AS isrc_start_date,
+    isrc.end_date AS isrc_end_date,
+    CASE
+        WHEN isrc.provider_id = 4 THEN 'IU'
+        WHEN isrc.provider_id = 5 THEN 'TACC'
+        ELSE 'UNKNOWN'
+    END AS isrc_provider, appv.created_by_id AS appv_created_by_id, appv.start_date AS appv_start_date,
+    appv.end_date AS appv_end_date, appv.name AS appv_name, app.created_by_id AS app_created_by_id,
+    app.name AS app_name, app.description AS app_description, app.start_date AS app_start_date, app.end_date AS app_end_date
+    FROM application_version appv
+    LEFT OUTER JOIN provider_machine pm ON appv.id = pm.application_version_id
+    LEFT OUTER JOIN application app ON app.id = appv.application_id
+    LEFT OUTER JOIN instance_source isrc ON pm.instance_source_id = isrc.id ),
+    instancesources_appversions_apps_instances AS
+    ( SELECT DISTINCT isrc.identifier AS openstack_image_identifier, isrc.start_date AS isrc_start_date,
+    isrc.end_date AS isrc_end_date, appv.created_by_id AS appv_created_by_id, appv.start_date AS appv_start_date,
+    appv.end_date AS appv_end_date, app.created_by_id AS app_created_by_id, app.start_date AS app_start_date,
+    app.end_date AS app_end_date, ins.id AS instance_id, ins.created_by_id AS instance_created_by_id,
+    ins.start_date AS instance_start_date, ins.end_date AS instance_end_date
+    FROM application_version appv
+    LEFT OUTER JOIN provider_machine pm ON appv.id = pm.application_version_id
+    LEFT OUTER JOIN application app ON app.id = appv.application_id
+    LEFT OUTER JOIN instance_source isrc ON pm.instance_source_id = isrc.id
+    LEFT OUTER JOIN instance ins ON isrc.id = ins.source_id ),
+    images_users_allocations_agg AS
+    ( SELECT DISTINCT isrc.identifier AS openstack_identifier, jsonb_agg(DISTINCT isrc.*) AS instance_sources,
+    jsonb_agg(DISTINCT pm.*) AS provider_machine, jsonb_agg(DISTINCT app.*) AS applications,
+    jsonb_agg(DISTINCT appv.*) AS application_versions, jsonb_agg(DISTINCT ins.*) AS instances
+    FROM application_version appv
+    LEFT OUTER JOIN provider_machine pm ON appv.id = pm.application_version_id
+    LEFT OUTER JOIN application app ON app.id = appv.application_id
+    LEFT OUTER JOIN instance_source isrc ON pm.instance_source_id = isrc.id
+    LEFT OUTER JOIN instance ins ON isrc.id = ins.source_id
+    GROUP BY isrc.identifier ), active_instancesources_and_appversions_for_users_with_no_allocation_source_over_six_months AS
+    ( SELECT iaa.*, uwnasosm.username AS created_by_user_username, uwnasosm.is_staff AS created_by_user_is_staff,
+    uwnasosm.is_superuser AS created_by_user_is_superuser, uwnasosm.last_event AS created_by_user_last_allocation_end_date,
+    uwnasosm.historic_allocation_sources AS created_by_user_historic_allocation_sources
+    FROM instancesources_appversions_apps iaa
+    LEFT JOIN users_with_no_allocation_source_over_six_months uwnasosm ON iaa.appv_created_by_id = uwnasosm.user_id
+    WHERE uwnasosm.user_id IS NOT NULL AND (isrc_end_date IS NULL OR appv_end_date IS NULL OR app_end_date IS NULL)
+    AND uwnasosm.username NOT IN ('admin', 'atmoadmin')), aiaafuwnasosm_with_current_allocation_sources AS
+    ( SELECT aiaafuwnasosm.openstack_image_identifier, aiaafuwnasosm.isrc_provider, aiaafuwnasosm.isrc_end_date,
+    aiaafuwnasosm.isrc_start_date, aiaafuwnasosm.appv_name, aiaafuwnasosm.appv_start_date, aiaafuwnasosm.appv_end_date,
+    aiaafuwnasosm.appv_created_by_id, aiaafuwnasosm.app_end_date, aiaafuwnasosm.app_start_date, aiaafuwnasosm.app_description,
+    aiaafuwnasosm.app_name, aiaafuwnasosm.app_created_by_id, aiaafuwnasosm.created_by_user_username,
+    aiaafuwnasosm.created_by_user_is_staff, aiaafuwnasosm.created_by_user_is_superuser,
+    aiaafuwnasosm.created_by_user_last_allocation_end_date, aiaafuwnasosm.created_by_user_historic_allocation_sources,
+    string_agg(DISTINCT current_als.name, ',') AS current_allocation_sources
+    FROM active_instancesources_and_appversions_for_users_with_no_allocation_source_over_six_months aiaafuwnasosm
+    LEFT OUTER JOIN user_allocation_source current_uals ON aiaafuwnasosm.appv_created_by_id = current_uals.user_id
+    LEFT OUTER JOIN allocation_source current_als ON current_uals.allocation_source_id = current_als.id
+    GROUP BY aiaafuwnasosm.openstack_image_identifier, aiaafuwnasosm.isrc_provider, aiaafuwnasosm.isrc_end_date,
+    aiaafuwnasosm.isrc_start_date, aiaafuwnasosm.appv_name, aiaafuwnasosm.appv_start_date, aiaafuwnasosm.appv_end_date,
+    aiaafuwnasosm.appv_created_by_id, aiaafuwnasosm.app_end_date, aiaafuwnasosm.app_start_date,
+    aiaafuwnasosm.app_description, aiaafuwnasosm.app_name, aiaafuwnasosm.app_created_by_id,
+    aiaafuwnasosm.created_by_user_username, aiaafuwnasosm.created_by_user_is_staff, aiaafuwnasosm.created_by_user_is_superuser,
+    aiaafuwnasosm.created_by_user_last_allocation_end_date, aiaafuwnasosm.created_by_user_historic_allocation_sources
+    ORDER BY aiaafuwnasosm.created_by_user_last_allocation_end_date ASC )
+    SELECT * FROM active_volumes_for_users_with_no_allocation_source_over_six_months avfuwnasosm ORDER BY last_event ASC;'''
+
+    # Use the query above to get volumes listed for Invariant #13a
+    with connection.cursor() as cursor:
+        cursor.execute(query)
+
+        # Get the results as a dictionary
+        rows = dictfetchall(cursor)
+
+    # If there are any results from the query
+    if rows:
+        volumes = Volume.objects.all()
+
+        # Get the Volume object and put it into our list
+        for row in rows:
+            volume = volumes.get(pk=row['volume_id'])
+            volumes_from_invariant_13a.append(volume)
+
+    print 'Here are volumes from invariant 13a:'
+    ctr = 1
+    for vol in volumes_from_invariant_13a:
+        print ctr
+        ctr = ctr + 1
+        print vol.name.encode('utf-8')
+        print vol
+        if not args.dry_run:
+            vol.end_date = Now()
+            vol.save()
+            print 'End-dated %s' % vol
+        print '----'
+
+
+# Helper function to get query results as a dictionary
+def dictfetchall(cursor):
+    columns = [col[0] for col in cursor.description]
+    return [dict(zip(columns, row)) for row in cursor.fetchall()]
+
+
+if __name__ == "__main__":
+    main()
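The per-volume loop above keeps the dry-run output and prints each volume as it is end-dated. If that bookkeeping is ever not needed, Django can do the same work in one statement; a sketch, assuming no save() signals or audit hooks need to fire per volume:

    from django.db.models.functions import Now
    from core.models import Volume

    ids = [row['volume_id'] for row in rows]   # rows as returned by dictfetchall()
    if not args.dry_run:
        # update() issues a single UPDATE and skips per-object save() logic
        Volume.objects.filter(pk__in=ids, end_date__isnull=True).update(end_date=Now())

The explicit loop in the script is a reasonable trade-off for a cron job where per-volume logging is wanted.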
diff --git a/scripts/fix-deb.sh b/scripts/fix-deb.sh
new file mode 100755
index 000000000..14a9f42d8
--- /dev/null
+++ b/scripts/fix-deb.sh
@@ -0,0 +1,16 @@
+#!/bin/bash
+
+deb_url='https://github.com/cyverse/atmosphere-ansible/raw/master/ansible/roles/atmo-vnc/files/VNC-Server-5.2.3-Linux-x64.deb'
+deb_path='/opt/dev/atmosphere-ansible/ansible/roles/atmo-vnc/files/VNC-Server-5.2.3-Linux-x64.deb'
+
+if [ ! -f "$deb_path" ]; then
+    echo "$(date): '$deb_path' does not exist"
+    wget --quiet $deb_url -O $deb_path
+
+    # Now check again
+    if [ ! -f "$deb_path" ]; then
+        echo "$(date): '$deb_path' still does not exist. Try again..."
+    else
+        echo "$(date): '$deb_path' now exists. Fixed!"
+    fi
+fi
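The Jetstream wrapper scripts below export DJANGO_SETTINGS_MODULE and PYTHONPATH and activate the virtualenv before invoking the Python scripts, which is why the scripts themselves can call django.setup() with no environment handling of their own. If one of them ever needs to run outside a wrapper, a self-contained bootstrap could look like the following sketch (the /opt paths are the deployment defaults the wrappers assume):

    #!/usr/bin/env python
    import os
    import sys

    # Mirror what the cron wrapper exports, so django.setup() can find settings
    sys.path.insert(0, '/opt/dev/atmosphere')
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'atmosphere.settings')

    import django
    django.setup()

    from core.models import Volume   # import models only after setup()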
diff --git a/scripts/jetstream/cron_end_date_volumes_invariant_13a.sh b/scripts/jetstream/cron_end_date_volumes_invariant_13a.sh
new file mode 100755
index 000000000..fefb039b9
--- /dev/null
+++ b/scripts/jetstream/cron_end_date_volumes_invariant_13a.sh
@@ -0,0 +1,10 @@
+#!/bin/bash
+HOME="/opt/dev/atmosphere"
+VIRTUAL="/opt/env/atmo"
+export DJANGO_SETTINGS_MODULE="atmosphere.settings"
+export PYTHONPATH="$HOME:$PYTHONPATH"
+cd $HOME
+. $VIRTUAL/bin/activate
+echo $PATH
+echo $PYTHONPATH
+$HOME/scripts/end_date_volumes_invariant_13a.py
diff --git a/scripts/jetstream/cron_shelve_instances_invariant_12.sh b/scripts/jetstream/cron_shelve_instances_invariant_12.sh
new file mode 100755
index 000000000..9bf830d01
--- /dev/null
+++ b/scripts/jetstream/cron_shelve_instances_invariant_12.sh
@@ -0,0 +1,10 @@
+#!/bin/bash
+HOME="/opt/dev/atmosphere"
+VIRTUAL="/opt/env/atmo"
+export DJANGO_SETTINGS_MODULE="atmosphere.settings"
+export PYTHONPATH="$HOME:$PYTHONPATH"
+cd $HOME
+. $VIRTUAL/bin/activate
+echo $PATH
+echo $PYTHONPATH
+$HOME/scripts/shelve_instances_invariant_12.py
diff --git a/scripts/shelve_instances_invariant_12.py b/scripts/shelve_instances_invariant_12.py
new file mode 100755
index 000000000..c6e7100da
--- /dev/null
+++ b/scripts/shelve_instances_invariant_12.py
@@ -0,0 +1,141 @@
+#!/usr/bin/env python
+import django
+django.setup()
+
+from django.conf import settings
+from core.models import Provider, Instance, Identity
+from service.instance import shelve_instance
+from service.cache import get_cached_driver
+
+
+def main():
+    '''
+    This script will set active instances that come up for Invariant #12 on
+    https://tasmo.atmo.cloud to 'shelved' status and will be run via cron
+    every Tuesday morning.
+    '''
+    query = '''SELECT
+    instance.id,
+    instance.name,
+    instance.provider_alias,
+    CASE
+        WHEN instance_source.provider_id = 4
+            THEN 'IU'
+        WHEN instance_source.provider_id = 5
+            THEN 'TACC'
+        ELSE 'UNKNOWN' END AS instance_provider_label,
+    instance.start_date,
+    instance.end_date,
+    last_status.name AS last_status,
+    last_status.activity AS last_status_activity,
+    last_status.start_date AS last_status_start_date,
+    last_status.end_date AS last_status_end_date,
+    i_als.name AS instance_allocation_source,
+    au.username,
+    au.is_staff,
+    au.is_superuser,
+    string_agg(current_als.name, ',') AS current_allocation_sources
+    FROM instance
+    LEFT OUTER JOIN instance_allocation_source_snapshot ialss ON instance.id = ialss.instance_id
+    LEFT OUTER JOIN allocation_source i_als ON ialss.allocation_source_id = i_als.id
+    LEFT OUTER JOIN atmosphere_user au ON instance.created_by_id = au.id
+    LEFT OUTER JOIN user_allocation_source uals
+        ON au.id = uals.user_id AND ialss.allocation_source_id = uals.allocation_source_id
+    LEFT OUTER JOIN user_allocation_source current_uals on au.id = current_uals.user_id
+    LEFT OUTER JOIN allocation_source current_als on current_uals.allocation_source_id = current_als.id
+    LEFT OUTER JOIN instance_source ON instance.source_id = instance_source.id
+    LEFT JOIN LATERAL
+    (
+        SELECT
+            ish.start_date,
+            ish.end_date,
+            status.name,
+            ish.activity
+        FROM instance_status_history ish
+        LEFT JOIN instance_status status on ish.status_id = status.id
+        WHERE ish.instance_id = instance.id
+        ORDER BY ish.id DESC
+        LIMIT 1
+    ) last_status ON TRUE
+    WHERE
+        instance.end_date IS NULL
+        AND last_status.name NOT IN ('shelved_offloaded', 'shelved')
+        AND (uals.allocation_source_id IS NULL
+            OR ialss.allocation_source_id IS NULL)
+    GROUP BY
+        instance.id,
+        i_als.id,
+        instance_source.provider_id,
+        au.id,
+        last_status.start_date,
+        last_status.end_date,
+        last_status.name,
+        last_status.activity'''
+
+    active_instances_to_shelve = []
+
+    query_instances = Instance.objects.raw(raw_query=query)
+
+    # Getting whitelisted allocation sources
+    whitelist = getattr(settings, "ALLOCATION_OVERRIDES_NEVER_ENFORCE")
+
+    # Only want ones that are not by 'atmoadmin'
+    for instance in query_instances:
+        if instance.created_by.username != 'atmoadmin' and instance.allocation_source.name not in whitelist:
+            active_instances_to_shelve.append(instance)
+
+    # Here they are, set them to shelved
+    for inst in active_instances_to_shelve:
+        reclaim_ip = True
+
+        provider_id = inst.source.provider_id
+        provider = Provider.objects.get(pk=provider_id)
+
+        if not provider:
+            print 'Provider not found, skipping'    # output to log in service
+            continue
+
+        identity = Identity.objects.get(
+            created_by__username=inst.username, provider=provider
+        )
+
+        try:
+            driver = get_cached_driver(identity=identity)
+            esh_instance = driver.get_instance(inst.provider_alias)
+            '''if driver:
+                print 'got driver'
+                print inst.name
+                print inst.provider_alias
+                print inst.allocation_source.name
+                print identity.provider.id
+                print identity.id
+                print identity.created_by
+                print inst.last_status
+                print '***'
+            else:
+                print 'no driver'
+            '''
+
+            if inst.last_status == 'active' or inst.last_status == 'shutoff' or \
+               inst.last_status == 'deploy_error' or inst.last_status == 'deploying' or \
+               inst.last_status == 'suspended':
+                shelve_instance(
+                    driver, esh_instance, identity.provider.uuid, identity.uuid,
+                    identity.created_by, reclaim_ip
+                )
+                print "Shelved instance %s (%s) on allocation %s for user %s" % (
+                    inst.id, inst.name, inst.allocation_source.name,
+                    inst.created_by.username
+                )
+            if inst.last_status == 'error':
+                raise Exception('Did not shelve instance due to ERROR status')
+        except Exception as e:
+            print "Could not shelve Instance %s (%s) on allocation %s for user %s - Exception: %s" % (
+                inst.id, inst.name, inst.allocation_source.name,
+                inst.created_by.username, e
+            )
+            continue
+
+
+if __name__ == "__main__":
+    main()
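Two lookups in shelve_instances_invariant_12.py deserve a note: getattr(settings, "ALLOCATION_OVERRIDES_NEVER_ENFORCE") with no default raises AttributeError when the setting is absent, and instance.allocation_source may be None for exactly the rows this query matches (instances with no allocation-source snapshot), which would make the .name access fail. A more defensive version of the filter, sketched against the same models and settings:

    # Fall back to an empty whitelist if the setting is not defined
    whitelist = getattr(settings, "ALLOCATION_OVERRIDES_NEVER_ENFORCE", []) or []

    for instance in query_instances:
        if instance.created_by.username == 'atmoadmin':
            continue
        allocation = instance.allocation_source    # may be None when no snapshot exists
        if allocation is not None and allocation.name in whitelist:
            continue
        active_instances_to_shelve.append(instance)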