#!/usr/bin/env python3
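
"""Import iPXE disk images as Alibaba Cloud ECS images

Example usage (script and image file names are illustrative):

    ./ali-import.py --public -r eu-central-1 -r cn-hangzhou ipxe.img
"""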

import argparse
from collections import namedtuple
from concurrent.futures import ThreadPoolExecutor, as_completed
import datetime
from itertools import cycle
import logging
from pathlib import Path
import subprocess
import tempfile
import time
from uuid import uuid4

import alibabacloud_credentials as credentials
import alibabacloud_credentials.client
import alibabacloud_credentials.models
import alibabacloud_ecs20140526 as ecs
import alibabacloud_ecs20140526.client
import alibabacloud_ecs20140526.models
import alibabacloud_oss_v2 as oss
import alibabacloud_tea_openapi as openapi
import alibabacloud_tea_openapi.client
import alibabacloud_tea_openapi.models
import alibabacloud_tea_util as util
import alibabacloud_tea_util.client
import alibabacloud_tea_util.models

# For regions in mainland China, the Chinese state censorship laws
# prohibit direct access to OSS bucket contents.
#
# We work around this restriction by uploading and importing images
# only in regions outside mainland China, and then copying the
# imported images across to each censored region.  Yes, this is
# absurd.

logger = logging.getLogger('ali-import')

ECS_ENDPOINT = 'ecs.aliyuncs.com'

OSS_BUCKET_NAME_LEN = 63

IPXE_STORAGE_PREFIX = 'ipxe-upload-temp-'
IPXE_STORAGE_TAG = 'ipxe-upload-temp'

Clients = namedtuple('Clients', ['region', 'ecs', 'oss'])
Image = namedtuple('Image',
                   ['path', 'family', 'name', 'arch', 'mode', 'public'])


def image(filename, basefamily, basename, public):
    """Construct image description"""
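    # Map the image both as a whole disk (D:) and as its fourth
    # partition (P:), so that mdir can look for UEFI boot files in
    # either layout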
    with tempfile.NamedTemporaryFile(mode='w+t') as mtoolsrc:
        mtoolsrc.writelines([
            f'drive D: file="{filename}"\n',
            f'drive P: file="{filename}" partition=4\n',
        ])
        mtoolsrc.flush()
        mdir = subprocess.run(['mdir', '-b', 'D:/EFI/BOOT', 'P:/EFI/BOOT'],
                              stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                              check=False, env={'MTOOLSRC': mtoolsrc.name})
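    # Derive the CPU architecture(s), and hence the name suffix, import
    # architecture, and boot mode, from any UEFI boot files found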
    mapping = {
        b'BOOTX64.EFI': 'x86_64',
        b'BOOTAA64.EFI': 'arm64',
    }
    uefi = [v for k, v in mapping.items() if k in mdir.stdout]
    suffix = ('-uefi-%s' % uefi[0].replace('_', '-') if len(uefi) == 1 else
              '-uefi-multi' if uefi else '')
    path = Path(filename)
    family = '%s%s' % (basefamily, suffix)
    name = '%s%s' % (basename, suffix)
    arch = uefi[0] if len(uefi) == 1 else None if uefi else 'x86_64'
    mode = 'UEFI' if uefi else 'BIOS'
    return Image(path, family, name, arch, mode, public)


def all_regions():
    """Get list of all regions"""
    cred = credentials.client.Client()
    conf = openapi.models.Config(credential=cred, endpoint=ECS_ENDPOINT)
    client = ecs.client.Client(conf)
    req = ecs.models.DescribeRegionsRequest()
    rsp = client.describe_regions(req)
    regions = sorted(x.region_id for x in rsp.body.regions.region)
    return regions


def all_clients(region):
    """Construct all per-region clients"""
    cred = credentials.client.Client()
    ecsconf = openapi.models.Config(credential=cred, region_id=region)
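    # ECS credentials come from the SDK's default credential chain; the
    # OSS v2 SDK uses its own client, with credentials taken from
    # environment variables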
    osscred = oss.credentials.EnvironmentVariableCredentialsProvider()
    ossconf = oss.config.Config(credentials_provider=osscred, region=region)
    clients = Clients(
        region=region,
        ecs=ecs.client.Client(ecsconf),
        oss=oss.client.Client(ossconf),
    )
    return clients


def delete_temp_bucket(clients, bucket):
    """Remove temporary bucket"""
    logger.info("%s: deleting %s" % (clients.region, bucket))
    assert bucket.startswith(IPXE_STORAGE_PREFIX)
    req = oss.models.ListObjectsV2Request(
        bucket=bucket,
        prefix=IPXE_STORAGE_PREFIX,
    )
    rsp = clients.oss.list_objects_v2(req)
    delete = [x.key for x in rsp.contents or ()]
    if delete:
        req = oss.models.DeleteMultipleObjectsRequest(
            bucket=bucket,
            objects=[oss.models.DeleteObject(x) for x in delete],
        )
        rsp = clients.oss.delete_multiple_objects(req)
    req = oss.models.DeleteBucketRequest(bucket=bucket)
    rsp = clients.oss.delete_bucket(req)


def create_temp_bucket(clients):
    """Create temporary bucket (and remove any stale temporary buckets)"""
    if clients.region.startswith('cn-'):
        # Object storage is non-functional in Chinese mainland regions
        # due to censorship restrictions
        return None
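    # Remove any stale temporary buckets left over from previous runs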
    prefix = '%s%s-' % (IPXE_STORAGE_PREFIX, clients.region)
    req = oss.models.ListBucketsRequest(prefix=prefix)
    rsp = clients.oss.list_buckets(req)
    buckets = [x.name for x in rsp.buckets or ()]
    for bucket in buckets:
        delete_temp_bucket(clients, bucket)
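    # OSS bucket names have a limited length, so truncate the generated
    # name if necessary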
    bucket = ('%s%s' % (prefix, uuid4()))[:OSS_BUCKET_NAME_LEN]
    req = oss.models.PutBucketRequest(bucket=bucket)
    rsp = clients.oss.put_bucket(req)
    logger.info("%s: created %s" % (clients.region, bucket))
    return bucket


def upload_image(clients, bucket, image):
    """Upload disk image to uncensored bucket"""
    logger.info("%s: uploading %s" % (clients.region, image.name))
    key = '%s%s' % (IPXE_STORAGE_PREFIX, uuid4())
    req = oss.models.PutObjectRequest(bucket=bucket, key=key)
    rsp = clients.oss.put_object_from_file(req, image.path)
    return key


def delete_image(clients, name):
    """Remove existing image (if applicable)"""
    req = ecs.models.DescribeImagesRequest(
        region_id=clients.region,
        image_name=name,
        image_owner_alias='self',
    )
    rsp = clients.ecs.describe_images(req)
    for image in rsp.body.images.image or ():
        logger.info("%s: deleting %s (%s)" %
                    (clients.region, image.image_name, image.image_id))
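        # Revoke public availability before attempting deletion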
        if image.is_public:
            req = ecs.models.ModifyImageSharePermissionRequest(
                region_id=clients.region,
                image_id=image.image_id,
                is_public=False,
            )
            rsp = clients.ecs.modify_image_share_permission(req)
        req = ecs.models.DeleteImageRequest(
            region_id=clients.region,
            image_id=image.image_id
        )
        rsp = clients.ecs.delete_image(req)


def wait_for_task(clients, task_id):
    """Wait for task to complete"""
    while True:
        time.sleep(5)
        req = ecs.models.DescribeTasksRequest(
            region_id=clients.region,
            task_ids=task_id,
        )
        rsp = clients.ecs.describe_tasks(req)
        assert len(rsp.body.task_set.task) == 1
        assert rsp.body.task_set.task[0].task_id == task_id
        status = rsp.body.task_set.task[0].task_status
        if status not in ('Waiting', 'Processing'):
            break
    if status != 'Finished':
        raise RuntimeError(status)


def wait_for_image(clients, image_id):
    """Wait for image to become available"""
    while True:
        time.sleep(5)
        req = ecs.models.DescribeImagesRequest(
            region_id=clients.region,
            image_id=image_id,
        )
        rsp = clients.ecs.describe_images(req)
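        # The image may not be listed immediately after an import or
        # copy has been requested, so tolerate an empty result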
        if len(rsp.body.images.image):
            assert len(rsp.body.images.image) == 1
            assert rsp.body.images.image[0].image_id == image_id
            status = rsp.body.images.image[0].status
            if status != 'Creating':
                break
    if status != 'Available':
        raise RuntimeError(status)


def import_image(clients, image, bucket, key):
    """Import image"""
    logger.info("%s: importing %s" % (clients.region, image.name))
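    # The import source is the uploaded OSS object, treated as a raw
    # disk image with a minimal disk size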
    disk = ecs.models.ImportImageRequestDiskDeviceMapping(
        disk_image_size=1,
        format='RAW',
        ossbucket=bucket,
        ossobject=key,
    )
    req = ecs.models.ImportImageRequest(
        region_id=clients.region,
        image_name=image.name,
        architecture=image.arch,
        boot_mode=image.mode,
        disk_device_mapping=[disk],
    )
    rsp = clients.ecs.import_image(req)
    image_id = rsp.body.image_id
    task_id = rsp.body.task_id
    wait_for_task(clients, task_id)
    wait_for_image(clients, image_id)
    logger.info("%s: imported %s (%s)" %
                (clients.region, image.name, image_id))
    return image_id


def copy_image(clients, image, image_id, censored):
    """Copy imported image to censored region"""
    logger.info("%s: copying %s (%s) to %s" %
                (clients.region, image.name, image_id, censored.region))
    req = ecs.models.CopyImageRequest(
        region_id=clients.region,
        image_id=image_id,
        destination_region_id=censored.region,
        destination_image_name=image.name,
    )
    rsp = clients.ecs.copy_image(req)
    copy_id = rsp.body.image_id
    wait_for_image(censored, copy_id)
    logger.info("%s: copied %s (%s) to %s" %
                (clients.region, image.name, copy_id, censored.region))
    return copy_id


def finalise_image(clients, image, image_id):
    """Finalise image attributes and permissions"""
    logger.info("%s: finalising %s (%s)" %
                (clients.region, image.name, image_id))
    req = ecs.models.ModifyImageAttributeRequest(
        region_id=clients.region,
        image_id=image_id,
        image_family=image.family,
    )
    rsp = clients.ecs.modify_image_attribute(req)
    if image.public:
        req = ecs.models.ModifyImageSharePermissionRequest(
            region_id=clients.region,
            image_id=image_id,
            is_public=True,
        )
        rsp = clients.ecs.modify_image_share_permission(req)


# Parse command-line arguments
parser = argparse.ArgumentParser(description="Import Alibaba Cloud image")
parser.add_argument('--verbose', '-v', action='count', default=0)
parser.add_argument('--name', '-n',
                    help="Base image name")
parser.add_argument('--family', '-f', default='ipxe',
                    help="Base family name")
parser.add_argument('--public', '-p', action='store_true',
                    help="Make image(s) public")
parser.add_argument('--overwrite', action='store_true',
                    help="Overwrite any existing image with same name")
parser.add_argument('--region', '-r', action='append',
                    help="AliCloud region(s)")
parser.add_argument('image', nargs='+', help="iPXE disk image")
args = parser.parse_args()

# Configure logging
loglevels = [logging.WARNING, logging.INFO, logging.DEBUG]
verbosity = min(args.verbose, (len(loglevels) - 1))
logging.basicConfig(level=loglevels[verbosity])

# Use default name if none specified
if not args.name:
    args.name = '%s-%s' % (args.family,
                           datetime.date.today().strftime('%Y%m%d'))

# Use all regions if none specified
regions = args.region or all_regions()

# Construct image list
images = [image(x, args.family, args.name, args.public) for x in args.image]
imports = [(region, image) for region in regions for image in images]

# Construct per-region clients
clients = {region: all_clients(region) for region in regions}

# Delete existing images from all regions, if applicable
if args.overwrite:
    with ThreadPoolExecutor(max_workers=len(imports)) as executor:
        futures = {executor.submit(delete_image,
                                   clients=clients[region],
                                   name=image.name): (region, image)
                   for region, image in imports}
        done = {futures[x]: x.result() for x in as_completed(futures)}

# Create temporary buckets in all uncensored regions
with ThreadPoolExecutor(max_workers=len(regions)) as executor:
    futures = {executor.submit(create_temp_bucket,
                               clients=clients[region]): region
               for region in regions}
    buckets = {futures[x]: x.result() for x in as_completed(futures)}
if not any(buckets.values()):
    parser.error("At least one non-Chinese region is required")

# Upload images directly to uncensored regions
with ThreadPoolExecutor(max_workers=len(imports)) as executor:
    futures = {executor.submit(upload_image,
                               clients=clients[region],
                               bucket=buckets[region],
                               image=image): (region, image)
               for region, image in imports if buckets[region]}
    keys = {futures[x]: x.result() for x in as_completed(futures)}

# Import images to uncensored regions
with ThreadPoolExecutor(max_workers=len(imports)) as executor:
    futures = {executor.submit(import_image,
                               clients=clients[region],
                               image=image,
                               bucket=buckets[region],
                               key=keys[(region, image)]): (region, image)
               for region, image in imports if buckets[region]}
    results = {futures[x]: x.result() for x in as_completed(futures)}

# Select source uncensored region for each copy
#
# Copies are rate-limited by source region, so spread the copies
# across all available uncensored regions.
#
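# A censored region is one for which no temporary bucket could be
# created.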
copies = [(region, censored, image) for region, (censored, image) in zip(
    cycle(region for region in regions if buckets[region]),
    ((region, image) for region, image in imports if not buckets[region]),
)]

# Copy images to censored regions
with ThreadPoolExecutor(max_workers=len(imports)) as executor:
    futures = {executor.submit(copy_image,
                               clients=clients[region],
                               censored=clients[censored],
                               image=image,
                               image_id=results[(region, image)]):
               (censored, image)
               for region, censored, image in copies}
    results.update({futures[x]: x.result() for x in as_completed(futures)})

# Finalise images
with ThreadPoolExecutor(max_workers=len(imports)) as executor:
    futures = {executor.submit(finalise_image,
                               clients=clients[region],
                               image=image,
                               image_id=results[(region, image)]):
               (region, image)
               for region, image in imports}
    done = {futures[x]: x.result() for x in as_completed(futures)}

# Remove temporary buckets
with ThreadPoolExecutor(max_workers=len(regions)) as executor:
    futures = {executor.submit(delete_temp_bucket,
                               clients=clients[region],
                               bucket=buckets[region]): region
               for region in regions if buckets[region]}
    done = {futures[x]: x.result() for x in as_completed(futures)}

# Show created images
for region, image in imports:
    image_id = results[(region, image)]
    print("%s %s (%s) %s" % (region, image.name, image.family, image_id))