#!/usr/bin/env python3
import argparse
from collections import namedtuple
from concurrent.futures import ThreadPoolExecutor, as_completed
import datetime
from itertools import cycle
import logging
from pathlib import Path
import subprocess
import tempfile
import time
from uuid import uuid4
import alibabacloud_credentials as credentials
import alibabacloud_credentials.client
import alibabacloud_credentials.models
import alibabacloud_ecs20140526 as ecs
import alibabacloud_ecs20140526.client
import alibabacloud_ecs20140526.models
import alibabacloud_oss_v2 as oss
import alibabacloud_tea_openapi as openapi
import alibabacloud_tea_openapi.client
import alibabacloud_tea_openapi.models
import alibabacloud_tea_util as util
import alibabacloud_tea_util.client
import alibabacloud_tea_util.models
# For regions in mainland China, the Chinese state censorship laws
# prohibit direct access to OSS bucket contents.
#
# We work around this restriction by creating a temporary ECS instance
# in each region to access OSS via the internal OSS endpoints, which
# are not subject to these restrictions. Yes, this is absurd.
# Module-level logger; verbosity is configured via --verbose below
logger = logging.getLogger('ali-import')

# Region-independent ECS API endpoint (used only for listing regions)
ECS_ENDPOINT = 'ecs.aliyuncs.com'

# Maximum length to which generated temporary bucket names are truncated
OSS_BUCKET_NAME_LEN = 63

# Name prefix identifying our temporary buckets and uploaded objects
IPXE_STORAGE_PREFIX = 'ipxe-upload-temp-'

# NOTE(review): not referenced anywhere in this file — presumably used
# for resource tagging elsewhere, or vestigial; confirm before removal
IPXE_STORAGE_TAG = 'ipxe-upload-temp'

# Per-region API client handles (ECS and OSS clients for one region)
Clients = namedtuple('Clients', ['region', 'ecs', 'oss'])

# Description of one disk image to be imported
Image = namedtuple('Image',
                   ['path', 'family', 'name', 'arch', 'mode', 'public'])
def image(filename, basefamily, basename, public):
    """Construct image description.

    Uses mtools to inspect the FAT filesystem(s) inside the disk image
    and detect which UEFI boot architectures (if any) are present, then
    derives the image name suffix, architecture, and boot mode.

    :param filename: path to the iPXE disk image file
    :param basefamily: base image family name
    :param basename: base image name
    :param public: whether the image should be made public
    :returns: an Image description tuple
    """
    # Build a temporary mtools configuration mapping the whole image
    # (drive D:) and its fourth partition (drive P:) to the image file.
    #
    # BUGFIX: the configuration lines must each be newline-terminated
    # (writelines() does not add line separators), and must reference
    # the actual image file rather than a literal placeholder.
    with tempfile.NamedTemporaryFile(mode='w+t') as mtoolsrc:
        mtoolsrc.writelines(line + '\n' for line in [
            'drive D:', f'file="{filename}"',
            'drive P:', f'file="{filename}"', 'partition=4',
        ])
        mtoolsrc.flush()
        # List any UEFI boot executables (check=False since either
        # drive mapping may not contain a valid filesystem)
        mdir = subprocess.run(['mdir', '-b', 'D:/EFI/BOOT', 'P:/EFI/BOOT'],
                              stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                              check=False, env={'MTOOLSRC': mtoolsrc.name})
    # Map UEFI boot executable names to ECS architecture identifiers
    mapping = {
        b'BOOTX64.EFI': 'x86_64',
        b'BOOTAA64.EFI': 'arm64',
    }
    uefi = [v for k, v in mapping.items() if k in mdir.stdout]
    # Single architecture gets an explicit suffix; multiple get
    # "-uefi-multi"; none means a BIOS-only x86_64 image
    suffix = ('-uefi-%s' % uefi[0].replace('_', '-') if len(uefi) == 1 else
              '-uefi-multi' if uefi else '')
    path = Path(filename)
    family = '%s%s' % (basefamily, suffix)
    name = '%s%s' % (basename, suffix)
    arch = uefi[0] if len(uefi) == 1 else None if uefi else 'x86_64'
    mode = 'UEFI' if uefi else 'BIOS'
    return Image(path, family, name, arch, mode, public)
def all_regions():
    """Get list of all regions

    Queries the region-independent ECS endpoint and returns the sorted
    list of all available region identifiers.
    """
    cred = credentials.client.Client()
    config = openapi.models.Config(credential=cred, endpoint=ECS_ENDPOINT)
    request = ecs.models.DescribeRegionsRequest()
    response = ecs.client.Client(config).describe_regions(request)
    return sorted(x.region_id for x in response.body.regions.region)
def all_clients(region):
    """Construct all per-region clients

    Builds an ECS client (via the default credential chain) and an OSS
    client (via environment-variable credentials) for one region.
    """
    cred = credentials.client.Client()
    ecs_config = openapi.models.Config(credential=cred, region_id=region)
    oss_provider = oss.credentials.EnvironmentVariableCredentialsProvider()
    oss_config = oss.config.Config(credentials_provider=oss_provider,
                                   region=region)
    return Clients(region=region,
                   ecs=ecs.client.Client(ecs_config),
                   oss=oss.client.Client(oss_config))
def delete_temp_bucket(clients, bucket):
    """Remove temporary bucket

    Deletes all of our temporary objects from the bucket, then deletes
    the bucket itself.  Refuses (via assertion) to touch any bucket not
    matching our temporary-storage naming prefix.
    """
    logger.info("%s: deleting %s" % (clients.region, bucket))
    assert bucket.startswith(IPXE_STORAGE_PREFIX)
    # A bucket must be emptied before it can be deleted
    listing = clients.oss.list_objects_v2(oss.models.ListObjectsV2Request(
        bucket=bucket,
        prefix=IPXE_STORAGE_PREFIX,
    ))
    stale = [obj.key for obj in listing.contents or ()]
    if stale:
        clients.oss.delete_multiple_objects(
            oss.models.DeleteMultipleObjectsRequest(
                bucket=bucket,
                objects=[oss.models.DeleteObject(x) for x in stale],
            ))
    clients.oss.delete_bucket(oss.models.DeleteBucketRequest(bucket=bucket))
def create_temp_bucket(clients):
    """Create temporary bucket (and remove any stale temporary buckets)

    Returns the new bucket name, or None for Chinese mainland regions
    where object storage cannot be used directly.
    """
    if clients.region.startswith('cn-'):
        # Object storage is non-functional in Chinese mainland regions
        # due to censorship restrictions
        return None
    prefix = '%s%s-' % (IPXE_STORAGE_PREFIX, clients.region)
    # Clean up leftovers from any previous (aborted) runs first
    existing = clients.oss.list_buckets(
        oss.models.ListBucketsRequest(prefix=prefix))
    for stale in (x.name for x in existing.buckets or ()):
        delete_temp_bucket(clients, stale)
    # Bucket names have a maximum permitted length; truncate as needed
    bucket = ('%s%s' % (prefix, uuid4()))[:OSS_BUCKET_NAME_LEN]
    clients.oss.put_bucket(oss.models.PutBucketRequest(bucket=bucket))
    logger.info("%s: created %s" % (clients.region, bucket))
    return bucket
def upload_image(clients, bucket, image):
    """Upload disk image to uncensored bucket

    Uploads the image file under a freshly generated temporary object
    key and returns that key.
    """
    logger.info("%s: uploading %s" % (clients.region, image.name))
    key = '%s%s' % (IPXE_STORAGE_PREFIX, uuid4())
    request = oss.models.PutObjectRequest(bucket=bucket, key=key)
    clients.oss.put_object_from_file(request, image.path)
    return key
def delete_image(clients, name):
    """Remove existing image (if applicable)

    Deletes any of our own images with the given name in this region,
    revoking public sharing first where necessary.
    """
    found = clients.ecs.describe_images(ecs.models.DescribeImagesRequest(
        region_id=clients.region,
        image_name=name,
        image_owner_alias='self',
    ))
    for match in found.body.images.image or ():
        logger.info("%s: deleting %s (%s)" %
                    (clients.region, match.image_name, match.image_id))
        # A public image must be made private before it can be deleted
        if match.is_public:
            clients.ecs.modify_image_share_permission(
                ecs.models.ModifyImageSharePermissionRequest(
                    region_id=clients.region,
                    image_id=match.image_id,
                    is_public=False,
                ))
        clients.ecs.delete_image(ecs.models.DeleteImageRequest(
            region_id=clients.region,
            image_id=match.image_id
        ))
def wait_for_task(clients, task_id):
    """Wait for task to complete

    Polls the task every five seconds until it leaves the
    Waiting/Processing states, then raises RuntimeError unless it
    finished successfully.
    """
    status = None
    while status in (None, 'Waiting', 'Processing'):
        time.sleep(5)
        rsp = clients.ecs.describe_tasks(ecs.models.DescribeTasksRequest(
            region_id=clients.region,
            task_ids=task_id,
        ))
        tasks = rsp.body.task_set.task
        assert len(tasks) == 1
        assert tasks[0].task_id == task_id
        status = tasks[0].task_status
    if status != 'Finished':
        raise RuntimeError(status)
def wait_for_image(clients, image_id):
    """Wait for image to become available

    Polls every five seconds until the image exists and has left the
    Creating state, then raises RuntimeError unless it is Available.
    """
    status = None
    while status in (None, 'Creating'):
        time.sleep(5)
        rsp = clients.ecs.describe_images(ecs.models.DescribeImagesRequest(
            region_id=clients.region,
            image_id=image_id,
        ))
        found = rsp.body.images.image
        if not len(found):
            # Image may not yet be visible; keep polling
            continue
        assert len(found) == 1
        assert found[0].image_id == image_id
        status = found[0].status
    if status != 'Available':
        raise RuntimeError(status)
def import_image(clients, image, bucket, key):
    """Import image

    Starts an ECS image import from the uploaded OSS object, waits for
    both the import task and the resulting image to complete, and
    returns the new image identifier.
    """
    logger.info("%s: importing %s" % (clients.region, image.name))
    mapping = ecs.models.ImportImageRequestDiskDeviceMapping(
        disk_image_size=1,
        format='RAW',
        ossbucket=bucket,
        ossobject=key,
    )
    rsp = clients.ecs.import_image(ecs.models.ImportImageRequest(
        region_id=clients.region,
        image_name=image.name,
        architecture=image.arch,
        boot_mode=image.mode,
        disk_device_mapping=[mapping],
    ))
    image_id = rsp.body.image_id
    task_id = rsp.body.task_id
    wait_for_task(clients, task_id)
    wait_for_image(clients, image_id)
    logger.info("%s: imported %s (%s)" %
                (clients.region, image.name, image_id))
    return image_id
def copy_image(clients, image, image_id, censored):
    """Copy imported image to censored region

    Initiates a cross-region copy from this (uncensored) region to the
    censored region, waits for the copy to become available, and
    returns the copied image identifier.
    """
    logger.info("%s: copying %s (%s) to %s" %
                (clients.region, image.name, image_id, censored.region))
    rsp = clients.ecs.copy_image(ecs.models.CopyImageRequest(
        region_id=clients.region,
        image_id=image_id,
        destination_region_id=censored.region,
        destination_image_name=image.name,
    ))
    copy_id = rsp.body.image_id
    wait_for_image(censored, copy_id)
    logger.info("%s: copied %s (%s) to %s" %
                (clients.region, image.name, copy_id, censored.region))
    return copy_id
def finalise_image(clients, image, image_id):
    """Finalise image attributes and permissions

    Sets the image family attribute and, when requested, makes the
    image publicly shared.
    """
    logger.info("%s: finalising %s (%s)" %
                (clients.region, image.name, image_id))
    clients.ecs.modify_image_attribute(ecs.models.ModifyImageAttributeRequest(
        region_id=clients.region,
        image_id=image_id,
        image_family=image.family,
    ))
    if image.public:
        clients.ecs.modify_image_share_permission(
            ecs.models.ModifyImageSharePermissionRequest(
                region_id=clients.region,
                image_id=image_id,
                is_public=True,
            ))
# Parse command-line arguments
parser = argparse.ArgumentParser(description="Import Alibaba Cloud image")
parser.add_argument('--verbose', '-v', action='count', default=0)
parser.add_argument('--name', '-n',
                    help="Base image name")
parser.add_argument('--family', '-f', default='ipxe',
                    help="Base family name")
parser.add_argument('--public', '-p', action='store_true',
                    help="Make image(s) public")
parser.add_argument('--overwrite', action='store_true',
                    help="Overwrite any existing image with same name")
parser.add_argument('--region', '-r', action='append',
                    help="AliCloud region(s)")
parser.add_argument('image', nargs='+', help="iPXE disk image")
args = parser.parse_args()

# Configure logging (each -v raises verbosity: WARNING -> INFO -> DEBUG)
loglevels = [logging.WARNING, logging.INFO, logging.DEBUG]
verbosity = min(args.verbose, (len(loglevels) - 1))
logging.basicConfig(level=loglevels[verbosity])

# Use default name if none specified (e.g. "ipxe-20240131")
if not args.name:
    args.name = '%s-%s' % (args.family,
                           datetime.date.today().strftime('%Y%m%d'))

# Use all regions if none specified
regions = args.region or all_regions()

# Construct image list
#
# "imports" is the full cross product of (region, image) pairs
images = [image(x, args.family, args.name, args.public) for x in args.image]
imports = [(region, image) for region in regions for image in images]

# Construct per-region clients
clients = {region: all_clients(region) for region in regions}

# Delete existing images from all regions, if applicable
if args.overwrite:
    with ThreadPoolExecutor(max_workers=len(imports)) as executor:
        futures = {executor.submit(delete_image,
                                   clients=clients[region],
                                   name=image.name): (region, image)
                   for region, image in imports}
        done = {futures[x]: x.result() for x in as_completed(futures)}

# Create temporary buckets in all uncensored regions
#
# buckets maps region -> bucket name, or None for censored (cn-*) regions
with ThreadPoolExecutor(max_workers=len(regions)) as executor:
    futures = {executor.submit(create_temp_bucket,
                               clients=clients[region]): region
               for region in regions}
    buckets = {futures[x]: x.result() for x in as_completed(futures)}
if not any(buckets.values()):
    parser.error("At least one non-Chinese region is required")

# Upload images directly to uncensored regions
with ThreadPoolExecutor(max_workers=len(imports)) as executor:
    futures = {executor.submit(upload_image,
                               clients=clients[region],
                               bucket=buckets[region],
                               image=image): (region, image)
               for region, image in imports if buckets[region]}
    keys = {futures[x]: x.result() for x in as_completed(futures)}

# Import images to uncensored regions
#
# results maps (region, image) -> imported image identifier
with ThreadPoolExecutor(max_workers=len(imports)) as executor:
    futures = {executor.submit(import_image,
                               clients=clients[region],
                               image=image,
                               bucket=buckets[region],
                               key=keys[(region, image)]): (region, image)
               for region, image in imports if buckets[region]}
    results = {futures[x]: x.result() for x in as_completed(futures)}

# Select source uncensored region for each copy
#
# Copies are rate-limited by source region, so spread the copies
# across all available uncensored regions (round-robin via cycle()).
#
copies = [(region, censored, image) for region, (censored, image) in zip(
    cycle(region for region in regions if buckets[region]),
    ((region, image) for region, image in imports if not buckets[region]),
)]

# Copy images to censored regions
with ThreadPoolExecutor(max_workers=len(imports)) as executor:
    futures = {executor.submit(copy_image,
                               clients=clients[region],
                               censored=clients[censored],
                               image=image,
                               image_id=results[(region, image)]):
               (censored, image)
               for region, censored, image in copies}
    results.update({futures[x]: x.result() for x in as_completed(futures)})

# Finalise images (set family attribute and optionally make public)
with ThreadPoolExecutor(max_workers=len(imports)) as executor:
    futures = {executor.submit(finalise_image,
                               clients=clients[region],
                               image=image,
                               image_id=results[(region, image)]):
               (region, image)
               for region, image in imports}
    done = {futures[x]: x.result() for x in as_completed(futures)}

# Remove temporary buckets
with ThreadPoolExecutor(max_workers=len(regions)) as executor:
    futures = {executor.submit(delete_temp_bucket,
                               clients=clients[region],
                               bucket=buckets[region]): region
               for region in regions if buckets[region]}
    done = {futures[x]: x.result() for x in as_completed(futures)}

# Show created images
for region, image in imports:
    image_id = results[(region, image)]
    print("%s %s (%s) %s" % (region, image.name, image.family, image_id))