Main scripts in scripts/ folder updated to use Python3. I went through the scripts detailed in scripts/README and updated them to use Python3. I used the Python "future" module to generate suggestions, then manually went through and applied the changes. The "future" module suggests changes that allow for cross-compatibility between Python 2 and Python 3, but since it was expressed that only Python 3 needed to be supported, I left all of that out.
The issue is detailed here: https://github.com/cloudius-systems/osv/issues/1056 --- scripts/export_manifest.py | 14 +++++----- scripts/firecracker.py | 4 +-- scripts/gen-rofs-img.py | 18 ++++++------ scripts/loader.py | 57 ++++++++++++++++++-------------------- scripts/metadata.py | 6 ++-- scripts/mkbootfs.py | 7 ++--- scripts/module.py | 4 +-- scripts/setup.py | 10 +++---- scripts/test.py | 6 ++-- scripts/tests/testing.py | 4 +-- scripts/upload_manifest.py | 12 ++------ 11 files changed, 65 insertions(+), 77 deletions(-) diff --git a/scripts/export_manifest.py b/scripts/export_manifest.py index 5e8fb4e7..370b1f29 100755 --- a/scripts/export_manifest.py +++ b/scripts/export_manifest.py @@ -1,4 +1,4 @@ -#!/usr/bin/python +#!/usr/bin/python3 import optparse, os, shutil from manifest_common import add_var, expand, unsymlink, read_manifest, defines, strip_file @@ -8,7 +8,7 @@ from manifest_common import add_var, expand, unsymlink, read_manifest, defines, # support for links in OSv, e.g., /etc/mnttab: ->/proc/mounts. def export_package(manifest, dest): abs_dest = os.path.abspath(dest) - print "[INFO] exporting into directory %s" % abs_dest + print("[INFO] exporting into directory %s" % abs_dest) # Remove and create the base directory where we are going to put all package files. if os.path.exists(abs_dest): @@ -39,7 +39,7 @@ def export_package(manifest, dest): os.makedirs(target_dir) os.symlink(link_source, name) - print "[INFO] added link %s -> %s" % (name, link_source) + print("[INFO] added link %s -> %s" % (name, link_source)) else: # If it is a symlink, then resolve it add to the list of host symlinks to be created later @@ -58,23 +58,23 @@ def export_package(manifest, dest): hostname = strip_file(hostname) shutil.copy(hostname, name) - print "[INFO] exported %s" % name + print("[INFO] exported %s" % name) elif os.path.isdir(hostname): # If hostname is a dir, it is only a request to create the folder on guest. Nothing to copy. 
if not os.path.exists(name): os.makedirs(name) - print "[INFO] created dir %s" % name + print("[INFO] created dir %s" % name) else: # Inform the user that the rule cannot be applied. For example, this happens for links in OSv. - print "[ERR] unable to export %s" % hostname + print("[ERR] unable to export %s" % hostname) for link_source, name in host_symlinks: target_dir = os.path.dirname(name) if not os.path.exists(target_dir): os.makedirs(target_dir) os.symlink(link_source, name) - print "[INFO] added link %s -> %s" % (name, link_source) + print("[INFO] added link %s -> %s" % (name, link_source)) def main(): make_option = optparse.make_option diff --git a/scripts/firecracker.py b/scripts/firecracker.py index 3f21081d..5c98b0ee 100755 --- a/scripts/firecracker.py +++ b/scripts/firecracker.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 # # pip install requests-unixsocket import sys @@ -182,7 +182,7 @@ def find_firecracker(dirname): if not os.path.exists(firecracker_path): url_base = 'https://github.com/firecracker-microvm/firecracker/releases/download' download_url = '%s/%s/firecracker-%s' % (url_base, firecracker_version, firecracker_version) - answer = raw_input("Firecracker executable has not been found under %s. " + answer = input("Firecracker executable has not been found under %s. " "Would you like to download it from %s and place it under %s? [y|n]" % (firecracker_path, download_url, firecracker_path)) if answer.capitalize() != 'Y': diff --git a/scripts/gen-rofs-img.py b/scripts/gen-rofs-img.py index e825c639..8904a870 100755 --- a/scripts/gen-rofs-img.py +++ b/scripts/gen-rofs-img.py @@ -1,4 +1,4 @@ -#!/usr/bin/python +#!/usr/bin/python3 # # Copyright (c) 2015 Carnegie Mellon University. 
@@ -218,13 +218,13 @@ def write_dir(fp, manifest, dirpath, parent_dir): inode.data_offset = symlinks_count inode.count = 1 next_symlink(val[2:],manifest) - print 'Link %s to %s' % (dirpath + '/' + entry, val[2:]) + print('Link %s to %s' % (dirpath + '/' + entry, val[2:])) else: #file inode.mode = REG_MODE global block inode.data_offset = block inode.count = write_file(fp, val) - print 'Adding %s' % (dirpath + '/' + entry) + print('Adding %s' % (dirpath + '/' + entry)) # This needs to be added so that later we can walk the tree # when fining symlinks @@ -264,7 +264,7 @@ def write_fs(fp, manifest): return (block_no, bytes_written) def gen_image(out, manifest): - print 'Writing image' + print('Writing image') fp = open(out, 'wb') # write the initial superblock @@ -272,7 +272,7 @@ def gen_image(out, manifest): system_structure_block, bytes_written = write_fs(fp, manifest) structure_info_last_block_bytes = bytes_written % OSV_BLOCK_SIZE - structure_info_blocks_count = bytes_written / OSV_BLOCK_SIZE + (1 if structure_info_last_block_bytes > 0 else 0) + structure_info_blocks_count = bytes_written // OSV_BLOCK_SIZE + (1 if structure_info_last_block_bytes > 0 else 0) pad(fp,OSV_BLOCK_SIZE - structure_info_last_block_bytes) @@ -290,10 +290,10 @@ def gen_image(out, manifest): sb.symlinks_count = len(symlinks) sb.inodes_count = len(inodes) - print 'First block: %d, blocks count: %d' % (sb.structure_info_first_block, sb.structure_info_blocks_count) - print 'Directory entries count %d' % sb.directory_entries_count - print 'Symlinks count %d' % sb.symlinks_count - print 'Inodes count %d' % sb.inodes_count + print('First block: %d, blocks count: %d' % (sb.structure_info_first_block, sb.structure_info_blocks_count)) + print('Directory entries count %d' % sb.directory_entries_count) + print('Symlinks count %d' % sb.symlinks_count) + print('Inodes count %d' % sb.inodes_count) fp.seek(0) fp.write(sb) diff --git a/scripts/loader.py b/scripts/loader.py index 500d864a..b7969d02 100644 
--- a/scripts/loader.py +++ b/scripts/loader.py @@ -1,4 +1,4 @@ -#!/usr/bin/python2 +#!/usr/bin/python3 import gdb import re @@ -37,8 +37,8 @@ def phys_cast(addr, type): def values(_dict): if hasattr(_dict, 'viewvalues'): - return _dict.viewvalues() - return _dict.values() + return _dict.values() + return list(_dict.values()) def read_vector(v): impl = v['_M_impl'] @@ -426,19 +426,19 @@ class osv_zfs(gdb.Command): print ("\n:: ARC SIZES ::") print ("\tCurrent size: %d (%d MB)" % - (arc_size, arc_size / 1024 / 1024)) + (arc_size, arc_size // 1024 // 1024)) print ("\tTarget size: %d (%d MB)" % - (arc_target_size, arc_target_size / 1024 / 1024)) + (arc_target_size, arc_target_size // 1024 // 1024)) print ("\tMin target size: %d (%d MB)" % - (arc_min_size, arc_min_size / 1024 / 1024)) + (arc_min_size, arc_min_size // 1024 // 1024)) print ("\tMax target size: %d (%d MB)" % - (arc_max_size, arc_max_size / 1024 / 1024)) + (arc_max_size, arc_max_size // 1024 // 1024)) print ("\n:: ARC SIZE BREAKDOWN ::") print ("\tMost recently used cache size: %d (%d MB) (%.2f%%)" % - (arc_mru_size, arc_mru_size / 1024 / 1024, arc_mru_perc)) + (arc_mru_size, arc_mru_size // 1024 // 1024, arc_mru_perc)) print ("\tMost frequently used cache size: %d (%d MB) (%.2f%%)" % - (arc_mfu_size, arc_mfu_size / 1024 / 1024, arc_mfu_perc)) + (arc_mfu_size, arc_mfu_size // 1024 // 1024, arc_mfu_perc)) # Cache efficiency arc_hits = get_stat_by_name(arc_stats_struct, arc_stats_cast, 'arcstat_hits') @@ -618,7 +618,7 @@ class osv_mmap(gdb.Command): end = ulong(vma['_range']['_end']) flags = flagstr(ulong(vma['_flags'])) perm = permstr(ulong(vma['_perm'])) - size = '{:<16}'.format('[%s kB]' % (ulong(end - start)/1024)) + size = '{:<16}'.format('[%s kB]' % (ulong(end - start)//1024)) if 'F' in flags: file_vma = vma.cast(gdb.lookup_type('mmu::file_vma').pointer()) @@ -648,7 +648,7 @@ class osv_vma_find(gdb.Command): if start <= addr and end > addr: flags = flagstr(ulong(vma['_flags'])) perm = 
permstr(ulong(vma['_perm'])) - size = '{:<16}'.format('[%s kB]' % (ulong(end - start)/1024)) + size = '{:<16}'.format('[%s kB]' % (ulong(end - start)//1024)) print('0x%016x -> vma 0x%016x' % (addr, vma_addr)) print('0x%016x 0x%016x %s flags=%s perm=%s' % (start, end, size, flags, perm)) break @@ -671,7 +671,7 @@ def ulong(x): def to_int(gdb_value): if hasattr(globals()['__builtins__'], 'long'): # For GDB with python2 - return long(gdb_value) + return int(gdb_value) return int(gdb_value) class osv_syms(gdb.Command): @@ -685,9 +685,9 @@ class osv_syms(gdb.Command): obj_path = obj['_pathname']['_M_dataplus']['_M_p'].string() path = translate(obj_path) if not path: - print('ERROR: Unable to locate object file for:', obj_path, hex(base)) + print('ERROR: Unable to locate object file for:', obj_path, hex(base)) else: - print(path, hex(base)) + print(path, hex(base)) load_elf(path, base) class osv_load_elf(gdb.Command): @@ -751,7 +751,7 @@ def get_base_class_offset(gdb_type, base_class_name): name_pattern = re.escape(base_class_name) + "(<.*>)?$" for field in gdb_type.fields(): if field.is_base_class and re.match(name_pattern, field.name): - return field.bitpos / 8 + return field.bitpos // 8 def derived_from(type, base_class): return len([x for x in type.fields() @@ -808,11 +808,8 @@ class intrusive_list: yield node_ptr.cast(self.node_type.pointer()).dereference() hook = hook['next_'] - def __nonzero__(self): - return self.root['next_'] != self.root.address - def __bool__(self): - return self.__nonzero__() + return self.root['next_'] != self.root.address class vmstate(object): def __init__(self): @@ -832,7 +829,7 @@ class vmstate(object): self.cpu_list = cpu_list def load_thread_list(self): - threads = map(gdb.Value.dereference, unordered_map(gdb.lookup_global_symbol('sched::thread_map').value())) + threads = list(map(gdb.Value.dereference, unordered_map(gdb.lookup_global_symbol('sched::thread_map').value()))) self.thread_list = sorted(threads, key=lambda x:
int(x["_id"])) def cpu_from_thread(self, thread): @@ -896,7 +893,7 @@ def show_thread_timers(t): gdb.write(' timers:') for timer in timer_list: expired = '*' if timer['_state'] == timer_state_expired else '' - expiration = int(timer['_time']['__d']['__r']) / 1.0e9 + expiration = int(timer['_time']['__d']['__r']) / 1.0e9 gdb.write(' %11.9f%s' % (expiration, expired)) gdb.write('\n') @@ -911,7 +908,7 @@ class ResolvedFrame: self.frame = frame self.file_name = file_name self.line = line - self.func_name = func_name + self.__name__ = func_name def traverse_resolved_frames(frame): while frame: @@ -989,14 +986,14 @@ class osv_info_threads(gdb.Command): function_whitelist = [sched_thread_join] def is_interesting(resolved_frame): - is_whitelisted = resolved_frame.func_name in function_whitelist + is_whitelisted = resolved_frame.__name__ in function_whitelist is_blacklisted = os.path.basename(resolved_frame.file_name) in file_blacklist return is_whitelisted or not is_blacklisted fr = find_or_give_last(is_interesting, traverse_resolved_frames(newest_frame)) if fr: - location = '%s at %s:%s' % (fr.func_name, strip_dotdot(fr.file_name), fr.line) + location = '%s at %s:%s' % (fr.__name__, strip_dotdot(fr.file_name), fr.line) else: location = '??' 
@@ -1009,7 +1006,7 @@ class osv_info_threads(gdb.Command): ) ) - if fr and fr.func_name == sched_thread_join: + if fr and fr.__name__ == sched_thread_join: gdb.write("\tjoining on %s\n" % fr.frame.read_var("this")) show_thread_timers(t) @@ -1214,7 +1211,7 @@ def all_traces(): unpacker.align_up(8) yield Trace(tp, Thread(thread, thread_name), time, cpu, data, backtrace=backtrace) - iters = map(lambda cpu: one_cpu_trace(cpu), values(state.cpu_list)) + iters = [one_cpu_trace(cpu) for cpu in values(state.cpu_list)] return heapq.merge(*iters) def save_traces_to_file(filename): @@ -1281,7 +1278,7 @@ def show_leak(): gdb.flush() allocs = [] for i in range(size_allocations): - newpercent = '%2d%%' % round(100.0*i/(size_allocations-1)) + newpercent = '%2d%%' % round(100.0*i/(size_allocations-1)) if newpercent != percent: percent = newpercent gdb.write('\b\b\b%s' % newpercent) @@ -1343,10 +1340,10 @@ def show_leak(): allocations=cur_n, minsize=cur_min_size, maxsize=cur_max_size, - avgsize=cur_total_size/cur_n, + avgsize=cur_total_size//cur_n, minbirth=cur_first_seq, maxbirth=cur_last_seq, - avgbirth=cur_total_seq/cur_n, + avgbirth=cur_total_seq//cur_n, callchain=callchain) records.append(r) cur_n = 0 @@ -1538,7 +1535,7 @@ class osv_percpu(gdb.Command): gdb.write('%s\n'%e) return percpu_addr = percpu.address - for cpu in vmstate().cpu_list.values(): + for cpu in list(vmstate().cpu_list.values()): gdb.write("CPU %d:\n" % cpu.id) base = cpu.obj['percpu_base'] addr = base+to_int(percpu_addr) diff --git a/scripts/metadata.py b/scripts/metadata.py index 761a927f..37a53323 100644 --- a/scripts/metadata.py +++ b/scripts/metadata.py @@ -1,7 +1,7 @@ -import SimpleHTTPServer as http -import SocketServer import subprocess import os +import http.server as http +import socketserver METADATA_IP = '169.254.169.254' port = 80 @@ -32,7 +32,7 @@ def start_server(path): try: os.chdir(path) handler = http.SimpleHTTPRequestHandler - server = SocketServer.TCPServer(("", port), handler, False) + 
server = socketserver.TCPServer(("", port), handler, False) server.allow_reuse_address = True server.server_bind() server.server_activate() diff --git a/scripts/mkbootfs.py b/scripts/mkbootfs.py index 134fb7b0..1c6a82a2 100755 --- a/scripts/mkbootfs.py +++ b/scripts/mkbootfs.py @@ -1,10 +1,7 @@ -#!/usr/bin/python +#!/usr/bin/python3 import os, struct, optparse, io -try: - import configparser -except ImportError: - import ConfigParser as configparser +import configparser from manifest_common import add_var, expand, unsymlink, read_manifest, defines, strip_file def main(): diff --git a/scripts/module.py b/scripts/module.py index 548dd0c7..c253968c 100755 --- a/scripts/module.py +++ b/scripts/module.py @@ -1,4 +1,4 @@ -#!/usr/bin/python +#!/usr/bin/python3 import re import os @@ -226,7 +226,7 @@ def build(args): else: print(prefix) - for module, run_config_name in modules_to_run.items(): + for module, run_config_name in list(modules_to_run.items()): run_config = resolve.get_run_config(module, run_config_name) if run_config: run_list.append(run_config) diff --git a/scripts/setup.py b/scripts/setup.py index ce02b5d8..9878e1d7 100755 --- a/scripts/setup.py +++ b/scripts/setup.py @@ -1,8 +1,8 @@ -#!/usr/bin/python2 +#!/usr/bin/python3 # set up a development environment for OSv. Run as root. 
-import sys, platform, argparse +import sys, distro, argparse import subprocess standard_ec2_packages = ['python-pip', 'wget'] @@ -305,11 +305,11 @@ parser.add_argument("-t", "--test", action="store_true", help="install packages required by testing tools") cmdargs = parser.parse_args() -(name, version, id) = platform.linux_distribution() +(name, version, id) = distro.linux_distribution() for distro in distros: if type(distro.name) == type([]): - dname = filter(lambda n: name.startswith(n), distro.name) + dname = [n for n in distro.name if name.startswith(n)] if len(dname): distro.name = dname[0] else: @@ -335,5 +335,5 @@ for distro in distros: print ('Your distribution %s version %s is not supported by this script' % (name, version)) sys.exit(1) -print 'Your distribution is not supported by this script.' +print('Your distribution is not supported by this script.') sys.exit(2) diff --git a/scripts/test.py b/scripts/test.py index 02eb4b55..94528dd1 100755 --- a/scripts/test.py +++ b/scripts/test.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 import atexit import subprocess import argparse @@ -81,7 +81,7 @@ def is_not_skipped(test): return test.name not in blacklist def run_tests_in_single_instance(): - run(filter(lambda test: not isinstance(test, TestRunnerTest), tests)) + run([test for test in tests if not isinstance(test, TestRunnerTest)]) blacklist_tests = ' '.join(blacklist) args = run_py_args + ["-s", "-e", "/testrunner.so -b %s" % (blacklist_tests)] @@ -103,7 +103,7 @@ def pluralize(word, count): def make_export_and_conf(): export_dir = tempfile.mkdtemp(prefix='share') - os.chmod(export_dir, 0777) + os.chmod(export_dir, 0o777) (conf_fd, conf_path) = tempfile.mkstemp(prefix='export') conf = os.fdopen(conf_fd, "w") conf.write("%s 127.0.0.1(insecure,rw)\n" % export_dir) diff --git a/scripts/tests/testing.py b/scripts/tests/testing.py index c5249753..c3aaf218 100644 --- a/scripts/tests/testing.py +++ b/scripts/tests/testing.py @@ -133,7 +133,7 @@ 
class SupervisedProcess: self.cv.release() line = '' - ch_bytes = '' + ch_bytes = bytes() while True: ch_bytes = ch_bytes + self.process.stdout.read(1) try: @@ -144,7 +144,7 @@ class SupervisedProcess: if ch == '\n': append_line(line) line = '' - ch_bytes = '' + ch_bytes = bytes() except UnicodeError: continue diff --git a/scripts/upload_manifest.py b/scripts/upload_manifest.py index 42033203..310b1bbc 100755 --- a/scripts/upload_manifest.py +++ b/scripts/upload_manifest.py @@ -1,17 +1,11 @@ -#!/usr/bin/python +#!/usr/bin/python3 import optparse, os, subprocess, socket, threading, stat, sys from manifest_common import add_var, expand, unsymlink, read_manifest, defines, strip_file from contextlib import closing -try: - import StringIO - # This works on Python 2 - StringIO = StringIO.StringIO -except ImportError: - import io - # This works on Python 3 - StringIO = io.StringIO +import io +StringIO = io.StringIO def find_free_port(): with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s: -- 2.24.0 -- You received this message because you are subscribed to the Google Groups "OSv Development" group. To unsubscribe from this group and stop receiving emails from it, send an email to osv-dev+unsubscr...@googlegroups.com. To view this discussion on the web visit https://groups.google.com/d/msgid/osv-dev/20191212200248.12048-1-bassmatt0515%40gmail.com.