[WIP] Require Python 3.6 (f-strings) and fix pylints

So far this only covers the scripts directory; I'll do the rest if this
approach is considered good.
Authored by def on 2022-06-12 13:15:02 +02:00, committed by Dennis Felsing
parent 53e9fd3599
commit 7379a64004
21 changed files with 98 additions and 107 deletions
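The mechanical pattern applied throughout is replacing str.format() (and %-formatting) with f-strings, which require Python 3.6. A minimal before/after sketch, using the header-guard checker further down as the example (values are hypothetical):

    filename, header_guard = "engine/foo.h", "ENGINE_FOO_H"  # hypothetical values
    # Before: works on Python 2.7+
    print("Missing header guard in {}, should be: {}".format(filename, header_guard))
    # After: requires Python 3.6+
    print(f"Missing header guard in {filename}, should be: {header_guard}")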


@@ -48,4 +48,4 @@ jobs:
- name: Pylint
run: |
pylint --version
find . -type f -name "*.py" -not -path './ddnet-libs/*' -print0 | xargs -0 pylint
find . -type f -name "*.py" -not -path './ddnet-libs/*' -not -path './googletest-src/*' -print0 | xargs -0 pylint


@@ -48,7 +48,7 @@ def gen_network_header():
print(network.RawHeader)
for e in network.Enums:
for line in create_enum_table(["%s_%s"%(e.name, v) for v in e.values], 'NUM_%sS'%e.name): # pylint: disable=E1101
for line in create_enum_table(["%s_%s"%(e.name, v) for v in e.values], 'NUM_%sS'%e.name): # pylint: disable=no-member
print(line)
print("")


@@ -1,17 +1,11 @@
# -*- coding: utf-8 -*-
# pylint: disable=E0602
# pylint: disable=undefined-variable
from __future__ import unicode_literals
import os.path
import sys
# Before Python 3.4, use biplist; afterwards, use plistlib
if sys.version_info < (3, 4):
import biplist # pylint: disable=E0401
def read_plist(path):
return biplist.readPlist(path)
else:
import plistlib # pylint: disable=E0401
def read_plist(path):
import plistlib
def read_plist(path):
with open(path, 'rb') as f:
return plistlib.load(f)
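With Python 3.6 as the floor, the biplist fallback can go entirely; plistlib has shipped load() since 3.4. A quick usage sketch (the path is hypothetical):

    import plistlib

    with open("Info.plist", "rb") as f:  # hypothetical path; load() wants binary mode
        data = plistlib.load(f)
    print(data)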
@@ -52,7 +46,7 @@ def icon_from_app(app_path):
# volume_name = 'DDNet'
# Volume format (see hdiutil create -help)
format = defines.get('format', 'UDBZ') # pylint: disable=W0622
format = defines.get('format', 'UDBZ') # pylint: disable=redefined-builtin
# Compression level (if relevant)
compression_level = 9


@@ -17,7 +17,7 @@ def check_file(filename):
if filename in EXCEPTIONS:
return False
error = False
with open(filename) as file:
with open(filename, encoding="utf-8") as file:
for line in file:
if line == "// This file can be included several times.\n":
break
@@ -27,10 +27,10 @@ def check_file(filename):
if line.startswith("#ifndef"):
if line[:-1] != header_guard:
error = True
print("Wrong header guard in {}, is: {}, should be: {}".format(filename, line[:-1], header_guard))
print(f"Wrong header guard in {filename}, is: {line[:-1]}, should be: {header_guard}")
else:
error = True
print("Missing header guard in {}, should be: {}".format(filename, header_guard))
print(f"Missing header guard in {filename}, should be: {header_guard}")
break
return error
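The encoding="utf-8" arguments added in this and later files pin down behavior that open() otherwise leaves to the platform; presumably this is also what silences pylint's unspecified-encoding warning. A sketch of the difference (filename is hypothetical):

    import locale

    # open() without an encoding uses the locale default, which varies by OS:
    print(locale.getpreferredencoding(False))  # e.g. "UTF-8" or "cp1252"
    # Pinning UTF-8 makes the script behave the same everywhere:
    with open("whitelist.txt", encoding="utf-8") as f:  # hypothetical file
        text = f.read()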


@@ -44,13 +44,13 @@ def check_variable_name(qualifiers, typ, name):
return None
prefix = "".join([qualifiers, "_" if qualifiers else "", typ])
if not name.startswith(prefix):
return "should start with {!r}".format(prefix)
return f"should start with {prefix!r}"
if name in ALLOW:
return None
name = name[len(prefix):]
if not name[0].isupper():
if prefix:
return "should start with an uppercase letter after the prefix {!r}".format(prefix)
return f"should start with an uppercase letter after the prefix {prefix!r}"
return "should start with an uppercase letter"
return None
@@ -65,7 +65,7 @@ def main():
error = check_name(i["kind"], i["qualifiers"], i["type"], i["name"])
if error:
unclean = True
print("{}:{}:{}: {}: {}".format(i["file"], i["line"], i["column"], i["name"], error))
print(f"{['file']}:{i['line']}:{i['column']}: {i['name']}: {error}")
return unclean
if __name__ == "__main__":


@@ -6,7 +6,7 @@ import os
os.chdir(os.path.dirname(__file__) + "/..")
def hash_bytes(b):
return "0x{}".format(hashlib.sha256(b).hexdigest()[:8])
return f"0x{hashlib.sha256(b).hexdigest()[:8]}"
def hash_file(filename):
with open(filename, "rb") as f:
@@ -18,7 +18,7 @@ def main():
p.add_argument("extra_file", metavar="EXTRA_FILE", help="File containing extra strings to be hashed")
args = p.parse_args()
with open(args.list_file) as f:
with open(args.list_file, encoding="utf-8") as f:
files = f.read().splitlines()
with open(args.extra_file, "rb") as f:
extra = f.read().splitlines()
@@ -31,10 +31,10 @@ def main():
void CChecksumData::InitFiles()
{
""", end="")
print("\tm_NumFiles = {};".format(len(hashes_files)))
print("\tm_NumExtra = {};".format(len(hashes_extra)))
print(f"\tm_NumFiles = {len(hashes_files)};")
print(f"\tm_NumExtra = {len(hashes_extra)};")
for i, h in enumerate(hashes):
print("\tm_aFiles[0x{:03x}] = {};".format(i, h))
print(f"\tm_aFiles[0x{i:03x}] = {h};")
print("}")
if __name__ == "__main__":
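Two details in this file worth calling out: format specs work inside f-string braces, and the helper truncates a SHA-256 hex digest. A runnable sketch:

    import hashlib

    digest = hashlib.sha256(b"data").hexdigest()  # 64 hex chars
    print(f"0x{digest[:8]}")  # first 8 chars, e.g. 0x3a6eb079
    print(f"{7:03x}")         # zero-padded hex via format spec: 007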


@@ -92,20 +92,20 @@ class ParseError(RuntimeError):
def process_source_file(out, file, extra_args, break_on):
args = extra_args + ["-Isrc"]
if file.endswith(".c"):
header = "{}.h".format(file[:-2])
header = f"{file[:-2]}.h"
elif file.endswith(".cpp"):
header = "{}.h".format(file[:-4])
header = f"{file[:-4]}.h"
else:
raise ValueError("unrecognized source file: {}".format(file))
raise ValueError(f"unrecognized source file: {file}")
index = clang.cindex.Index.create()
unit = index.parse(file, args=args)
errors = list(unit.diagnostics)
if errors:
for error in errors:
print("{}: {}".format(file, error.format()), file=sys.stderr)
print(f"{file}: {error.format()}", file=sys.stderr)
print(args, file=sys.stderr)
raise ParseError("failed parsing {}".format(file))
raise ParseError(f"failed parsing {file}")
filter_files = frozenset([file, header])
@@ -140,7 +140,7 @@ def process_source_file(out, file, extra_args, break_on):
"name": node.spelling,
})
if node.spelling == break_on:
breakpoint()
breakpoint() # pylint: disable=forgotten-debug-statement
def main():
p = argparse.ArgumentParser(description="Extracts identifier data from a Teeworlds source file and its header, outputting the data as CSV to stdout")


@@ -29,15 +29,15 @@ def filter_cpp(filenames):
def find_clang_format(version):
for binary in (
"clang-format",
"clang-format-{}".format(version),
"/opt/clang-format-static/clang-format-{}".format(version)):
f"clang-format-{version}",
f"/opt/clang-format-static/clang-format-{version}"):
try:
out = subprocess.check_output([binary, "--version"])
except FileNotFoundError:
continue
if "clang-format version {}.".format(version) in out.decode("utf-8"):
if f"clang-format version {version}." in out.decode("utf-8"):
return binary
print("Found no clang-format {}".format(version))
print(f"Found no clang-format {version}")
sys.exit(-1)
clang_format_bin = find_clang_format(10)


@@ -18,7 +18,7 @@ def get_curl_calls(path):
if (filename.endswith(".cpp") or
filename.endswith(".c") or
filename.endswith(".h")):
with open(os.path.join(directory, filename)) as f:
with open(os.path.join(directory, filename), encoding="utf-8") as f:
contents = f.read()
names = names.union(CURL_RE.findall(contents))
return names
@@ -27,11 +27,11 @@ def assembly_source(names):
names = sorted(names)
result = []
for name in names:
result.append(".type {},@function".format(name))
result.append(f".type {name},@function")
for name in names:
result.append(".global {}".format(name))
result.append(f".global {name}")
for name in names:
result.append("{}:".format(name))
result.append(f"{name}:")
return "\n".join(result + [""])
DEFAULT_OUTPUT="libcurl.so"
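To make the stub generation concrete: for each symbol, assembly_source emits a .type directive, a .global directive, and an empty label, which is all the linker needs to record the symbol in the shared object. For a single name the output is roughly:

    print(assembly_source({"curl_easy_init"}))
    # .type curl_easy_init,@function
    # .global curl_easy_init
    # curl_easy_init: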
@@ -40,8 +40,8 @@ DEFAULT_SONAME="libcurl.so.4"
def main():
p = argparse.ArgumentParser(description="Create a stub shared object for linking")
p.add_argument("-k", "--keep", action="store_true", help="Keep the intermediary assembly file")
p.add_argument("--output", help="Output filename (default: {})".format(DEFAULT_OUTPUT), default=DEFAULT_OUTPUT)
p.add_argument("--soname", help="soname of the produced shared object (default: {})".format(DEFAULT_SONAME), default=DEFAULT_SONAME)
p.add_argument("--output", help=f"Output filename (default: {DEFAULT_OUTPUT})", default=DEFAULT_OUTPUT)
p.add_argument("--soname", help=f"soname of the produced shared object (default: {DEFAULT_SONAME})", default=DEFAULT_SONAME)
p.add_argument("--functions", metavar="FUNCTION", nargs="+", help="Function symbols that should be put into the shared object (default: look for curl_* names in the source code)")
p.add_argument("--link-args", help="Colon-separated list of additional linking arguments")
args = p.parse_args()
@@ -56,7 +56,7 @@ def main():
with tempfile.NamedTemporaryFile("w", suffix=".s", delete=not args.keep) as f:
if args.keep:
print("using {} as temporary file".format(f.name))
print(f"using {f.name} as temporary file")
f.write(assembly_source(functions))
f.flush()
subprocess.check_call([
@@ -64,7 +64,7 @@ def main():
] + extra_link_args + [
"-shared",
"-nostdlib", # don't need to link to libc
"-Wl,-soname,{}".format(args.soname),
f"-Wl,-soname,{args.soname}",
"-o", args.output,
f.name,
])


@@ -15,7 +15,8 @@ def generate_decompositions():
ud = unicode.data()
con = unicode.confusables()
category = lambda x: {unicode.unhex(u["Value"]) for u in ud if u["General_Category"].startswith(x)}
def category(x):
return {unicode.unhex(u["Value"]) for u in ud if u["General_Category"].startswith(x)}
# TODO: Is this correct? They changed the decompositioning format
nfd = {unicode.unhex(u["Value"]): unicode.unhex_sequence(u["Decomposition_Type"]) for u in ud}
@@ -62,8 +63,8 @@ struct DECOMP_SLICE
""")
print("enum")
print("{")
print("\tNUM_DECOMP_LENGTHS = {},".format(len(len_set)))
print("\tNUM_DECOMPS = {},".format(len(decompositions)))
print(f"\tNUM_DECOMP_LENGTHS = {len(len_set)},")
print(f"\tNUM_DECOMPS = {len(decompositions)},")
print("};")
print()
@@ -81,13 +82,13 @@ def gen_data(decompositions, decomposition_set, decomposition_offsets, len_set):
print("const uint8_t decomp_lengths[NUM_DECOMP_LENGTHS] = {")
for l in len_set:
print("\t{},".format(l))
print(f"\t{l},")
print("};")
print()
print("const int32_t decomp_chars[NUM_DECOMPS] = {")
for k in sorted(decompositions):
print("\t0x{:x},".format(k))
print(f"\t0x{k:x},")
print("};")
print()
@@ -96,14 +97,14 @@ def gen_data(decompositions, decomposition_set, decomposition_offsets, len_set):
d = decompositions[k]
i = decomposition_set.index(tuple(d))
l = len_set.index(len(d))
print("\t{{{}, {}}},".format(decomposition_offsets[i], l))
print(f"\t{{{decomposition_offsets[i]}, {l}}},")
print("};")
print()
print("const int32_t decomp_data[] = {")
for d in decomposition_set:
for c in d:
print("\t0x{:x},".format(c))
print(f"\t0x{c:x},")
print("};")
def main():


@@ -15,7 +15,7 @@ def generate_cases():
return [(unicode.unhex(u["Value"]), unicode.unhex(u["Simple_Lowercase_Mapping"])) for u in ud if u["Simple_Lowercase_Mapping"]]
def gen_header(cases):
print("""\
print(f"""\
#include <stdint.h>
struct UPPER_LOWER
@@ -26,10 +26,10 @@ struct UPPER_LOWER
enum
{{
\tNUM_TOLOWER = {},
\tNUM_TOLOWER = {len(cases)},
}};
extern const struct UPPER_LOWER tolowermap[];""".format(len(cases)))
extern const struct UPPER_LOWER tolowermap[];""")
def gen_data(cases):
print("""\
@@ -39,7 +39,7 @@ def gen_data(cases):
const struct UPPER_LOWER tolowermap[] = {""")
for upper_code, lower_code in cases:
print("\t{{{}, {}}},".format(upper_code, lower_code))
print(f"\t{{{upper_code}, {lower_code}}},")
print("};")
def main():
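The triple-brace pattern in these tables is easy to misread: doubled braces are literal { and }, and only the innermost pair interpolates. For example:

    upper_code, lower_code = 65, 97
    print(f"\t{{{upper_code}, {lower_code}}},")  # prints a tab, then: {65, 97},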


@@ -1,17 +1,12 @@
import errno
import subprocess
try:
from subprocess import DEVNULL
except ImportError:
import os
DEVNULL = open(os.devnull, 'wb')
try:
git_hash = subprocess.check_output(["git", "rev-parse", "--short=16", "HEAD"], stderr=DEVNULL).decode().strip()
definition = '"{}"'.format(git_hash)
git_hash = subprocess.check_output(["git", "rev-parse", "--short=16", "HEAD"], stderr=subprocess.DEVNULL).decode().strip()
definition = f'"{git_hash}"'
except FileNotFoundError as e:
if e.errno != errno.ENOENT:
raise
definition = "0"
except subprocess.CalledProcessError:
definition = "0"
print("const char *GIT_SHORTREV_HASH = {};".format(definition))
print(f"const char *GIT_SHORTREV_HASH = {definition};")


@@ -18,15 +18,15 @@ def hash_password(password):
def auth_add_p_line(username, level, pwhash, salt):
if level not in ('admin', 'mod', 'moderator', 'helper'):
print("Warning: level ({}) not one of admin, mod or helper.".format(level), file=sys.stderr)
if repr(level) != "'{}'".format(level):
print("Warning: level ({}) contains weird symbols, config line is possibly malformed.".format(level), file=sys.stderr)
if repr(username) != "'{}'".format(username):
print("Warning: username ({}) contains weird symbols, config line is possibly malformed.".format(username), file=sys.stderr)
print(f"Warning: level ({level}) not one of admin, mod or helper.", file=sys.stderr)
if repr(level) != f"'{level}'":
print(f"Warning: level ({level}) contains weird symbols, config line is possibly malformed.", file=sys.stderr)
if repr(username) != f"'{username}'":
print(f"Warning: username ({username}) contains weird symbols, config line is possibly malformed.", file=sys.stderr)
username = username.replace('"', '\\"')
if ' ' in username or ';' in username:
username = '"{}"'.format(username)
return "auth_add_p {} {} {} {}".format(username, level, pwhash, salt)
username = f'"{username}"'
return f"auth_add_p {username} {level} {pwhash} {salt}"
def auth_add_p_line_from_pw(username, level, password):
if len(password) < 8:
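The repr() comparisons in the hunk above are a compact escape check: repr() quotes and escapes a string, so any value whose repr is not simply the value wrapped in single quotes contains characters that could malform the config line. For example:

    level = "mod"
    print(repr(level) == f"'{level}'")  # True: plain identifier
    level = "mo'd"
    print(repr(level) == f"'{level}'")  # False: repr switches to double quotes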
@@ -57,13 +57,13 @@ def main():
use_stdio = args.config is None or args.config == '-'
if use_stdio:
if args.config is None:
input_file = open(os.devnull)
input_file = open(os.devnull, encoding="utf-8")
else:
input_file = sys.stdin
output_file = sys.stdout
else:
input_file = open(args.config)
output_file = tempfile.NamedTemporaryFile('w', dir=os.getcwd(), prefix="{}.".format(args.config), delete=False)
input_file = open(args.config, encoding="utf-8") # pylint: disable=consider-using-with
output_file = tempfile.NamedTemporaryFile('w', dir=os.getcwd(), prefix=f"{args.config}.", delete=False) # pylint: disable=consider-using-with
for line in input_file:
parsed = parse_line(line)


@@ -21,7 +21,7 @@ class Record(namedtuple('Record', 'name time checkpoints')):
time = Decimal(lines[1])
checkpoints_str = lines[2].split(' ')
if len(checkpoints_str) != 26 or checkpoints_str[25] != "":
raise ValueError("wrong amount of checkpoint times: {}".format(len(checkpoints_str)))
raise ValueError(f"wrong amount of checkpoint times: {len(checkpoints_str)}")
checkpoints_str = checkpoints_str[:25]
checkpoints = tuple(Decimal(c) for c in checkpoints_str)
return Record(name=name, time=time, checkpoints=checkpoints)
@@ -46,11 +46,11 @@ def main():
for in_ in args.in_:
match = MAP_RE.match(os.path.basename(in_))
if not match:
raise ValueError("Invalid text score database name, does not end in '_record.dtb': {}".format(in_))
raise ValueError(f"Invalid text score database name, does not end in '_record.dtb': {in_}")
m = match.group("map")
if m in records:
raise ValueError("Two text score databases refer to the same map: {}".format(in_))
with open(in_) as f:
raise ValueError(f"Two text score databases refer to the same map: {in_}")
with open(in_, encoding="utf-8") as f:
records[m] = read_records(f)
if not args.dry_run:
@@ -62,23 +62,23 @@ def main():
"Timestamp TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, "
"Time FLOAT DEFAULT 0, "
"Server CHAR(4), " +
"".join("cp{} FLOAT DEFAULT 0, ".format(i + 1) for i in range(25)) +
"".join(f"cp{i + 1} FLOAT DEFAULT 0, " for i in range(25)) +
"GameID VARCHAR(64), "
"DDNet7 BOOL DEFAULT FALSE"
");")
c.executemany(
"INSERT INTO record_race (Map, Name, Time, Server, " +
"".join("cp{}, ".format(i + 1) for i in range(25)) +
"".join(f"cp{i + 1}, " for i in range(25)) +
"GameID, DDNet7) " +
"VALUES ({})".format(",".join("?" * 31)),
[(map, r.name, float(r.time), "TEXT", *[float(c) for c in r.checkpoints], None, False) for map in records for r in records[map]]
f"VALUES ({','.join('?' * 31)})",
[(map, r.name, float(r.time), "TEXT", *[float(c) for c in r.checkpoints], None, False) for map, record in records.items() for r in record]
)
conn.commit()
conn.close()
if args.stats:
print("Number of imported text databases: {}".format(len(records)), file=sys.stderr)
print("Number of imported ranks: {}".format(sum(len(r) for r in records.values()), file=sys.stderr))
print(f"Number of imported text databases: {len(records)}", file=sys.stderr)
print(f"Number of imported ranks: {sum(len(r) for r in records.values())}", file=sys.stderr)
if __name__ == '__main__':
sys.exit(main())
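One detail in the INSERT above: the 31 placeholders are generated rather than hand-written, and only the fixed SQL scaffolding goes through the f-string; the actual row values are bound as parameters. A sketch:

    placeholders = ",".join("?" * 31)  # "?,?,...,?" (31 question marks)
    print(f"VALUES ({placeholders})")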


@@ -30,6 +30,6 @@ table.sort(key=lambda l: l[3])
table = [["filename", "total", "empty", "missing", "unused"]] + table
s = [[str(e) for e in row] for row in table]
lens = [max(map(len, col)) for col in zip(*s)]
fmt = " ".join("{{:{}}}".format(x) for x in lens)
fmt = " ".join(f"{{:{x}}}" for x in lens)
t = [fmt.format(*row) for row in s]
print("\n".join(t))


@@ -5,7 +5,8 @@ import sys
import twlang
def copy_fix(infile, delete_unused, append_missing, delete_empty):
content = open(infile).readlines()
with open(infile, encoding="utf-8") as f:
content = f.readlines()
trans = twlang.translations(infile)
if delete_unused or append_missing:
local = twlang.localizes()
@@ -59,7 +60,8 @@ def main(argv):
content = copy_fix(infile, delete_unused, append_missing, delete_empty)
open(outfile, "w").write("".join(content))
with open(outfile, "w", encoding="utf-8") as f:
f.write("".join(content))
print("Successfully created '" + outfile + "'.")
if __name__ == '__main__':


@@ -4,7 +4,7 @@ from collections import OrderedDict
class LanguageDecodeError(Exception):
def __init__(self, message, filename, line):
error = "File \"{1}\", line {2}: {0}".format(message, filename, line+1)
error = f"File \"{filename}\", line {line+1}: {message}"
super().__init__(error)
@@ -45,7 +45,7 @@ def decode(fileobj, elements_per_key):
if len(data[current_key]) >= 1+elements_per_key:
raise LanguageDecodeError("Wrong number of elements per key", fileobj.name, index)
if current_key:
original = current_key[0] # pylint: disable=E1136
original = current_key[0] # pylint: disable=unsubscriptable-object
translation = line[3:]
if translation and [m.group(1) for m in re.finditer(cfmt, original, flags=re.X)] != [m.group(1) for m in re.finditer(cfmt, translation, flags=re.X)]:
raise LanguageDecodeError("Non-matching formatting string", fileobj.name, index)
@@ -68,7 +68,7 @@ def decode(fileobj, elements_per_key):
def check_file(path):
with open(path) as fileobj:
with open(path, encoding="utf-8") as fileobj:
matches = re.findall(r"Localize\s*\(\s*\"([^\"]+)\"(?:\s*,\s*\"([^\"]+)\")?\s*\)", fileobj.read())
return matches
@@ -86,13 +86,15 @@ def check_folder(path):
def languages():
index = decode(open("data/languages/index.txt"), 2)
with open("data/languages/index.txt", encoding="utf-8") as f:
index = decode(f, 2)
langs = {"data/languages/"+key[0]+".txt" : [key[0]]+elements for key, elements in index.items()}
return langs
def translations(filename):
return decode(open(filename), 1)
with open(filename, encoding="utf-8") as f:
return decode(f, 1)
def localizes():


@@ -7,4 +7,5 @@ os.chdir(os.path.dirname(__file__) + "/../..")
for lang in twlang.languages():
content = copy_fix(lang, delete_unused=True, append_missing=True, delete_empty=False)
open(lang, "w").write(content)
with open(lang, "w", encoding="utf-8") as f:
f.write(content)


@@ -18,14 +18,14 @@ from time import strftime
import os
def sqlite_table_exists(cursor, table):
cursor.execute("SELECT COUNT(*) FROM sqlite_master WHERE type='table' AND name='{}'".format(table))
cursor.execute(f"SELECT COUNT(*) FROM sqlite_master WHERE type='table' AND name='{table}'")
return cursor.fetchone()[0] != 0
def sqlite_num_transfer(conn, table):
c = conn.cursor()
if not sqlite_table_exists(c, table):
return 0
c.execute('SELECT COUNT(*) FROM {}'.format(table))
c.execute(f'SELECT COUNT(*) FROM {table}')
num = c.fetchone()[0]
return num
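Note that these f-strings interpolate table names, which SQLite cannot bind as parameters; that is safe here only because table comes from fixed internal strings. Values should still be bound, as a minimal sketch shows:

    import sqlite3

    conn = sqlite3.connect(":memory:")
    c = conn.cursor()
    table = "record_race"  # fixed internal name, never user input
    c.execute(f"CREATE TABLE {table} (Map TEXT)")
    c.execute(f"SELECT COUNT(*) FROM {table}")                     # identifier: f-string
    c.execute(f"SELECT * FROM {table} WHERE Map = ?", ("Sunny",))  # value: bound parameter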
@@ -66,7 +66,7 @@ def main():
args = parser.parse_args()
if not os.path.exists(args.f):
print("Warning: '{}' does not exist (yet). Is the path specified correctly?".format(args.f))
print(f"Warning: '{args.f}' does not exist (yet). Is the path specified correctly?")
return
conn = sqlite3.connect(args.f)
@@ -78,22 +78,18 @@ def main():
if num == 0:
return
print('{} new entries in backup database found ({} ranks, {} teamranks, {} saves'.format(num, num_ranks, num_teamranks, num_saves))
print('Moving entries from {} to {}'.format(
os.path.abspath(args.f),
os.path.abspath(args.to)))
print(f'{num} new entries in backup database found ({num_ranks} ranks, {num_teamranks} teamranks, {num_saves} saves)')
print(f'Moving entries from {os.path.abspath(args.f)} to {os.path.abspath(args.to)}')
sql_file = 'ddnet-server-' + strftime('%Y-%m-%d') + '.sql'
print("You can use the following commands to import the entries to MySQL (use sed 's/record_/prefix_/' for other database prefixes):")
print()
print((" echo '.dump --preserve-rowids' | sqlite3 {} | " + # including rowids, this forces sqlite to name all columns in each INSERT statement
print((f" echo '.dump --preserve-rowids' | sqlite3 {os.path.abspath(args.to)} | " + # including rowids, this forces sqlite to name all columns in each INSERT statement
"grep -E '^INSERT INTO record_(race|teamrace|saves)' | " + # filter out inserts
"sed -e 's/INSERT INTO/INSERT IGNORE INTO/' | " + # ignore duplicate rows
"sed -e 's/rowid,//' -e 's/VALUES([0-9]*,/VALUES(/' > {}") # filter out rowids again
.format(os.path.abspath(args.to), sql_file))
print(" mysql -u teeworlds -p'PW2' teeworlds < {}".format(sql_file))
f"sed -e 's/rowid,//' -e 's/VALUES([0-9]*,/VALUES(/' > {sql_file}")) # filter out rowids again
print(f" mysql -u teeworlds -p'PW2' teeworlds < {sql_file}")
print()
print("When the ranks are transfered successfully to mysql, {} can be removed".format(
os.path.abspath(args.to)))
print(f"When the ranks are transfered successfully to mysql, {os.path.abspath(args.to)} can be removed")
print()
print("Log of the transfer:")
print()


@@ -25,7 +25,7 @@ UNICODEDATA_FIELDS = (
)
def data():
with open('UnicodeData.txt') as f:
with open('UnicodeData.txt', encoding='utf-8') as f:
return list(csv.DictReader(f, fieldnames=UNICODEDATA_FIELDS, delimiter=';'))
def unhex(s):


@@ -1,9 +1,9 @@
print("#ifndef GENERATED_WORDLIST_H")
print("#define GENERATED_WORDLIST_H")
print("const char g_aFallbackWordlist[][32] = {")
with open("data/wordlist.txt") as f:
with open("data/wordlist.txt", encoding="utf-8") as f:
for line in f:
word = line.strip().split("\t")[1]
print("\t\"%s\", " % word)
print(f"\t\"{word}\", ")
print("};")
print("#endif // GENERATED_WORDLIST_H")