Clean up scripts/update_localization.py
It now properly uses the Python built-in JSON library. Fixes #1185 on GitHub.
parent dc827fa460
commit d73165f563
--- a/scripts/update_localization.py
+++ b/scripts/update_localization.py
@@ -1,17 +1,28 @@
-import os, re, sys
+import json
+import os
+import sys
 
-match = re.search('(.*)/', sys.argv[0])
-if match != None:
-    os.chdir(match.group(1))
+os.chdir(os.path.dirname(os.path.realpath(sys.argv[0])) + "/..")
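A note on the new bootstrap line: the old re.search('(.*)/', sys.argv[0]) found no slash (and so never changed directory) when the script was started as python update_localization.py from inside scripts/. The replacement always resolves the script's real location first. A minimal standalone sketch of the same logic (paths invented):

    import os
    import sys

    # Resolve the script's real path (following symlinks), take its
    # directory, then step one level up to the repository root,
    # e.g. /home/user/ddnet/scripts -> /home/user/ddnet.
    script_dir = os.path.dirname(os.path.realpath(sys.argv[0]))
    repo_root = os.path.realpath(os.path.join(script_dir, ".."))
    print(script_dir, repo_root)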
 
-source_exts = [".c", ".cpp", ".h"]
-
-content_author = ""
+format = "{0:40} {1:8} {2:8} {3:8}".format
+
+SOURCE_EXTS = [".c", ".cpp", ".h"]
+
+JSON_KEY_AUTHORS="authors"
+JSON_KEY_TRANSL="translated strings"
+JSON_KEY_UNTRANSL="needs translation"
+JSON_KEY_OLDTRANSL="old translations"
+JSON_KEY_OR="or"
+JSON_KEY_TR="tr"
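Binding str.format to a module-level name gives the script a one-line row formatter for its summary table (it shadows the format() builtin, which the script does not otherwise use). A standalone sketch with made-up values:

    # "{0:40}" pads the filename to 40 columns (strings align left by
    # default), "{1:8}" etc. align the three integer counters right.
    row = "{0:40} {1:8} {2:8} {3:8}".format
    print(row("data/languages/german.json", 420, 7, 3))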
 
 def parse_source():
-    stringtable = {}
+    l10n = []
 
     def process_line(line):
-        if 'Localize("'.encode() in line:
-            fields = line.split('Localize("'.encode(), 1)[1].split('"'.encode(), 1)
-            stringtable[fields[0]] = ""
+        if b"Localize(\"" in line:
+            fields = line.split(b"Localize(\"", 1)[1].split(b"\"", 1)
+            l10n.append(fields[0].decode())
             process_line(fields[1])
 
     for root, dirs, files in os.walk("src"):
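The rewritten process_line keeps the old recursion so that several Localize() calls on one source line are all collected; only the target changed from dict keys to a list of decoded strings. A self-contained sketch of that recursion (the closure over l10n is replaced by an explicit parameter, and the input line is invented):

    def process_line(line, l10n):
        # Take the quoted string after the first Localize(", then recurse
        # on the remainder of the line to catch further calls.
        if b'Localize("' in line:
            fields = line.split(b'Localize("', 1)[1].split(b'"', 1)
            l10n.append(fields[0].decode())
            process_line(fields[1], l10n)

    found = []
    process_line(b'Say(Localize("Hello")); Say(Localize("Bye"));', found)
    print(found)  # ['Hello', 'Bye']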
@@ -20,90 +31,69 @@ def parse_source():
             if os.sep + "external" + os.sep in filename:
                 continue
 
-            if filename[-2:] in source_exts or filename[-4:] in source_exts:
-                for line in open(filename, "rb"):
+            if os.path.splitext(filename)[1] in SOURCE_EXTS:
+                # HACK: Open source as binary file.
+                # Necessary because some of teeworlds source files
+                # aren't utf-8 yet for some reason
+                for line in open(filename, 'rb'):
                     process_line(line)
 
-    return stringtable
+    return l10n
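os.path.splitext makes the extension check self-describing; the old slicing only worked because every wanted suffix happened to be exactly 2 or 4 characters long. A quick check with invented paths:

    import os.path

    SOURCE_EXTS = [".c", ".cpp", ".h"]

    for name in ("src/engine/client.cpp", "src/base/system.c", "README.md"):
        print(name, os.path.splitext(name)[1] in SOURCE_EXTS)
    # src/engine/client.cpp True
    # src/base/system.c True
    # README.md False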
 
 def load_languagefile(filename):
-    f = open(filename, "rb")
-    lines = f.readlines()
-    f.close()
-
-    stringtable = {}
-    authorpart = 0
-    global content_author
-
-    for i in range(0, len(lines)-1):
-        if authorpart == 0 and "\"authors\":".encode() in lines[i]:
-            authorpart = 1
-            content_author = lines[i]
-        elif authorpart == 1:
-            if "\"translated strings\":".encode() in lines[i]:
-                authorpart = 2
-            else:
-                content_author += lines[i]
-        elif "\"or\":".encode() in lines[i]:
-            stringtable[lines[i].strip()[7:-2]] = lines[i+1].strip()[7:-1]
-
-    return stringtable
+    return json.load(open(filename))
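The whole hand-rolled scanner, which silently depended on the exact indentation and key order of the language files, collapses into a single json.load. A sketch against a hypothetical miniature language file in the same shape:

    import json

    sample = '''{
      "authors": ["example translator"],
      "translated strings": [{"or": "Hello", "tr": "Hallo"}],
      "needs translation": [],
      "old translations": []
    }'''
    data = json.loads(sample)
    print(data["translated strings"][0]["tr"])  # Hallo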
 
-def generate_languagefile(outputfilename, srctable, loctable):
-    f = open(outputfilename, "wb")
+def write_languagefile(outputfilename, l10n_src, old_l10n_data):
+    result = {}
+
+    result[JSON_KEY_AUTHORS] = old_l10n_data[JSON_KEY_AUTHORS]
+
+    translations = {}
+    for type_ in (
+        JSON_KEY_OLDTRANSL,
+        JSON_KEY_UNTRANSL,
+        JSON_KEY_TRANSL,
+    ):
+        translations.update({
+            t[JSON_KEY_OR]: t[JSON_KEY_TR]
+            for t in old_l10n_data[type_]
+            if t[JSON_KEY_TR]
+        })
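The loop folds all three sections of the old file into one lookup table. Because JSON_KEY_TRANSL comes last and entries with an empty tr are filtered out, a current translation always wins over a stale one. A self-contained sketch with invented entries:

    translations = {}
    for entries in (
        [{"or": "Hello", "tr": "Hallo (alt)"}],  # old translations
        [{"or": "Connect", "tr": ""}],           # needs translation, filtered
        [{"or": "Hello", "tr": "Hallo"}],        # translated strings, wins
    ):
        translations.update({t["or"]: t["tr"] for t in entries if t["tr"]})
    print(translations)  # {'Hello': 'Hallo'}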
 
-    num_items = 0
-    new_items = 0
-    old_items = 0
+    num_items = set(translations) | set(l10n_src)
+    tsl_items = set(translations) & set(l10n_src)
+    new_items = set(l10n_src) - set(translations)
+    old_items = set(translations) - set(l10n_src)
+
+    def to_transl(set_):
+        return [
+            {
+                JSON_KEY_OR: x,
+                JSON_KEY_TR: translations.get(x, ""),
+            }
+            for x in set_
+        ]
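The old counters become plain set algebra: union for the total, intersection for strings that remain translated, and the two differences for strings that are new in the source or have dropped out of it. Illustrated with made-up data (set order may vary):

    translations = {"Hello": "Hallo", "Quit": "Beenden"}  # from the old file
    l10n_src = ["Hello", "Connect"]                       # from the source tree

    print(set(translations) | set(l10n_src))  # total: Hello, Quit, Connect
    print(set(translations) & set(l10n_src))  # still translated: Hello
    print(set(l10n_src) - set(translations))  # needs translation: Connect
    print(set(translations) - set(l10n_src))  # old translation: Quit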
 
-    srctable_keys = []
-    for key in srctable:
-        srctable_keys.append(key)
-    srctable_keys.sort()
-
-    content = content_author
+    result[JSON_KEY_TRANSL] = to_transl(tsl_items)
+    result[JSON_KEY_UNTRANSL] = to_transl(new_items)
+    result[JSON_KEY_OLDTRANSL] = to_transl(old_items)
+
+    json.dump(result, open(outputfilename, 'w'), sort_keys=True, indent=2)
+
+    print(format(outputfilename, len(num_items), len(new_items), len(old_items)))
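sort_keys=True orders the keys inside every JSON object and indent=2 pretty-prints, which keeps the generated files reasonably diff-friendly. For example:

    import json

    print(json.dumps({"tr": "Hallo", "or": "Hello"}, sort_keys=True, indent=2))
    # {
    #   "or": "Hello",
    #   "tr": "Hallo"
    # }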
-
-    content += "\"translated strings\": [\n".encode()
-    for k in srctable_keys:
-        if k in loctable and len(loctable[k]):
-            if not num_items == 0:
-                content += ",\n".encode()
-            content += "\t{\n\t\t\"or\": \"".encode() + k + "\",\n\t\t\"tr\": \"".encode() + loctable[k] + "\"\n\t}".encode()
-            num_items += 1
-    content += "],\n".encode()
-
-    content += "\"needs translation\": [\n".encode()
-    for k in srctable_keys:
-        if not k in loctable or len(loctable[k]) == 0:
-            if not new_items == 0:
-                content += ",\n".encode()
-            content += "\t{\n\t\t\"or\": \"".encode() + k + "\",\n\t\t\"tr\": \"\"\n\t}".encode()
-            num_items += 1
-            new_items += 1
-    content += "],\n".encode()
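This byte-level concatenation is exactly what the commit removes. Besides the comma bookkeeping, it never escaped anything, so a translation containing a double quote or backslash produced invalid JSON; json.dump handles that for free:

    import json

    # The old code would have emitted the inner quotes verbatim.
    print(json.dumps({"or": 'Press "fire"', "tr": ""}))
    # {"or": "Press \"fire\"", "tr": ""}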
-
-    content += "\"old translations\": [\n".encode()
-    for k in loctable:
-        if not k in srctable:
-            if not old_items == 0:
-                content += ",\n".encode()
-            content += "\t{\n\t\t\"or\": \"".encode() + k + "\",\n\t\t\"tr\": \"".encode() + loctable[k] + "\"\n\t}".encode()
-            num_items += 1
-            old_items += 1
-    content += "]\n}\n".encode()
-
-    f.write(content)
-    f.close()
-
-    print("%-40s %8d %8d %8d" % (outputfilename, num_items, new_items, old_items))
-
-srctable = parse_source()
-
-print("%-40s %8s %8s %8s" % ("filename", "total", "new", "old"))
-
-for filename in os.listdir("data/languages"):
-    if not filename[-5:] == ".json" or filename == "index.json":
-        continue
-
-    filename = "data/languages/" + filename
-    generate_languagefile(filename, srctable, load_languagefile(filename))
+
+if __name__ == '__main__':
+    l10n_src = parse_source()
+    print(format("filename", *(x.rjust(8) for x in ("total", "new", "old"))))
+    for filename in os.listdir("data/languages"):
+        try:
+            if (os.path.splitext(filename)[1] == ".json"
+                    and filename != "index.json"):
+                filename = "data/languages/" + filename
+                write_languagefile(filename, l10n_src, load_languagefile(filename))
+        except Exception as e:
+            print("Failed on {0}, re-raising for traceback".format(filename))
+            raise
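With the new __main__ guard the module can also be imported without rewriting every language file (the top-level os.chdir still runs on import). A hypothetical use from another maintenance script, assuming scripts/ is on sys.path:

    from update_localization import load_languagefile

    data = load_languagefile("data/languages/german.json")
    print(data["authors"])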