Mirror of https://github.com/godotengine/godot-editor-l10n.git, synced 2025-12-31 17:48:32 +03:00

Fix regression from ignoring comments
@@ -48,14 +48,14 @@ msgstr ""

# Regex "(?P<name>([^"\\]|\\.)*)" creates a group named `name` that matches a string.
message_patterns = {
re.compile(r'^(?!\s*\/\/|\s*\*).*RTR\(U?"(?P<message>([^"\\]|\\.)*)"(, "(?P<context>([^"\\]|\\.)*)")?\)'): ExtractType.TEXT,
re.compile(r'^(?!\s*\/\/|\s*\*).*TTR\(U?"(?P<message>([^"\\]|\\.)*)"(, "(?P<context>([^"\\]|\\.)*)")?\)'): ExtractType.TEXT,
re.compile(r'^(?!\s*\/\/|\s*\*).*TTRC\(U?"(?P<message>([^"\\]|\\.)*)"\)'): ExtractType.TEXT,
re.compile(r'RTR\(U?"(?P<message>([^"\\]|\\.)*)"(, "(?P<context>([^"\\]|\\.)*)")?\)'): ExtractType.TEXT,
re.compile(r'TTR\(U?"(?P<message>([^"\\]|\\.)*)"(, "(?P<context>([^"\\]|\\.)*)")?\)'): ExtractType.TEXT,
re.compile(r'TTRC\(U?"(?P<message>([^"\\]|\\.)*)"\)'): ExtractType.TEXT,
re.compile(
r'^(?!\s*\/\/|\s*\*).*TTRN\(U?"(?P<message>([^"\\]|\\.)*)", "(?P<plural_message>([^"\\]|\\.)*)",[^,)]+?(, "(?P<context>([^"\\]|\\.)*)")?\)'
r'TTRN\(U?"(?P<message>([^"\\]|\\.)*)", "(?P<plural_message>([^"\\]|\\.)*)",[^,)]+?(, "(?P<context>([^"\\]|\\.)*)")?\)'
): ExtractType.TEXT,
re.compile(
r'^(?!\s*\/\/|\s*\*).*RTRN\(U?"(?P<message>([^"\\]|\\.)*)", "(?P<plural_message>([^"\\]|\\.)*)",[^,)]+?(, "(?P<context>([^"\\]|\\.)*)")?\)'
r'RTRN\(U?"(?P<message>([^"\\]|\\.)*)", "(?P<plural_message>([^"\\]|\\.)*)",[^,)]+?(, "(?P<context>([^"\\]|\\.)*)")?\)'
): ExtractType.TEXT,
}
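Both forms of each pattern appear in the hunk above: one prefixed with `^(?!\s*\/\/|\s*\*).*` to ignore comment lines, and one without. A minimal standalone sketch (not part of the script; the sample line is invented) of how they behave differently when one line contains more than one call: the anchored pattern can yield at most one `finditer` match per line and its greedy `.*` jumps to the last call, while the plain pattern reports every call through its named groups.

```python
import re

# Hypothetical comparison of the two TTR patterns from the hunk above.
anchored_ttr = re.compile(r'^(?!\s*\/\/|\s*\*).*TTR\(U?"(?P<message>([^"\\]|\\.)*)"(, "(?P<context>([^"\\]|\\.)*)")?\)')
plain_ttr = re.compile(r'TTR\(U?"(?P<message>([^"\\]|\\.)*)"(, "(?P<context>([^"\\]|\\.)*)")?\)')

# Invented C++-style line with two TTR() calls.
line = '\tset_text(TTR("Add Node", "Scene Tree")); set_tooltip_text(TTR("Adds a new node."));'

print([m.group("message") for m in anchored_ttr.finditer(line)])
# ['Adds a new node.']  -- only the last call on the line is seen
print([(m.group("message"), m.group("context")) for m in plain_ttr.finditer(line)])
# [('Add Node', 'Scene Tree'), ('Adds a new node.', None)]
```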
@@ -99,45 +99,50 @@ def _extract_translator_comment(line, is_block_translator_comment):

def process_file(f, fname):
l = f.readline()
ls = l.lstrip()
lc = 1

reading_translator_comment = False
is_block_translator_comment = False
translator_comment = ""
patterns = message_patterns

while l:
has_translation_comment = l.find("TRANSLATORS:") != -1
# Skip code comments that aren't for translators.
if (not ls.startswith("//") and not ls.startswith("* ")) or reading_translator_comment or has_translation_comment:
# Detect translator comments.
if not reading_translator_comment and has_translation_comment:
reading_translator_comment = True
is_block_translator_comment = _is_block_translator_comment(l)
translator_comment = ""

# Detect translator comments.
if not reading_translator_comment and l.find("TRANSLATORS:") != -1:
reading_translator_comment = True
is_block_translator_comment = _is_block_translator_comment(l)
translator_comment = ""
# Gather translator comments. It will be gathered for the next translation function.
if reading_translator_comment:
reading_translator_comment, extracted_comment = _extract_translator_comment(l, is_block_translator_comment)
if extracted_comment != "":
translator_comment += extracted_comment + "\n"
if not reading_translator_comment:
translator_comment = translator_comment[:-1] # Remove extra \n at the end.

# Gather translator comments. It will be gathered for the next translation function.
if reading_translator_comment:
reading_translator_comment, extracted_comment = _extract_translator_comment(l, is_block_translator_comment)
if extracted_comment != "":
translator_comment += extracted_comment + "\n"
if not reading_translator_comment:
translator_comment = translator_comment[:-1] # Remove extra \n at the end.
for pattern, extract_type in patterns.items():
for m in pattern.finditer(l):
location = os.path.relpath(fname).replace("\\", "/")
if line_nb:
location += ":" + str(lc)

if not reading_translator_comment:
for pattern, extract_type in patterns.items():
for m in pattern.finditer(l):
location = os.path.relpath(fname).replace("\\", "/")
if line_nb:
location += ":" + str(lc)
captures = m.groupdict("")
msg = captures.get("message", "")
msg_plural = captures.get("plural_message", "")
msgctx = captures.get("context", "")

captures = m.groupdict("")
msg = captures.get("message", "")
msg_plural = captures.get("plural_message", "")
msgctx = captures.get("context", "")

if extract_type == ExtractType.TEXT:
_add_message(msg, msg_plural, msgctx, location, translator_comment)
translator_comment = ""
if extract_type == ExtractType.TEXT:
_add_message(msg, msg_plural, msgctx, location, translator_comment)
translator_comment = ""

l = f.readline()
ls = l.lstrip()
lc += 1

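The `process_file` hunk above gates pattern matching on a stripped-line check so that ordinary `//` and `*`-style comment lines are skipped while `TRANSLATORS:` comments are still read. A simplified, self-contained sketch of that gate (the sample lines are invented and the multi-line `reading_translator_comment` state is left out):

```python
# Hypothetical input lines to illustrate the comment gate above.
samples = [
    '\tadd_button(TTR("Open"));',                     # regular code: processed
    '\t// A note that happens to mention TTR("x").',  # plain comment: skipped
    '\t// TRANSLATORS: Appears in the file dialog.',  # translator comment: kept
    '\t * Doxygen-style block comment body.',         # block comment body: skipped
]

for l in samples:
    ls = l.lstrip()
    has_translation_comment = l.find("TRANSLATORS:") != -1
    if (not ls.startswith("//") and not ls.startswith("* ")) or has_translation_comment:
        print("processed:", ls)
    else:
        print("skipped:  ", ls)
```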
@@ -48,9 +48,9 @@ msgstr ""

# Regex "(?P<name>([^"\\]|\\.)*)" creates a group named `name` that matches a string.
message_patterns = {
re.compile(r'^(?!\s*\/\/|\s*\*).*ETR\(U?"(?P<message>([^"\\]|\\.)*)"(, "(?P<context>([^"\\]|\\.)*)")?\)'): ExtractType.TEXT,
re.compile(r'ETR\(U?"(?P<message>([^"\\]|\\.)*)"(, "(?P<context>([^"\\]|\\.)*)")?\)'): ExtractType.TEXT,
re.compile(
r'^(?!\s*\/\/|\s*\*).*ETRN\(U?"(?P<message>([^"\\]|\\.)*)", "(?P<plural_message>([^"\\]|\\.)*)",[^,)]+?(, "(?P<context>([^"\\]|\\.)*)")?\)'
r'ETRN\(U?"(?P<message>([^"\\]|\\.)*)", "(?P<plural_message>([^"\\]|\\.)*)",[^,)]+?(, "(?P<context>([^"\\]|\\.)*)")?\)'
): ExtractType.TEXT,
}
@@ -94,45 +94,50 @@ def _extract_translator_comment(line, is_block_translator_comment):

def process_file(f, fname):
l = f.readline()
ls = l.lstrip()
lc = 1

reading_translator_comment = False
is_block_translator_comment = False
translator_comment = ""
patterns = message_patterns

while l:
has_translation_comment = l.find("TRANSLATORS:") != -1
# Skip code comments that aren't for translators.
if (not ls.startswith("//") and not ls.startswith("* ")) or reading_translator_comment or has_translation_comment:
# Detect translator comments.
if not reading_translator_comment and has_translation_comment:
reading_translator_comment = True
is_block_translator_comment = _is_block_translator_comment(l)
translator_comment = ""

# Detect translator comments.
if not reading_translator_comment and l.find("TRANSLATORS:") != -1:
reading_translator_comment = True
is_block_translator_comment = _is_block_translator_comment(l)
translator_comment = ""
# Gather translator comments. It will be gathered for the next translation function.
if reading_translator_comment:
reading_translator_comment, extracted_comment = _extract_translator_comment(l, is_block_translator_comment)
if extracted_comment != "":
translator_comment += extracted_comment + "\n"
if not reading_translator_comment:
translator_comment = translator_comment[:-1] # Remove extra \n at the end.

# Gather translator comments. It will be gathered for the next translation function.
if reading_translator_comment:
reading_translator_comment, extracted_comment = _extract_translator_comment(l, is_block_translator_comment)
if extracted_comment != "":
translator_comment += extracted_comment + "\n"
if not reading_translator_comment:
translator_comment = translator_comment[:-1] # Remove extra \n at the end.
for pattern, extract_type in patterns.items():
for m in pattern.finditer(l):
location = os.path.relpath(fname).replace("\\", "/")
if line_nb:
location += ":" + str(lc)

if not reading_translator_comment:
for pattern, extract_type in patterns.items():
for m in pattern.finditer(l):
location = os.path.relpath(fname).replace("\\", "/")
if line_nb:
location += ":" + str(lc)
captures = m.groupdict("")
msg = captures.get("message", "")
msg_plural = captures.get("plural_message", "")
msgctx = captures.get("context", "")

captures = m.groupdict("")
msg = captures.get("message", "")
msg_plural = captures.get("plural_message", "")
msgctx = captures.get("context", "")

if extract_type == ExtractType.TEXT:
_add_message(msg, msg_plural, msgctx, location, translator_comment)
translator_comment = ""
if extract_type == ExtractType.TEXT:
_add_message(msg, msg_plural, msgctx, location, translator_comment)
translator_comment = ""

l = f.readline()
ls = l.lstrip()
lc += 1

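As in the first file, the loop reads its captures through `m.groupdict("")`, so optional groups such as `context` come back as empty strings. A small invented example against the plural `ETRN` pattern from the hunk further above:

```python
import re

# Hypothetical demo of the ETRN pattern and the groupdict("") access used above.
etrn = re.compile(
    r'ETRN\(U?"(?P<message>([^"\\]|\\.)*)", "(?P<plural_message>([^"\\]|\\.)*)",[^,)]+?(, "(?P<context>([^"\\]|\\.)*)")?\)'
)

for line in (
    'text = ETRN("%d item", "%d items", p_count);',               # invented
    'text = ETRN("%d item", "%d items", p_count, "Inventory");',  # invented
):
    captures = etrn.search(line).groupdict("")  # missing groups become ""
    print(captures["message"], "|", captures["plural_message"], "|", captures["context"])
# %d item | %d items |
# %d item | %d items | Inventory
```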
@@ -48,25 +48,25 @@ msgstr ""

# Regex "(?P<name>([^"\\]|\\.)*)" creates a group named `name` that matches a string.
message_patterns = {
re.compile(r'^(?!\s*\/\/|\s*\*).*_initial_set\("(?P<message>[^"]+?)",'): ExtractType.PROPERTY_PATH,
re.compile(r'^(?!\s*\/\/|\s*\*).*GLOBAL_DEF(_RST)?(_NOVAL)?(_BASIC)?\("(?P<message>[^"]+?)",'): ExtractType.PROPERTY_PATH,
re.compile(r'^(?!\s*\/\/|\s*\*).*EDITOR_DEF(_RST)?\("(?P<message>[^"]+?)",'): ExtractType.PROPERTY_PATH,
re.compile(r'_initial_set\("(?P<message>[^"]+?)",'): ExtractType.PROPERTY_PATH,
re.compile(r'GLOBAL_DEF(_RST)?(_NOVAL)?(_BASIC)?\("(?P<message>[^"]+?)",'): ExtractType.PROPERTY_PATH,
re.compile(r'EDITOR_DEF(_RST)?\("(?P<message>[^"]+?)",'): ExtractType.PROPERTY_PATH,
re.compile(
r'^(?!\s*\/\/|\s*\*).*EDITOR_SETTING(_USAGE)?\(Variant::[_A-Z0-9]+, [_A-Z0-9]+, "(?P<message>[^"]+?)",'
r'EDITOR_SETTING(_USAGE)?\(Variant::[_A-Z0-9]+, [_A-Z0-9]+, "(?P<message>[^"]+?)",'
): ExtractType.PROPERTY_PATH,
re.compile(
r"^(?!\s*\/\/|\s*\*).*(ADD_PROPERTYI?|GLOBAL_DEF(_RST)?(_NOVAL)?(_BASIC)?|ImportOption|ExportOption)\(PropertyInfo\("
r"(ADD_PROPERTYI?|GLOBAL_DEF(_RST)?(_NOVAL)?(_BASIC)?|ImportOption|ExportOption)\(PropertyInfo\("
+ r"Variant::[_A-Z0-9]+" # Name
+ r', "(?P<message>[^"]+)"' # Type
+ r'(, [_A-Z0-9]+(, "(?P<hint_string>(?:[^"\\]|\\.)*)"(, (?P<usage>[_A-Z0-9 |]+))?)?|\))' # [, hint[, hint string[, usage]]].
): ExtractType.PROPERTY_PATH,
re.compile(r'^(?!\s*\/\/|\s*\*).*ADD_ARRAY\("(?P<message>[^"]+)", '): ExtractType.PROPERTY_PATH,
re.compile(r'^(?!\s*\/\/|\s*\*).*ADD_ARRAY_COUNT(_WITH_USAGE_FLAGS)?\("(?P<message>[^"]+)", '): ExtractType.TEXT,
re.compile(r'^(?!\s*\/\/|\s*\*).*(ADD_GROUP|GNAME)\("(?P<message>[^"]+)", "(?P<prefix>[^"]*)"\)'): ExtractType.GROUP,
re.compile(r'^(?!\s*\/\/|\s*\*).*ADD_GROUP_INDENT\("(?P<message>[^"]+)", "(?P<prefix>[^"]*)", '): ExtractType.GROUP,
re.compile(r'^(?!\s*\/\/|\s*\*).*ADD_SUBGROUP\("(?P<message>[^"]+)", "(?P<prefix>[^"]*)"\)'): ExtractType.SUBGROUP,
re.compile(r'^(?!\s*\/\/|\s*\*).*ADD_SUBGROUP_INDENT\("(?P<message>[^"]+)", "(?P<prefix>[^"]*)", '): ExtractType.GROUP,
re.compile(r'^(?!\s*\/\/|\s*\*).*PNAME\("(?P<message>[^"]+)"\)'): ExtractType.PROPERTY_PATH,
re.compile(r'ADD_ARRAY\("(?P<message>[^"]+)", '): ExtractType.PROPERTY_PATH,
re.compile(r'ADD_ARRAY_COUNT(_WITH_USAGE_FLAGS)?\("(?P<message>[^"]+)", '): ExtractType.TEXT,
re.compile(r'(ADD_GROUP|GNAME)\("(?P<message>[^"]+)", "(?P<prefix>[^"]*)"\)'): ExtractType.GROUP,
re.compile(r'ADD_GROUP_INDENT\("(?P<message>[^"]+)", "(?P<prefix>[^"]*)", '): ExtractType.GROUP,
re.compile(r'ADD_SUBGROUP\("(?P<message>[^"]+)", "(?P<prefix>[^"]*)"\)'): ExtractType.SUBGROUP,
re.compile(r'ADD_SUBGROUP_INDENT\("(?P<message>[^"]+)", "(?P<prefix>[^"]*)", '): ExtractType.GROUP,
re.compile(r'PNAME\("(?P<message>[^"]+)"\)'): ExtractType.PROPERTY_PATH,
}
theme_property_patterns = {
re.compile(r'set_(?P<theme_item>constant|font|font_size|stylebox|color|icon)\("(?P<message>[^"]+)", '): ExtractType.PROPERTY_PATH,
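The multi-part `PropertyInfo` pattern above captures the property name plus an optional hint string and usage flags, which the `process_file` hunk below then inspects. A hedged, standalone sketch (the sample line is written in the style of Godot's property registration calls, not quoted from the engine):

```python
import re

# The concatenated pattern from the hunk above, without the comment-ignoring prefix.
property_info = re.compile(
    r"(ADD_PROPERTYI?|GLOBAL_DEF(_RST)?(_NOVAL)?(_BASIC)?|ImportOption|ExportOption)\(PropertyInfo\("
    + r"Variant::[_A-Z0-9]+"
    + r', "(?P<message>[^"]+)"'
    + r'(, [_A-Z0-9]+(, "(?P<hint_string>(?:[^"\\]|\\.)*)"(, (?P<usage>[_A-Z0-9 |]+))?)?|\))'
)

# Invented sample line; resembles but is not copied from real engine code.
line = 'GLOBAL_DEF_BASIC(PropertyInfo(Variant::STRING, "application/config/name", PROPERTY_HINT_PLACEHOLDER_TEXT, "Game Name", PROPERTY_USAGE_DEFAULT | PROPERTY_USAGE_EDITOR), "");'

captures = property_info.search(line).groupdict("")
print(captures["message"])      # application/config/name
print(captures["hint_string"])  # Game Name
print(captures["usage"])        # PROPERTY_USAGE_DEFAULT | PROPERTY_USAGE_EDITOR
```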
@@ -113,7 +113,9 @@ def _extract_translator_comment(line, is_block_translator_comment):

def process_file(f, fname):
l = f.readline()
ls = l.lstrip()
lc = 1

reading_translator_comment = False
is_block_translator_comment = False
translator_comment = ""
@@ -126,93 +128,97 @@ def process_file(f, fname):
patterns = {**message_patterns, **theme_property_patterns}

while l:
# Detect class name.
m = class_name_pattern.match(l)
if m:
current_class = m.group("class_name")
has_translation_comment = l.find("TRANSLATORS:") != -1
# Skip code comments that aren't for translators.
if (not ls.startswith("//") and not ls.startswith("* ")) or reading_translator_comment or has_translation_comment:
# Detect class name.
m = class_name_pattern.match(l)
if m:
current_class = m.group("class_name")

# Detect translator comments.
if not reading_translator_comment and l.find("TRANSLATORS:") != -1:
reading_translator_comment = True
is_block_translator_comment = _is_block_translator_comment(l)
translator_comment = ""
# Detect translator comments.
if not reading_translator_comment and has_translation_comment:
reading_translator_comment = True
is_block_translator_comment = _is_block_translator_comment(l)
translator_comment = ""

# Gather translator comments. It will be gathered for the next translation function.
if reading_translator_comment:
reading_translator_comment, extracted_comment = _extract_translator_comment(l, is_block_translator_comment)
if extracted_comment != "":
translator_comment += extracted_comment + "\n"
if not reading_translator_comment:
translator_comment = translator_comment[:-1] # Remove extra \n at the end.

# Gather translator comments. It will be gathered for the next translation function.
if reading_translator_comment:
reading_translator_comment, extracted_comment = _extract_translator_comment(l, is_block_translator_comment)
if extracted_comment != "":
translator_comment += extracted_comment + "\n"
if not reading_translator_comment:
translator_comment = translator_comment[:-1] # Remove extra \n at the end.
for pattern, extract_type in patterns.items():
for m in pattern.finditer(l):
location = os.path.relpath(fname).replace("\\", "/")
if line_nb:
location += ":" + str(lc)

if not reading_translator_comment:
for pattern, extract_type in patterns.items():
for m in pattern.finditer(l):
location = os.path.relpath(fname).replace("\\", "/")
if line_nb:
location += ":" + str(lc)
captures = m.groupdict("")
msg = captures.get("message", "")
msg_plural = captures.get("plural_message", "")
msgctx = captures.get("context", "")

captures = m.groupdict("")
msg = captures.get("message", "")
msg_plural = captures.get("plural_message", "")
msgctx = captures.get("context", "")

if extract_type == ExtractType.TEXT:
_add_message(msg, msg_plural, msgctx, location, translator_comment)
elif extract_type == ExtractType.PROPERTY_PATH:
usage_string = captures.get("usage") or "PROPERTY_USAGE_DEFAULT"
usages = [e.strip() for e in usage_string.split("|")]

if "PROPERTY_USAGE_GROUP" in usages:
if extract_type == ExtractType.TEXT:
_add_message(msg, msg_plural, msgctx, location, translator_comment)
current_group = captures["hint_string"]
elif extract_type == ExtractType.PROPERTY_PATH:
usage_string = captures.get("usage") or "PROPERTY_USAGE_DEFAULT"
usages = [e.strip() for e in usage_string.split("|")]

if "PROPERTY_USAGE_GROUP" in usages:
_add_message(msg, msg_plural, msgctx, location, translator_comment)
current_group = captures["hint_string"]
current_subgroup = ""
continue

# Ignore properties that are not meant to be displayed in the editor.
if "PROPERTY_USAGE_NO_EDITOR" in usages:
continue
if "PROPERTY_USAGE_DEFAULT" not in usages and "PROPERTY_USAGE_EDITOR" not in usages:
continue

property_path = msg
theme_item = captures.get("theme_item")
if theme_item:
if theme_item == "stylebox":
theme_item = "style"
property_path = "theme_override_" + theme_item + "s/" + property_path

if current_subgroup:
if msg.startswith(current_subgroup):
msg = msg[len(current_subgroup) :]
elif current_subgroup.startswith(msg):
pass # Keep this as-is. See EditorInspector::update_tree().
else:
current_subgroup = ""
elif current_group:
if msg.startswith(current_group):
msg = msg[len(current_group) :]
elif current_group.startswith(msg):
pass # Keep this as-is. See EditorInspector::update_tree().
else:
current_group = ""
current_subgroup = ""

if "." in msg: # Strip feature tag.
msg = msg.split(".", 1)[0]
for part in msg.split("/"):
msgctx = processor.get_context(part, property_path, current_class)
_add_message(processor.process_name(part), msg_plural, msgctx, location, translator_comment)
elif extract_type == ExtractType.GROUP:
_add_message(msg, msg_plural, msgctx, location, translator_comment)
current_group = captures["prefix"]
current_subgroup = ""
continue

# Ignore properties that are not meant to be displayed in the editor.
if "PROPERTY_USAGE_NO_EDITOR" in usages:
continue
if "PROPERTY_USAGE_DEFAULT" not in usages and "PROPERTY_USAGE_EDITOR" not in usages:
continue

property_path = msg
theme_item = captures.get("theme_item")
if theme_item:
if theme_item == "stylebox":
theme_item = "style"
property_path = "theme_override_" + theme_item + "s/" + property_path

if current_subgroup:
if msg.startswith(current_subgroup):
msg = msg[len(current_subgroup) :]
elif current_subgroup.startswith(msg):
pass # Keep this as-is. See EditorInspector::update_tree().
else:
current_subgroup = ""
elif current_group:
if msg.startswith(current_group):
msg = msg[len(current_group) :]
elif current_group.startswith(msg):
pass # Keep this as-is. See EditorInspector::update_tree().
else:
current_group = ""
current_subgroup = ""

if "." in msg: # Strip feature tag.
msg = msg.split(".", 1)[0]
for part in msg.split("/"):
msgctx = processor.get_context(part, property_path, current_class)
_add_message(processor.process_name(part), msg_plural, msgctx, location, translator_comment)
elif extract_type == ExtractType.GROUP:
_add_message(msg, msg_plural, msgctx, location, translator_comment)
current_group = captures["prefix"]
current_subgroup = ""
elif extract_type == ExtractType.SUBGROUP:
_add_message(msg, msg_plural, msgctx, location, translator_comment)
current_subgroup = captures["prefix"]
translator_comment = ""
elif extract_type == ExtractType.SUBGROUP:
_add_message(msg, msg_plural, msgctx, location, translator_comment)
current_subgroup = captures["prefix"]
translator_comment = ""

l = f.readline()
ls = l.lstrip()
lc += 1

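The group and subgroup handling above strips the current prefix from a property name before splitting it into parts for translation. A simplified illustration (the prefix and names are invented; the branch that resets `current_group` on a mismatch is omitted):

```python
# Hypothetical group prefix set by an earlier ADD_GROUP match.
current_group = "collision_"

for msg in ("collision_layer", "collision_mask/bits", "visibility_mode"):
    name = msg
    if name.startswith(current_group):
        name = name[len(current_group):]  # drop the group prefix, as in the hunk above
    print(msg, "->", name.split("/"))
# collision_layer -> ['layer']
# collision_mask/bits -> ['mask', 'bits']
# visibility_mode -> ['visibility_mode']
```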