240 lines
7.9 KiB
Python
Executable file
240 lines
7.9 KiB
Python
Executable file
#!/usr/bin/python
|
|
# -*- coding: utf-8 -*-
|
|
#
|
|
# apply-languages.py -- generate language subpackages from LANGUAGES comment
|
|
# in RPM spec files
|
|
# Copyright © 2012, 2014 Red Hat, Inc.
|
|
#
|
|
# This program is free software; you can redistribute it and/or modify
|
|
# it under the terms of the GNU General Public License as published by
|
|
# the Free Software Foundation; either version 2 of the License, or
|
|
# (at your option) any later version.
|
|
#
|
|
# This program is distributed in the hope that it will be useful,
|
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
# GNU General Public License for more details.
|
|
#
|
|
# You should have received a copy of the GNU General Public License
|
|
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
|
#
|
|
# Author: Nils Philippsen <nils@redhat.com>
|
|
|
|
import os
|
|
import sys
|
|
import tempfile
|
|
import atexit
|
|
import re
|
|
from stat import S_IMODE
|
|
from itertools import islice
|
|
|
|
def usage():
    """Write a one-line usage summary to stderr and exit with status 1."""
    sys.stderr.write("Usage: %s <packagename>.spec\n" % (sys.argv[0],))
    sys.exit(1)
|
|
|
|
def cleantmpfile():
    """Remove the temporary spec file at interpreter exit (atexit hook).

    Best-effort cleanup: on the normal success path the temp file has
    already been renamed over the original spec, so the unlink fails with
    OSError, which is deliberately ignored.
    """
    # BUG FIX: the global statement previously read "global newfspath"
    # (letters transposed) while the variable used below is "newsfpath".
    # It worked by accident because the access is read-only, but the typo
    # obscured the intent; the statement now names the right global.
    global newsfpath

    try:
        os.unlink(newsfpath)
    except OSError:
        pass
|
|
|
|
# --- command-line handling ---------------------------------------------
# Exactly one argument is expected: the path of the spec file to rewrite.
if len(sys.argv) != 2:
    usage()

sfpath = sys.argv[1]
# The spec is rewritten in place (via temp file + rename), so it must be
# both readable and writable.
if not os.access(sfpath, os.R_OK | os.W_OK):
    print >>sys.stderr, "Not readable/writable:", sfpath
    sys.exit(2)
|
|
|
|
sfdir = os.path.dirname(sfpath)
sfbase = os.path.basename(sfpath)

# Create the temp file in the same directory as the spec so the final
# os.rename() is a same-filesystem (atomic) replace.  cleantmpfile() is
# registered so an aborted run does not leave the temp file behind.
(fd, newsfpath) = tempfile.mkstemp(prefix=sfbase + ".", dir=sfdir)
atexit.register(cleantmpfile)
newsf = os.fdopen(fd, "w")

# Read the whole original spec into memory; all scanning below works on
# this single string (or on its line list).
sf = open(sfpath)
sfcontent = sf.read()
sf.close()

# Remember the original permission bits: mkstemp creates the temp file
# with restrictive 0600 permissions, so the mode is restored after the
# rename at the end of the script.
sfmode = S_IMODE(os.stat(sfpath).st_mode)
|
|
|
|
# Patterns recognized in the spec file.  All are compiled with
# re.MULTILINE so ^/$ match at line boundaries inside the whole content.

# "# LANGUAGES: de,German fr,French ..." — the authoritative language list.
languages_re = re.compile(
    r"^#\s*LANGUAGES:\s*(?P<languages>[^\n\r]+)\s*$", re.MULTILINE)
# Whitespace separating the individual "code,Name" items of that list.
langsplit_re = re.compile(r"\s+")
# BEGIN/END marker comments delimiting the three generated sections.
begin_obsoletes_re = re.compile(
    r"^#\s*BEGIN:\s*OBSOLETE\s+LANGUAGES\s*$", re.MULTILINE)
end_obsoletes_re = re.compile(
    r"^#\s*END:\s*OBSOLETE\s+LANGUAGES\s*$", re.MULTILINE)
begin_langpkgs_re = re.compile(
    r"^#\s*BEGIN:\s*LANGUAGE\s+SUB\s+PACKAGES\s*$", re.MULTILINE)
end_langpkgs_re = re.compile(
    r"^#\s*END:\s*LANGUAGE\s+SUB\s+PACKAGES\s*$", re.MULTILINE)
begin_langfiles_re = re.compile(
    r"^#\s*BEGIN:\s*LANGUAGE\s+FILE\s+LISTS\s*$", re.MULTILINE)
end_langfiles_re = re.compile(
    r"^#\s*END:\s*LANGUAGE\s+FILE\s+LISTS\s*$", re.MULTILINE)

# Standard RPM preamble tags (tag names are case-insensitive in RPM).
name_re = re.compile(r"^name:\s*(?P<name>[^\n\r]+)\s*$",
                     re.MULTILINE | re.IGNORECASE)
version_re = re.compile(r"^version:\s*(?P<version>[^\n\r]+)\s*$",
                        re.MULTILINE | re.IGNORECASE)
release_re = re.compile(r"^release:\s*(?P<release>[^\n\r]+)\s*$",
                        re.MULTILINE | re.IGNORECASE)
license_re = re.compile(r"^license:\s*(?P<license>[^\n\r]+)\s*$",
                        re.MULTILINE | re.IGNORECASE)
# "%package <lang>" sub-package headers (existing language subpackages).
pkg_re = re.compile(r"^%package\s*(?P<pkg>\S+)\s*$",
                    re.MULTILINE | re.IGNORECASE)
group_re = re.compile(r"^group:\s*(?P<group>[^\n\r]+)\s*$",
                      re.MULTILINE | re.IGNORECASE)
|
|
|
|
# Sanity check: every tag and marker comment that the rewrite below
# depends on must be present.  All missing items are reported before
# exiting, so the user can fix the spec in one pass.
missing = False
for what, what_re in (("name tag", name_re), ("license tag", license_re),
        # BUG FIX: version and release were previously not validated here,
        # although both search results are dereferenced unconditionally
        # further down — a spec missing Version:/Release: crashed with
        # AttributeError instead of producing a clean error message.
        ("version tag", version_re), ("release tag", release_re),
        ("group tag", group_re), ("LANGUAGES comment", languages_re),
        ("BEGIN: OBSOLETE LANGUAGES comment", begin_obsoletes_re),
        ("END: OBSOLETE LANGUAGES comment", end_obsoletes_re),
        ("BEGIN: LANGUAGE SUB PACKAGES comment", begin_langpkgs_re),
        ("END: LANGUAGE SUB PACKAGES comment", end_langpkgs_re),
        ("BEGIN: LANGUAGE FILE LISTS comment", begin_langfiles_re),
        ("END: LANGUAGE FILE LISTS comment", end_langfiles_re),
        ):
    found = what_re.search(sfcontent)
    if found is None:
        print >>sys.stderr, "%s not found" % (what,)
        missing = True
if missing:
    sys.exit(2)
|
|
|
|
# Parse the LANGUAGES comment into (langcode, langname) pairs.  Each item
# has the form "code,Name_with_underscores"; underscores in the name stand
# for spaces.
langspecs = langsplit_re.split(
    languages_re.search(sfcontent).group('languages'))
# FIX: the previous comprehension split every item twice
# (x.split(",")[0] and x.split(",")[1]); split once per item instead.
# A stale commented-out version of this loop was also removed.
languages = []
for langspec in langspecs:
    fields = langspec.split(",")
    # An item without a comma still fails loudly (IndexError), as before.
    languages.append((fields[0], fields[1].replace('_', ' ')))
# Set of bare language codes, used for set arithmetic further down.
langcodes = set(x[0] for x in languages)

# RPM preamble values interpolated into the generated sections below.
name = name_re.search(sfcontent).group('name')
version = version_re.search(sfcontent).group('version')
release = release_re.search(sfcontent).group('release')
# NOTE(review): "license" shadows the builtin of the same name; kept as-is
# because it is an established module-level global.
license = license_re.search(sfcontent).group('license')
group = group_re.search(sfcontent).group('group')
|
|
|
|
# Match existing "Obsoletes:/Conflicts: <name>-<lang> ..." lines for this
# package.  BUG FIX: the package name is interpolated into the pattern, so
# it must be escaped — RPM package names may contain regex metacharacters
# such as "+" (e.g. "gtk+"), which previously corrupted the pattern.  This
# also replaces the fragile "% locals()" interpolation.
escaped_name = re.escape(name)
obsoletes_re = re.compile(
    r"^obsoletes:\s*%s-(?P<lang_ver>(?P<lang>\S+)\s*.*)$" % escaped_name,
    re.MULTILINE | re.IGNORECASE)
conflicts_re = re.compile(
    r"^conflicts:\s*%s-(?P<lang_ver>(?P<lang>\S+)\s*.*)$" % escaped_name,
    re.MULTILINE | re.IGNORECASE)
|
|
|
|
numlang = len(languages)
# Which BEGIN/END section the second pass is currently regenerating
# ('obsoletes', 'langpkgs', 'langfiles' or None when outside a section).
replacing = None

sflines = sfcontent.split("\n")
# A file ending in a newline yields a spurious empty last element from
# split(); drop it so the line loops below do not see a phantom line.
if sflines[-1] == "":
    del sflines[-1]
|
|
|
|
# handle obsoleting language subpackages
#
# First pass over the spec: a small state machine records which language
# subpackages currently exist (%package lines between the LANGUAGE SUB
# PACKAGES markers) and which languages are already being obsoleted
# (Obsoletes: lines between the OBSOLETE LANGUAGES markers).
#
# Each state maps to a flat tuple of alternating (marker regex, successor
# state) entries; matching a marker line switches states.
preprocess_state_transitions = {
    'out': (
        begin_obsoletes_re, 'in_obsoletes', begin_langpkgs_re, 'in_langpkgs'),
    'in_obsoletes': (end_obsoletes_re, 'out'),
    'in_langpkgs': (end_langpkgs_re, 'out'),
}

state = 'out'
# True forces (re)computation of the transition pairs; start True so the
# first iteration builds them for the initial state.
state_change = True

found_obsoleted_langs = set()
found_lang_pkgs = set()

for line in sflines:
    if state_change:
        # Unpack the flat (regex, state, regex, state, ...) tuple into
        # (regex, state) pairs for the current state.
        # NOTE(review): relies on Python 2 zip() returning a list —
        # packed_transitions is re-iterated on every line until the next
        # state change; a Python 3 zip iterator would be exhausted.
        transitions = preprocess_state_transitions[state]
        packed_transitions = zip(
            islice(transitions, 0, None, 2), islice(transitions, 1, None, 2))
        state_change = False

    for regex, new_state in packed_transitions:
        if regex.match(line):
            state_change = True
            state = new_state
            break
    if state_change:
        # A marker line itself is not scanned for packages/obsoletes.
        continue

    if state == 'in_obsoletes':
        m = obsoletes_re.match(line)
        if m:
            found_obsoleted_langs.add(m.group('lang'))
    elif state == 'in_langpkgs':
        m = pkg_re.match(line)
        if m:
            found_lang_pkgs.add(m.group('pkg'))

# Languages that have a subpackage but are no longer in the LANGUAGES list
# must start being obsoleted; languages that are obsoleted but listed
# again must stop being obsoleted.
langcodes_to_obsolete = found_lang_pkgs - langcodes
langcodes_to_unobsolete = found_obsoleted_langs & langcodes
|
|
|
|
# True while skipping the remainder of a dropped Obsoletes/Conflicts entry.
gobble = False
# update spec file
#
# Second pass: copy the spec to the temp file, regenerating the content
# between each pair of BEGIN/END markers.  Old content inside the
# 'langpkgs' and 'langfiles' sections is silently dropped (no else
# branch); the 'obsoletes' section is filtered line by line instead.
for line in sflines:
    if not replacing:
        print >>newsf, line
        if begin_obsoletes_re.match(line):
            replacing = 'obsoletes'
        elif begin_langpkgs_re.match(line):
            replacing = 'langpkgs'
        elif begin_langfiles_re.match(line):
            replacing = 'langfiles'
    elif replacing == 'obsoletes':
        om = obsoletes_re.match(line)
        cm = conflicts_re.match(line)
        em = end_obsoletes_re.match(line)
        if em:
            gobble = False
            replacing = None
            # Append Obsoletes/Conflicts pairs for subpackages that have
            # disappeared from the LANGUAGES list, then the END marker.
            for lang in langcodes_to_obsolete:
                print >>newsf, (
                    "Obsoletes: %(name)s-%(lang)s < %(version)s-%(release)s\n"
                    "Conflicts: %(name)s-%(lang)s < %(version)s-%(release)s"
                ) % locals()
            print >>newsf, line
        elif not (om or cm):
            # Unrelated line inside the section: keep it unless we are in
            # the middle of gobbling a dropped entry.
            if not gobble:
                print >>newsf, line
        elif (
                om and om.group('lang') not in langcodes_to_unobsolete or
                cm and cm.group('lang') not in langcodes_to_unobsolete):
            # Obsoletes/Conflicts for a language that stays obsolete: keep.
            gobble = False
            print >>newsf, line
        else:
            # Entry for a language that is active again: drop this line
            # and following unrelated lines until the next kept entry.
            gobble = True
    elif replacing == 'langpkgs':
        if end_langpkgs_re.match(line):
            replacing = None
            # Emit a %package/%description stanza per language, then the
            # END marker.  "%%" survives the % locals() formatting as a
            # literal "%" for RPM macros.
            for no, lang in enumerate(languages):
                langcode, langname = lang
                print >>newsf, """%%package %(langcode)s
Summary: %(langname)s (%(langcode)s) language support for %(name)s
Group: %(group)s
Requires: %%{name} = %%{?epoch:%%{epoch}:}%%{version}-%%{release}

%%description %(langcode)s
%(langname)s language support for %(name)s.""" % locals()
                # NOTE(review): `no < numlang` is always true (no ranges
                # over 0..numlang-1), so a blank line follows every
                # stanza, including the last; `no < numlang - 1` was
                # probably intended — confirm before changing the output.
                if no < numlang:
                    print >>newsf
            print >>newsf, line
    elif replacing == 'langfiles':
        if end_langfiles_re.match(line):
            replacing = None
            # Emit one %files line per language referencing its generated
            # file list, then the END marker.
            for lang in languages:
                langcode, langname = lang
                print >>newsf, "%%files %(langcode)s -f files.list.%(langcode)s" % locals()
            print >>newsf, line

newsf.close()

# Atomically replace the original spec and restore its permission bits
# (mkstemp created the temp file with mode 0600).  The atexit cleanup's
# unlink of the now-renamed temp path fails harmlessly afterwards.
os.rename(newsfpath, sfpath)
os.chmod(sfpath, sfmode)
|