# Copyright 1998-2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from __future__ import division, unicode_literals
__all__ = [
"vardbapi", "vartree", "dblink"] + \
["write_contents", "tar_contents"]
import portage
portage.proxy.lazyimport.lazyimport(globals(),
'portage.checksum:_perform_md5_merge@perform_md5',
'portage.data:portage_gid,portage_uid,secpass',
'portage.dbapi.dep_expand:dep_expand',
'portage.dbapi._MergeProcess:MergeProcess',
'portage.dbapi._SyncfsProcess:SyncfsProcess',
'portage.dep:dep_getkey,isjustname,isvalidatom,match_from_list,' + \
'use_reduce,_slot_separator,_repo_separator',
'portage.eapi:_get_eapi_attrs',
'portage.elog:collect_ebuild_messages,collect_messages,' + \
'elog_process,_merge_logentries',
'portage.locks:lockdir,unlockdir,lockfile,unlockfile',
'portage.output:bold,colorize',
'portage.package.ebuild.doebuild:doebuild_environment,' + \
'_merge_unicode_error', '_spawn_phase',
'portage.package.ebuild.prepare_build_dirs:prepare_build_dirs',
'portage.package.ebuild._ipc.QueryCommand:QueryCommand',
'portage.util:apply_secpass_permissions,ConfigProtect,ensure_dirs,' + \
'writemsg,writemsg_level,write_atomic,atomic_ofstream,writedict,' + \
'grabdict,normalize_path,new_protect_filename',
'portage.util.digraph:digraph',
'portage.util.env_update:env_update',
'portage.util.listdir:dircache,listdir',
'portage.util.movefile:movefile',
'portage.util.writeable_check:get_ro_checker',
'portage.util._dyn_libs.PreservedLibsRegistry:PreservedLibsRegistry',
'portage.util._dyn_libs.LinkageMapELF:LinkageMapELF@LinkageMap',
'portage.util._async.SchedulerInterface:SchedulerInterface',
'portage.util._eventloop.EventLoop:EventLoop',
'portage.util._eventloop.global_event_loop:global_event_loop',
'portage.versions:best,catpkgsplit,catsplit,cpv_getkey,vercmp,' + \
'_get_slot_re,_pkgsplit@pkgsplit,_pkg_str,_unknown_repo',
'subprocess',
'tarfile',
)
from portage.const import CACHE_PATH, CONFIG_MEMORY_FILE, \
MERGING_IDENTIFIER, PORTAGE_PACKAGE_ATOM, PRIVATE_PATH, VDB_PATH
from portage.dbapi import dbapi
from portage.exception import CommandNotFound, \
InvalidData, InvalidLocation, InvalidPackageName, \
FileNotFound, PermissionDenied, UnsupportedAPIException
from portage.localization import _
from portage import abssymlink, _movefile, bsd_chflags
# This is a special version of the os module, wrapped for unicode support.
from portage import os
from portage import shutil
from portage import _encodings
from portage import _os_merge
from portage import _selinux_merge
from portage import _unicode_decode
from portage import _unicode_encode
from _emerge.EbuildBuildDir import EbuildBuildDir
from _emerge.EbuildPhase import EbuildPhase
from _emerge.emergelog import emergelog
from _emerge.MiscFunctionsProcess import MiscFunctionsProcess
from _emerge.SpawnProcess import SpawnProcess
import errno
import fileinput
import fnmatch
import gc
import grp
import io
from itertools import chain
import logging
import os as _os
import platform
import pwd
import re
import stat
import sys
import tempfile
import textwrap
import time
import warnings
try:
import cPickle as pickle
except ImportError:
import pickle
if sys.hexversion >= 0x3000000:
# pylint: disable=W0622
basestring = str
long = int
_unicode = str
else:
_unicode = unicode
class vardbapi(dbapi):
_excluded_dirs = ["CVS", "lost+found"]
_excluded_dirs = [re.escape(x) for x in _excluded_dirs]
_excluded_dirs = re.compile(r'^(\..*|' + MERGING_IDENTIFIER + '.*|' + \
"|".join(_excluded_dirs) + r')$')
_aux_cache_version = "1"
_owners_cache_version = "1"
# Number of uncached packages to trigger cache update, since
# it's wasteful to update it for every vdb change.
_aux_cache_threshold = 5
_aux_cache_keys_re = re.compile(r'^NEEDED\..*$')
_aux_multi_line_re = re.compile(r'^(CONTENTS|NEEDED\..*)$')
def __init__(self, _unused_param=DeprecationWarning,
categories=None, settings=None, vartree=None):
"""
The categories parameter is unused since the dbapi class
now has a categories property that is generated from the
available packages.
"""
# Used by emerge to check whether any packages
# have been added or removed.
self._pkgs_changed = False
# The _aux_cache_threshold doesn't work as designed
# if the cache is flushed from a subprocess, so we
# use this to avoid wasteful vdb cache updates.
self._flush_cache_enabled = True
#cache for category directory mtimes
self.mtdircache = {}
#cache for dependency checks
self.matchcache = {}
#cache for cp_list results
self.cpcache = {}
self.blockers = None
if settings is None:
settings = portage.settings
self.settings = settings
if _unused_param is not DeprecationWarning:
warnings.warn("The first parameter of the "
"portage.dbapi.vartree.vardbapi"
" constructor is now unused. Instead "
"settings['ROOT'] is used.",
DeprecationWarning, stacklevel=2)
self._eroot = settings['EROOT']
self._dbroot = self._eroot + VDB_PATH
self._lock = None
self._lock_count = 0
self._conf_mem_file = self._eroot + CONFIG_MEMORY_FILE
self._fs_lock_obj = None
self._fs_lock_count = 0
if vartree is None:
vartree = portage.db[settings['EROOT']]['vartree']
self.vartree = vartree
self._aux_cache_keys = set(
["BUILD_TIME", "CHOST", "COUNTER", "DEPEND", "DESCRIPTION",
"EAPI", "HDEPEND", "HOMEPAGE", "IUSE", "KEYWORDS",
"LICENSE", "PDEPEND", "PROPERTIES", "PROVIDE", "RDEPEND",
"repository", "RESTRICT" , "SLOT", "USE", "DEFINED_PHASES",
])
self._aux_cache_obj = None
self._aux_cache_filename = os.path.join(self._eroot,
CACHE_PATH, "vdb_metadata.pickle")
self._counter_path = os.path.join(self._eroot,
CACHE_PATH, "counter")
self._plib_registry = PreservedLibsRegistry(settings["ROOT"],
os.path.join(self._eroot, PRIVATE_PATH, "preserved_libs_registry"))
self._linkmap = LinkageMap(self)
self._owners = self._owners_db(self)
self._cached_counter = None
@property
def root(self):
warnings.warn("The root attribute of "
"portage.dbapi.vartree.vardbapi"
" is deprecated. Use "
"settings['ROOT'] instead.",
DeprecationWarning, stacklevel=3)
return self.settings['ROOT']
def getpath(self, mykey, filename=None):
# This is an optimized hotspot, so don't use unicode-wrapped
# os module and don't use os.path.join().
rValue = self._eroot + VDB_PATH + _os.sep + mykey
if filename is not None:
# If filename is always relative, we can do just
# rValue += _os.sep + filename
rValue = _os.path.join(rValue, filename)
return rValue
def lock(self):
"""
Acquire a reentrant lock, blocking, for cooperation with concurrent
processes. State is inherited by subprocesses, allowing subprocesses
to reenter a lock that was acquired by a parent process. However,
a lock can be released only by the same process that acquired it.
"""
if self._lock_count:
self._lock_count += 1
elif os.environ.get("PORTAGE_LOCKS") != "false":
if self._lock is not None:
raise AssertionError("already locked")
# At least the parent needs to exist for the lock file.
ensure_dirs(self._dbroot)
self._lock = lockdir(self._dbroot)
self._lock_count += 1
def unlock(self):
"""
Release a lock, decrementing the recursion level. Each unlock() call
must be matched with a prior lock() call, or else an AssertionError
will be raised if unlock() is called while not locked.
"""
if self._lock_count > 1:
self._lock_count -= 1
elif os.environ.get("PORTAGE_LOCKS") != "false":
if self._lock is None:
raise AssertionError("not locked")
self._lock_count = 0
unlockdir(self._lock)
self._lock = None
def _fs_lock(self):
"""
Acquire a reentrant lock, blocking, for cooperation with concurrent
processes.
"""
if self._fs_lock_count < 1:
if self._fs_lock_obj is not None:
raise AssertionError("already locked")
try:
self._fs_lock_obj = lockfile(self._conf_mem_file)
except InvalidLocation:
self.settings._init_dirs()
self._fs_lock_obj = lockfile(self._conf_mem_file)
self._fs_lock_count += 1
def _fs_unlock(self):
"""
Release a lock, decrementing the recursion level.
"""
if self._fs_lock_count <= 1:
if self._fs_lock_obj is None:
raise AssertionError("not locked")
unlockfile(self._fs_lock_obj)
self._fs_lock_obj = None
self._fs_lock_count -= 1
def _bump_mtime(self, cpv):
"""
This is called before and after any modifications, so that consumers
can use directory mtimes to validate caches. See bug #290428.
"""
base = self._eroot + VDB_PATH
cat = catsplit(cpv)[0]
catdir = base + _os.sep + cat
t = time.time()
t = (t, t)
try:
for x in (catdir, base):
os.utime(x, t)
except OSError:
ensure_dirs(catdir)
def cpv_exists(self, mykey, myrepo=None):
"Tells us whether an actual ebuild exists on disk (no masking)"
return os.path.exists(self.getpath(mykey))
def cpv_counter(self, mycpv):
"This method will grab the COUNTER. Returns a counter value."
try:
return long(self.aux_get(mycpv, ["COUNTER"])[0])
except (KeyError, ValueError):
pass
writemsg_level(_("portage: COUNTER for %s was corrupted; " \
"resetting to value of 0\n") % (mycpv,),
level=logging.ERROR, noiselevel=-1)
return 0
def cpv_inject(self, mycpv):
"injects a real package into our on-disk database; assumes mycpv is valid and doesn't already exist"
ensure_dirs(self.getpath(mycpv))
counter = self.counter_tick(mycpv=mycpv)
# write local package counter so that emerge clean does the right thing
write_atomic(self.getpath(mycpv, filename="COUNTER"), str(counter))
def isInjected(self, mycpv):
if self.cpv_exists(mycpv):
if os.path.exists(self.getpath(mycpv, filename="INJECTED")):
return True
if not os.path.exists(self.getpath(mycpv, filename="CONTENTS")):
return True
return False
def move_ent(self, mylist, repo_match=None):
origcp = mylist[1]
newcp = mylist[2]
# sanity check
for atom in (origcp, newcp):
if not isjustname(atom):
raise InvalidPackageName(str(atom))
origmatches = self.match(origcp, use_cache=0)
moves = 0
if not origmatches:
return moves
for mycpv in origmatches:
try:
mycpv = self._pkg_str(mycpv, None)
except (KeyError, InvalidData):
continue
mycpv_cp = cpv_getkey(mycpv)
if mycpv_cp != origcp:
# Ignore PROVIDE virtual match.
continue
if repo_match is not None \
and not repo_match(mycpv.repo):
continue
# Use isvalidatom() to check if this move is valid for the
# EAPI (characters allowed in package names may vary).
if not isvalidatom(newcp, eapi=mycpv.eapi):
continue
mynewcpv = mycpv.replace(mycpv_cp, _unicode(newcp), 1)
mynewcat = catsplit(newcp)[0]
origpath = self.getpath(mycpv)
if not os.path.exists(origpath):
continue
moves += 1
if not os.path.exists(self.getpath(mynewcat)):
#create the directory
ensure_dirs(self.getpath(mynewcat))
newpath = self.getpath(mynewcpv)
if os.path.exists(newpath):
#dest already exists; keep this puppy where it is.
continue
_movefile(origpath, newpath, mysettings=self.settings)
self._clear_pkg_cache(self._dblink(mycpv))
self._clear_pkg_cache(self._dblink(mynewcpv))
# We need to rename the ebuild now.
old_pf = catsplit(mycpv)[1]
new_pf = catsplit(mynewcpv)[1]
if new_pf != old_pf:
try:
os.rename(os.path.join(newpath, old_pf + ".ebuild"),
os.path.join(newpath, new_pf + ".ebuild"))
except EnvironmentError as e:
if e.errno != errno.ENOENT:
raise
del e
write_atomic(os.path.join(newpath, "PF"), new_pf+"\n")
write_atomic(os.path.join(newpath, "CATEGORY"), mynewcat+"\n")
return moves
def cp_list(self, mycp, use_cache=1):
mysplit=catsplit(mycp)
if mysplit[0] == '*':
mysplit[0] = mysplit[0][1:]
try:
if sys.hexversion >= 0x3030000:
mystat = os.stat(self.getpath(mysplit[0])).st_mtime_ns
else:
mystat = os.stat(self.getpath(mysplit[0])).st_mtime
except OSError:
mystat = 0
if use_cache and mycp in self.cpcache:
cpc = self.cpcache[mycp]
if cpc[0] == mystat:
return cpc[1][:]
cat_dir = self.getpath(mysplit[0])
try:
dir_list = os.listdir(cat_dir)
except EnvironmentError as e:
if e.errno == PermissionDenied.errno:
raise PermissionDenied(cat_dir)
del e
dir_list = []
returnme = []
for x in dir_list:
if self._excluded_dirs.match(x) is not None:
continue
ps = pkgsplit(x)
if not ps:
self.invalidentry(os.path.join(self.getpath(mysplit[0]), x))
continue
if len(mysplit) > 1:
if ps[0] == mysplit[1]:
returnme.append(_pkg_str(mysplit[0]+"/"+x))
self._cpv_sort_ascending(returnme)
if use_cache:
self.cpcache[mycp] = [mystat, returnme[:]]
elif mycp in self.cpcache:
del self.cpcache[mycp]
return returnme
def cpv_all(self, use_cache=1):
"""
Set use_cache=0 to bypass the portage.cachedir() cache in cases
when the accuracy of mtime staleness checks should not be trusted
(generally this is only necessary in critical sections that
involve merge or unmerge of packages).
"""
returnme = []
basepath = os.path.join(self._eroot, VDB_PATH) + os.path.sep
if use_cache:
from portage import listdir
else:
def listdir(p, **kwargs):
try:
return [x for x in os.listdir(p) \
if os.path.isdir(os.path.join(p, x))]
except EnvironmentError as e:
if e.errno == PermissionDenied.errno:
raise PermissionDenied(p)
del e
return []
for x in listdir(basepath, EmptyOnError=1, ignorecvs=1, dirsonly=1):
if self._excluded_dirs.match(x) is not None:
continue
if not self._category_re.match(x):
continue
for y in listdir(basepath + x, EmptyOnError=1, dirsonly=1):
if self._excluded_dirs.match(y) is not None:
continue
subpath = x + "/" + y
# -MERGING- should never be a cpv, nor should files.
try:
if catpkgsplit(subpath) is None:
self.invalidentry(self.getpath(subpath))
continue
except InvalidData:
self.invalidentry(self.getpath(subpath))
continue
returnme.append(subpath)
return returnme
def cp_all(self, use_cache=1):
mylist = self.cpv_all(use_cache=use_cache)
d={}
for y in mylist:
if y[0] == '*':
y = y[1:]
try:
mysplit = catpkgsplit(y)
except InvalidData:
self.invalidentry(self.getpath(y))
continue
if not mysplit:
self.invalidentry(self.getpath(y))
continue
d[mysplit[0]+"/"+mysplit[1]] = None
return list(d)
def checkblockers(self, origdep):
pass
def _clear_cache(self):
self.mtdircache.clear()
self.matchcache.clear()
self.cpcache.clear()
self._aux_cache_obj = None
def _add(self, pkg_dblink):
self._pkgs_changed = True
self._clear_pkg_cache(pkg_dblink)
def _remove(self, pkg_dblink):
self._pkgs_changed = True
self._clear_pkg_cache(pkg_dblink)
def _clear_pkg_cache(self, pkg_dblink):
# Due to 1 second mtime granularity in <python-2.5, mtime checks
# are not always sufficient to invalidate vardbapi caches. Therefore,
# the caches need to be actively invalidated here.
self.mtdircache.pop(pkg_dblink.cat, None)
self.matchcache.pop(pkg_dblink.cat, None)
self.cpcache.pop(pkg_dblink.mysplit[0], None)
dircache.pop(pkg_dblink.dbcatdir, None)
def match(self, origdep, use_cache=1):
"caching match function"
mydep = dep_expand(
origdep, mydb=self, use_cache=use_cache, settings=self.settings)
cache_key = (mydep, mydep.unevaluated_atom)
mykey = dep_getkey(mydep)
mycat = catsplit(mykey)[0]
if not use_cache:
if mycat in self.matchcache:
del self.mtdircache[mycat]
del self.matchcache[mycat]
return list(self._iter_match(mydep,
self.cp_list(mydep.cp, use_cache=use_cache)))
try:
if sys.hexversion >= 0x3030000:
curmtime = os.stat(os.path.join(self._eroot, VDB_PATH, mycat)).st_mtime_ns
else:
curmtime = os.stat(os.path.join(self._eroot, VDB_PATH, mycat)).st_mtime
except (IOError, OSError):
curmtime=0
if mycat not in self.matchcache or \
self.mtdircache[mycat] != curmtime:
# clear cache entry
self.mtdircache[mycat] = curmtime
self.matchcache[mycat] = {}
if mydep not in self.matchcache[mycat]:
mymatch = list(self._iter_match(mydep,
self.cp_list(mydep.cp, use_cache=use_cache)))
self.matchcache[mycat][cache_key] = mymatch
return self.matchcache[mycat][cache_key][:]
def findname(self, mycpv, myrepo=None):
return self.getpath(str(mycpv), filename=catsplit(mycpv)[1]+".ebuild")
def flush_cache(self):
"""If the current user has permission and the internal aux_get cache has
been updated, save it to disk and mark it unmodified. This is called
by emerge after it has loaded the full vdb for use in dependency
calculations. Currently, the cache is only written if the user has
superuser privileges (since that's required to obtain a lock), but all
users have read access and benefit from faster metadata lookups (as
long as at least part of the cache is still valid)."""
if self._flush_cache_enabled and \
self._aux_cache is not None and \
len(self._aux_cache["modified"]) >= self._aux_cache_threshold and \
secpass >= 2:
self._owners.populate() # index any unindexed contents
valid_nodes = set(self.cpv_all())
for cpv in list(self._aux_cache["packages"]):
if cpv not in valid_nodes:
del self._aux_cache["packages"][cpv]
del self._aux_cache["modified"]
try:
f = atomic_ofstream(self._aux_cache_filename, 'wb')
pickle.dump(self._aux_cache, f, protocol=2)
f.close()
apply_secpass_permissions(
self._aux_cache_filename, gid=portage_gid, mode=0o644)
except (IOError, OSError) as e:
pass
self._aux_cache["modified"] = set()
@property
def _aux_cache(self):
if self._aux_cache_obj is None:
self._aux_cache_init()
return self._aux_cache_obj
def _aux_cache_init(self):
aux_cache = None
open_kwargs = {}
if sys.hexversion >= 0x3000000 and sys.hexversion < 0x3020000:
# Buffered io triggers extreme performance issues in
# Unpickler.load() (problem observed with python-3.0.1).
# Unfortunately, performance is still poor relative to
# python-2.x, but buffering makes it much worse (problem
# appears to be solved in Python >=3.2 at least).
open_kwargs["buffering"] = 0
try:
with open(_unicode_encode(self._aux_cache_filename,
encoding=_encodings['fs'], errors='strict'),
mode='rb', **open_kwargs) as f:
mypickle = pickle.Unpickler(f)
try:
mypickle.find_global = None
except AttributeError:
# TODO: If py3k, override Unpickler.find_class().
pass
aux_cache = mypickle.load()
except (SystemExit, KeyboardInterrupt):
raise
except Exception as e:
if isinstance(e, EnvironmentError) and \
getattr(e, 'errno', None) in (errno.ENOENT, errno.EACCES):
pass
else:
writemsg(_("!!! Error loading '%s': %s\n") % \
(self._aux_cache_filename, e), noiselevel=-1)
del e
if not aux_cache or \
not isinstance(aux_cache, dict) or \
aux_cache.get("version") != self._aux_cache_version or \
not aux_cache.get("packages"):
aux_cache = {"version": self._aux_cache_version}
aux_cache["packages"] = {}
owners = aux_cache.get("owners")
if owners is not None:
if not isinstance(owners, dict):
owners = None
elif "version" not in owners:
owners = None
elif owners["version"] != self._owners_cache_version:
owners = None
elif "base_names" not in owners:
owners = None
elif not isinstance(owners["base_names"], dict):
owners = None
if owners is None:
owners = {
"base_names" : {},
"version" : self._owners_cache_version
}
aux_cache["owners"] = owners
aux_cache["modified"] = set()
self._aux_cache_obj = aux_cache
def aux_get(self, mycpv, wants, myrepo = None):
"""This automatically caches selected keys that are frequently needed
by emerge for dependency calculations. The cached metadata is
considered valid if the mtime of the package directory has not changed
since the data was cached. The cache is stored in a pickled dict
object with the following format:
{version:"1", "packages":{cpv1:(mtime,{k1,v1, k2,v2, ...}), cpv2...}}
If an error occurs while loading the cache pickle or the version is
unrecognized, the cache will simply be recreated from scratch (it is
completely disposable).
"""
cache_these_wants = self._aux_cache_keys.intersection(wants)
for x in wants:
if self._aux_cache_keys_re.match(x) is not None:
cache_these_wants.add(x)
if not cache_these_wants:
mydata = self._aux_get(mycpv, wants)
return [mydata[x] for x in wants]
cache_these = set(self._aux_cache_keys)
cache_these.update(cache_these_wants)
mydir = self.getpath(mycpv)
mydir_stat = None
try:
mydir_stat = os.stat(mydir)
except OSError as e:
if e.errno != errno.ENOENT:
raise
raise KeyError(mycpv)
# Use float mtime when available.
mydir_mtime = mydir_stat.st_mtime
pkg_data = self._aux_cache["packages"].get(mycpv)
pull_me = cache_these.union(wants)
mydata = {"_mtime_" : mydir_mtime}
cache_valid = False
cache_incomplete = False
cache_mtime = None
metadata = None
if pkg_data is not None:
if not isinstance(pkg_data, tuple) or len(pkg_data) != 2:
pkg_data = None
else:
cache_mtime, metadata = pkg_data
if not isinstance(cache_mtime, (float, long, int)) or \
not isinstance(metadata, dict):
pkg_data = None
if pkg_data:
cache_mtime, metadata = pkg_data
if isinstance(cache_mtime, float):
cache_valid = cache_mtime == mydir_stat.st_mtime
else:
# Cache may contain integer mtime.
cache_valid = cache_mtime == mydir_stat[stat.ST_MTIME]
if cache_valid:
# Migrate old metadata to unicode.
for k, v in metadata.items():
metadata[k] = _unicode_decode(v,
encoding=_encodings['repo.content'], errors='replace')
mydata.update(metadata)
pull_me.difference_update(mydata)
if pull_me:
# pull any needed data and cache it
aux_keys = list(pull_me)
mydata.update(self._aux_get(mycpv, aux_keys, st=mydir_stat))
if not cache_valid or cache_these.difference(metadata):
cache_data = {}
if cache_valid and metadata:
cache_data.update(metadata)
for aux_key in cache_these:
cache_data[aux_key] = mydata[aux_key]
self._aux_cache["packages"][_unicode(mycpv)] = \
(mydir_mtime, cache_data)
self._aux_cache["modified"].add(mycpv)
eapi_attrs = _get_eapi_attrs(mydata['EAPI'])
if _get_slot_re(eapi_attrs).match(mydata['SLOT']) is None:
# Empty or invalid slot triggers InvalidAtom exceptions when
# generating slot atoms for packages, so translate it to '0' here.
mydata['SLOT'] = '0'
return [mydata[x] for x in wants]
def _aux_get(self, mycpv, wants, st=None):
mydir = self.getpath(mycpv)
if st is None:
try:
st = os.stat(mydir)
except OSError as e:
if e.errno == errno.ENOENT:
raise KeyError(mycpv)
elif e.errno == PermissionDenied.errno:
raise PermissionDenied(mydir)
else:
raise
if not stat.S_ISDIR(st.st_mode):
raise KeyError(mycpv)
results = {}
env_keys = []
for x in wants:
if x == "_mtime_":
results[x] = st[stat.ST_MTIME]
continue
try:
with io.open(
_unicode_encode(os.path.join(mydir, x),
encoding=_encodings['fs'], errors='strict'),
mode='r', encoding=_encodings['repo.content'],
errors='replace') as f:
myd = f.read()
except IOError:
if x not in self._aux_cache_keys and \
self._aux_cache_keys_re.match(x) is None:
env_keys.append(x)
continue
myd = ''
# Preserve \n for metadata that is known to
# contain multiple lines.
if self._aux_multi_line_re.match(x) is None:
myd = " ".join(myd.split())
results[x] = myd
if env_keys:
env_results = self._aux_env_search(mycpv, env_keys)
for k in env_keys:
v = env_results.get(k)
if v is None:
v = ''
if self._aux_multi_line_re.match(k) is None:
v = " ".join(v.split())
results[k] = v
if results.get("EAPI") == "":
results["EAPI"] = '0'
return results
def _aux_env_search(self, cpv, variables):
"""
Search environment.bz2 for the specified variables. Returns
a dict mapping variables to values. Variables not found
in the environment are omitted from the dict.
This is useful for querying variables like ${SRC_URI} and
${A}, which are not saved in separate files but are available
in environment.bz2 (see bug #395463).
"""
env_file = self.getpath(cpv, filename="environment.bz2")
if not os.path.isfile(env_file):
return {}
bunzip2_cmd = portage.util.shlex_split(
self.settings.get("PORTAGE_BUNZIP2_COMMAND", ""))
if not bunzip2_cmd:
bunzip2_cmd = portage.util.shlex_split(
self.settings["PORTAGE_BZIP2_COMMAND"])
bunzip2_cmd.append("-d")
args = bunzip2_cmd + ["-c", env_file]
try:
proc = subprocess.Popen(args, stdout=subprocess.PIPE)
except EnvironmentError as e:
if e.errno != errno.ENOENT:
raise
raise portage.exception.CommandNotFound(args[0])
# Parts of the following code are borrowed from
# filter-bash-environment.py (keep them in sync).
var_assign_re = re.compile(r'(^|^declare\s+-\S+\s+|^declare\s+|^export\s+)([^=\s]+)=("|\')?(.*)$')
close_quote_re = re.compile(r'(\\"|"|\')\s*$')
def have_end_quote(quote, line):
close_quote_match = close_quote_re.search(line)
return close_quote_match is not None and \
close_quote_match.group(1) == quote
variables = frozenset(variables)
results = {}
for line in proc.stdout:
line = _unicode_decode(line,
encoding=_encodings['content'], errors='replace')
var_assign_match = var_assign_re.match(line)
if var_assign_match is not None:
key = var_assign_match.group(2)
quote = var_assign_match.group(3)
if quote is not None:
if have_end_quote(quote,
line[var_assign_match.end(2)+2:]):
value = var_assign_match.group(4)
else:
value = [var_assign_match.group(4)]
for line in proc.stdout:
line = _unicode_decode(line,
encoding=_encodings['content'],
errors='replace')
value.append(line)
if have_end_quote(quote, line):
break
value = ''.join(value)
# remove trailing quote and whitespace
value = value.rstrip()[:-1]
else:
value = var_assign_match.group(4).rstrip()
if key in variables:
results[key] = value
proc.wait()
proc.stdout.close()
return results
def aux_update(self, cpv, values):
mylink = self._dblink(cpv)
if not mylink.exists():
raise KeyError(cpv)
self._bump_mtime(cpv)
self._clear_pkg_cache(mylink)
for k, v in values.items():
if v:
mylink.setfile(k, v)
else:
try:
os.unlink(os.path.join(self.getpath(cpv), k))
except EnvironmentError:
pass
self._bump_mtime(cpv)
def counter_tick(self, myroot=None, mycpv=None):
"""
@param myroot: ignored, self._eroot is used instead
"""
return self.counter_tick_core(incrementing=1, mycpv=mycpv)
def get_counter_tick_core(self, myroot=None, mycpv=None):
"""
Use this method to retrieve the counter instead
of having to trust the value of a global counter
file that can lead to invalid COUNTER
generation. When cache is valid, the package COUNTER
files are not read and we rely on the timestamp of
the package directory to validate cache. The stat
calls should only take a short time, so performance
is sufficient without having to rely on a potentially
corrupt global counter file.
The global counter file located at
$CACHE_PATH/counter serves to record the
counter of the last installed package and
it also corresponds to the total number of
installation actions that have occurred in
the history of this package database.
@param myroot: ignored, self._eroot is used instead
"""
del myroot
counter = -1
try:
with io.open(
_unicode_encode(self._counter_path,
encoding=_encodings['fs'], errors='strict'),
mode='r', encoding=_encodings['repo.content'],
errors='replace') as f:
try:
counter = long(f.readline().strip())
except (OverflowError, ValueError) as e:
writemsg(_("!!! COUNTER file is corrupt: '%s'\n") %
self._counter_path, noiselevel=-1)
writemsg("!!! %s\n" % (e,), noiselevel=-1)
except EnvironmentError as e:
# Silently allow ENOENT since files under
# /var/cache/ are allowed to disappear.
if e.errno != errno.ENOENT:
writemsg(_("!!! Unable to read COUNTER file: '%s'\n") % \
self._counter_path, noiselevel=-1)
writemsg("!!! %s\n" % str(e), noiselevel=-1)
del e
if self._cached_counter == counter:
max_counter = counter
else:
# We must ensure that we return a counter
# value that is at least as large as the
# highest one from the installed packages,
# since having a corrupt value that is too low
# can trigger incorrect AUTOCLEAN behavior due
# to newly installed packages having lower
# COUNTERs than the previous version in the
# same slot.
max_counter = counter
for cpv in self.cpv_all():
try:
pkg_counter = int(self.aux_get(cpv, ["COUNTER"])[0])
except (KeyError, OverflowError, ValueError):
continue
if pkg_counter > max_counter:
max_counter = pkg_counter
return max_counter + 1
def counter_tick_core(self, myroot=None, incrementing=1, mycpv=None):
"""
This method will grab the next COUNTER value and record it back
to the global file. Note that every package install must have
a unique counter, since a slotmove update can move two packages
into the same SLOT and in that case it's important that both
packages have different COUNTER metadata.
@param myroot: ignored, self._eroot is used instead
@param mycpv: ignored
@rtype: int
@return: new counter value
"""
myroot = None
mycpv = None
self.lock()
try:
counter = self.get_counter_tick_core() - 1
if incrementing:
#increment counter
counter += 1
# update new global counter file
try:
write_atomic(self._counter_path, str(counter))
except InvalidLocation:
self.settings._init_dirs()
write_atomic(self._counter_path, str(counter))
self._cached_counter = counter
# Since we hold a lock, this is a good opportunity
# to flush the cache. Note that this will only
# flush the cache periodically in the main process
# when _aux_cache_threshold is exceeded.
self.flush_cache()
finally:
self.unlock()
return counter
def _dblink(self, cpv):
category, pf = catsplit(cpv)
return dblink(category, pf, settings=self.settings,
vartree=self.vartree, treetype="vartree")
def removeFromContents(self, pkg, paths, relative_paths=True):
"""
@param pkg: cpv for an installed package
@type pkg: string
@param paths: paths of files to remove from contents
@type paths: iterable
"""
if not hasattr(pkg, "getcontents"):
pkg = self._dblink(pkg)
root = self.settings['ROOT']
root_len = len(root) - 1
new_contents = pkg.getcontents().copy()
removed = 0
for filename in paths:
filename = _unicode_decode(filename,
encoding=_encodings['content'], errors='strict')
filename = normalize_path(filename)
if relative_paths:
relative_filename = filename
else:
relative_filename = filename[root_len:]
contents_key = pkg._match_contents(relative_filename)
if contents_key:
# It's possible for two different paths to refer to the same
# contents_key, due to directory symlinks. Therefore, pass a
# default value to pop, in order to avoid a KeyError which
# could otherwise be triggered (see bug #454400).
new_contents.pop(contents_key, None)
removed += 1
if removed:
self.writeContentsToContentsFile(pkg, new_contents)
def writeContentsToContentsFile(self, pkg, new_contents):
"""
@param pkg: package to write contents file for
@type pkg: dblink
@param new_contents: contents to write to CONTENTS file
@type new_contents: contents dictionary of the form
{u'/path/to/file' : (contents_attribute 1, ...), ...}
"""
root = self.settings['ROOT']
self._bump_mtime(pkg.mycpv)
f = atomic_ofstream(os.path.join(pkg.dbdir, "CONTENTS"))
write_contents(new_contents, root, f)
f.close()
self._bump_mtime(pkg.mycpv)
pkg._clear_contents_cache()
class _owners_cache(object):
"""
This class maintains a hash table that indexes package
contents by mapping the basename of a file to a list of possible
packages that own it. This is used to optimize owner lookups
by narrowing the search down to a smaller number of packages.
"""
try:
from hashlib import md5 as _new_hash
except ImportError:
from md5 import new as _new_hash
_hash_bits = 16
_hex_chars = _hash_bits // 4
def __init__(self, vardb):
self._vardb = vardb
def add(self, cpv):
eroot_len = len(self._vardb._eroot)
contents = self._vardb._dblink(cpv).getcontents()
pkg_hash = self._hash_pkg(cpv)
if not contents:
# Empty path is a code used to represent empty contents.
self._add_path("", pkg_hash)
for x in contents:
self._add_path(x[eroot_len:], pkg_hash)
self._vardb._aux_cache["modified"].add(cpv)
def _add_path(self, path, pkg_hash):
"""
Empty path is a code that represents empty contents.
"""
if path:
name = os.path.basename(path.rstrip(os.path.sep))
if not name:
return
else:
name = path
name_hash = self._hash_str(name)
base_names = self._vardb._aux_cache["owners"]["base_names"]
pkgs = base_names.get(name_hash)
if pkgs is None:
pkgs = {}
base_names[name_hash] = pkgs
pkgs[pkg_hash] = None
def _hash_str(self, s):
h = self._new_hash()
# Always use a constant utf_8 encoding here, since
# the "default" encoding can change.
h.update(_unicode_encode(s,
encoding=_encodings['repo.content'],
errors='backslashreplace'))
h = h.hexdigest()
h = h[-self._hex_chars:]
h = int(h, 16)
return h
def _hash_pkg(self, cpv):
counter, mtime = self._vardb.aux_get(
cpv, ["COUNTER", "_mtime_"])
try:
counter = int(counter)
except ValueError:
counter = 0
return (_unicode(cpv), counter, mtime)
class _owners_db(object):
def __init__(self, vardb):
self._vardb = vardb
def populate(self):
self._populate()
def _populate(self):
owners_cache = vardbapi._owners_cache(self._vardb)
cached_hashes = set()
base_names = self._vardb._aux_cache["owners"]["base_names"]
# Take inventory of all cached package hashes.
for name, hash_values in list(base_names.items()):
if not isinstance(hash_values, dict):
del base_names[name]
continue
cached_hashes.update(hash_values)
# Create sets of valid package hashes and uncached packages.
uncached_pkgs = set()
hash_pkg = owners_cache._hash_pkg
valid_pkg_hashes = set()
for cpv in self._vardb.cpv_all():
hash_value = hash_pkg(cpv)
valid_pkg_hashes.add(hash_value)
if hash_value not in cached_hashes:
uncached_pkgs.add(cpv)
# Cache any missing packages.
for cpv in uncached_pkgs:
owners_cache.add(cpv)
# Delete any stale cache.
stale_hashes = cached_hashes.difference(valid_pkg_hashes)
if stale_hashes:
for base_name_hash, bucket in list(base_names.items()):
for hash_value in stale_hashes.intersection(bucket):
del bucket[hash_value]
if not bucket:
del base_names[base_name_hash]
return owners_cache
def get_owners(self, path_iter):
"""
@return the owners as a dblink -> set(files) mapping.
"""
owners = {}
for owner, f in self.iter_owners(path_iter):
owned_files = owners.get(owner)
if owned_files is None:
owned_files = set()
owners[owner] = owned_files
owned_files.add(f)
return owners
def getFileOwnerMap(self, path_iter):
owners = self.get_owners(path_iter)
file_owners = {}
for pkg_dblink, files in owners.items():
for f in files:
owner_set = file_owners.get(f)
if owner_set is None:
owner_set = set()
file_owners[f] = owner_set
owner_set.add(pkg_dblink)
return file_owners
def iter_owners(self, path_iter):
"""
Iterate over tuples of (dblink, path). In order to avoid
consuming too many resources for too much time, resources
are only allocated for the duration of a given iter_owners()
call. Therefore, to maximize reuse of resources when searching
for multiple files, it's best to search for them all in a single
call.
"""
if not isinstance(path_iter, list):
path_iter = list(path_iter)
owners_cache = self._populate()
vardb = self._vardb
root = vardb._eroot
hash_pkg = owners_cache._hash_pkg
hash_str = owners_cache._hash_str
base_names = self._vardb._aux_cache["owners"]["base_names"]
dblink_cache = {}
def dblink(cpv):
x = dblink_cache.get(cpv)
if x is None:
if len(dblink_cache) > 20:
# Ensure that we don't run out of memory.
raise StopIteration()
x = self._vardb._dblink(cpv)
dblink_cache[cpv] = x
return x
while path_iter:
path = path_iter.pop()
is_basename = os.sep != path[:1]
if is_basename:
name = path
else:
name = os.path.basename(path.rstrip(os.path.sep))
if not name:
continue
name_hash = hash_str(name)
pkgs = base_names.get(name_hash)
owners = []
if pkgs is not None:
try:
for hash_value in pkgs:
if not isinstance(hash_value, tuple) or \
len(hash_value) != 3:
continue
cpv, counter, mtime = hash_value
if not isinstance(cpv, basestring):
continue
try:
current_hash = hash_pkg(cpv)
except KeyError:
continue
if current_hash != hash_value:
continue
if is_basename:
for p in dblink(cpv).getcontents():
if os.path.basename(p) == name:
owners.append((cpv, p[len(root):]))
else:
if dblink(cpv).isowner(path):
owners.append((cpv, path))
except StopIteration:
path_iter.append(path)
del owners[:]
dblink_cache.clear()
gc.collect()
for x in self._iter_owners_low_mem(path_iter):
yield x
return
else:
for cpv, p in owners:
yield (dblink(cpv), p)
def _iter_owners_low_mem(self, path_list):
"""
This implementation creates a short-lived dblink instance (and
parses CONTENTS) for every single installed package. This is
slower but uses less memory than the method which uses the
basename cache.
"""
if not path_list:
return
path_info_list = []
for path in path_list:
is_basename = os.sep != path[:1]
if is_basename:
name = path
else:
name = os.path.basename(path.rstrip(os.path.sep))
path_info_list.append((path, name, is_basename))
# Do work via the global event loop, so that it can be used
# for indication of progress during the search (bug #461412).
event_loop = (portage._internal_caller and
global_event_loop() or EventLoop(main=False))
root = self._vardb._eroot
def search_pkg(cpv):
dblnk = self._vardb._dblink(cpv)
for path, name, is_basename in path_info_list:
if is_basename:
for p in dblnk.getcontents():
if os.path.basename(p) == name:
search_pkg.results.append((dblnk, p[len(root):]))
else:
if dblnk.isowner(path):
search_pkg.results.append((dblnk, path))
search_pkg.complete = True
return False
search_pkg.results = []
for cpv in self._vardb.cpv_all():
del search_pkg.results[:]
search_pkg.complete = False
event_loop.idle_add(search_pkg, cpv)
while not search_pkg.complete:
event_loop.iteration()
for result in search_pkg.results:
yield result
class vartree(object):
"this tree will scan a var/db/pkg database located at root (passed to init)"
def __init__(self, root=None, virtual=DeprecationWarning, categories=None,
settings=None):
if settings is None:
settings = portage.settings
if root is not None and root != settings['ROOT']:
warnings.warn("The 'root' parameter of the "
"portage.dbapi.vartree.vartree"
" constructor is now unused. Use "
"settings['ROOT'] instead.",
DeprecationWarning, stacklevel=2)
if virtual is not DeprecationWarning:
warnings.warn("The 'virtual' parameter of the "
"portage.dbapi.vartree.vartree"
" constructor is unused",
DeprecationWarning, stacklevel=2)
self.settings = settings
self.dbapi = vardbapi(settings=settings, vartree=self)
self.populated = 1
@property
def root(self):
warnings.warn("The root attribute of "
"portage.dbapi.vartree.vartree"
" is deprecated. Use "
"settings['ROOT'] instead.",
DeprecationWarning, stacklevel=3)
return self.settings['ROOT']
def getpath(self, mykey, filename=None):
return self.dbapi.getpath(mykey, filename=filename)
def zap(self, mycpv):
return
def inject(self, mycpv):
return
def get_provide(self, mycpv):
myprovides = []
mylines = None
try:
mylines, myuse = self.dbapi.aux_get(mycpv, ["PROVIDE", "USE"])
if mylines:
myuse = myuse.split()
mylines = use_reduce(mylines, uselist=myuse, flat=True)
for myprovide in mylines:
mys = catpkgsplit(myprovide)
if not mys:
mys = myprovide.split("/")
myprovides += [mys[0] + "/" + mys[1]]
return myprovides
except SystemExit as e:
raise
except Exception as e:
mydir = self.dbapi.getpath(mycpv)
writemsg(_("\nParse Error reading PROVIDE and USE in '%s'\n") % mydir,
noiselevel=-1)
if mylines:
writemsg(_("Possibly Invalid: '%s'\n") % str(mylines),
noiselevel=-1)
writemsg(_("Exception: %s\n\n") % str(e), noiselevel=-1)
return []
def get_all_provides(self):
myprovides = {}
for node in self.getallcpv():
for mykey in self.get_provide(node):
if mykey in myprovides:
myprovides[mykey] += [node]
else:
myprovides[mykey] = [node]
return myprovides
def dep_bestmatch(self, mydep, use_cache=1):
"compatibility method -- all matches, not just visible ones"
#mymatch=best(match(dep_expand(mydep,self.dbapi),self.dbapi))
mymatch = best(self.dbapi.match(
dep_expand(mydep, mydb=self.dbapi, settings=self.settings),
use_cache=use_cache))
if mymatch is None:
return ""
else:
return mymatch
def dep_match(self, mydep, use_cache=1):
"compatibility method -- we want to see all matches, not just visible ones"
#mymatch = match(mydep,self.dbapi)
mymatch = self.dbapi.match(mydep, use_cache=use_cache)
if mymatch is None:
return []
else:
return mymatch
def exists_specific(self, cpv):
return self.dbapi.cpv_exists(cpv)
def getallcpv(self):
"""temporary function, probably to be renamed --- Gets a list of all
category/package-versions installed on the system."""
return self.dbapi.cpv_all()
def getallnodes(self):
"""new behavior: these are all *unmasked* nodes. There may or may not be available
masked package for nodes in this nodes list."""
return self.dbapi.cp_all()
def getebuildpath(self, fullpackage):
cat, package = catsplit(fullpackage)
return self.getpath(fullpackage, filename=package+".ebuild")
def getslot(self, mycatpkg):
"Get a slot for a catpkg; assume it exists."
try:
return self.dbapi._pkg_str(mycatpkg, None).slot
except KeyError:
return ""
def populate(self):
self.populated=1
class dblink(object):
"""
This class provides an interface to the installed package database.
At present this is implemented as a text backend in /var/db/pkg.
"""
import re
_normalize_needed = re.compile(r'//|^[^/]|./$|(^|/)\.\.?(/|$)')
_contents_re = re.compile(r'^(' + \
r'(?P<dir>(dev|dir|fif) (.+))|' + \
r'(?P<obj>(obj) (.+) (\S+) (\d+))|' + \
r'(?P<sym>(sym) (.+) -> (.+) ((\d+)|(?P<oldsym>(' + \
r'\(\d+, \d+L, \d+L, \d+, \d+, \d+, \d+L, \d+, (\d+), \d+\)))))' + \
r')$'
)
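# Hypothetical examples of CONTENTS lines matched by the pattern
# above (md5 and mtime values are illustrative):
#
#   dir /usr/share/doc
#   obj /usr/bin/foo d41d8cd98f00b204e9800998ecf8427e 1400000000
#   sym /usr/bin/bar -> foo 1400000000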
# These files are generated by emerge, so we need to remove
# them when they are the only thing left in a directory.
_infodir_cleanup = frozenset(["dir", "dir.old"])
_ignored_unlink_errnos = (
errno.EBUSY, errno.ENOENT,
errno.ENOTDIR, errno.EISDIR)
_ignored_rmdir_errnos = (
errno.EEXIST, errno.ENOTEMPTY,
errno.EBUSY, errno.ENOENT,
errno.ENOTDIR, errno.EISDIR,
errno.EPERM)
def __init__(self, cat, pkg, myroot=None, settings=None, treetype=None,
vartree=None, blockers=None, scheduler=None, pipe=None):
"""
Creates a DBlink object for a given CPV.
The given CPV may not be present in the database already.
@param cat: Category
@type cat: String
@param pkg: Package (PV)
@type pkg: String
@param myroot: ignored, settings['ROOT'] is used instead
@type myroot: String (Path)
@param settings: Typically portage.settings
@type settings: portage.config
@param treetype: one of ['porttree','bintree','vartree']
@type treetype: String
@param vartree: an instance of vartree corresponding to myroot.
@type vartree: vartree
"""
if settings is None:
raise TypeError("settings argument is required")
mysettings = settings
self._eroot = mysettings['EROOT']
self.cat = cat
self.pkg = pkg
self.mycpv = self.cat + "/" + self.pkg
if self.mycpv == settings.mycpv and \
isinstance(settings.mycpv, _pkg_str):
self.mycpv = settings.mycpv
else:
self.mycpv = _pkg_str(self.mycpv)
self.mysplit = list(self.mycpv.cpv_split[1:])
self.mysplit[0] = self.mycpv.cp
self.treetype = treetype
if vartree is None:
vartree = portage.db[self._eroot]["vartree"]
self.vartree = vartree
self._blockers = blockers
self._scheduler = scheduler
self.dbroot = normalize_path(os.path.join(self._eroot, VDB_PATH))
self.dbcatdir = self.dbroot+"/"+cat
self.dbpkgdir = self.dbcatdir+"/"+pkg
self.dbtmpdir = self.dbcatdir+"/"+MERGING_IDENTIFIER+pkg
self.dbdir = self.dbpkgdir
self.settings = mysettings
self._verbose = self.settings.get("PORTAGE_VERBOSE") == "1"
self.myroot = self.settings['ROOT']
self._installed_instance = None
self.contentscache = None
self._contents_inodes = None
self._contents_basenames = None
self._linkmap_broken = False
self._device_path_map = {}
self._hardlink_merge_map = {}
self._hash_key = (self._eroot, self.mycpv)
self._protect_obj = None
self._pipe = pipe
# When necessary, this attribute is modified for
# compliance with RESTRICT=preserve-libs.
self._preserve_libs = "preserve-libs" in mysettings.features
def __hash__(self):
return hash(self._hash_key)
def __eq__(self, other):
return isinstance(other, dblink) and \
self._hash_key == other._hash_key
def _get_protect_obj(self):
if self._protect_obj is None:
self._protect_obj = ConfigProtect(self._eroot,
portage.util.shlex_split(
self.settings.get("CONFIG_PROTECT", "")),
portage.util.shlex_split(
self.settings.get("CONFIG_PROTECT_MASK", "")))
return self._protect_obj
def isprotected(self, obj):
return self._get_protect_obj().isprotected(obj)
def updateprotect(self):
self._get_protect_obj().updateprotect()
def lockdb(self):
self.vartree.dbapi.lock()
def unlockdb(self):
self.vartree.dbapi.unlock()
def getpath(self):
"return path to location of db information (for >>> informational display)"
return self.dbdir
def exists(self):
"does the db entry exist? boolean."
return os.path.exists(self.dbdir)
def delete(self):
"""
Remove this entry from the database
"""
try:
os.lstat(self.dbdir)
except OSError as e:
if e.errno not in (errno.ENOENT, errno.ENOTDIR, errno.ESTALE):
raise
return
# Check validity of self.dbdir before attempting to remove it.
if not self.dbdir.startswith(self.dbroot):
writemsg(_("portage.dblink.delete(): invalid dbdir: %s\n") % \
self.dbdir, noiselevel=-1)
return
shutil.rmtree(self.dbdir)
# If empty, remove parent category directory.
try:
os.rmdir(os.path.dirname(self.dbdir))
except OSError:
pass
self.vartree.dbapi._remove(self)
# Use self.dbroot since we need an existing path for syncfs.
try:
self._merged_path(self.dbroot, os.lstat(self.dbroot))
except OSError:
pass
self._post_merge_sync()
def clearcontents(self):
"""
For a given db entry (self), erase the CONTENTS values.
"""
self.lockdb()
try:
if os.path.exists(self.dbdir+"/CONTENTS"):
os.unlink(self.dbdir+"/CONTENTS")
finally:
self.unlockdb()
def _clear_contents_cache(self):
self.contentscache = None
self._contents_inodes = None
self._contents_basenames = None
def getcontents(self):
"""
Get the installed files of a given package (aka what that package installed)
"""
contents_file = os.path.join(self.dbdir, "CONTENTS")
if self.contentscache is not None:
return self.contentscache
pkgfiles = {}
try:
with io.open(_unicode_encode(contents_file,
encoding=_encodings['fs'], errors='strict'),
mode='r', encoding=_encodings['repo.content'],
errors='replace') as f:
mylines = f.readlines()
except EnvironmentError as e:
if e.errno != errno.ENOENT:
raise
del e
self.contentscache = pkgfiles
return pkgfiles
null_byte = "\0"
normalize_needed = self._normalize_needed
contents_re = self._contents_re
obj_index = contents_re.groupindex['obj']
dir_index = contents_re.groupindex['dir']
sym_index = contents_re.groupindex['sym']
# The old symlink format may exist on systems that have packages
# which were installed many years ago (see bug #351814).
oldsym_index = contents_re.groupindex['oldsym']
# CONTENTS files already contain EPREFIX
myroot = self.settings['ROOT']
if myroot == os.path.sep:
myroot = None
# used to generate parent dir entries
dir_entry = ("dir",)
eroot_split_len = len(self.settings["EROOT"].split(os.sep)) - 1
pos = 0
errors = []
for pos, line in enumerate(mylines):
if null_byte in line:
# Null bytes are a common indication of corruption.
errors.append((pos + 1, _("Null byte found in CONTENTS entry")))
continue
line = line.rstrip("\n")
m = contents_re.match(line)
if m is None:
errors.append((pos + 1, _("Unrecognized CONTENTS entry")))
continue
if m.group(obj_index) is not None:
base = obj_index
#format: type, mtime, md5sum
data = (m.group(base+1), m.group(base+4), m.group(base+3))
elif m.group(dir_index) is not None:
base = dir_index
#format: type
data = (m.group(base+1),)
elif m.group(sym_index) is not None:
base = sym_index
if m.group(oldsym_index) is None:
mtime = m.group(base+5)
else:
mtime = m.group(base+8)
#format: type, mtime, dest
data = (m.group(base+1), mtime, m.group(base+3))
else:
# This won't happen as long as the regular expression
# is written to only match valid entries.
raise AssertionError(_("required group not found " + \
"in CONTENTS entry: '%s'") % line)
path = m.group(base+2)
if normalize_needed.search(path) is not None:
path = normalize_path(path)
if not path.startswith(os.path.sep):
path = os.path.sep + path
if myroot is not None:
path = os.path.join(myroot, path.lstrip(os.path.sep))
# Implicitly add parent directories, since we can't necessarily
# assume that they are explicitly listed in CONTENTS, and it's
# useful for callers if they can rely on parent directory entries
# being generated here (crucial for things like dblink.isowner()).
path_split = path.split(os.sep)
path_split.pop()
while len(path_split) > eroot_split_len:
parent = os.sep.join(path_split)
if parent in pkgfiles:
break
pkgfiles[parent] = dir_entry
path_split.pop()
pkgfiles[path] = data
if errors:
writemsg(_("!!! Parse error in '%s'\n") % contents_file, noiselevel=-1)
for pos, e in errors:
writemsg(_("!!! line %d: %s\n") % (pos, e), noiselevel=-1)
self.contentscache = pkgfiles
return pkgfiles
def _prune_plib_registry(self, unmerge=False,
needed=None, preserve_paths=None):
# remove preserved libraries that don't have any consumers left
if not (self._linkmap_broken or
self.vartree.dbapi._linkmap is None or
self.vartree.dbapi._plib_registry is None):
self.vartree.dbapi._fs_lock()
plib_registry = self.vartree.dbapi._plib_registry
plib_registry.lock()
try:
plib_registry.load()
unmerge_with_replacement = \
unmerge and preserve_paths is not None
if unmerge_with_replacement:
# If self.mycpv is about to be unmerged and we
# have a replacement package, we want to exclude
# the irrelevant NEEDED data that belongs to
# files which are being unmerged now.
exclude_pkgs = (self.mycpv,)
else:
exclude_pkgs = None
self._linkmap_rebuild(exclude_pkgs=exclude_pkgs,
include_file=needed, preserve_paths=preserve_paths)
if unmerge:
unmerge_preserve = None
if not unmerge_with_replacement:
unmerge_preserve = \
self._find_libs_to_preserve(unmerge=True)
counter = self.vartree.dbapi.cpv_counter(self.mycpv)
try:
slot = self.mycpv.slot
except AttributeError:
slot = _pkg_str(self.mycpv, slot=self.settings["SLOT"]).slot
plib_registry.unregister(self.mycpv, slot, counter)
if unmerge_preserve:
for path in sorted(unmerge_preserve):
contents_key = self._match_contents(path)
if not contents_key:
continue
obj_type = self.getcontents()[contents_key][0]
self._display_merge(_(">>> needed %s %s\n") % \
(obj_type, contents_key), noiselevel=-1)
plib_registry.register(self.mycpv,
slot, counter, unmerge_preserve)
# Remove the preserved files from our contents
# so that they won't be unmerged.
self.vartree.dbapi.removeFromContents(self,
unmerge_preserve)
unmerge_no_replacement = \
unmerge and not unmerge_with_replacement
cpv_lib_map = self._find_unused_preserved_libs(
unmerge_no_replacement)
if cpv_lib_map:
self._remove_preserved_libs(cpv_lib_map)
self.vartree.dbapi.lock()
try:
for cpv, removed in cpv_lib_map.items():
if not self.vartree.dbapi.cpv_exists(cpv):
continue
self.vartree.dbapi.removeFromContents(cpv, removed)
finally:
self.vartree.dbapi.unlock()
plib_registry.store()
finally:
plib_registry.unlock()
self.vartree.dbapi._fs_unlock()
def unmerge(self, pkgfiles=None, trimworld=None, cleanup=True,
ldpath_mtimes=None, others_in_slot=None, needed=None,
preserve_paths=None):
"""
Calls prerm
Unmerges a given package (CPV)
calls postrm
calls cleanrm
calls env_update
@param pkgfiles: files to unmerge (generally self.getcontents() )
@type pkgfiles: Dictionary
@param trimworld: Unused
@type trimworld: Boolean
@param cleanup: cleanup to pass to doebuild (see doebuild)
@type cleanup: Boolean
@param ldpath_mtimes: mtimes to pass to env_update (see env_update)
@type ldpath_mtimes: Dictionary
@param others_in_slot: all dblink instances in this slot, excluding self
@type others_in_slot: list
@param needed: Filename containing libraries needed after unmerge.
@type needed: String
@param preserve_paths: Libraries preserved by a package instance that
is currently being merged. They need to be explicitly passed to the
LinkageMap, since they are not registered in the
PreservedLibsRegistry yet.
@type preserve_paths: set
@rtype: Integer
@return:
1. os.EX_OK if everything went well.
2. return code of the failed phase (for prerm, postrm, cleanrm)
"""
if trimworld is not None:
warnings.warn("The trimworld parameter of the " + \
"portage.dbapi.vartree.dblink.unmerge()" + \
" method is now unused.",
DeprecationWarning, stacklevel=2)
background = False
log_path = self.settings.get("PORTAGE_LOG_FILE")
if self._scheduler is None:
# We create a scheduler instance and use it to
# log unmerge output separately from merge output.
self._scheduler = SchedulerInterface(portage._internal_caller and
global_event_loop() or EventLoop(main=False))
if self.settings.get("PORTAGE_BACKGROUND") == "subprocess":
if self.settings.get("PORTAGE_BACKGROUND_UNMERGE") == "1":
self.settings["PORTAGE_BACKGROUND"] = "1"
self.settings.backup_changes("PORTAGE_BACKGROUND")
background = True
elif self.settings.get("PORTAGE_BACKGROUND_UNMERGE") == "0":
self.settings["PORTAGE_BACKGROUND"] = "0"
self.settings.backup_changes("PORTAGE_BACKGROUND")
elif self.settings.get("PORTAGE_BACKGROUND") == "1":
background = True
self.vartree.dbapi._bump_mtime(self.mycpv)
showMessage = self._display_merge
if self.vartree.dbapi._categories is not None:
self.vartree.dbapi._categories = None
# When others_in_slot is not None, the backup has already been
# handled by the caller.
caller_handles_backup = others_in_slot is not None
# When others_in_slot is supplied, the security check has already been
# done for this slot, so it shouldn't be repeated until the next
# replacement or unmerge operation.
if others_in_slot is None:
slot = self.vartree.dbapi._pkg_str(self.mycpv, None).slot
slot_matches = self.vartree.dbapi.match(
"%s:%s" % (portage.cpv_getkey(self.mycpv), slot))
others_in_slot = []
for cur_cpv in slot_matches:
if cur_cpv == self.mycpv:
continue
others_in_slot.append(dblink(self.cat, catsplit(cur_cpv)[1],
settings=self.settings, vartree=self.vartree,
treetype="vartree", pipe=self._pipe))
retval = self._security_check([self] + others_in_slot)
if retval:
return retval
contents = self.getcontents()
# Now, don't assume that the name of the ebuild is the same as the
# name of the dir; the package may have been moved.
myebuildpath = os.path.join(self.dbdir, self.pkg + ".ebuild")
failures = 0
ebuild_phase = "prerm"
mystuff = os.listdir(self.dbdir)
for x in mystuff:
if x.endswith(".ebuild"):
if x[:-7] != self.pkg:
# Clean up after vardbapi.move_ent() breakage in
# portage versions before 2.1.2
os.rename(os.path.join(self.dbdir, x), myebuildpath)
write_atomic(os.path.join(self.dbdir, "PF"), self.pkg+"\n")
break
if self.mycpv != self.settings.mycpv or \
"EAPI" not in self.settings.configdict["pkg"]:
# We avoid a redundant setcpv call here when
# the caller has already taken care of it.
self.settings.setcpv(self.mycpv, mydb=self.vartree.dbapi)
eapi_unsupported = False
try:
doebuild_environment(myebuildpath, "prerm",
settings=self.settings, db=self.vartree.dbapi)
except UnsupportedAPIException as e:
eapi_unsupported = e
if self._preserve_libs and "preserve-libs" in \
self.settings["PORTAGE_RESTRICT"].split():
self._preserve_libs = False
builddir_lock = None
scheduler = self._scheduler
retval = os.EX_OK
try:
# Only create builddir_lock if the caller
# has not already acquired the lock.
if "PORTAGE_BUILDDIR_LOCKED" not in self.settings:
builddir_lock = EbuildBuildDir(
scheduler=scheduler,
settings=self.settings)
builddir_lock.lock()
prepare_build_dirs(settings=self.settings, cleanup=True)
log_path = self.settings.get("PORTAGE_LOG_FILE")
# Do this before the following _prune_plib_registry call, since
# that removes preserved libraries from our CONTENTS, and we
# may want to backup those libraries first.
if not caller_handles_backup:
retval = self._pre_unmerge_backup(background)
if retval != os.EX_OK:
showMessage(_("!!! FAILED prerm: quickpkg: %s\n") % retval,
level=logging.ERROR, noiselevel=-1)
return retval
self._prune_plib_registry(unmerge=True, needed=needed,
preserve_paths=preserve_paths)
# Log the error after PORTAGE_LOG_FILE is initialized
# by prepare_build_dirs above.
if eapi_unsupported:
# Sometimes this happens due to corruption of the EAPI file.
failures += 1
showMessage(_("!!! FAILED prerm: %s\n") % \
os.path.join(self.dbdir, "EAPI"),
level=logging.ERROR, noiselevel=-1)
showMessage("%s\n" % (eapi_unsupported,),
level=logging.ERROR, noiselevel=-1)
elif os.path.isfile(myebuildpath):
phase = EbuildPhase(background=background,
phase=ebuild_phase, scheduler=scheduler,
settings=self.settings)
phase.start()
retval = phase.wait()
# XXX: Decide how to handle failures here.
if retval != os.EX_OK:
failures += 1
showMessage(_("!!! FAILED prerm: %s\n") % retval,
level=logging.ERROR, noiselevel=-1)
self.vartree.dbapi._fs_lock()
try:
self._unmerge_pkgfiles(pkgfiles, others_in_slot)
finally:
self.vartree.dbapi._fs_unlock()
self._clear_contents_cache()
if not eapi_unsupported and os.path.isfile(myebuildpath):
ebuild_phase = "postrm"
phase = EbuildPhase(background=background,
phase=ebuild_phase, scheduler=scheduler,
settings=self.settings)
phase.start()
retval = phase.wait()
# XXX: Decide how to handle failures here.
if retval != os.EX_OK:
failures += 1
showMessage(_("!!! FAILED postrm: %s\n") % retval,
level=logging.ERROR, noiselevel=-1)
finally:
self.vartree.dbapi._bump_mtime(self.mycpv)
try:
if not eapi_unsupported and os.path.isfile(myebuildpath):
if retval != os.EX_OK:
msg_lines = []
msg = _("The '%(ebuild_phase)s' "
"phase of the '%(cpv)s' package "
"has failed with exit value %(retval)s.") % \
{"ebuild_phase":ebuild_phase, "cpv":self.mycpv,
"retval":retval}
from textwrap import wrap
msg_lines.extend(wrap(msg, 72))
msg_lines.append("")
ebuild_name = os.path.basename(myebuildpath)
ebuild_dir = os.path.dirname(myebuildpath)
msg = _("The problem occurred while executing "
"the ebuild file named '%(ebuild_name)s' "
"located in the '%(ebuild_dir)s' directory. "
"If necessary, manually remove "
"the environment.bz2 file and/or the "
"ebuild file located in that directory.") % \
{"ebuild_name":ebuild_name, "ebuild_dir":ebuild_dir}
msg_lines.extend(wrap(msg, 72))
msg_lines.append("")
msg = _("Removal "
"of the environment.bz2 file is "
"preferred since it may allow the "
"removal phases to execute successfully. "
"The ebuild will be "
"sourced and the eclasses "
"from the current portage tree will be used "
"when necessary. Removal of "
"the ebuild file will cause the "
"pkg_prerm() and pkg_postrm() removal "
"phases to be skipped entirely.")
msg_lines.extend(textwrap.wrap(msg, 72))
self._eerror(ebuild_phase, msg_lines)
self._elog_process(phasefilter=("prerm", "postrm"))
if retval == os.EX_OK:
try:
doebuild_environment(myebuildpath, "cleanrm",
settings=self.settings, db=self.vartree.dbapi)
except UnsupportedAPIException:
pass
phase = EbuildPhase(background=background,
phase="cleanrm", scheduler=scheduler,
settings=self.settings)
phase.start()
retval = phase.wait()
finally:
if builddir_lock is not None:
builddir_lock.unlock()
if log_path is not None:
if not failures and 'unmerge-logs' not in self.settings.features:
try:
os.unlink(log_path)
except OSError:
pass
try:
st = os.stat(log_path)
except OSError:
pass
else:
if st.st_size == 0:
try:
os.unlink(log_path)
except OSError:
pass
if log_path is not None and os.path.exists(log_path):
# Restore this since it gets lost somewhere above and it
# needs to be set for _display_merge() to be able to log.
# Note that the log isn't necessarily supposed to exist
# since if PORT_LOGDIR is unset then it's a temp file
# so it gets cleaned above.
self.settings["PORTAGE_LOG_FILE"] = log_path
else:
self.settings.pop("PORTAGE_LOG_FILE", None)
env_update(target_root=self.settings['ROOT'],
prev_mtimes=ldpath_mtimes,
contents=contents, env=self.settings,
writemsg_level=self._display_merge, vardbapi=self.vartree.dbapi)
unmerge_with_replacement = preserve_paths is not None
if not unmerge_with_replacement:
# When there's a replacement package which calls us via treewalk,
# treewalk will automatically call _prune_plib_registry for us.
# Otherwise, we need to call _prune_plib_registry ourselves.
# Don't pass in the "unmerge=True" flag here, since that flag
# is intended to be used _prior_ to unmerge, not after.
self._prune_plib_registry()
return os.EX_OK
def _display_merge(self, msg, level=0, noiselevel=0):
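"""
Route merge/unmerge messages either directly through
writemsg_level or through the scheduler's output handler,
honoring PORTAGE_BACKGROUND and the log file settings.
"""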
if not self._verbose and noiselevel >= 0 and level < logging.WARN:
return
if self._scheduler is None:
writemsg_level(msg, level=level, noiselevel=noiselevel)
else:
log_path = None
if self.settings.get("PORTAGE_BACKGROUND") != "subprocess":
log_path = self.settings.get("PORTAGE_LOG_FILE")
background = self.settings.get("PORTAGE_BACKGROUND") == "1"
if background and log_path is None:
if level >= logging.WARN:
writemsg_level(msg, level=level, noiselevel=noiselevel)
else:
self._scheduler.output(msg,
log_path=log_path, background=background,
level=level, noiselevel=noiselevel)
def _show_unmerge(self, zing, desc, file_type, file_name):
self._display_merge("%s %s %s %s\n" % \
(zing, desc.ljust(8), file_type, file_name))
def _unmerge_pkgfiles(self, pkgfiles, others_in_slot):
"""
Unmerges the contents of a package from the liveFS
Removes the VDB entry for self
@param pkgfiles: typically self.getcontents()
@type pkgfiles: Dictionary { filename: [ 'type', 'mtime', 'md5sum' ] }
@param others_in_slot: all dblink instances in this slot, excluding self
@type others_in_slot: list
@rtype: None
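
Example pkgfiles entry (illustrative values): an 'obj' entry maps
the merged path to its type, recorded mtime and md5, e.g.
{'/usr/bin/foo': ['obj', '1401234567', 'd41d8cd98f00b204e9800998ecf8427e']}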
"""
os = _os_merge
perf_md5 = perform_md5
showMessage = self._display_merge
show_unmerge = self._show_unmerge
ignored_unlink_errnos = self._ignored_unlink_errnos
ignored_rmdir_errnos = self._ignored_rmdir_errnos
if not pkgfiles:
showMessage(_("No package files given... Grabbing a set.\n"))
pkgfiles = self.getcontents()
if others_in_slot is None:
others_in_slot = []
slot = self.vartree.dbapi._pkg_str(self.mycpv, None).slot
slot_matches = self.vartree.dbapi.match(
"%s:%s" % (portage.cpv_getkey(self.mycpv), slot))
for cur_cpv in slot_matches:
if cur_cpv == self.mycpv:
continue
others_in_slot.append(dblink(self.cat, catsplit(cur_cpv)[1],
settings=self.settings,
vartree=self.vartree, treetype="vartree", pipe=self._pipe))
cfgfiledict = grabdict(self.vartree.dbapi._conf_mem_file)
stale_confmem = []
protected_symlinks = {}
unmerge_orphans = "unmerge-orphans" in self.settings.features
calc_prelink = "prelink-checksums" in self.settings.features
if pkgfiles:
self.updateprotect()
mykeys = list(pkgfiles)
mykeys.sort()
mykeys.reverse()
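# Reverse lexical order visits deeper paths first, so files are
# unmerged before the directories that contain them.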
# Process symlinks second-to-last, directories last.
mydirs = set()
uninstall_ignore = portage.util.shlex_split(
self.settings.get("UNINSTALL_IGNORE", ""))
def unlink(file_name, lstatobj):
if bsd_chflags:
if lstatobj.st_flags != 0:
bsd_chflags.lchflags(file_name, 0)
parent_name = os.path.dirname(file_name)
# Use normal stat/chflags for the parent since we want to
# follow any symlinks to the real parent directory.
pflags = os.stat(parent_name).st_flags
if pflags != 0:
bsd_chflags.chflags(parent_name, 0)
try:
if not stat.S_ISLNK(lstatobj.st_mode):
# Remove permissions to ensure that any hardlinks to
# suid/sgid files are rendered harmless.
os.chmod(file_name, 0)
os.unlink(file_name)
except OSError as ose:
# If the chmod or unlink fails, you are in trouble.
# With Prefix this can be because the file is owned
# by someone else (a screwup by root?), on a normal
# system maybe filesystem corruption. In any case,
# if we backtrace and die here, we leave the system
# in a totally undefined state, hence we just bleed
# like hell and continue to hopefully finish all our
# administrative and pkg_postinst stuff.
self._eerror("postrm",
["Could not chmod or unlink '%s': %s" % \
(file_name, ose)])
else:
# Even though the file no longer exists, we log it
# here so that _unmerge_dirs can see that we've
# removed a file from this device, and will record
# the parent directory for a syncfs call.
self._merged_path(file_name, lstatobj, exists=False)
finally:
if bsd_chflags and pflags != 0:
# Restore the parent flags we saved before unlinking
bsd_chflags.chflags(parent_name, pflags)
unmerge_desc = {}
unmerge_desc["cfgpro"] = _("cfgpro")
unmerge_desc["replaced"] = _("replaced")
unmerge_desc["!dir"] = _("!dir")
unmerge_desc["!empty"] = _("!empty")
unmerge_desc["!fif"] = _("!fif")
unmerge_desc["!found"] = _("!found")
unmerge_desc["!md5"] = _("!md5")
unmerge_desc["!mtime"] = _("!mtime")
unmerge_desc["!obj"] = _("!obj")
unmerge_desc["!sym"] = _("!sym")
unmerge_desc["!prefix"] = _("!prefix")
real_root = self.settings['ROOT']
real_root_len = len(real_root) - 1
eroot = self.settings["EROOT"]
infodirs = frozenset(infodir for infodir in chain(
self.settings.get("INFOPATH", "").split(":"),
self.settings.get("INFODIR", "").split(":")) if infodir)
infodirs_inodes = set()
for infodir in infodirs:
infodir = os.path.join(real_root, infodir.lstrip(os.sep))
try:
statobj = os.stat(infodir)
except OSError:
pass
else:
infodirs_inodes.add((statobj.st_dev, statobj.st_ino))
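# Comparing (st_dev, st_ino) pairs instead of path strings lets
# _unmerge_dirs recognize an info directory even when it is
# reached through a different path alias.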
for i, objkey in enumerate(mykeys):
obj = normalize_path(objkey)
if os is _os_merge:
try:
_unicode_encode(obj,
encoding=_encodings['merge'], errors='strict')
except UnicodeEncodeError:
# The package appears to have been merged with a
# different value of sys.getfilesystemencoding(),
# so fall back to utf_8 if appropriate.
try:
_unicode_encode(obj,
encoding=_encodings['fs'], errors='strict')
except UnicodeEncodeError:
pass
else:
os = portage.os
perf_md5 = portage.checksum.perform_md5
file_data = pkgfiles[objkey]
file_type = file_data[0]
# don't try to unmerge the prefix offset itself
if len(obj) <= len(eroot) or not obj.startswith(eroot):
show_unmerge("---", unmerge_desc["!prefix"], file_type, obj)
continue
statobj = None
try:
statobj = os.stat(obj)
except OSError:
pass
lstatobj = None
try:
lstatobj = os.lstat(obj)
except (OSError, AttributeError):
pass
islink = lstatobj is not None and stat.S_ISLNK(lstatobj.st_mode)
if lstatobj is None:
show_unmerge("---", unmerge_desc["!found"], file_type, obj)
continue
f_match = obj[len(eroot)-1:]
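# f_match keeps the leading slash, yielding the path relative to
# EROOT for matching against UNINSTALL_IGNORE patterns.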
ignore = False
for pattern in uninstall_ignore:
if fnmatch.fnmatch(f_match, pattern):
ignore = True
break
if not ignore:
if islink and f_match in \
("/lib", "/usr/lib", "/usr/local/lib"):
# Ignore libdir symlinks for bug #423127.
ignore = True
if ignore:
show_unmerge("---", unmerge_desc["cfgpro"], file_type, obj)
continue
# don't use EROOT, CONTENTS entries already contain EPREFIX
if obj.startswith(real_root):
relative_path = obj[real_root_len:]
is_owned = False
for dblnk in others_in_slot:
if dblnk.isowner(relative_path):
is_owned = True
break
if is_owned and islink and \
file_type in ("sym", "dir") and \
statobj and stat.S_ISDIR(statobj.st_mode):
# A new instance of this package claims the file, so
# don't unmerge it. If the file is symlink to a
# directory and the unmerging package installed it as
# a symlink, but the new owner has it listed as a
# directory, then we'll produce a warning since the
# symlink is a sort of orphan in this case (see
# bug #326685).
symlink_orphan = False
for dblnk in others_in_slot:
parent_contents_key = \
dblnk._match_contents(relative_path)
if not parent_contents_key:
continue
if not parent_contents_key.startswith(
real_root):
continue
if dblnk.getcontents()[
parent_contents_key][0] == "dir":
symlink_orphan = True
break
if symlink_orphan:
protected_symlinks.setdefault(
(statobj.st_dev, statobj.st_ino),
[]).append(relative_path)
if is_owned:
show_unmerge("---", unmerge_desc["replaced"], file_type, obj)
continue
elif relative_path in cfgfiledict:
stale_confmem.append(relative_path)
# Don't unlink symlinks to directories here since that can
# remove /lib and /usr/lib symlinks.
if unmerge_orphans and \
lstatobj and not stat.S_ISDIR(lstatobj.st_mode) and \
not (islink and statobj and stat.S_ISDIR(statobj.st_mode)) and \
not self.isprotected(obj):
try:
unlink(obj, lstatobj)
except EnvironmentError as e:
if e.errno not in ignored_unlink_errnos:
raise
del e
show_unmerge("<<<", "", file_type, obj)
continue
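# Preserve files that were modified after the merge. The mtime
# test is skipped for dir/fif/dev entries, which have no
# meaningful recorded mtime.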
lmtime = str(lstatobj[stat.ST_MTIME])
if (pkgfiles[objkey][0] not in ("dir", "fif", "dev")) and (lmtime != pkgfiles[objkey][1]):
show_unmerge("---", unmerge_desc["!mtime"], file_type, obj)
continue
if file_type == "dir" and not islink:
if lstatobj is None or not stat.S_ISDIR(lstatobj.st_mode):
show_unmerge("---", unmerge_desc["!dir"], file_type, obj)
continue
mydirs.add((obj, (lstatobj.st_dev, lstatobj.st_ino)))
elif file_type == "sym" or (file_type == "dir" and islink):
if not islink:
show_unmerge("---", unmerge_desc["!sym"], file_type, obj)
continue
# If this symlink points to a directory then we don't want
# to unmerge it if there are any other packages that
# installed files into the directory via this symlink
# (see bug #326685).
# TODO: Resolving a symlink to a directory will require
# simulation if $ROOT != / and the link is not relative.
if islink and statobj and stat.S_ISDIR(statobj.st_mode) \
and obj.startswith(real_root):
relative_path = obj[real_root_len:]
try:
target_dir_contents = os.listdir(obj)
except OSError:
pass
else:
if target_dir_contents:
# If all the children are regular files owned
# by this package, then the symlink should be
# safe to unmerge.
all_owned = True
for child in target_dir_contents:
child = os.path.join(relative_path, child)
if not self.isowner(child):
all_owned = False
break
try:
child_lstat = os.lstat(os.path.join(
real_root, child.lstrip(os.sep)))
except OSError:
continue
if not stat.S_ISREG(child_lstat.st_mode):
# Nested symlinks or directories make
# the issue very complex, so just
# preserve the symlink in order to be
# on the safe side.
all_owned = False
break
if not all_owned:
protected_symlinks.setdefault(
(statobj.st_dev, statobj.st_ino),
[]).append(relative_path)
show_unmerge("---", unmerge_desc["!empty"],
file_type, obj)
continue
# Go ahead and unlink symlinks to directories here when
# they're actually recorded as symlinks in the contents.
# Normally, symlinks such as /lib -> lib64 are not recorded
# as symlinks in the contents of a package. If a package
# installs something into ${D}/lib/, it is recorded in the
# contents as a directory even if it happens to correspond
# to a symlink when it's merged to the live filesystem.
try:
unlink(obj, lstatobj)
show_unmerge("<<<", "", file_type, obj)
except (OSError, IOError) as e:
if e.errno not in ignored_unlink_errnos:
raise
del e
show_unmerge("!!!", "", file_type, obj)
elif pkgfiles[objkey][0] == "obj":
if statobj is None or not stat.S_ISREG(statobj.st_mode):
show_unmerge("---", unmerge_desc["!obj"], file_type, obj)
continue
mymd5 = None
try:
mymd5 = perf_md5(obj, calc_prelink=calc_prelink)
except FileNotFound as e:
# the file has disappeared between now and our stat call
show_unmerge("---", unmerge_desc["!obj"], file_type, obj)
continue
# The stored md5 is lower-cased because db entries used to be
# written in upper-case; lower-casing preserves backwards compatibility.
if mymd5 != pkgfiles[objkey][2].lower():
show_unmerge("---", unmerge_desc["!md5"], file_type, obj)
continue
try:
unlink(obj, lstatobj)
except (OSError, IOError) as e:
if e.errno not in ignored_unlink_errnos:
raise
del e
show_unmerge("<<<", "", file_type, obj)
elif pkgfiles[objkey][0] == "fif":
if not stat.S_ISFIFO(lstatobj[stat.ST_MODE]):
show_unmerge("---", unmerge_desc["!fif"], file_type, obj)
continue
show_unmerge("---", "", file_type, obj)
elif pkgfiles[objkey][0] == "dev":
show_unmerge("---", "", file_type, obj)
self._unmerge_dirs(mydirs, infodirs_inodes,
protected_symlinks, unmerge_desc, unlink, os)
mydirs.clear()
if protected_symlinks:
self._unmerge_protected_symlinks(others_in_slot, infodirs_inodes,
protected_symlinks, unmerge_desc, unlink, os)
if protected_symlinks:
msg = "One or more symlinks to directories have been " + \
"preserved in order to ensure that files installed " + \
"via these symlinks remain accessible. " + \
"This indicates that the mentioned symlink(s) may " + \
"be obsolete remnants of an old install, and it " + \
"may be appropriate to replace a given symlink " + \
"with the directory that it points to."
lines = textwrap.wrap(msg, 72)
lines.append("")
flat_list = set()
flat_list.update(*protected_symlinks.values())
flat_list = sorted(flat_list)
for f in flat_list:
lines.append("\t%s" % (os.path.join(real_root,
f.lstrip(os.sep))))
lines.append("")
self._elog("elog", "postrm", lines)
# Remove stale entries from config memory.
if stale_confmem:
for filename in stale_confmem:
del cfgfiledict[filename]
writedict(cfgfiledict, self.vartree.dbapi._conf_mem_file)
# Remove self from the vartree database so that our own virtual gets zapped if we're the last node.
self.vartree.zap(self.mycpv)
def _unmerge_protected_symlinks(self, others_in_slot, infodirs_inodes,
protected_symlinks, unmerge_desc, unlink, os):
real_root = self.settings['ROOT']
show_unmerge = self._show_unmerge
ignored_unlink_errnos = self._ignored_unlink_errnos
flat_list = set()
flat_list.update(*protected_symlinks.values())
flat_list = sorted(flat_list)
for f in flat_list:
for dblnk in others_in_slot:
if dblnk.isowner(f):
# If another package in the same slot installed
# a file via a protected symlink, return early
# and don't bother searching for any other owners.
return
msg = []
msg.append("")
msg.append(_("Directory symlink(s) may need protection:"))
msg.append("")
for f in flat_list:
msg.append("\t%s" % \
os.path.join(real_root, f.lstrip(os.path.sep)))
msg.append("")
msg.append(_("Searching all installed"
" packages for files installed via above symlink(s)..."))
msg.append("")
self._elog("elog", "postrm", msg)
self.lockdb()
try:
owners = self.vartree.dbapi._owners.get_owners(flat_list)
self.vartree.dbapi.flush_cache()
finally:
self.unlockdb()
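# Discard this package itself from the owner mapping, since the
# files it owns are the ones being unmerged.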
for owner in list(owners):
if owner.mycpv == self.mycpv:
owners.pop(owner, None)
if not owners:
msg = []
msg.append(_("The above directory symlink(s) are all "
"safe to remove. Removing them now..."))
msg.append("")
self._elog("elog", "postrm", msg)
dirs = set()
for unmerge_syms in protected_symlinks.values():
for relative_path in unmerge_syms:
obj = os.path.join(real_root,
relative_path.lstrip(os.sep))
parent = os.path.dirname(obj)
while len(parent) > len(self._eroot):
try:
lstatobj = os.lstat(parent)
except OSError:
break
else:
dirs.add((parent,
(lstatobj.st_dev, lstatobj.st_ino)))
parent = os.path.dirname(parent)
try:
unlink(obj, os.lstat(obj))
show_unmerge("<<<", "", "sym", obj)
except (OSError, IOError) as e:
if e.errno not in ignored_unlink_errnos:
raise
del e
show_unmerge("!!!", "", "sym", obj)
protected_symlinks.clear()
self._unmerge_dirs(dirs, infodirs_inodes,
protected_symlinks, unmerge_desc, unlink, os)
dirs.clear()
def _unmerge_dirs(self, dirs, infodirs_inodes,
protected_symlinks, unmerge_desc, unlink, os):
show_unmerge = self._show_unmerge
infodir_cleanup = self._infodir_cleanup
ignored_unlink_errnos = self._ignored_unlink_errnos
ignored_rmdir_errnos = self._ignored_rmdir_errnos
real_root = self.settings['ROOT']
dirs = sorted(dirs)
dirs.reverse()
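# Reverse-sorted order handles nested directories before their
# parents, so each directory is emptied before removal is attempted.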
for obj, inode_key in dirs:
# Treat any directory named "info" as a candidate here,
# since it might have been in INFOPATH previously even
# though it may not be there now.
if inode_key in infodirs_inodes or \
os.path.basename(obj) == "info":
try:
remaining = os.listdir(obj)
except OSError:
pass
else:
cleanup_info_dir = ()
if remaining and \
len(remaining) <= len(infodir_cleanup):
if not set(remaining).difference(infodir_cleanup):
cleanup_info_dir = remaining
for child in cleanup_info_dir:
child = os.path.join(obj, child)
try:
lstatobj = os.lstat(child)
if stat.S_ISREG(lstatobj.st_mode):
unlink(child, lstatobj)
show_unmerge("<<<", "", "obj", child)
except EnvironmentError as e:
if e.errno not in ignored_unlink_errnos:
raise
del e
show_unmerge("!!!", "", "obj", child)
try:
parent_name = os.path.dirname(obj