diff --git a/unattended-upgrades/kylin-unattended-upgrade b/unattended-upgrades/kylin-unattended-upgrade index 2e3b2a9..d87dc31 100644 --- a/unattended-upgrades/kylin-unattended-upgrade +++ b/unattended-upgrades/kylin-unattended-upgrade @@ -1,4196 +1,217 @@ #!/usr/bin/python3 -# Copyright (c) 2005-2018 Canonical Ltd -# -# AUTHOR: -# Michael Vogt -# Balint Reczey - -# This file is part of unattended-upgrades -# -# unattended-upgrades is free software; you can redistribute it and/or -# modify it under the terms of the GNU General Public License as published -# by the Free Software Foundation; either version 2 of the License, or (at -# your option) any later version. -# -# unattended-upgrades is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with unattended-upgrades; if not, write to the Free Software -# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -# -#from backend.build.lib.SystemUpdater.backend.DownloadBackend import FetchProgress -# from email.policy import default -import stat -import atexit -import copy -import datetime -import errno -# import email.charset -import fcntl -import fnmatch -import gettext - -# from backend.SystemUpdater.Core.utils import error -#from zoneinfo import ZoneInfoNotFoundError -''' -try: - from gi.repository.Gio import NetworkMonitor -except ImportError: - pass -''' -import grp -import io -import locale -import logging - -import re -import os -import select -import signal -import socket -import string -import subprocess -import sys -import syslog -import shutil -try: - from typing import AbstractSet, cast, DefaultDict, Dict, Iterable, List - AbstractSet # pyflakes - DefaultDict # pyflakes - Dict # pyflakes - Iterable # pyflakes - List # pyflakes - from typing import Set, Tuple, Union - Set # pyflakes - Tuple # pyflakes - Union # pyflakes -except ImportError: - pass - -from collections import defaultdict, namedtuple -from datetime import date -from email.message import Message -import email -from gettext import gettext as _ -from io import StringIO -from optparse import ( - OptionParser, - SUPPRESS_HELP, -) - -from subprocess import ( - Popen, - PIPE, -) -from textwrap import wrap - -import apt -import apt_inst -import apt_pkg +from optparse import OptionParser import dbus from dbus.mainloop.glib import DBusGMainLoop from gi.repository import GLib -#import distro_info -from apt.progress.base import InstallProgress -import configparser -import fcntl -import time -import subprocess +from threading import Event +from gettext import gettext as _ +import gettext +import apt_pkg +#from apt.progress.base import AcquireProgress +from apt.progress.text import AcquireProgress +import apt +import os import json - -SOURCESLIST = "/etc/apt/sources.list" -RELEASEOFFSET = 1 -ORIGINOFFSET = 2 -HTTPTYPE = "HTTP" -FTPTYPE = "FTP" -ARCHITECTUREMAP = ['arm64','amd64','armhf','i386','loongarch64','mips64el','sw64'] - -VERSION_FILE = '/etc/kylin-version/kylin-system-version.conf' -OTA_RESULT_FILE_PATH="/opt/apt_result/" -OTA_RESULT_FILE="/opt/apt_result/ota_result" -#SYSTEM_UPDATER_CORE_LIB_PATH="/usr/share/kylin-system-updater/SystemUpdater/Core" -# sys.path.append(SYSTEM_UPDATER_CORE_LIB_PATH) -# from OriginFilter import UnattendUpgradeFilter -KYLIN_VERSION_FILE = 
"/etc/kylin-version/kylin-system-version.conf" -CONFIG_FILE_ROOT_PATH="/var/lib/unattended-upgrades" -UNATTENDED_UPGRADE_CONFIG_FILE_PATH="/var/lib/unattended-upgrades/unattended-upgrade.conf" -UNATTENDED_UPGRADE_POLICY_FILE_PATH="/var/lib/unattended-upgrades/unattended-upgrades-policy.conf" -WHITE_LIST_FILE_PATH="/var/lib/kylin-system-updater/system-updater.conf" -TIMESTAMP_PATH="/var/lib/kylin-software-properties/template/kylin-source-status" -CONTROL_PANEL_LOCK_FILE = "/tmp/auto-upgrade/ukui-control-center.lock" -# the reboot required flag file used by packages -REBOOT_REQUIRED_FILE = "/var/run/reboot-required" -KEPT_PACKAGES_FILE = "var/lib/unattended-upgrades/kept-back" -MAIL_BINARY = "/usr/bin/mail" -SENDMAIL_BINARY = "/usr/sbin/sendmail" -USERS = "/usr/bin/users" -# no py3 lsb_release in debian :/ -DISTRO_CODENAME = subprocess.check_output( - ["lsb_release", "-c", "-s"], universal_newlines=True).strip() # type: str -DISTRO_DESC = subprocess.check_output( - ["lsb_release", "-d", "-s"], universal_newlines=True).strip() # type: str -DISTRO_ID = subprocess.check_output( - ["lsb_release", "-i", "-s"], universal_newlines=True).strip() # type: str - -# Number of days before release of devel where we enable unattended -# upgrades. -DEVEL_UNTIL_RELEASE = datetime.timedelta(days=21) - -# progress information is written here -PROGRESS_LOG = "/var/run/unattended-upgrades.progress" -PID_FILE = "/var/run/unattended-upgrades.pid" -LOCK_FILE = "/var/run/kylin-unattended-upgrade.lock" -NOTIFICATION_PIPE = '/tmp/notification.pipe' -TIME_STAMP = "/var/lib/unattended-upgrades/unattended-upgrades-timestamp" -UNATTENDED_UPGRADE_PKG_LIST_FILE_PATH="/var/lib/kylin-system-updater/json/auto-upgrade-list.json" -OTA_PKGS_TO_INSTALL_LIST="/var/lib/unattended-upgrades/ota_pkgs_to_install_list" -# 禁止关机锁文件路径 -FILELOCK_PATH = "/tmp/lock/" -SHUTDOWN_BLOCK_FILELOCK = "kylin-update.lock" -pidfile = None -# set from the sigint signal handler -SIGNAL_STOP_REQUEST = False - -def kysec_pre_upgrade(): - if os.path.exists("/usr/share/kysec-maintain/sys-upgrade-pre.sh"): - logging.debug("kysec pre-upgrade settings...") - subprocess.run(["/bin/sh","/usr/share/kysec-maintain/sys-upgrade-pre.sh"]) - -def kysec_post_upgrade(): - if os.path.exists("/usr/share/kysec-maintain/sys-upgrade-post.sh"): - logging.debug("kysec post-upgrade settings...") - subprocess.run(["/bin/sh","/usr/share/kysec-maintain/sys-upgrade-post.sh"]) - -def reload_options_config(): - #添加默认保留旧配置 - apt_pkg.config["DPkg::Options::"] = "--force-confold" - options_new = list(set(apt_pkg.config.value_list("DPkg::Options"))) - for option in ("--force-confnew","--force-confdef"): - if option in options_new: - options_new.remove(option) - #清除所有配置重新加载 - apt_pkg.config.clear("DPkg::Options") - for option in options_new: - apt_pkg.config["DPkg::Options::"] = option - #去除安装推荐和建议的软件包 - if apt_pkg.config.find_b("APT::Install-Recommends",False) == True: - apt_pkg.config.clear("APT::Install-Recommends") - if apt_pkg.config.find_b("APT::Install-Suggests",False) == True: - apt_pkg.config.clear("APT::Install-Suggests") - if apt_pkg.config.find("Dir::Etc::sourceparts","")!="": - apt_pkg.config["Dir::Etc::sourceparts"]="" - apt_pkg.init_system() - -def get_default_version(): - version = "" - data = {'version':""} - INPUT_CONFIG_PATH = '/usr/share/kylin-update-desktop-config/config/kylin-update-desktop-system.json' - if os.path.isfile(INPUT_CONFIG_PATH): # 存在 - # 读取JSON文件 - with open(INPUT_CONFIG_PATH, "r") as f: - try : - data = json.load(f) - version = data['version'] - except 
json.JSONDecodeError as e: - logging.error(str(e)) - return version - -def ReadOsRelease(file): - osreleasedict = {} - try: - with open(file) as f: - lines = f.readlines() - for line in lines: - ls = line.strip().split('=',1) - osreleasedict.update({ls[0]:ls[1].strip('"')}) - except Exception as e: - pass - if 'PROJECT_CODENAME' not in osreleasedict.keys(): - osreleasedict.update({'PROJECT_CODENAME':''}) - if 'SUB_PROJECT_CODENAME' not in osreleasedict.keys(): - osreleasedict.update({'SUB_PROJECT_CODENAME':''}) - return osreleasedict - ''' -#安装时禁止关机 进行加锁 -def LockedPreventShutdown(): - global pidfile - - #不为空是表示以及被锁 - if pidfile != None: - logging.error("pidfile file disc not is None,Has been locked...") - return False - - if not os.path.exists(FILELOCK_PATH): - #不存在创建 - logging.info("File(%s) is not exists and will be create",FILELOCK_PATH) - os.makedirs(FILELOCK_PATH) - else: - #当目录存在时进行删除 不删除进行创建文件的话会报错 - # file cannot be locked.[Errno 11] Resource temporarily unavailable - # 资源被占用报错 - shutil.rmtree(FILELOCK_PATH) - logging.info("File(%s) is exists and will be delete and create",FILELOCK_PATH) - os.makedirs(FILELOCK_PATH) - - try: - pidfile = open(os.path.join(FILELOCK_PATH, SHUTDOWN_BLOCK_FILELOCK), "w+") - fcntl.flock(pidfile, fcntl.LOCK_EX | fcntl.LOCK_NB) - logging.info("Shutdown Has been locked...") - return True - except Exception as e: - logging.error("file cannot be locked." + str(e)) - pidfile.close() - pidfile = None - return False - -#解锁禁止关机 -def unLockedEnableShutdown(): - global pidfile - #未加锁退出 - if not pidfile: - logging.info("Not locked and Quitting ...") - return False - try: - fcntl.flock(pidfile, fcntl.LOCK_UN) - logging.info("Shutdown Has been unlocked...") - pidfile.close() - pidfile = None - - # Fix 修复权限问题 当普通用户无法使用 所以直接删除目录 - if os.path.exists(FILELOCK_PATH): - shutil.rmtree(FILELOCK_PATH) - logging.info('Emptying the lockPath(%s) is complete...',FILELOCK_PATH) - else: - logging.info("Emptying the lockPath(%s) is Failed...",FILELOCK_PATH) - - return True - except Exception as e: - logging.error("unlock failed." 
+ str(e)) - pidfile.close() - pidfile = None - return False +class FetchProgress(AcquireProgress): + def __init__(self) -> None: + super().__init__() + + def fetch(self, item: apt_pkg.AcquireItemDesc) -> None: + print("%s [%d%%]"%(item.description,self.current_bytes*100/self.total_bytes)) + return super().fetch(item) + + def fail(self, item: apt_pkg.AcquireItemDesc) -> None: + print("package fetch failed:%s"%item.description) + return super().fail(item) + + def ims_hit(self, item: apt_pkg.AcquireItemDesc) -> None: + return super().ims_hit(item) + + def media_change(self, media: str, drive: str) -> bool: + return super().media_change(media, drive) + + def pulse(self, owner: apt_pkg.Acquire) -> bool: + return super().pulse(owner) + + def start(self) -> None: + print("download start") + return super().start() + + def stop(self) -> None: + print("download finished") + return super().stop() ''' - -def is_dpkg_journal_dirty(): - # type: () -> bool - """ - Return True if the dpkg journal is dirty - (similar to debSystem::CheckUpdates) - """ - logging.debug("checking whether dpkg journal is dirty") - d = os.path.join("/var/lib/dpkg/", - #os.path.dirname(apt_pkg.config.find_file("Dir::State::status")), - "updates") - for f in os.listdir(d): - if re.match("[0-9]+", f) or re.match("tmp.i",f): - return True - return False - -def get_abnormally_installed_pkg_count(): - output = subprocess.check_output('dpkg -l|grep ^i[^i]|wc -l',shell=True) - return output.decode().strip() - -def get_white_list_with_version(srclist,list,namelist): - for name_with_version in srclist: - nvlist = name_with_version.strip().split('=',1) - if nvlist[0] != '' and nvlist[1] != '': - list.append(nvlist) - namelist.append(nvlist[0]) - -#global timeStamp - -def get_timestamp(): - global timeStamp - config=configparser.ConfigParser(allow_no_value=True) - config.optionxform = str - config.read(TIMESTAMP_PATH) - time_value=time.localtime(int(config.get("Server","UpdateTime"))) - logging.debug(("获取软件源时间戳:%s"),time_value) - timeStamp="自动备份:"+time.strftime("%Y-%m-%d %H:%M:%S",time_value)+" "+config.get("Server","UpdateTime") - return timeStamp - - -def WriteValueToFile(file,section,option,value): - config=configparser.ConfigParser(allow_no_value=True) - config.optionxform = str - config.add_section(section) - config.set(section,option,value) - config.write(open(file,"w")) - - -def signal_handler_int(signal,frame): - # type: (int, object) -> None - logging.warning("SIGINT received, will stop") - WriteValueToFile(UNATTENDED_UPGRADE_CONFIG_FILE_PATH,"UNATTENDED_UPGRADE","autoupdate_run_status","idle") - os._exit(1) - -def signal_handler_usr1(signal,frame): - # type: (int, object) -> None - logging.warning("SIGUSR1 received, will stop") - WriteValueToFile(UNATTENDED_UPGRADE_CONFIG_FILE_PATH,"UNATTENDED_UPGRADE","autoupdate_run_status","idle") - os._exit(1) - -def signal_handler_term(signal,frame): - # type: (int, object) -> None - logging.warning("SIGTERM received, will stop") - WriteValueToFile(UNATTENDED_UPGRADE_CONFIG_FILE_PATH,"UNATTENDED_UPGRADE","autoupdate_run_status","idle") - os._exit(1) - -# messages to be logged only once -logged_msgs = set() # type: AbstractSet[str] - -NEVER_PIN = -32768 - - -class InhibitShutdownLock(): - - def __init__(self): - self.inhibit_lock = None - - #安装时禁止关机 进行加锁 - def lock(self, caller='Kylin System Updater'): - """ - Send a dbus signal to logind to not suspend the system, it will be - released when the return value drops out of scope - """ - try: - from gi.repository import Gio, GLib - connection = 
Gio.bus_get_sync(Gio.BusType.SYSTEM) - - var, fdlist = connection.call_with_unix_fd_list_sync( - 'org.freedesktop.login1', '/org/freedesktop/login1', - 'org.freedesktop.login1.Manager', 'Inhibit', - GLib.Variant('(ssss)', - ('shutdown', - caller, 'Installing Packages', - 'block')), - None, 0, -1, None, None) - self.inhibit_lock = Gio.UnixInputStream(fd=fdlist.steal_fds()[var[0]]) - logging.info("Shutdown Has been locked...") - except Exception as e: - logging.error(e) - - #解锁禁止关机 - def unlock(self): - try: - if self.inhibit_lock != None: - self.inhibit_lock.close() - self.inhibit_lock == None - logging.info("Shutdown Has been unlocked...") - else: - logging.info("Not locked and Quitting ...") - except Exception as e: - logging.error("unlock failed." + str(e)) - -class LoggingDateTime: - """The date/time representation for the dpkg log file timestamps""" - LOG_DATE_TIME_FMT = "%Y-%m-%d %H:%M:%S" - - @classmethod - def as_string(cls): - # type: () -> str - """Return the current date and time as LOG_DATE_TIME_FMT string""" - return datetime.datetime.now().strftime(cls.LOG_DATE_TIME_FMT) - - @classmethod - def from_string(cls, logstr): - # type: (str) -> datetime.datetime - """Take a LOG_DATE_TIME_FMT string and return datetime object""" - return datetime.datetime.strptime(logstr, cls.LOG_DATE_TIME_FMT) - - -class UnknownMatcherError(ValueError): - pass - - -class NoAllowedOriginError(ValueError): - pass - -class FILE_LOCK(object): - def __init__(self,name): - self.fobj = open(name,'w') - self.fd = self.fobj.fileno() - - def get_lock(self): - try: - fcntl.flock(self.fd,fcntl.LOCK_EX|fcntl.LOCK_NB) - return True - except: - return False - - def unlock(self): - self.fobj.close() - -class ConfigFileManager: - def __init__(self,rootdir): - self.filenamelist = [] - if not os.path.exists(rootdir): - os.mkdirs(rootdir) - self.rootdir = rootdir - - def SetRootDir(self,rootdir): - if not os.path.exists(rootdir): - os.mkdirs(rootdir) - self.rootdir=rootdir - - def AddFileName(self,filename): - self.filenamelist.append(filename) - file = os.path.join(self.rootdir,filename) - if not os.path.exists(file): - with open(file,'w+') as f: - f.close() - - def RemoveFileName(self,filename): - file = os.path.join(self.rootdir,filename) - if os.path.exists(file): - os.remove(file) - if filename in self.filenamelist: - self.filenamelist.remove(filename) - - def CheckFileNameExistence(self,filename): - if filename in self.filenamelist: - return True - else: - return False - - def WriteListToFile(self,list,filename): - file = os.path.join(self.rootdir,filename) - if os.path.exists(file): - with open(file,'w+') as f: - f.write(" ".join(list)) - return 0 - else: - return 1 - - def ReadListFromFile(self,file,section,option): - config = configparser.ConfigParser(allow_no_value=True) - if os.path.exists(file): - config.read(file) - try: - str=config[section][option] - list=str.strip().split(",") - return list - except Exception as e: - logging.error(e) - return False - else: - logging.error("no config file") - return True - -def not_empty(s): - return s and s.strip() - -class OriginProperty(): - - def __init__(self): - # 包含了本地所有源 http & ftp - self.local_sourcelist = {"http":[],"ftp":[]} - # 经过解析后的本地源,获取所有的分发属性 - self.local_origin = {"http":[],"ftp":[]} - # 允许的源列表 - self.allow_sources = [] - # 允许的源+属性 - self.allow_origin = {"http":[],"ftp":[]} - # 加载本地所有源 - self.init_local_origin() - # 进行属性解析 - self.analytic_properties(self.local_sourcelist) - - def init_local_origin(self): - http_origin = {} - ftp_orgin = {} - #apt policy 
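# --- illustrative sketch, not part of either side of this diff ---------------
# The surrounding init_local_origin() builds its origin table by parsing the
# text output of `apt-cache policy`. For comparison only, the same metadata
# (origin, label, archive/suite, component, site) can also be read directly
# through python-apt; a minimal sketch, assuming the host APT configuration
# is usable and python-apt is installed:
import apt_pkg
from apt.progress.base import OpProgress

def list_local_origins() -> None:
    apt_pkg.init()
    cache = apt_pkg.Cache(OpProgress())   # OpProgress(): no progress output
    for pkg_file in cache.file_list:
        if pkg_file.not_source:           # skip e.g. the local dpkg status file
            continue
        print(pkg_file.origin, pkg_file.label, pkg_file.archive,
              pkg_file.component, pkg_file.site)
# --- end sketch ---------------------------------------------------------------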
- sh_retval = os.popen("apt-cache policy").read().split("\n") - # policy = [ rv for rv in sh_retval if "http" in rv or "ftp" in rv or "release" in rv or "origin" in rv] - for rv in sh_retval: - if "http" in rv: - http_origin['sources'] = rv - http_origin['release'] = sh_retval[sh_retval.index(rv) + RELEASEOFFSET] - http_origin['origin'] = sh_retval[sh_retval.index(rv) + ORIGINOFFSET] - self.local_sourcelist['http'].append(http_origin.copy()) - elif "ftp" in rv: - ftp_orgin['sources'] = rv - ftp_orgin['release'] = sh_retval[sh_retval.index(rv) + RELEASEOFFSET] - ftp_orgin['origin'] = sh_retval[sh_retval.index(rv) + ORIGINOFFSET] - self.local_sourcelist['ftp'].append(ftp_orgin.copy()) - - def merge_origin(self, source_type, source_origin): - is_append = True - if source_type == HTTPTYPE: - if self.local_origin['http']: - for lo in self.local_origin['http']: - if lo['origin_source'] == source_origin['origin_source'] and lo['dist'] == source_origin['dist']: - lo['component'] = list(set(lo['component']).union(set(source_origin['component']))) - is_append = False - if is_append: - self.local_origin['http'].append(source_origin.copy()) - else: - self.local_origin['http'].append(source_origin.copy()) - elif source_type == FTPTYPE: - if self.local_origin['ftp']: - for lo in self.local_origin['ftp']: - if lo['origin_source'] == source_origin['origin_source'] and lo['dist'] == source_origin['dist']: - lo['component'] = list(set(lo['component']).union(set(source_origin['component']))) - is_append = False - if is_append: - self.local_origin['ftp'].append(source_origin.copy()) - else: - self.local_origin['ftp'].append(source_origin.copy()) - - def analytic_properties(self, local_sourcelist): - http_origin = {"component":[],"release":{}} - ftp_orgin = {"component":[],"release":{}} - dist_list = [] - # 经过解析后的本地源,获取所有的分发属性 - for ls in local_sourcelist['http']: - for item in filter(not_empty, ls['sources'].split(' ')): - if item.isdigit(): - http_origin['policy_priority'] = item - elif "http" in item: - http_origin['origin_source'] = item - elif "/" in item: - dist_list = item.split("/") - dist_list.pop() - http_origin['dist'] = "/".join(dist_list) - http_origin['component'].append(item.split("/")[1]) - elif item not in ARCHITECTUREMAP and item != "Packages": - http_origin['component'].append(item) - release_list = ls['release'].split(',') - release_list = [ rl.strip() for rl in release_list ] - if "release" in release_list[0]: - release_list[0] = release_list[0].lstrip("release").strip() - for rl in release_list: - if "=" in rl: - self.generate_dict(http_origin['release'], rl) - for item in filter(not_empty, ls['origin'].split(' ')): - if "origin" not in ls['origin']: - break - elif "origin" != item: - http_origin['origin'] = item - self.merge_origin(HTTPTYPE, http_origin) - http_origin = {"component":[],"release":{}} - - for ls in local_sourcelist['ftp']: - for item in filter(not_empty, ls['sources'].split(' ')): - if item.isdigit(): - ftp_orgin['policy_priority'] = item - elif "ftp" in item: - ftp_orgin['origin_source'] = item - elif "/" in item: - ftp_orgin['dist'] = item.split("/")[0] - ftp_orgin['component'].append(item.split("/")[1]) - elif item not in ARCHITECTUREMAP and item != "Packages": - ftp_orgin['component'].append(item) - release_list = ls['release'].split(',') - if "release " in release_list[0]: - release_list[0] = release_list[0].lstrip("release ") - for rl in release_list: - if "=" in rl: - self.generate_dict(ftp_orgin['release'], rl) - for item in filter(not_empty, ls['origin'].split(' 
')): - if "origin" not in ls['origin']: - break - elif "origin" != item: - ftp_orgin['origin'] = item - self.merge_origin(FTPTYPE, ftp_orgin) - ftp_orgin = {"component":[],"release":{}} - - def generate_dict(self, dict, item): - item = item.strip() - if item == "": - logging.warning("empty match string matches nothing") - return False - (what, value) = [ s for s in item.split("=")] - if what in ('o', 'origin'): - dict['origin'] = value - elif what in ("l", "label"): - dict['label'] = value - elif what in ("a", "suite", "archive"): - dict['archive'] = value - elif what in ("c", "component"): - dict['component'] = value - elif what in ("site",): - dict['site'] = value - elif what in ("n", "codename",): - dict['codename'] = value - else: - dict[what] = value - # raise UnknownMatcherError( - # "Unknown whitelist entry for matcher %s (value %s)" % ( - # what, value)) - - def get_allowed_sources(self): - # 源地址,在本地源列表中查找. 源服务器下发source.list为允许的源, 本模块屏蔽了sources.list.d下的源 - # 获取允许的源 - try: - old_sources_list = apt_pkg.config.find("Dir::Etc::sourcelist") - old_sources_list_d = apt_pkg.config.find("Dir::Etc::sourceparts") - old_cleanup = apt_pkg.config.find("APT::List-Cleanup") - apt_pkg.config.set("Dir::Etc::sourcelist", - os.path.abspath(SOURCESLIST)) - apt_pkg.config.set("Dir::Etc::sourceparts", "xxx") - apt_pkg.config.set("APT::List-Cleanup", "0") - slist = apt_pkg.SourceList() - slist.read_main_list() - self.allow_sources = slist.list - except Exception as e: - logging.error(str(e)) - finally: - apt_pkg.config.set("Dir::Etc::sourcelist", - old_sources_list) - apt_pkg.config.set("Dir::Etc::sourceparts", - old_sources_list_d) - apt_pkg.config.set("APT::List-Cleanup", - old_cleanup) - - def get_allowed_origin(self): - # 获取允许的源 - # 生成源与属性 - self.local_origin - self.allow_sources - self.allow_origin - try: - for item in self.allow_sources: - for lo in self.local_origin['http']: - if item.uri.strip('/') == lo['origin_source'].strip('/') and item.dist == lo['dist']: - self.allow_origin['http'].append(lo) - for lo in self.local_origin['ftp']: - if item.uri.strip('/') == lo['origin_source'].strip('/') and item.dist == lo['dist']: - self.allow_origin['ftp'].append(lo) - except Exception as e: - logging.error(str(e)) - -def deleteDuplicatedElementFromList(list): - resultList = [] - for item in list: - if not item in resultList: - resultList.append(item) - return resultList - -class UnattendUpgradeFilter(): - def __init__(self) -> None: - pass - - def GetAllowOrigins(self): - # 获取源属性 - self.origin_property = OriginProperty() - self.origin_property.get_allowed_sources() - self.origin_property.get_allowed_origin() - - self.allowed_origins = get_allowed_origins(self.origin_property.allow_origin) - - self.allowed_origins = deleteDuplicatedElementFromList(self.allowed_origins) - # logging.info(_("Allowed origins: %s"), - # self.allowed_origins) - return self.allowed_origins - -class AcquireStatistics: - def __init__(self,fetcher) -> None: - self.fetcher = fetcher - self.local_pkg_amount = 0 - self.remote_pkg_amount = 0 - self.incomplete_pkg_amount = 0 - self.local_pkg_paths = [] - - def GetAquireStatisticsOfPkgs(self): - for item in self.fetcher.items: - self.local_pkg_paths.append(item.destfile) - if not item.complete: - self.incomplete_pkg_amount+=1 - if item.local: - self.local_pkg_amount+=1 - else: - self.remote_pkg_amount+=1 - - - def ResetFetcher(self,fetcher): - self.fetcher=fetcher - self.local_pkg_paths=[] - self.local_pkg_amount = 0 - self.remote_pkg_amount = 0 - self.incomplete_pkg_amount = 0 - -class 
KylinSystemUpdater: +class SystemUpdater: def __init__(self) -> None: + self.update_detect_status = False + self.update_detect_event = Event() + self.update_list = [] + self.resolve_depend_status = False + self.resolve_depend_status_event = Event() + self.remove_pkgs = [] + self.install_finish_status = False + self.install_finish_status_event = Event() + self.install_finish_group = [] DBusGMainLoop(set_as_default=True) self.loop = GLib.MainLoop() self.system_bus = dbus.SystemBus() - self.update_proxy = self.system_bus.get_object('com.kylin.systemupgrade','/com/kylin/systemupgrade') - self.data_collect_proxy = self.system_bus.get_object('com.kylin.systemupgrade','/com/kylin/systemupgrade/utils') - self.data_collect_interface = dbus.Interface(self.data_collect_proxy,dbus_interface='com.kylin.systemupgrade.interface') + self.update_proxy = self.system_bus.get_object('com.kylin.systemupgrade','/com/kylin/systemupgrade',follow_name_owner_changes=True) self.update_interface = dbus.Interface(self.update_proxy,dbus_interface='com.kylin.systemupgrade.interface') - self.success = False - self.whitelist_with_candidate_version = [] - self.update_group = [] - # self.group_rec=[] - # self.single_rec=[] - self.errdict = {} + self.update_proxy.connect_to_signal('UpdateDetectFinished',self.update_detect_finished_handler) + self.update_proxy.connect_to_signal('UpdateFixBrokenStatus',self.update_fix_broken_status) + self.update_proxy.connect_to_signal('UpdateDependResloveStatus',self.update_depend_resolve_status) + self.update_proxy.connect_to_signal('UpdateDloadAndInstStaChanged',self.update_download_install_status) + self.update_proxy.connect_to_signal('UpdateInstallFinished',self.update_install_finished) + + def update_detect_finished_handler(self,success,updatelist,error_status,error_cause): + print(_("update detect finished:sucess:%s,updatelist:%s,error_status:%s,error_cause:%s")\ + %(success,",".join(updatelist),error_status,error_cause)) + self.update_detect_status = success + self.update_list = [] + if success: + try: + for update_group in updatelist: + json_file_path = ("/var/lib/kylin-system-updater/json/%s.json"%(update_group)) + if os.path.exists(json_file_path): + with open(json_file_path,'r') as f: + data = json.load(f) + for key in data['upgrade_list'].keys(): + if key in ["total_download_size","total_install_size"]: + pass + else: + self.update_list.append(key) + for key in data['install_list'].keys(): + if key in ["total_download_size","total_install_size"]: + pass + else: + self.update_list.append(key) + except Exception as e: + print(e) + self.update_detect_event.set() + self.QuitMainLoop() - - def AddPackageInstallErrorRecord(self,pkg,errmsg): - self.errdict.update({pkg:errmsg}) - - def DumpInstallErrorRecord(self): - errlist = [] - for key in self.errdict.keys(): - errlist.append("%s,%s"%(key,self.errdict[key])) - return errlist - - def DataBackendCollect(self,updateinfo,json_file): - self.data_collect_interface.DataBackendCollect(updateinfo,json_file) - - def InsertUpgradeHistory(self,history): - return self.data_collect_interface.InsertUpgradeHistory(history) - - def GetConfigValue(self,section,value): - return self.update_interface.GetConfigValue(section,value) - - def SetConfigValue(self,section,option,value): - return self.update_interface.SetConfigValue(section,option,value) - - def UpdateDetect(self): - ret = self.update_interface.UpdateDetect() - return ret - - def GetUnattendedUpgradeValue(self): - ret = self.update_interface.UnattendedUpgradeValue('get','') - return ret[0] - - 
def CheckRebootRequired(self,msg): - ret = self.update_interface.CheckRebootRequired(msg) - - def GetDatabaseInfo(self,section,value): - return self.update_interface.GetSetDatabaseInfo(1,section,value) - - def ConnectToSignals(self): - def update_detect_finished_handler(success,updatelist,error_status,error_cause): - if success: - logging.info("update detect success,quiting main loop:%s,%s"%(success,",".join(updatelist))) - self.update_group = updatelist - try: - for update_group in self.update_group: - json_file_path = ("/var/lib/kylin-system-updater/json/%s.json"%(update_group)) - if os.path.exists(json_file_path): - with open(json_file_path,'r') as f: - data = json.load(f) - # package_name = data['package'] - # upgrade_list = [] - # install_list = [] - ''' - gp = PackageGroup(package_name) - for key in data['upgrade_list'].keys(): - PackageGroup.AddPackageToUpgradeList(Package(key,data['upgrade_list'][key]['new_version'])) - # upgrade_list.append((key,data['upgrade_list'][key]['new_version'])) - for key in data['install_list'].keys(): - PackageGroup.AddPackageToInstallList(Package(key,data['install_list'][key]['new_version'])) - # install_list.append((key,data['install_list'][key]['new_version'])) - self.group_list.append(gp) - ''' - for key in data['upgrade_list'].keys(): - if key in ["total_download_size","total_install_size"]: - pass - else: - self.whitelist_with_candidate_version.append((key,data['upgrade_list'][key]['new_version'])) - for key in data['install_list'].keys(): - if key in ["total_download_size","total_install_size"]: - pass - else: - self.whitelist_with_candidate_version.append((key,data['install_list'][key]['new_version'])) - ''' - if os.path.exists(UNATTENDED_UPGRADE_PKG_LIST_FILE_PATH): - with open(UNATTENDED_UPGRADE_PKG_LIST_FILE_PATH, "r") as f: - row_data = json.load(f) - for key in row_data['upgrade_list'].keys(): - self.whitelist_with_candidate_version.append((key,row_data['upgrade_list'][key]['new_version'])) - for key in row_data['install_list'].keys(): - self.whitelist_with_candidate_version.append((key,row_data['install_list'][key]['new_version'])) - for key in row_data['group_json'].keys(): - self.group_rec.append((key,row_data['group_json'][key]['new_version'],row_data['group_json'][key]['changelog'])) - for key in row_data['single_json'].keys(): - self.single_rec.append((key,row_data['single_json'][key]['new_version'],row_data['single_json'][key]['changelog'])) - ''' - except Exception as e: - logging.error(e) - self.loop.quit() - else: - WriteValueToFile(UNATTENDED_UPGRADE_CONFIG_FILE_PATH,"UNATTENDED_UPGRADE","autoupdate_run_status","idle") - logging.error("update detect failed:%s,%s"%(error_status,error_cause)) - os._exit(0) - return success - - self.update_proxy.connect_to_signal('UpdateDetectFinished',update_detect_finished_handler, - dbus_interface='com.kylin.systemupgrade.interface') - return - - - def RunMainloop(self): - logging.info("update manager:running mainloop") - self.loop.run() - - def QuitMainloop(self): - logging.info("update manager:quiting mainloop") - self.loop.quit() - - -class LoginManager: - def __init__(self) -> None: - DBusGMainLoop(set_as_default=True) - self.loop = GLib.MainLoop() - self.system_bus = dbus.SystemBus() - self.login_proxy = self.system_bus.get_object('org.freedesktop.login1', '/org/freedesktop/login1') - self.login_interface = dbus.Interface(self.login_proxy,dbus_interface='org.freedesktop.login1.Manager') - - def SetExtraInhibitShutdownDelaySec(self,time): - try: - 
self.login_interface.SetExtraInhibitShutdownDelaySec(time) - except Exception as e: - logging.error(e) - -class KylinBackupManager: - def __init__(self) -> None: - DBusGMainLoop(set_as_default=True) - self.loop = GLib.MainLoop() - self.system_bus = dbus.SystemBus() - self.backup_proxy = self.system_bus.get_object('com.kylin.backup','/') - self.backup_interface = dbus.Interface(self.backup_proxy,dbus_interface='com.kylin.backup.manager') - self.success = False - - def mount_backup_partition(self): - return self.backup_interface.Mount_backup_partition() - - def get_backup_state(self): - return self.backup_interface.getBackupState() - - def get_backup_comment_for_systemupdate(self): - return self.backup_interface.getBackupCommentForSystemUpdate() - - def auto_backup_for_system_update_noreturn(self,timeStamp,create_note,inc_note,userName,uid): - self.backup_interface.autoBackUpForSystemUpdate_noreturn(timeStamp,create_note,inc_note,userName,uid) - return - - def ConnectToSignals(self): - def backup_start_handler(result): - logging.debug("backup start result:%d"%result) - if result == 31 or result == 30: - logging.debug("backup start success") - else: - logging.error("backup start failed") - UpdateInfos = {} - UpdateInfos.update({"packageName":"kylin-unattended-upgrade"}) - UpdateInfos.update({"source":"kylin unattended upgrade"}) - UpdateInfos.update({"status":0}) - UpdateInfos.update({"errorCode":"backup start failed"}) - json_file = json.dumps(UpdateInfos.copy()) - UpdateInfos.clear() - kylin_system_updater.DataBackendCollect("UpdateInfos",json_file) - WriteValueToFile(UNATTENDED_UPGRADE_CONFIG_FILE_PATH,"UNATTENDED_UPGRADE","autoupdate_run_status","idle") - os._exit(1) + def update_fix_broken_status(self,resolver_status,remove_status,remove_pkgs,pkg_raw_description,delete_desc,error_string,error_desc): + print(_("update fix broken status:resolver_status:%s,remove_status:%s,error_string:%s,error_desc:%s")%(resolver_status,remove_status,error_string,error_desc)) + print(remove_pkgs,pkg_raw_description,delete_desc) + self.update_detect_status = False + self.update_list = [] + self.update_detect_event.set() + self.QuitMainLoop() - def backup_result_handler(result): - if result: - logging.debug("backup success,quiting main loop") - self.loop.quit() - else: - logging.error("backup failed") - UpdateInfos = {} - UpdateInfos.update({"packageName":"kylin-unattended-upgrade"}) - UpdateInfos.update({"source":"kylin unattended upgrade"}) - UpdateInfos.update({"status":0}) - UpdateInfos.update({"errorCode":"backup failed"}) - json_file = json.dumps(UpdateInfos.copy()) - UpdateInfos.clear() - kylin_system_updater.DataBackendCollect("UpdateInfos",json_file) - WriteValueToFile(UNATTENDED_UPGRADE_CONFIG_FILE_PATH,"UNATTENDED_UPGRADE","autoupdate_run_status","idle") - subprocess.Popen('dbus-send --system --type=signal / com.kylin.install.notification.BackupFailure',shell=True) - os._exit(1) - - def send_rate_handler(sta,pro): - logging.debug(("receive backup_rate_signal_handler 状态:%d 进度:%d"),sta,pro) - if pro == 100: - logging.debug("backup finished, quiting mainloop") - self.loop.quit() + def update_depend_resolve_status(self,resolver_status,remove_status,remove_pkgs,pkg_raw_description,delete_description,error_string,error_desc): + print(_("update depend resove status:%s,remove status:%s,remove pkgs:%s,pkg raw description:%s,delete_descrition:%s,error string:%s,error desc:%s")\ + 
%(resolver_status,remove_status,",".join(remove_pkgs),",".join(pkg_raw_description),",".join(delete_description),error_string,error_desc)) + self.resolve_depend_status = resolver_status + self.remove_pkgs = remove_pkgs + self.resolve_depend_status_event.set() + self.QuitMainLoop() - self.backup_proxy.connect_to_signal('sendStartBackupResult',backup_start_handler, - dbus_interface='com.kylin.backup.manager') - self.backup_proxy.connect_to_signal('sendBackupResult',backup_result_handler, - dbus_interface='com.kylin.backup.manager') - self.backup_proxy.connect_to_signal('sendRate',send_rate_handler, - dbus_interface='com.kylin.backup.manager') - return - - def RunMainloop(self): - logging.info("backup manager:running mainloop") - self.loop.run() - - def QuitMainloop(self): - logging.info("backup manager:quiting mainloop") - self.loop.quit() - -def ReadValueFromFile(file,section,option): - config=configparser.ConfigParser(allow_no_value=True) - config.optionxform = str - try: - config.read(file) - value = config[section][option] - except Exception as e: - return '' - return value - -def Backup(): - # do backup - kylin_backup_manager = KylinBackupManager() - backup_partition_status = kylin_backup_manager.mount_backup_partition() - logging.info("backup partition status:%d"%backup_partition_status) - if backup_partition_status not in [0,5]: - logging.error("backup partition error:%d"%backup_partition_status) - # return UnattendedUpgradesResult(False,"backup partition error") - return False - status_code,result = kylin_backup_manager.get_backup_state() - - if result == 0 and status_code == 99: - pass - else: - logging.error("backup state error:",status_code,result) - # return UnattendedUpgradesResult(False,"backup state error") - return False - #node_name,node_status = kylin_backup_manager.get_backup_comment_for_systemupdate() - # ts = get_timestamp() - ts = "自动备份" - kylin_backup_manager.ConnectToSignals() - create_note = "系统升级新建备份" - inc_note="系统升级增量备份" - userName="root" - uid=os.getuid() - WriteValueToFile(UNATTENDED_UPGRADE_CONFIG_FILE_PATH,"UNATTENDED_UPGRADE","autoupdate_run_status","backup") - kylin_backup_manager.auto_backup_for_system_update_noreturn(ts,create_note,inc_note,userName,uid) - kylin_backup_manager.RunMainloop() - return True - ''' - if node_name != timeStamp: - logging.info("need backup") - #do actual backup - kylin_backup_manager.ConnectToSignals() - create_note = "系统升级新建备份" - inc_note="系统升级增量备份" - userName="root" - uid=os.getuid() - kylin_backup_manager.auto_backup_for_system_update_noreturn(timeStamp,create_note,inc_note,userName,uid) - WriteValueToFile(UNATTENDED_UPGRADE_CONFIG_FILE_PATH,"UNATTENDED_UPGRADE","autoupdate_run_status","backup") - kylin_backup_manager.RunMainloop() - ''' - -PkgPin = namedtuple('PkgPin', ['pkg', 'priority']) -PkgFilePin = namedtuple('PkgFilePin', ['id', 'priority']) - - -class UnattendedUpgradesCache(apt.Cache): - - def __init__(self, rootdir, whitelist_with_version,blacklist): + def update_download_install_status(self,group,progress,status,details): + print(_("%s update progress:%d,status:%s,details:%s")%(",".join(group),progress,status,details)) - # self._cached_candidate_pkgnames = set() # type: Set[str] - unattended_upgrade_filter = UnattendUpgradeFilter() - self.allowed_origins = unattended_upgrade_filter.GetAllowOrigins() - logging.info(_("Allowed origins are: %s"), - ", ".join(self.allowed_origins)) - - self.blacklist = blacklist - ''' - apt_pkg.config.value_list( - "Unattended-Upgrade::Package-Blacklist") - ''' - # 
logging.info(_("Initial blacklist: %s"), " ".join(self.blacklist)) - # logging.info("pkg list with version:",whitelist_with_version) - self.whitelist_with_version = whitelist_with_version - self.whitelist = [] - self.get_white_list() - # self.whitelist_with_version = [] - # self.get_white_list_with_version() - # self.whitelist = apt_pkg.config.value_list( - # "Unattended-Upgrade::Package-Whitelist") - self.strict_whitelist = False - ''' - apt_pkg.config.find_b( - "Unattended-Upgrade::Package-Whitelist-Strict", False) - ''' - # logging.info(_("Initial whitelist (%s): %s"), - # "strict" if self.strict_whitelist else "not strict", - # " ".join(self.whitelist)) - apt.Cache.__init__(self, rootdir=rootdir) - - # pre-heat lazy-loaded modules to avoid crash on python upgrade - # datetime.datetime.strptime("", "") - - # generate versioned_kernel_pkgs_regexp for later use - # self.versioned_kernel_pkgs_regexp = versioned_kernel_pkgs_regexp() - # self.running_kernel_pkgs_regexp = running_kernel_pkgs_regexp() - ''' - if self.versioned_kernel_pkgs_regexp: - logging.debug("Using %s regexp to find kernel packages", - self.versioned_kernel_pkgs_regexp.pattern) + def update_install_finished(self,success,group,error_string,error_desc): + print(_("update install finisih success:%s,group:%s,error string:%s,error desc:%s")\ + %(success,",".join(group),error_string,error_desc)) + self.install_finish_status = success + self.install_finish_group=group + self.install_finish_status_event.set() + self.QuitMainLoop() + + def download(self): + print(_("start download")) + self.UpdateDetect() + self.RunMainLoop() + print(_("update detect finish:%s,%s")%(self.update_detect_status,",".join(self.update_list))) + if self.update_detect_status and len(self.update_list)>0: + pass + # elif not self.update_detect_status and 'kylin-system-updater' in self.update_list: + # print(_("self update finished")) else: - logging.debug("APT::VersionedKernelPackages is not set") - if self.running_kernel_pkgs_regexp: - logging.debug("Using %s regexp to find running kernel packages", - self.running_kernel_pkgs_regexp.pattern) - ''' - def get_white_list(self): - for name_with_version in self.whitelist_with_version: - self.whitelist.append(name_with_version[0]) - - def find_better_version(self, pkg): - # type (apt.Package) -> apt.package.Version - if pkg.is_installed and pkg.versions[0] > pkg.installed: - logging.debug( - "Package %s has a higher version available, checking if it is " - "from an allowed origin and is not pinned down.", pkg.name) - for v in pkg.versions: - if pkg.installed < v \ - and pkg.installed.policy_priority <= v.policy_priority \ - and is_in_allowed_origin(v, self.allowed_origins): - return v - return None - - def find_kept_packages(self, dry_run): - # type: (bool) -> KeptPkgs - """ Find kept packages not collected already """ - - kept_packages = KeptPkgs(set) - if dry_run: - logging.info(_("The list of kept packages can't be calculated in " - "dry-run mode.")) - return kept_packages - for pkg in self: - better_version = self.find_better_version(pkg) - if better_version: - logging.info(self.kept_package_excuse(pkg._pkg, - self.blacklist, - self.whitelist, - self.strict_whitelist, - better_version)) - kept_packages.add(pkg, better_version, self) - return kept_packages - - def kept_package_excuse(self, pkg, # apt.Package - blacklist, # type: List[str] - whitelist, # type: List[str] - strict_whitelist, # type: bool - better_version # type: apt.package.Version - ): - # type: (...) 
-> str - """ Log the excuse the package is kept back for """ - if pkg.selected_state == apt_pkg.SELSTATE_HOLD: - return _("Package %s is marked to be held back.") % pkg.name - elif is_pkgname_in_blacklist(pkg.name, blacklist): - return _("Package %s is blacklisted.") % pkg.name - elif whitelist: - if strict_whitelist: - if not is_pkgname_in_whitelist(pkg.name, whitelist): - return (_( - "Package %s is not on the strict whitelist.") - % pkg.name) + print(_("no pkgs to download")) + return False + cache = apt.Cache() + for pkg in self.update_list: + try: + package = cache[pkg] + if not package.installed: + package.mark_install() else: - if not is_pkgname_in_whitelist(pkg.name, whitelist): - return (_( - "Package %s is not whitelisted and it is not a" - " dependency of a whitelisted package.") - % pkg.name) - elif not any([o.trusted for o in better_version.origins]): - return _("Package %s's origin is not trusted.") % pkg.name - return (_("Package %s is kept back because a related package" - " is kept back or due to local apt_preferences(5).") - % pkg.name) - - def pinning_from_regex_list(self, regexps, priority): - # type: (List[str], int) -> List[PkgPin] - """ Represent blacklist as Python regexps as list of pkg pinnings""" - - pins = [] # type: List[PkgPin] - for regex in regexps: - if python_regex_is_posix(regex): - pins.append(PkgPin('/^' + regex + '/', priority)) - else: - # Python regex is not also an equivalent POSIX regexp. - # This is expected to be rare. Go through all the package names - # and pin all the matching ones. - for pkg in self._cache.packages: - if re.match(regex, pkg.name): - pins.append(PkgPin(pkg.name, priority)) - return pins - - def pinning_from_config(self): - # type: () -> List[Union[PkgPin, PkgFilePin]] - """ Represent configuration as list of pinnings - - Assumes self.allowed_origins to be already set. 
- """ - - pins = [] # type: List[Union[PkgPin, PkgFilePin]] - - # mark not allowed origins with 'never' pin - for pkg_file in self._cache.file_list: # type: ignore - if not is_allowed_origin(pkg_file, self.allowed_origins): - # Set the magic 'never' pin on not allowed origins - logging.debug("Marking not allowed %s with %s pin", pkg_file, - NEVER_PIN) - pins.append(PkgFilePin(pkg_file.id, NEVER_PIN)) - # TODO(rbalint) pin not trusted origins with NEVER_PIN - elif self.strict_whitelist: - # set even allowed origins to -1 and set individual package - # priorities up later - pins.append(PkgFilePin(pkg_file.id, -1)) - - # mark blacklisted packages with 'never' pin - pins.extend(self.pinning_from_regex_list( # type: ignore - self.blacklist, NEVER_PIN)) - # set priority of whitelisted packages to high - pins.extend(self.pinning_from_regex_list( # type: ignore - self.whitelist, 900)) - if self.strict_whitelist: - policy = self._depcache.policy - # pin down already pinned packages which are not on the whitelist - # to not install locally pinned up packages accidentally - for pkg in self._cache.packages: - if pkg.has_versions: - pkg_ver = policy.get_candidate_ver(pkg) # type: ignore - if pkg_ver is not None \ - and policy.get_priority(pkg_ver) > -1: - # the pin is higher than set for allowed origins, thus - # there is extra pinning configuration - if not is_pkgname_in_whitelist(pkg.name, - self.whitelist): - pins.append(PkgPin(pkg.name, NEVER_PIN)) - - return pins - - def apply_pinning(self, pins): - # type: (List[Union[PkgPin, PkgFilePin]]) -> None - """ Apply the list of pins """ - - policy = self._depcache.policy - pkg_files = {f.id: f for f in self._cache.file_list} # type: ignore - for pin in pins: - logging.debug("Applying pinning: %s" % str(pin)) - if isinstance(pin, PkgPin): - policy.create_pin('Version', pin.pkg, '*', # type: ignore - pin.priority) - elif isinstance(pin, PkgFilePin): - logging.debug("Applying pin %s to package_file: %s" - % (pin.priority, str(pkg_files[pin.id]))) - policy.set_priority(pkg_files[pin.id], # type: ignore - pin.priority) - - def open(self, progress=None): - apt.Cache.open(self, progress) - # apply pinning generated from unattended-upgrades configuration - # self.apply_pinning(self.pinning_from_config()) - - - def adjust_candidate_with_version(self,pkg,version): - if pkg.candidate.version == version: - return True - for v in pkg.versions: - if v.version == version: - logging.debug("pkg %s adjusting candidate version: %s" %(pkg.name,v)) - if is_in_allowed_origin(v,self.allowed_origins): - pkg.candidate = v - return True - return False - - def adjust_candidate(self, pkg): - # type: (apt.Package) -> bool - """ Adjust origin and return True if adjustment took place - - This is needed when e.g. a package is available in - the security pocket but there is also a package in the - updates pocket with a higher version number - """ - try: - new_cand = ver_in_allowed_origin(pkg, self.allowed_origins) - # Only adjust to lower versions to avoid flipping back and forth - # and to avoid picking a newer version, not selected by apt. - # This helps avoiding upgrades to experimental's packages. 
- if pkg.candidate is not None: #and new_cand < pkg.candidate: - logging.debug("adjusting candidate version: %s" % new_cand) - pkg.candidate = new_cand - return True - else: + package.mark_upgrade() + except Exception as e: + print(e) return False - except NoAllowedOriginError: - return False - - def call_checked(self, function, pkg, **kwargs): - """ Call function and check if package is in the wanted state - """ + list = apt_pkg.SourceList() + list.read_main_list() + recs = cache._records + pm = apt_pkg.PackageManager(cache._depcache) + fetcher = apt_pkg.Acquire(AcquireProgress()) try: - function(pkg, **kwargs) - except SystemError as e: - logging.warning( - _("package %s upgradable but fails to " - "be marked for upgrade (%s)"), pkg.name, e) - self.clear() - return False - - return ((function == apt.package.Package.mark_upgrade - or function == apt.package.Package.mark_install) - and (pkg.marked_upgrade or pkg.marked_install)) - - def call_adjusted(self, function, pkg, **kwargs): - """Call function, but with adjusting - packages in changes to come from allowed origins - - Note that as a side effect more package's candidate can be - adjusted than only the one's in the final changes set. - """ - new_pkgs_to_adjust = [] # List[str] - - # if not is_pkg_change_allowed(pkg, self.blacklist, self.whitelist, - # self.strict_whitelist): - # return - - # if function == apt.package.Package.mark_upgrade \ - # and not pkg.is_upgradable: - # if not apt_pkg.config.find_b("Unattended-Upgrade::Allow-downgrade", - # False): - # return - # else: - # function = apt.package.Package.mark_install - - marking_succeeded = self.call_checked(function, pkg, **kwargs) - - if not marking_succeeded: - logging.error("%s mark failed"%pkg.name) - - return marking_succeeded - ''' - if (not marking_succeeded - or not check_changes_for_sanity(self, desired_pkg=pkg)) \ - and allow_marking_fallback(): - logging.debug("falling back to adjusting %s's dependencies" - % pkg.name) - self.clear() - # adjust candidates in advance if needed - for pkg_name in self._cached_candidate_pkgnames: - self.adjust_candidate(self[pkg_name]) - - self.adjust_candidate(pkg) - for dep in transitive_dependencies(pkg, self, level=1): - try: - self.adjust_candidate(self[dep]) - except KeyError: - pass - - self.call_checked(function, pkg, **kwargs) - - for marked_pkg in self.get_changes(): - if marked_pkg.name in self._cached_candidate_pkgnames: - continue - if not is_in_allowed_origin(marked_pkg.candidate, - self.allowed_origins): - try: - ver_in_allowed_origin(marked_pkg, - self.allowed_origins) - # important! 
this avoids downgrades below - if pkg.is_installed and not pkg.is_upgradable and \ - apt_pkg.config.find_b("Unattended-Upgrade::Allow-" - "downgrade", False): - continue - new_pkgs_to_adjust.append(marked_pkg) - except NoAllowedOriginError: - pass - - if new_pkgs_to_adjust: - new_pkg_adjusted = False - for pkg_to_adjust in new_pkgs_to_adjust: - if self.adjust_candidate(pkg_to_adjust): - self._cached_candidate_pkgnames.add(pkg_to_adjust.name) - new_pkg_adjusted = True - if new_pkg_adjusted: - self.call_adjusted(function, pkg, **kwargs) - ''' - def mark_upgrade_adjusted(self, pkg, **kwargs): - self.call_adjusted(apt.package.Package.mark_upgrade, pkg, **kwargs) - - def mark_install_adjusted(self, pkg, **kwargs): - self.call_adjusted(apt.package.Package.mark_install, pkg, **kwargs) - - -class LogInstallProgress(apt.progress.base.InstallProgress): - """ Install progress that writes to self.progress_log - (/var/run/unattended-upgrades.progress by default) - """ - - def __init__(self, logfile_dpkg, verbose=False, - progress_log=PROGRESS_LOG): - # type: (str, bool, str) -> None - apt.progress.base.InstallProgress.__init__(self) - self.logfile_dpkg = logfile_dpkg - self.progress_log = progress_log - # self.progress_log = os.path.join(apt_pkg.config.find_dir("Dir"), - # progress_log) - self.verbose = verbose - self.output_logfd = None # type: int - self.start_time = None - self.max_delay = 3600 - - def status_change(self, pkg, percent, status): - ''' - if self.start_time is None: - self.start_time = time.time() - else: - if (time.time() - self.start_time) > self.max_delay: - logging.warning(_( - "Giving up on lockfile after %s minutes of delay"), - self.max_delay / 60) - sys.exit(1) - ''' - - # type: (str, float, str) -> None - with open(self.progress_log, "w") as f: - per=str(int(float(percent))) - f.write("%s"%per) - #f.write(_("Progress: %s %% (%s)") % (percent, pkg)) - ''' - if re.search("Installed",status): - UpdateInfos = {} - UpdateInfos.update({"packageName":"kylin-unattended-upgrade"}) - UpdateInfos.update({"appname":str(pkg)}) - UpdateInfos.update({"source":"kylin unattended upgrade"}) - UpdateInfos.update({"status":1}) - json_file = json.dumps(UpdateInfos.copy()) - UpdateInfos.clear() - kylin_system_updater.DataBackendCollect("UpdateInfos",json_file) - ''' - logging.info("%s:%s:%s"%(pkg,percent,status)) - - def error(self,pkg, errormsg): - ''' - for key in package_deps.keys(): - if str(pkg) in package_deps[key]: - group_name = kylin_system_updater.FindPackageGroup(key) - UpdateInfos = {} - UpdateInfos.update({"packageName":"kylin-unattended-upgrade"}) - UpdateInfos.update({"appname":group_name}) - UpdateInfos.update({"source":"kylin unattended upgrade"}) - UpdateInfos.update({"status":0}) - UpdateInfos.update({"errorCode":str(errormsg)}) - json_file = json.dumps(UpdateInfos.copy()) - UpdateInfos.clear() - kylin_system_updater.DataBackendCollect("UpdateInfos",json_file) - kylin_system_updater.AddPackageInstallErrorRecord(str(pkg),str(errormsg)) - ''' - logging.error("%s:%s"%(pkg,errormsg)) - kylin_system_updater.AddPackageInstallErrorRecord(str(pkg),str(errormsg)) - - - def _fixup_fds(self): - # () -> None - required_fds = [0, 1, 2, # stdin, stdout, stderr - self.writefd, - self.write_stream.fileno(), - self.statusfd, - self.status_stream.fileno() - ] - # ensure that our required fds close on exec - for fd in required_fds[3:]: - old_flags = fcntl.fcntl(fd, fcntl.F_GETFD) - fcntl.fcntl(fd, fcntl.F_SETFD, old_flags | fcntl.FD_CLOEXEC) - # close all fds - proc_fd = "/proc/self/fd" - if 
os.path.exists(proc_fd): - error_count = 0 - for fdname in os.listdir(proc_fd): - try: - fd = int(fdname) - except Exception: - print("ERROR: can not get fd for %s" % fdname) - if fd in required_fds: - continue - try: - os.close(fd) - # print("closed: ", fd) - except OSError as e: - # there will be one fd that can not be closed - # as its the fd from pythons internal diropen() - # so its ok to ignore one close error - error_count += 1 - if error_count > 1: - print("ERROR: os.close(%s): %s" % (fd, e)) - - def _redirect_stdin(self): - # type: () -> None - REDIRECT_INPUT = os.devnull - fd = os.open(REDIRECT_INPUT, os.O_RDWR) - os.dup2(fd, 0) - - def _redirect_output(self): - # type: () -> None - # do not create log in dry-run mode, just output to stdout/stderr - if not apt_pkg.config.find_b("Debug::pkgDPkgPM", False): - logfd = self._get_logfile_dpkg_fd() - os.dup2(logfd, 1) - os.dup2(logfd, 2) - - def _get_logfile_dpkg_fd(self): - # type: () -> int - logfd = os.open( - self.logfile_dpkg, os.O_RDWR | os.O_APPEND | os.O_CREAT, 0o640) - try: - adm_gid = grp.getgrnam("adm").gr_gid - os.fchown(logfd, 0, adm_gid) - except (KeyError, OSError): - pass - return logfd - - def update_interface(self): - # type: () -> None - # call super class first - apt.progress.base.InstallProgress.update_interface(self) - self._do_verbose_output_if_needed() - - def _do_verbose_output_if_needed(self): - # type: () -> None - # if we are in debug mode, nothing to be more verbose about - if apt_pkg.config.find_b("Debug::pkgDPkgPM", False): - return - # handle verbose - if self.verbose: - if self.output_logfd is None: - self.output_logfd = os.open(self.logfile_dpkg, os.O_RDONLY) - os.lseek(self.output_logfd, 0, os.SEEK_END) - try: - select.select([self.output_logfd], [], [], 0) - # FIXME: this should be OSError, but in py2.7 it is still - # select.error - except select.error as e: - if e.errno != errno.EINTR: # type: ignore - logging.exception("select failed") - # output to stdout in verbose mode only - os.write(1, os.read(self.output_logfd, 1024)) - - def _log_in_dpkg_log(self, msg): - # type: (str) -> None - logfd = self._get_logfile_dpkg_fd() - os.write(logfd, msg.encode("utf-8")) - os.close(logfd) - - def finish_update(self): - # type: () -> None - self._log_in_dpkg_log("Log ended: %s\n\n" - % LoggingDateTime.as_string()) - - def fork(self): - # type: () -> int - self._log_in_dpkg_log("Log started: %s\n" - % LoggingDateTime.as_string()) - pid = os.fork() - if pid == 0: - self._fixup_fds() - self._redirect_stdin() - self._redirect_output() - return pid - - -class Unlocked: - """ - Context manager for unlocking the apt lock while cache.commit() is run - """ - - def __enter__(self): - # type: () -> None - try: - apt_pkg.pkgsystem_unlock_inner() - except Exception: - # earlier python-apt used to leak lock - logging.warning("apt_pkg.pkgsystem_unlock() failed due to not " - "holding the lock but trying to continue") - pass - - def __exit__(self, exc_type, exc_value, exc_tb): - # type: (object, object, object) -> None - apt_pkg.pkgsystem_lock_inner() - - -class KeptPkgs(defaultdict): - """ - Packages to keep by highest allowed pretty-printed origin - - """ - def add(self, pkg, # type: apt.Package - version, # type: apt.package.Version - cache # type: UnattendedUpgradesCache - ): - # type: (...) 
-> None - for origin in version.origins: - if is_allowed_origin(origin, cache.allowed_origins): - self[origin.origin + " " + origin.archive].add(pkg.name) - return - - -class UnattendedUpgradesResult: - """ - Represent the (potentially partial) results of an unattended-upgrades - run - """ - def __init__(self, - success, # type: bool - result_str="", # type: str - pkgs=[], # type: List[str] - pkgs_kept_back=KeptPkgs(set), # type: KeptPkgs - pkgs_removed=[], # type: List[str] - pkgs_kept_installed=[], # type: List[str] - update_stamp=False # type: bool - ): - # type: (...) -> None - self.success = success - self.result_str = result_str - self.pkgs = pkgs - self.pkgs_kept_back = pkgs_kept_back - self.pkgs_removed = pkgs_removed - self.pkgs_kept_installed = pkgs_kept_installed - self.update_stamp = update_stamp - - -def is_dpkg_journal_dirty(): - # type: () -> bool - """ - Return True if the dpkg journal is dirty - (similar to debSystem::CheckUpdates) - """ - logging.debug("checking whether dpkg journal is dirty") - d = os.path.join("/var/lib/dpkg/", - #os.path.dirname(apt_pkg.config.find_file("Dir::State::status")), - "updates") - for f in os.listdir(d): - if re.match("[0-9]+", f) or re.match("tmp.i",f): - return True - return False - -def get_abnormally_installed_pkg_count(): - output = subprocess.check_output('dpkg -l|grep ^i[^i]|wc -l',shell=True) - return output.decode().strip() - -def signal_handler(signal, frame): - # type: (int, object) -> None - logging.warning("SIGTERM received, will stop") - global SIGNAL_STOP_REQUEST - SIGNAL_STOP_REQUEST = True - - -def log_once(msg): - # type: (str) -> None - global logged_msgs - if msg not in logged_msgs: - logging.info(msg) - logged_msgs.add(msg) # type: ignore - - -def should_stop(): - # type: () -> bool - """ - Return True if u-u needs to stop due to signal received or due to the - system started to run on battery. - """ - if SIGNAL_STOP_REQUEST: - logging.warning("SIGNAL received, stopping") - return True - ''' - try: - if apt_pkg.config.find_b("Unattended-Upgrade::OnlyOnACPower", True) \ - and subprocess.call("on_ac_power") == 1: - logging.warning("System is on battery power, stopping") - return True - except FileNotFoundError: - log_once( - _("Checking if system is running on battery is skipped. Please " - "install powermgmt-base package to check power status and skip " - "installing updates when the system is running on battery.")) - if apt_pkg.config.find_b( - "Unattended-Upgrade::Skip-Updates-On-Metered-Connections", True): - try: - if NetworkMonitor.get_network_metered( - NetworkMonitor.get_default()): - logging.warning(_("System is on metered connection, stopping")) - return True - except NameError: - log_once(_("Checking if connection is metered is skipped. 
Please " - "install python3-gi package to detect metered " - "connections and skip downloading updates.")) - ''' - return False - - -def substitute(line): - # type: (str) -> str - """ substitude known mappings and return a new string - - Currently supported ${distro-release} - """ - mapping = {"distro_codename": get_distro_codename(), - "distro_id": get_distro_id()} - return string.Template(line).substitute(mapping) - - -def get_distro_codename(): - # type: () -> str - return DISTRO_CODENAME - - -def get_distro_id(): - # type: () -> str - return DISTRO_ID - - -def allow_marking_fallback(): - # type: () -> bool - return apt_pkg.config.find_b( - "Unattended-Upgrade::Allow-APT-Mark-Fallback", - get_distro_codename() != "sid") - - -def versioned_kernel_pkgs_regexp(): - apt_versioned_kernel_pkgs = apt_pkg.config.value_list( - "APT::VersionedKernelPackages") - if apt_versioned_kernel_pkgs: - return re.compile("(" + "|".join( - ["^" + p + "-[1-9][0-9]*\\.[0-9]+\\.[0-9]+-[0-9]+(-.+)?$" - for p in apt_versioned_kernel_pkgs]) + ")") - else: - return None - - -def running_kernel_pkgs_regexp(): - apt_versioned_kernel_pkgs = apt_pkg.config.value_list( - "APT::VersionedKernelPackages") - if apt_versioned_kernel_pkgs: - running_kernel_version = subprocess.check_output( - ["uname", "-r"], universal_newlines=True).rstrip() - kernel_escaped = re.escape(running_kernel_version) - try: - kernel_noflavor_escaped = re.escape( - re.match("[1-9][0-9]*\\.[0-9]+\\.[0-9]+-[0-9]+", - running_kernel_version)[0]) - return re.compile("(" + "|".join( - [("^" + p + "-" + kernel_escaped + "$|^" - + p + "-" + kernel_noflavor_escaped + "$") - for p in apt_versioned_kernel_pkgs]) + ")") - except TypeError: - # flavor could not be cut from version - return re.compile("(" + "|".join( - [("^" + p + "-" + kernel_escaped + "$") - for p in apt_versioned_kernel_pkgs]) + ")") - else: - return None - - -def get_allowed_origins_legacy(): - # type: () -> List[str] - """ legacy support for old Allowed-Origins var """ - allowed_origins = [] # type: List[str] - key = "Unattended-Upgrade::Allowed-Origins" - try: - for s in apt_pkg.config.value_list(key): - # if there is a ":" use that as seperator, else use spaces - if re.findall(r'(? bool - """ - take a whitelist string in the form "origin=Debian,label=Debian-Security" - and match against the given python-apt origin. A empty whitelist string - never matches anything. 
- """ - whitelist = whitelist.strip() - if whitelist == "": - logging.warning("empty match string matches nothing") - return False - res = True - # make "\," the html quote equivalent - whitelist = whitelist.replace("\\,", "%2C") - for token in whitelist.split(","): - # strip and unquote the "," back - (what, value) = [s.strip().replace("%2C", ",") - for s in token.split("=")] - # logging.debug("matching %s=%s against %s" % ( - # what, value, origin)) - # support substitution here as well - value = substitute(value) - # first char is apt-cache policy output, send is the name - # in the Release file - if what in ("o", "origin"): - match = fnmatch.fnmatch(origin.origin, value) - elif what in ("l", "label"): - match = fnmatch.fnmatch(origin.label, value) - elif what in ("a", "suite", "archive"): - match = fnmatch.fnmatch(origin.archive, value) - elif what in ("c", "component"): - match = fnmatch.fnmatch(origin.component, value) - elif what in ("site",): - match = fnmatch.fnmatch(origin.site, value) - elif what in ("n", "codename",): - match = fnmatch.fnmatch(origin.codename, value) - elif what in ("uri"): - pass - else: - raise UnknownMatcherError( - "Unknown whitelist entry for matcher %s (token %s)" % ( - what, token)) - # update res - res = res and match - # logging.debug("matching %s=%s against %s" % ( - # what, value, origin)) - return res - - -def python_regex_is_posix(expression): - # type: (str) -> bool - """ Returns if the Python regex is also an equivalent POSIX regex """ - return re.match("^[-a-zA-Z0-9\\^\\$\\+\\.:]*$", expression) is not None - - -def cache_commit(cache, # type: apt.Cache - logfile_dpkg, # type: str - verbose, # type: bool - iprogress=None, # type: apt.progress.base.InstallProgress - ): - # type: (...) -> Tuple[bool, Exception] - """Commit the changes from the given cache to the system""" - - error = None - res = False - if iprogress is None: - iprogress = LogInstallProgress(logfile_dpkg, verbose,progress_log=PROGRESS_LOG) - try: - # with Unlocked(): - res = cache.commit(fetch_progress=None,install_progress=iprogress,allow_unauthenticated=True) - #cache.open() - except SystemError as e: - error = e - if verbose: - logging.exception("Exception happened during upgrade.") - cache.clear() - return res, error - - -def upgrade_normal(cache, logfile_dpkg, verbose): - # type: (apt.Cache, str, bool) -> bool - res, error = cache_commit(cache, logfile_dpkg, verbose) - if res: - logging.info(_("All upgrades installed")) - else: - logging.error(_("Installing the upgrades failed!")) - logging.error(_("error message: %s"), error) - logging.error(_("dpkg returned a error! See %s for details"), - logfile_dpkg) - return res - - -def upgrade_in_minimal_steps(cache, # type: UnattendedUpgradesCache - pkgs_to_upgrade, # type: List[str] - logfile_dpkg="", # type: str - verbose=False, # type: bool - ): - # type: (...) 
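# NOTE (illustrative sketch, not part of this patch): match_whitelist_string()
# above compares a pattern such as "origin=Debian,label=Debian-Security"
# field by field against an apt origin. A condensed version of the same idea,
# using a plain dict in place of apt.package.Origin (the dict keys and the
# sample values are assumptions for the example):
import fnmatch

FIELD_ALIASES = {"o": "origin", "l": "label", "a": "archive", "suite": "archive",
                 "c": "component", "n": "codename"}

def origin_matches(pattern, origin):
    pattern = pattern.strip()
    if not pattern:
        return False                      # an empty pattern matches nothing
    for token in pattern.split(","):
        key, _, value = token.partition("=")
        field = FIELD_ALIASES.get(key.strip(), key.strip())
        if not fnmatch.fnmatch(origin.get(field, ""), value.strip()):
            return False
    return True

print(origin_matches("o=Ubuntu,a=*-security",
                     {"origin": "Ubuntu", "archive": "jammy-security"}))  # True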
-> bool - install_log = LogInstallProgress(logfile_dpkg, verbose) - - res = True - - # to upgrade contains the package names - to_upgrade = set(pkgs_to_upgrade) - for pkgname in upgrade_order(to_upgrade, cache): - # upgrade packages and dependencies in increasing expected size of - # package sets to upgrade/install together - if pkgname not in to_upgrade: - # pkg is upgraded in a previous set - continue - if should_stop(): - return False - try: - pkg = cache[pkgname] - except KeyError: - continue - - try: - if pkg.is_upgradable \ - or candidate_version_changed(pkg): - cache.mark_upgrade_adjusted( - pkg, from_user=not pkg.is_auto_installed) - elif not pkg.is_installed: - cache.mark_install_adjusted(pkg, from_user=False) - else: - continue + pm.get_archives(fetcher, list, recs) except Exception as e: - logging.warning( - _("package %s upgradable but fails to " - "be marked for upgrade (%s)"), pkgname, e) - cache.clear() - res = False - continue - - # double check that we are not running into side effects like - # what could have been caused LP: #1020680 - if not check_changes_for_sanity(cache): - logging.info("While building minimal partition: " - "cache has not allowed changes") - cache.clear() - continue - changes = [p.name for p in cache.get_changes()] - if not changes: - continue - - # write progress log information - if len(pkgs_to_upgrade) > 0: - all_count = len(pkgs_to_upgrade) - remaining_count = all_count - len(to_upgrade) - percent = remaining_count / float(all_count * 100.0) - else: - percent = 100.0 - install_log.status_change(pkg=",".join(changes), - percent=percent, - status="") - # apply changes - logging.debug("applying set %s" % changes) - - res, error = cache_commit(cache, logfile_dpkg, verbose, install_log) - if error: - if verbose: - logging.exception("Exception happened during upgrade.") - logging.error(_("Installing the upgrades failed!")) - logging.error(_("error message: %s"), error) - logging.error(_("dpkg returned a error! 
See %s for details"), - logfile_dpkg) - return False - to_upgrade = to_upgrade - set(changes) - logging.debug("left to upgrade %s" % to_upgrade) - if len(to_upgrade) == 0: - logging.info(_("All upgrades installed")) - break - return res - - -def is_allowed_origin(origin, allowed_origins): - # type: (Union[apt.package.Origin, apt_pkg.PackageFile], List[str]) -> bool - - # local origin is allowed by default - if origin.component == 'now' and origin.archive == 'now' and \ - not origin.label and not origin.site: - return True - for allowed in allowed_origins: - if match_whitelist_string(allowed, origin): + print(e) + res = fetcher.run() + print("fetch.run() result: %d"%res) + if res == 0: return True - return False - - -def is_in_allowed_origin(ver, allowed_origins): - # type: (apt.package.Version, List[str]) -> bool - if not ver: - return False - for origin in ver.origins: - if is_allowed_origin(origin, allowed_origins): - return True - return False - - -def ver_in_allowed_origin(pkg, allowed_origins): - # type: (apt.Package, List[str]) -> apt.package.Version - for ver in pkg.versions: - if is_in_allowed_origin(ver, allowed_origins): - # leave as soon as we have the highest new candidate - return ver - raise NoAllowedOriginError() - - -def is_pkgname_in_blacklist(pkgname, blacklist): - # type: (str, List[str]) -> bool - for blacklist_regexp in blacklist: - if re.match(blacklist_regexp, pkgname): - logging.debug("skipping blacklisted package %s" % pkgname) - return True - return False - - -def is_pkgname_in_whitelist(pkgname, whitelist): - # type: (str, List[str]) -> bool - # a empty whitelist means the user does not want to use this feature - if not whitelist: - return True - for whitelist_regexp in whitelist: - if re.match(whitelist_regexp, pkgname): - logging.debug("only upgrading the following package %s" % - pkgname) - return True - return False - - -def is_pkg_change_allowed(pkg, blacklist, whitelist, strict_whitelist): - # type: (apt.Package, List[str], List[str], bool) -> bool - if is_pkgname_in_blacklist(pkg.name, blacklist): - logging.debug("pkg %s package has been blacklisted" % pkg.name) - return False - # a strict whitelist will not allow any changes not in the - # whitelist, most people will want the relaxed whitelist - # that whitelists a package but pulls in the package - # dependencies - - if strict_whitelist and \ - not is_pkgname_in_whitelist(pkg.name, whitelist): - - logging.debug("pkg %s package is not whitelisted" % - pkg.name) - return False - - if pkg._pkg.selected_state == apt_pkg.SELSTATE_HOLD: - logging.debug("pkg %s is on hold" % pkg.name) - return False - return True - - -def transitive_dependencies(pkg, # type: apt.Package - cache, # type: apt.Cache - acc=set(), # type AbstractSet[str] - valid_types=None, # type: AbstractSet[str] - level=None # type: int - ): - # type (...) 
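# NOTE (illustrative sketch, not part of this patch): the added lines above
# queue the marked packages with pm.get_archives(fetcher, list, recs) and
# treat fetcher.run() == 0 as success. The same python-apt download step in
# isolation; marking every upgradable package here is only an example:
import apt
import apt_pkg
from apt.progress.text import AcquireProgress

cache = apt.Cache()
cache.upgrade()                                # mark whatever should be fetched

fetcher = apt_pkg.Acquire(AcquireProgress())
pm = apt_pkg.PackageManager(cache._depcache)
recs = apt_pkg.PackageRecords(cache._cache)
src_list = apt_pkg.SourceList()
src_list.read_main_list()

pm.get_archives(fetcher, src_list, recs)       # queue the .deb files
result = fetcher.run()                         # blocking download
# RESULT_CONTINUE (== 0) means every queued item was processed
if result == apt_pkg.Acquire.RESULT_CONTINUE:
    print("all archives fetched")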
-> AbstractSet[str] - """ All (transitive) dependencies of the package - - Note that alternative (|) dependencies are collected, too - """ - if not pkg.candidate or level is not None and level < 1: - return acc - - for dep in pkg.candidate.dependencies: - for base_dep in dep: - if base_dep.name not in acc: - if not valid_types or base_dep.rawtype in valid_types: - acc.add(base_dep.name) - try: - transitive_dependencies( - cache[base_dep.name], cache, acc, valid_types, - level=(level - 1 if level is not None else None)) - except KeyError: - pass - return acc - - -def upgrade_order(to_upgrade, cache): - # type: (AbstractSet[str], apt.Cache) -> List[str] - """ Sort pkg names by the expected number of other packages to be upgraded - with it. The calculation is not 100% accurate, it is an approximation. - """ - - upgrade_set_sizes = {} - # calculate upgrade sets - follow_deps = {'Depends', 'PreDepends', 'Recommends'} - for pkgname in to_upgrade: - try: - pkg = cache[pkgname] - except KeyError: - continue - upgrade_set_sizes[pkgname] = len(transitive_dependencies( - pkg, cache, valid_types=follow_deps).intersection(to_upgrade)) - return sorted(upgrade_set_sizes, key=upgrade_set_sizes.get) - - -def check_changes_for_sanity(cache, desired_pkg=None): - # type: (UnattendedUpgradesCache, apt.Package) -> bool - sanity_check_result = sanity_problem(cache, desired_pkg) - if sanity_check_result is None: - return True - else: - logging.debug("sanity check failed for: %s:%s : %s" - % (desired_pkg.name,str({str(p.candidate) for p in cache.get_changes()}), - sanity_check_result)) - return False - - -def sanity_problem(cache, desired_pkg=None): - # type: (UnattendedUpgradesCache, apt.Package) -> str - # if cache._depcache.broken_count != 0: - # return ("there are broken packages in the cache") - # If there are no packages to be installed they were kept back - # if cache.install_count == 0: - # return ("no package is selected to be upgraded or installed") - NOW_UPDATE_CONFIG = '/usr/share/kylin-update-desktop-config/config/' - OLD_UPDATE_CONFIG = '/usr/share/kylin-update-desktop-config/data/' - read_path = NOW_UPDATE_CONFIG - if os.path.exists(NOW_UPDATE_CONFIG): - read_path = NOW_UPDATE_CONFIG - elif os.path.exists(OLD_UPDATE_CONFIG): - read_path = NOW_UPDATE_CONFIG - else: - pass - remove_white=[] - if os.path.exists(read_path): - with open(os.path.join(read_path,'kylin-update-desktop-system.json'),'r') as f: - try: - data = json.load(f) - remove_white=data['remove_white_list'] - except Exception as exc: - logging.warning(exc) - logging.debug("remove white list:%s"%",".join(remove_white)) - changes = cache.get_changes() - # if desired_pkg and desired_pkg not in changes: - # logging.warning("pkg %s to be marked for upgrade/install is not marked accordingly" % desired_pkg.name) - # return False - - pkgs_to_remove = [] - for pkg in changes: - if pkg.marked_delete: - # logging.warning("pkg %s is marked to be deleted" % pkg.name) - if pkg.name not in remove_white: - pkgs_to_remove.append(pkg.name) - ''' - if os_release_info['PROJECT_CODENAME'] == 'V10SP1-edu' and os_release_info['SUB_PROJECT_CODENAME']=='mavis': - pass - elif pkg.marked_delete: - logging.warning("pkg %s is marked to be deleted" % pkg.name) - pkgs_to_remove.append(pkg.name) - - if pkg.marked_install or pkg.marked_upgrade: - # apt will never fallback from a trusted to a untrusted - # origin so its good enough if we have a single trusted one - - # if not any([o.trusted for o in pkg.candidate.origins]): - # return ("pkg %s is not from a trusted 
origin" % pkg.name) - if not is_in_allowed_origin(pkg.candidate, cache.allowed_origins): - return ("pkg %s is not in an allowed origin" % pkg.name) - - if not is_pkg_change_allowed(pkg, - cache.blacklist, - cache.whitelist, - cache.strict_whitelist): - return ("pkg %s is blacklisted or is not whitelisted" - % pkg.name) - - # check if the package is unsafe to upgrade unattended - - ignore_require_restart = apt_pkg.config.find_b( - "Unattended-Upgrade::IgnoreAppsRequireRestart", False) - upgrade_requires = pkg.candidate.record.get("Upgrade-Requires") - - if pkg.marked_upgrade and ignore_require_restart is False \ - and upgrade_requires == "app-restart": - return ("pkg %s requires app-restart, it is not safe to " - "upgrade it unattended") - - # check that the package we want to upgrade is in the change set - if desired_pkg and desired_pkg not in changes: - logging.warning("pkg %s to be marked for upgrade/install is not marked " - "accordingly" % desired_pkg.name) + else: return False - ''' - - logging.debug("remove list not in whitelist:%s"%",".join(pkgs_to_remove)) - if len(pkgs_to_remove)>0: - return False - return True - - -def is_deb(file): - # type: (str) -> bool - if file.endswith(".deb"): - return True - else: - return False - - -def pkgname_from_deb(debfile): - # type: (str) -> str - # FIXME: add error checking here - try: - control = apt_inst.DebFile(debfile).control.extractdata("control") - sections = apt_pkg.TagSection(control) - return sections["Package"] - except (IOError, SystemError) as e: - logging.error("failed to read deb file %s (%s)" % (debfile, e)) - # dumb fallback - return debfile.split("_")[0] - - -def get_md5sum_for_file_in_deb(deb_file, conf_file): - # type: (str, str) -> str - dpkg_cmd = ["dpkg-deb", "--fsys-tarfile", deb_file] - tar_cmd = ["tar", "-x", "-O", "-f", "-", "." 
+ conf_file] - md5_cmd = ["md5sum"] - dpkg_p = Popen(dpkg_cmd, stdout=PIPE) - tar_p = Popen(tar_cmd, stdin=dpkg_p.stdout, stdout=PIPE, - universal_newlines=True) - md5_p = Popen(md5_cmd, stdin=tar_p.stdout, stdout=PIPE, - universal_newlines=True) - pkg_md5sum = md5_p.communicate()[0].split()[0] - for __p in [dpkg_p, tar_p, md5_p]: - p = cast(Popen, __p) - p.stdout.close() - p.wait() - return pkg_md5sum - - -def get_md5sum_for_file_installed(conf_file, prefix): - # type: (str, str) -> str - try: - with open(prefix + conf_file, 'rb') as fb: - for hash_string in apt_pkg.Hashes(fb).hashes: # type: ignore - if hash_string.hashtype == 'MD5Sum': - return hash_string.hashvalue - return None - except IsADirectoryError: - # the package replaces a directory wih a configuration file - # - # if the package changed this way it is safe to assume that - # the transition happens without showing a prompt but if the admin - # created the directory the admin will need to resolve it after - # being notified about the unexpected prompt - logging.debug("found conffile %s is a directory on the system " - % conf_file) - return "dir" - except FileNotFoundError: - # if the local file got deleted by the admin thats ok but it may still - # trigger a conffile promp (see debian #788049) - logging.debug("conffile %s in missing on the system" % conf_file) - return "" - - -def map_conf_file(conf_file, conffiles): - # type: (str, Union[AbstractSet[str], Dict[str, str]]) -> str - """Find respective conffile in a set of conffiles with some heuristics - """ - if conf_file in conffiles: - return conf_file - elif os.path.join(conf_file, os.path.basename(conf_file)) in conffiles: - # new /etc/foo may be old /etc/foo/foo, like in LP: #1822745 - return os.path.join(conf_file, os.path.basename(conf_file)) - elif os.path.dirname(conf_file) in conffiles: - # new /etc/foo/foo may be old /etc/foo, probably by accident - return os.path.dirname(conf_file) - # TODO: peek into package's dpkg-maintscript-helper mv_conffile usage - else: - return None - - -# prefix is *only* needed for the build-in tests -def conffile_prompt(destFile, prefix=""): - # type: (str, str) -> bool - logging.debug("check_conffile_prompt(%s)" % destFile) - pkgname = pkgname_from_deb(destFile) - - # get the conffiles for the /var/lib/dpkg/status file - status_file = apt_pkg.config.find("Dir::State::status") - with open(status_file, "r") as f: - tagfile = apt_pkg.TagFile(f) - conffiles = "" - for section in tagfile: - if section.get("Package") == pkgname: - logging.debug("found pkg: %s" % pkgname) - if "Conffiles" in section: - conffiles = section.get("Conffiles") - break - - # get conffile value from pkg, its ok if the new version - # does not have conffiles anymore - pkg_conffiles = set() # type: AbstractSet[str] - try: - deb = apt_inst.DebFile(destFile) - pkg_conffiles = set(deb.control.extractdata( - "conffiles").strip().decode("utf-8").split("\n")) - except SystemError as e: - print(_("Apt returned an error, exiting")) - print(_("error message: %s") % e) - logging.error(_("Apt returned an error, exiting")) - logging.error(_("error message: %s"), e) - raise - except LookupError as e: - logging.debug("No conffiles in deb %s (%s)" % (destFile, e)) - if not pkg_conffiles: - return False - - # Conffiles: - # /etc/bash_completion.d/m-a c7780fab6b14d75ca54e11e992a6c11c - dpkg_status_conffiles = {} - for line in conffiles.splitlines(): - # ignore empty lines - line = line.strip() - if not line: - continue - # show what we do - logging.debug("conffile line: %s", line) - li 
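# NOTE (illustrative sketch, not part of this patch): get_md5sum_for_file_in_deb()
# above pipes dpkg-deb through tar and md5sum to hash one conffile inside a
# .deb without unpacking it to disk. The same pipeline with subprocess.run;
# the deb path and conffile name in the usage comment are placeholders:
import subprocess

def md5_of_file_in_deb(deb_path, conf_file):
    """Return the md5 of e.g. '/etc/foo.conf' as shipped inside deb_path."""
    tar_stream = subprocess.run(
        ["dpkg-deb", "--fsys-tarfile", deb_path],
        check=True, stdout=subprocess.PIPE).stdout
    payload = subprocess.run(
        ["tar", "-x", "-O", "-f", "-", "." + conf_file],
        input=tar_stream, check=True, stdout=subprocess.PIPE).stdout
    return subprocess.run(
        ["md5sum"], input=payload, check=True,
        stdout=subprocess.PIPE).stdout.split()[0].decode()

# md5_of_file_in_deb("/var/cache/apt/archives/foo_1.0_amd64.deb", "/etc/foo.conf")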
= line.split() - conf_file = li[0] - md5 = li[1] - if len(li) > 2: - obs = li[2] + + def install(self): + print(_("start install")) + self.UpdateDetect() + self.RunMainLoop() + print(_("update detect finish:%s,%s")%(self.update_detect_status,",".join(self.update_list))) + if self.update_detect_status and len(self.update_list)>0: + pass + # elif not self.update_detect_status and 'kylin-system-updater' in self.update_list: + # print(_("self update finished")) else: - obs = None - # ignore if conffile is obsolete - if obs == "obsolete": - continue - # ignore state "newconffile" until its clearer if there - # might be a dpkg prompt (LP: #936870) - if md5 == "newconffile": - continue - new_conf_file = map_conf_file(conf_file, pkg_conffiles) - if not new_conf_file: - logging.debug("%s not in package conffiles %s" % ( - conf_file, pkg_conffiles)) - continue - # record for later - dpkg_status_conffiles[conf_file] = md5 - - # test against the installed file, if the local file got deleted - # by the admin thats ok but it may still trigger a conffile prompt - # (see debian #788049) - current_md5 = get_md5sum_for_file_installed(conf_file, prefix) - logging.debug("current md5: %s" % current_md5) - - # hashes are the same, no conffile prompt - if current_md5 == md5: - continue - # calculate md5sum from the deb (may take a bit) - pkg_md5sum = get_md5sum_for_file_in_deb(destFile, new_conf_file) - logging.debug("pkg_md5sum: %s" % pkg_md5sum) - # the md5sum in the deb is unchanged, this will not - # trigger a conffile prompt - if pkg_md5sum == md5: - continue - # if we made it to this point: - # current_md5 != pkg_md5sum != md5 - # and that will trigger a conffile prompt, we can - # stop processing at this point and just return True - return True - - # now check if there are conffiles in the pkg that where not there - # in the previous version in the dpkg status file - if pkg_conffiles: - for conf_file in pkg_conffiles: - old_conf_file = map_conf_file(conf_file, dpkg_status_conffiles) - if not old_conf_file: - pkg_md5sum = get_md5sum_for_file_in_deb(destFile, conf_file) - current_md5 = get_md5sum_for_file_installed(conf_file, prefix) - if current_md5 != "" and pkg_md5sum != current_md5: - return True - return False - - -def dpkg_conffile_prompt(): - # type: () -> bool - if "DPkg::Options" not in apt_pkg.config: - return True - options = apt_pkg.config.value_list("DPkg::Options") - for option in options: - option = option.strip() - if option in ["--force-confold", "--force-confnew"]: return False - return True - - -def rewind_cache(cache, pkgs_to_upgrade): - # type: (UnattendedUpgradesCache, List[apt.Package]) -> None - """ set the cache back to the state with packages_to_upgrade """ - cache.clear() - for pkg2 in pkgs_to_upgrade: - cache.mark_install_adjusted(pkg2, from_user=not pkg2.is_auto_installed) - if cache.broken_count > 0: - raise AssertionError("rewind_cache created a broken cache") - - -def host(): - # type: () -> str - return socket.getfqdn() - - -def wrap_indent(t, subsequent_indent=" "): - # type: (str, str) -> str - return "\n".join(wrap(t, break_on_hyphens=False, - subsequent_indent=subsequent_indent)) - - -def setup_apt_listchanges(conf="/etc/apt/listchanges.conf"): - # type: (str) -> None - """ deal with apt-listchanges """ - # apt-listchanges will always send a mail if there is a mail address - # set in the config regardless of the frontend used, so set it to - # mail if we have a sendmail and to none if not (as it appears to - # not check if sendmail is there or not), debian bug #579733 - 
if os.path.exists(SENDMAIL_BINARY): - os.environ["APT_LISTCHANGES_FRONTEND"] = "mail" - else: - os.environ["APT_LISTCHANGES_FRONTEND"] = "none" - - -def _send_mail_using_mailx(from_address, to_address, subject, body): - # type: (str, str, str, str) -> int - # ensure that the body is a byte stream and that we do not - # break on encoding errors (the default error mode is "strict") - encoded_body = body.encode( - locale.getpreferredencoding(False), errors="replace") - # we use a binary pipe to stdin to ensure we do not break on - # unicode encoding errors (e.g. because the user is running a - # ascii only system like the buildds) - mail = subprocess.Popen( - [MAIL_BINARY, "-r", from_address, "-s", subject, to_address], - stdin=subprocess.PIPE, universal_newlines=False) - mail.stdin.write(encoded_body) - mail.stdin.close() - ret = mail.wait() - return ret - - -def _send_mail_using_sendmail(from_address, to_address, subject, body): - # type: (str, str, str, str) -> int - # format as a proper mail - msg = Message() - msg['Subject'] = subject - msg['From'] = from_address - msg['To'] = to_address - msg['Auto-Submitted'] = "auto-generated" - # order is important here, Message() first, then Charset() - # then msg.set_charset() - charset = email.charset.Charset("utf-8") - charset.body_encoding = email.charset.QP # type: ignore - msg.set_payload(body, charset) - # and send it away - sendmail = subprocess.Popen( - [SENDMAIL_BINARY, "-oi", "-t"], - stdin=subprocess.PIPE, universal_newlines=True) - sendmail.stdin.write(msg.as_string()) - sendmail.stdin.close() - ret = sendmail.wait() - return ret - - -def send_summary_mail(pkgs, # type: List[str] - res, # type: bool - result_str, # type: str - pkgs_kept_back, # type: KeptPkgs - pkgs_removed, # type: List[str] - pkgs_kept_installed, # type: List[str] - mem_log, # type: StringIO - dpkg_log_content, # type: str - ): - # type: (...) -> None - """ send mail (if configured in Unattended-Upgrade::Mail) """ - to_email = apt_pkg.config.find("Unattended-Upgrade::Mail", "") - if not to_email: - return - if not os.path.exists(MAIL_BINARY) and not os.path.exists(SENDMAIL_BINARY): - logging.error(_("No /usr/bin/mail or /usr/sbin/sendmail, " - "can not send mail. " - "You probably want to install the mailx package.")) - return - - # The admin may well wish to get a mail report regardless of what was done. - # This is now set by Unattended-Upgrade::MailReport values of: - # "always", "only-on-error" or "on-change" - # (you can achieve "never" by not setting Unattended-Upgrade::Mail). 
- # If this is not set, then set it using any legacy MailOnlyOnError - # setting (default True) - # - mail_opt = apt_pkg.config.find("Unattended-Upgrade::MailReport") - if (mail_opt == ""): # None set - map from legacy value - if apt_pkg.config.find_b("Unattended-Upgrade::MailOnlyOnError", False): - mail_opt = "only-on-error" + self.DistUpgradeAll(False) + self.RunMainLoop() + print(_("resolve dependency status:%s,%s")%(self.resolve_depend_status,",".join(self.remove_pkgs))) + if self.resolve_depend_status and len(self.remove_pkgs)==0: + pass else: - mail_opt = "on-change" - - # if the operation was successful and the user has requested to get - # mails only on errors, just exit here - if (res and (mail_opt == "only-on-error")): - return - - # if the run was successful but nothing had to be done skip sending email - # unless the admin wants it anyway - if (((mail_opt != "always") and res and not pkgs and not pkgs_kept_back - and not pkgs_removed)): - return - - # Check if reboot-required flag is present - reboot_flag_str = _( - "[reboot required]") if os.path.isfile(REBOOT_REQUIRED_FILE) else "" - # Check if packages are kept on hold - hold_flag_str = (_("[package on hold]") if pkgs_kept_back - or pkgs_kept_installed else "") - logging.debug("Sending mail to %s" % to_email) - subject = _( - "{hold_flag}{reboot_flag} unattended-upgrades result for " - "{machine}: {result}").format( - hold_flag=hold_flag_str, reboot_flag=reboot_flag_str, - machine=host(), result="SUCCESS" if res else "FAILURE").strip() - body = wrap_indent(_("Unattended upgrade result: %s") % result_str) - body += "\n\n" - if os.path.isfile(REBOOT_REQUIRED_FILE): - body += _( - "Warning: A reboot is required to complete this upgrade, " - "or a previous one.\n\n") - if pkgs: - if res: - body += _("Packages that were upgraded:\n") - else: - body += _("Packages that attempted to upgrade:\n") - body += " " + wrap_indent(" ".join(pkgs)) - body += "\n\n" - if pkgs_kept_back: - body += _("Packages with upgradable origin but kept back:\n") - for origin, origin_pkgs in pkgs_kept_back.items(): - body += " " + origin + ":\n" - body += " " + wrap_indent(" ".join(origin_pkgs), - subsequent_indent=" ") + "\n" - body += "\n" - if pkgs_removed: - body += _("Packages that were auto-removed:\n") - body += " " + wrap_indent(" ".join(pkgs_removed)) - body += "\n\n" - if pkgs_kept_installed: - body += _("Packages that were kept from being auto-removed:\n") - body += " " + wrap_indent(" ".join(pkgs_kept_installed)) - body += "\n\n" - if dpkg_log_content: - body += _("Package installation log:") + "\n" - body += dpkg_log_content - body += "\n\n" - body += _("Unattended-upgrades log:\n") - body += mem_log.getvalue() - - from_email = apt_pkg.config.find("Unattended-Upgrade::Sender", "root") - - if os.path.exists(SENDMAIL_BINARY): - ret = _send_mail_using_sendmail(from_email, to_email, subject, body) - elif os.path.exists(MAIL_BINARY): - ret = _send_mail_using_mailx(from_email, to_email, subject, body) - else: - raise AssertionError( - "This should never be reached as we previously validated that we " - "either have sendmail or mailx. Maybe they've been removed in " - "this right moment?") - logging.debug("mail returned: %s", ret) - - -def do_install(cache, # type: UnattendedUpgradesCache - pkgs_to_upgrade, # type: List[str] - options, # type: Options - logfile_dpkg, # type: str - ): - # type: (...) 
-> bool - - #setup_apt_listchanges() - - logging.info(_("Writing dpkg log to %s"), logfile_dpkg) - - # if cache.get_changes(): - # cache.clear() - - pkg_install_success = False - - iprogress = LogInstallProgress(logfile_dpkg, verbose=True,progress_log=PROGRESS_LOG) - try: - pkg_install_success = cache.commit(fetch_progress=apt.progress.text.AcquireProgress(outfile = logfile_fd),install_progress=iprogress,allow_unauthenticated=True) - except Exception as e: - logging.error("cache commit error:%s"%e) - - ''' - try: - if options.minimal_upgrade_steps: - # try upgrade all "pkgs" in minimal steps - pkg_install_success = upgrade_in_minimal_steps( - cache, pkgs_to_upgrade, - logfile_dpkg, - options.verbose or options.debug) - else: - mark_pkgs_to_upgrade(cache, pkgs_to_upgrade) - pkg_install_success = upgrade_normal( - cache, logfile_dpkg, options.verbose or options.debug) - except Exception as e: - # print unhandled exceptions here this way, while stderr is redirected - os.write(2, ("Exception: %s\n" % e).encode('utf-8')) - pkg_install_success = False - ''' - return pkg_install_success - - -def _setup_alternative_rootdir(rootdir): - # type: (str) -> None - # clear system unattended-upgrade stuff - apt_pkg.config.clear("Unattended-Upgrade") - # read rootdir (taken from apt.Cache, but we need to run it - # here before the cache gets initialized - if os.path.exists(rootdir + "/etc/apt/apt.conf"): - apt_pkg.read_config_file(apt_pkg.config, - rootdir + "/etc/apt/apt.conf") - if os.path.isdir(rootdir + "/etc/apt/apt.conf.d"): - apt_pkg.read_config_dir(apt_pkg.config, - rootdir + "/etc/apt/apt.conf.d") - logdir = os.path.join(rootdir, "var", "log", "unattended-upgrades") - if not os.path.exists(logdir): - os.makedirs(logdir) - apt.apt_pkg.config.set("Unattended-Upgrade::LogDir", logdir) - - -def _get_logdir(): - # type: () -> str - logdir = apt_pkg.config.find_dir( - "Unattended-Upgrade::LogDir", - # COMPAT only - apt_pkg.config.find_dir("APT::UnattendedUpgrades::LogDir", - "/var/log/unattended-upgrades/")) - return logdir - - -def _setup_logging(options,logfile): - - - # ensure this is run only once - if len(logging.root.handlers) > 0: - return None - - # init the logging - # logdir = _get_logdir() - # logfile = os.path.join( - # logdir, - # apt_pkg.config.find( - # "Unattended-Upgrade::LogFile", - # # COMPAT only - # apt_pkg.config.find("APT::UnattendedUpgrades::LogFile", - # "unattended-upgrades.log"))) - # if not options.dry_run and not os.path.exists(logdir): - # os.makedirs(logdir) - - logging.basicConfig(level=logging.INFO, - format='%(asctime)s %(levelname)s %(message)s', - filename=logfile) - - # additional logging - logger = logging.getLogger() - # mem_log = StringIO() - # if options.apt_debug: - # apt_pkg.config.set("Debug::pkgProblemResolver", "1") - # apt_pkg.config.set("Debug::pkgDepCache::AutoInstall", "1") - if options.debug: - logger.setLevel(logging.DEBUG) - # stdout_handler = logging.StreamHandler(sys.stdout) - # logger.addHandler(stdout_handler) - elif options.verbose: - logger.setLevel(logging.INFO) - stdout_handler = logging.StreamHandler(sys.stdout) - logger.addHandler(stdout_handler) - ''' - if apt_pkg.config.find("Unattended-Upgrade::Mail", ""): - mem_log_handler = logging.StreamHandler(mem_log) - logger.addHandler(mem_log_handler) - Configure syslog if necessary - syslogEnable = apt_pkg.config.find_b("Unattended-Upgrade::SyslogEnable", - False) - if syslogEnable: - syslogFacility = apt_pkg.config.find( - "Unattended-Upgrade::SyslogFacility", - "daemon") - syslogHandler = 
logging.handlers.SysLogHandler( - address='/dev/log', - facility=syslogFacility) # type: ignore - syslogHandler.setFormatter( - logging.Formatter("unattended-upgrade: %(message)s")) - known = syslogHandler.facility_names.keys() # type: ignore - if syslogFacility.lower() in known: - logger.addHandler(syslogHandler) - logging.info("Enabled logging to syslog via %s facility " - % syslogFacility) - else: - logging.warning("Syslog facility %s was not found" - % syslogFacility) - return mem_log - ''' - -def logged_in_users(): - # type: () -> AbstractSet[str] - """Return a list of logged in users""" - # the "users" command always returns a single line with: - # "user1, user1, user2" - users = subprocess.check_output( - USERS, universal_newlines=True).rstrip('\n') - return set(users.split()) - -def reboot_if_needed(): - # type: () -> None - """auto-reboot (if required and the config for this is set)""" - if not os.path.exists(REBOOT_REQUIRED_FILE): - return - needreboot = ReadValueFromFile(UNATTENDED_UPGRADE_POLICY_FILE_PATH,"autoUpgradePolicy","automaticReboot") - if needreboot == 'off': - return - # reboot at the specified time - when = ReadValueFromFile(UNATTENDED_UPGRADE_POLICY_FILE_PATH,"autoUpgradePolicy","automaticRebootTime") - logging.warning("Found %s, rebooting" % REBOOT_REQUIRED_FILE) - cmd = ["/sbin/shutdown", "-r", when] - try: - shutdown_msg = subprocess.check_output(cmd, stderr=subprocess.STDOUT) - if shutdown_msg.strip(): - logging.warning("Shutdown msg: %s", shutdown_msg.strip()) - except Exception as e: - logging.error("Failed to issue shutdown: %s", e) - -def reboot_if_requested_and_needed(): - # type: () -> None - """auto-reboot (if required and the config for this is set)""" - if not os.path.exists(REBOOT_REQUIRED_FILE): - return - if not apt_pkg.config.find_b( - "Unattended-Upgrade::Automatic-Reboot", False): - return - # see if we need to check for logged in users - if not apt_pkg.config.find_b( - "Unattended-Upgrade::Automatic-Reboot-WithUsers", True): - users = logged_in_users() - if users: - msg = gettext.ngettext( - "Found %s, but not rebooting because %s is logged in." % ( - REBOOT_REQUIRED_FILE, users), - "Found %s, but not rebooting because %s are logged in." % ( - REBOOT_REQUIRED_FILE, users), - len(users)) - logging.warning(msg) - return - # reboot at the specified time - when = apt_pkg.config.find( - "Unattended-Upgrade::Automatic-Reboot-Time", "now") - logging.warning("Found %s, rebooting" % REBOOT_REQUIRED_FILE) - cmd = ["/sbin/shutdown", "-r", when] - try: - shutdown_msg = subprocess.check_output(cmd, stderr=subprocess.STDOUT) - if shutdown_msg.strip(): - logging.warning("Shutdown msg: %s", shutdown_msg.strip()) - except Exception as e: - logging.error("Failed to issue shutdown: %s", e) - - -def write_stamp_file(): - # type: () -> None - statedir = os.path.join(apt_pkg.config.find_dir("Dir::State"), "periodic") - if not os.path.exists(statedir): - os.makedirs(statedir) - with open(os.path.join(statedir, "unattended-upgrades-stamp"), "w"): - pass - - -def try_to_upgrade(pkg, # type: apt.Package - pkgs_to_upgrade, # type: List[apt.Package] - cache, # type: UnattendedUpgradesCache - version): - # type: (...) 
-> None - try: - - try: - # try to adjust pkg itself first, if that throws an exception it - # can't be upgraded on its own - cache.adjust_candidate_with_version(pkg,version) - ''' - if not pkg.is_upgradable and not apt_pkg.config.find_b( - "Unattended-Upgrade::Allow-downgrade", False): - return - ''' - except NoAllowedOriginError: - return - - # cache._cached_candidate_pkgnames.add(pkg.name) - if not pkg.installed: - cache.mark_install_adjusted(pkg,from_user=True) - else: - cache.mark_upgrade_adjusted(pkg, from_user=not pkg.is_auto_installed) - if check_changes_for_sanity(cache, pkg): - # add to packages to upgrade - pkgs_to_upgrade.append(pkg) - else: - rewind_cache(cache, pkgs_to_upgrade) - except (SystemError, NoAllowedOriginError) as e: - # can't upgrade - logging.warning( - _("package %s upgradable but fails to " - "be marked for upgrade (%s)"), pkg.name, e) - rewind_cache(cache, pkgs_to_upgrade) - - -def candidate_version_changed(pkg): - '''type: apt.Package''' - return (pkg.is_installed and pkg.candidate - and pkg.candidate.version != pkg.installed.version) - # and apt_pkg.config.find_b( - # 'Unattended-Upgrade::Allow-downgrade', False) - # ) - - -def calculate_upgradable_pkgs(cache, # type: UnattendedUpgradesCache - options, # type: Options - whitelist): - # type: (...) -> List[apt.Package] - pkgs_to_upgrade = [] # type: List[apt.Package] - - # now do the actual upgrade - for pkgname in whitelist: - try: - pkg = cache[pkgname[0]] - adjust_candidate_result = cache.adjust_candidate_with_version(pkg,pkgname[1]) - if (not adjust_candidate_result): - logging.warning("%s-%s :can not adjust candidate version"%(pkgname[0],pkgname[1])) - continue - if not pkg.installed: - cache.mark_install_adjusted(pkg,from_user=True) - elif pkg.is_upgradable: - cache.mark_upgrade_adjusted(pkg, from_user=not pkg.is_auto_installed) - else: - pass - # if sanity_problem(cache,pkg): - # pkgs_to_upgrade.append(pkg) - except Exception as e: - logging.error("error checking pkg:%s"%e) - continue - - ''' - if check_changes_for_sanity(cache, pkg): - # add to packages to upgrade - pkgs_to_upgrade.append(pkg) - ''' - #for pkg in cache: - # if pkg.name not in cache.whitelist: - # logging.debug("%s not in whitelist skipping..."%(pkg.name)) - # continue - ''' - if options.debug and pkg.is_upgradable \ - or candidate_version_changed(pkg): - logging.debug("Checking: %s (%s)" % ( - pkg.name, getattr(pkg.candidate, "origins", []))) - - if (pkg.is_upgradable or candidate_version_changed(pkg) or not pkg.is_installed): - try: - ver_in_allowed_origin(pkg, cache.allowed_origins) - except NoAllowedOriginError: - continue - - - try_to_upgrade(pkg, - pkgs_to_upgrade, - cache,pkgname[1]) - ''' - # logging.debug("Checking: %s (%s)" % ( - # pkg.name, getattr(pkg.candidate, "origins", []))) - #pkgs_to_upgrade.append(pkg) - # if cache.get_changes(): - # cache.clear() - - return cache.get_changes() - - -def get_dpkg_log_content(logfile_dpkg, install_start_time): - # type: (str, datetime.datetime) -> str - logging.debug("Extracting content from %s since %s" % ( - logfile_dpkg, install_start_time)) - content = [] - found_start = False - try: - with io.open(logfile_dpkg, encoding='utf-8', errors='replace') as fp: - # read until we find the last "Log started: " - for line in fp.readlines(): - # scan for the first entry we need (minimal-step mode - # creates a new stanza for each individual install) - if not found_start and line.startswith("Log started: "): - stanza_start = LoggingDateTime.from_string( - line[len("Log started: "):-1]) - if 
stanza_start >= install_start_time: - found_start = True - if found_start: - # skip progress indicator until #860931 is fixed in apt - # and dpkg - if re.match( - "^\\(Reading database \\.\\.\\. ()|([0-9]+%)$", - line): - continue - content.append(line) - return "".join(content) - except FileNotFoundError: - return "" - - -def get_auto_removable(cache): - # type: (apt.Cache) -> AbstractSet[str] - return {pkg.name for pkg in cache - if pkg.is_auto_removable} - - -def is_autoremove_valid(cache, # type: UnattendedUpgradesCache - pkgname, # type: str - auto_removable, # type: AbstractSet[str] - ): - # type: (...) -> bool - changes = cache.get_changes() - if not changes: - # package is already removed - return True - pkgnames = {pkg.name for pkg in changes} - for pkg in changes: - if not is_pkg_change_allowed(pkg, cache.blacklist, cache.whitelist, - cache.strict_whitelist): - logging.warning( - _("Keeping the following auto-removable package(s) because " - "they include %s which is set to be kept unmodified: %s"), - pkg.name, " ".join(sorted(pkgnames))) return False - if not pkgnames.issubset(auto_removable): - if pkgname != "": - logging.warning( - _("Keeping auto-removable %s package(s) because it would" - " also remove the following packages which should " - "be kept in this step: %s"), pkgname, - " ".join(sorted(pkgnames - auto_removable))) + self.DistUpgradeAll(True) + self.RunMainLoop() + print(_("install finish status:%s,%s")%(self.install_finish_status,",".join(self.install_finish_group))) + if self.install_finish_status and len(self.install_finish_group)>0: + pass else: - logging.warning( - _("Keeping %s auto-removable package(s) because it would" - " also remove the following packages which should " - "be kept in this step: %s"), len(auto_removable), - " ".join(sorted(pkgnames - auto_removable))) - - return False - for packagename in pkgnames: - if cache.running_kernel_pkgs_regexp and \ - cache.running_kernel_pkgs_regexp.match(packagename): - logging.warning( - _("Keeping the following auto-removable package(s) because " - "they include %s which package is related to the running " - "kernel: %s"), packagename, " ".join(sorted(pkgnames))) return False - if cache.install_count > 0: - logging.error( - "The following packages are marked for installation or upgrade " - "which is not allowed when performing autoremovals: %s", - " ".join([pkg.name for pkg in changes if not pkg.marked_delete])) - return False - return True - - -def do_auto_remove(cache, # type: UnattendedUpgradesCache - auto_removable, # type: AbstractSet[str] - logfile_dpkg, # type: str - minimal_steps, # type: bool - verbose=False, # type: bool - dry_run=False # type: bool - ): - # type: (...) 
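# NOTE (illustrative sketch, not part of this patch): the added install() logic
# is split across several hunks above. It issues three staged calls to the
# updater service and blocks on the main loop after each one until the
# corresponding signal has filled in the status attributes. The control flow,
# collected in one place ("updater" stands for the object this patch defines):

def staged_install(updater):
    """Detect -> resolve dependencies -> install, stopping on any failure."""
    updater.UpdateDetect()
    updater.RunMainLoop()            # returns once the detect signal arrives
    if not (updater.update_detect_status and updater.update_list):
        return False                 # detection failed or nothing to upgrade

    updater.DistUpgradeAll(False)    # resolution pass, no installation yet
    updater.RunMainLoop()
    if not updater.resolve_depend_status or updater.remove_pkgs:
        return False                 # refuse upgrades that would remove packages

    updater.DistUpgradeAll(True)     # real installation
    updater.RunMainLoop()
    return bool(updater.install_finish_status and updater.install_finish_group)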
-> Tuple[bool, List[str], List[str]] - res = True - if not auto_removable: - return (res, [], []) - - pkgs_removed = [] # type: List[str] - pkgs_kept_installed = [] # type: List[str] - if minimal_steps: - for pkgname in auto_removable: - if should_stop(): - pkgs_kept_installed = list(auto_removable - set(pkgs_removed)) - return (False, pkgs_removed, pkgs_kept_installed) - logging.debug("marking %s for removal" % pkgname) - if pkgname in pkgs_removed: - continue - try: - pkg = cache[pkgname] - except KeyError: - continue - pkg.mark_delete() - if not is_autoremove_valid(cache, pkgname, auto_removable): - # this situation can occur when removing newly unused packages - # would also remove old unused packages which are not set - # for removal, thus getting there is not handled as an error - pkgs_kept_installed.append(pkgname) - cache.clear() - continue - if not dry_run: - changes = cache.get_changes() - pkgnames = {pkg.name for pkg in changes} - res, error = cache_commit(cache, logfile_dpkg, verbose) - if not res: - break - pkgs_removed.extend(pkgnames) - else: - cache.clear() - else: - for pkgname in auto_removable: - try: - pkg = cache[pkgname] - except KeyError: - continue - pkg.mark_delete() - if is_autoremove_valid(cache, "", auto_removable): - # do it in one step - if not dry_run: - res, error = cache_commit(cache, logfile_dpkg, verbose) - else: - cache.clear() - else: - cache.clear() - - if res: - logging.info(_("Packages that were successfully auto-removed: %s"), - " ".join(sorted(pkgs_removed))) - logging.info(_("Packages that are kept back: %s"), - " ".join(sorted(pkgs_kept_installed))) - if not res: - cache.clear() - logging.error(_("Auto-removing the packages failed!")) - logging.error(_("Error message: %s"), error) - logging.error(_("dpkg returned an error! See %s for details"), - logfile_dpkg) - return (res, pkgs_removed, pkgs_kept_installed) - - -def clean_downloaded_packages(fetcher): - # type: (apt_pkg.Acquire) -> None - for item in fetcher.items: - try: - os.unlink(item.destfile) - except OSError: - pass - ''' - archivedir = os.path.dirname( - apt_pkg.config.find_dir("Dir::Cache::archives")) - for item in fetcher.items: - if os.path.dirname(os.path.abspath(item.destfile)) == archivedir: - try: - os.unlink(item.destfile) - except OSError: - pass - ''' - - -def is_update_day(): - # type: () -> bool - # check if patch days are configured - patch_days = apt_pkg.config.value_list("Unattended-Upgrade::Update-Days") - if not patch_days: - return True - # validate patch days - today = date.today() - # abbreviated localized dayname - if today.strftime("%a") in patch_days: - return True - # full localized dayname - if today.strftime("%A") in patch_days: - return True - # by number (Sun: 0, Mon: 1, ...) - if today.strftime("%w") in patch_days: - return True - # today is not a patch day - logging.info( - "Skipping update check: today is %s,%s,%s but patch days are %s", - today.strftime("%w"), today.strftime("%a"), today.strftime("%A"), - ", ".join(patch_days)) - return False - - -def update_kept_pkgs_file(kept_pkgs, kept_file): - # type: (DefaultDict[str, List[str]], str) -> None - if kept_pkgs: - pkgs_all_origins = set() - for origin_pkgs in kept_pkgs.values(): - pkgs_all_origins.update(origin_pkgs) - try: - with open(kept_file, "w") as kf: - kf.write(" ".join(sorted(pkgs_all_origins))) - except FileNotFoundError: - logging.error(_("Could not open %s for saving list of packages " - "kept back." 
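# NOTE (illustrative sketch, not part of this patch): is_update_day() above
# accepts patch days written as abbreviated names ("Mon"), full names
# ("Monday") or weekday numbers with Sunday == 0. A minimal standalone
# version of that comparison:
from datetime import date

def is_patch_day(patch_days):
    """patch_days: list of day specs; an empty list means every day."""
    if not patch_days:
        return True
    today = date.today()
    return any(spec in patch_days for spec in (today.strftime("%a"),   # "Mon"
                                               today.strftime("%A"),   # "Monday"
                                               today.strftime("%w")))  # "1"

print(is_patch_day(["Sat", "Sun"]))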
% kept_file)) - else: - if os.path.exists(kept_file): - os.remove(kept_file) - - -def main(options, rootdir="/"): - # type: (Options, str) -> int - # useful for testing - # if not rootdir == "/": - # _setup_alternative_rootdir(rootdir) - - # see debian #776752 - # install_start_time = datetime.datetime.now().replace(microsecond=0) - # logging.info("unattended-upgrades start time:%s"%install_start_time) - # get log - ''' - dpkg_journal_dirty = is_dpkg_journal_dirty() - abnormal_pkg_count = get_abnormally_installed_pkg_count() - logging.info("abnormal pkg count:%s,dpkg dirty:%s"%(abnormal_pkg_count,dpkg_journal_dirty)) - if os_release_info['PROJECT_CODENAME'] == 'V10SP1-edu' and os_release_info['SUB_PROJECT_CODENAME']=='mavis': - if dpkg_journal_dirty or abnormal_pkg_count != '0': - ret = subprocess.run("dpkg --configure -a",shell=True,stdout=open(logfile,'a+'),stderr=open(logfile,'a+')) - logging.info("dpkg fix return :%s"%ret.returncode) - ''' - # lock for the shutdown check - # uu_lock = apt_pkg.get_lock(LOCK_FILE) - # if uu_lock < 0: - # logging.error("Lock file is already taken, exiting") - # WriteValueToFile(UNATTENDED_UPGRADE_CONFIG_FILE_PATH,"UNATTENDED_UPGRADE","autoupdate_run_status","idle") - # return 1 - - try: - kysec_pre_upgrade() - res = run(options, rootdir, logfile_dpkg) - kysec_post_upgrade() - logging.info("result:%s,%s"%(res.success,res.result_str)) - release = '' - version = '' - os_release_info = ReadOsRelease('/etc/os-release') - if 'KYLIN_RELEASE_ID' in os_release_info: - release = os_release_info['KYLIN_RELEASE_ID'] - #version = ReadValueFromFile(VERSION_FILE,'SYSTEM','version') - version = get_default_version() - logging.debug("release:%s,version:%s"%(release,version)) - if options.install_only or options.download_and_install: - #history record - history = {} - date = time.strftime("%Y-%m-%d %H:%M:%S") - history.update({"date":date}) - history.update({"appname":"kylin-unattended-upgrade"}) - history.update({"appname_cn":"自动更新"}) - history.update({"version":""}) - history.update({"description":"download and install security upgrades automatically"}) - history.update({"keyword":"1"}) - history.update({"changelog":""}) - history.update({"status":"success"}) - history.update({"errorcode":"cache commit error"}) - history.update({"status_cn":"成功"}) - #data collect info - UpdateInfos = {} - UpdateInfos.update({"packageName":"kylin-unattended-upgrade"}) - UpdateInfos.update({"appname":"kylin-unattended-upgrade"}) - UpdateInfos.update({"source":"kylin unattended upgrade"}) - UpdateInfos.update({"status":1}) - UpdateInfos.update({"errorCode":"cache commit error"}) - if res.success and len(res.pkgs) > 0 : - #if res.result_str == "total_install": - # with open(TIME_STAMP,'w') as f: - # f.write(time.time()) - config=configparser.ConfigParser(allow_no_value=True) - config.read(KYLIN_VERSION_FILE) - config.set("SYSTEM","os_version",release) - config.set("SYSTEM","update_version",version) - with open(KYLIN_VERSION_FILE,'w') as f: - config.write(f) - # kylin_system_updater.SetConfigValue("SYSTEM","os_version",release) - # kylin_system_updater.SetConfigValue("SYSTEM","update_version",original_version) - kylin_system_updater.InsertUpgradeHistory(history) - json_file = json.dumps(UpdateInfos.copy()) - kylin_system_updater.DataBackendCollect("UpdateInfos",json_file) - elif not res.success: - errorlist = kylin_system_updater.DumpInstallErrorRecord() - errorlist.append("cache commit error") - errcode = "\n".join(errorlist) - if options.install_only: - history.update({"status":"failed"}) - 
-                    history.update({"status_cn":"失败"})
-                    history.update({"errorcode":errcode})
-                    kylin_system_updater.InsertUpgradeHistory(history)
-                UpdateInfos.update({"status":0})
-                UpdateInfos.update({"errorCode":errcode})
-                json_file = json.dumps(UpdateInfos.copy())
-                kylin_system_updater.DataBackendCollect("UpdateInfos",json_file)
-            else:
-                logging.info("no pkgs to install")
-
-
-        if 'PROJECT_CODENAME' in os_release_info:
-            if os_release_info['PROJECT_CODENAME']=='V10SP1-edu':
-                if 'SUB_PROJECT_CODENAME' in os_release_info:
-                    if os_release_info['SUB_PROJECT_CODENAME']=='mavis':
-                        localtime = time.strftime("%Y-%m-%d %H:%M:%S",time.localtime(time.time()))
-                        config_to_result = configparser.ConfigParser(allow_no_value=True)
-                        config_to_result.add_section("OTA")
-                        config_to_result.set("OTA","time",localtime)
-                        config_to_result.set("OTA","version","1.0")
-                        config_to_result.set("OTA","upgrade","0")
-                        config_to_result.set("OTA","status","failed")
-                        if res.success:
-                            if options.mode == 'shutdown':
-                                config_to_result.set("OTA","status","success")
-                        if len(res.pkgs) > 0 :
-                            config_to_result.set("OTA","upgrade","1")
-                        if not os.path.exists(OTA_RESULT_FILE_PATH):
-                            os.makedirs(OTA_RESULT_FILE_PATH)
-                        # os.chmod(OTA_RESULT_FILE_PATH,stat.S_IRUSR|stat.S_IWUSR|stat.S_IWGRP|stat.S_IRGRP|stat.S_IWOTH|stat.S_IROTH)
-                        if not os.path.exists(OTA_RESULT_FILE):
-                            f = open(OTA_RESULT_FILE,'w')
-                            f.close()
-                        with open(OTA_RESULT_FILE,"w+") as f:
-                            config_to_result.write(f)
-                        subprocess.Popen("chmod -R 777 %s"%(OTA_RESULT_FILE_PATH),shell=True)
-                        # os.chmod(OTA_RESULT_FILE,stat.S_IRUSR|stat.S_IWUSR|stat.S_IWGRP|stat.S_IRGRP|stat.S_IWOTH|stat.S_IROTH)
-                        # os.chmod(OTA_RESULT_FILE,stat.S_IRWXU|stat.S_IRWXG|stat.S_IRWXO)
-
+        return True
+
+    def UpdateDetect(self):
+        return self.update_interface.UpdateDetect()
+
+    def DistUpgradeAll(self,is_install):
+        return self.update_interface.DistUpgradeAll(is_install)
+
+    def RunMainLoop(self):
+        self.loop.run()
+
+    def QuitMainLoop(self):
+        self.loop.quit()
-        # WriteValueToFile(UNATTENDED_UPGRADE_CONFIG_FILE_PATH,"UNATTENDED_UPGRADE","autoupdate_run_status","idle")
-        '''
-        if res.success and res.result_str:
-            # complete, successful run
-            update_kept_pkgs_file(res.pkgs_kept_back,
-                                  os.path.join(rootdir, KEPT_PACKAGES_FILE))
-
-        if res.result_str and not options.dry_run:
-            # there is some meaningful result which is worth an email
-            log_content = get_dpkg_log_content(logfile_dpkg,
-                                               install_start_time)
-
-            send_summary_mail(res.pkgs, res.success, res.result_str,
-                              res.pkgs_kept_back, res.pkgs_removed,
-                              res.pkgs_kept_installed, mem_log,
-                              log_content)
-
-        if res.update_stamp:
-            # write timestamp file
-            write_stamp_file()
-        if not options.dry_run:
-            # check if the user wants a reboot
-            reboot_if_requested_and_needed()
-        '''
-        os.close(shutdown_lock)
-        WriteValueToFile(UNATTENDED_UPGRADE_CONFIG_FILE_PATH,"UNATTENDED_UPGRADE","autoupdate_run_status","idle")
-        if res.success:
-            return 0
-        else:
-            return 1
-
-    except Exception as e:
-        WriteValueToFile(UNATTENDED_UPGRADE_CONFIG_FILE_PATH,"UNATTENDED_UPGRADE","autoupdate_run_status","idle")
-        logging.error(e)
-        if options.install_only:
-            reboot_if_needed()
-        # logger = logging.getLogger()
-        # logger.exception(_("An error occurred: %s"), e)
-        # log_content = get_dpkg_log_content(logfile_dpkg,
-        #                                    install_start_time)
-        # if not options.dry_run:
-        #     send_summary_mail([""], False, _("An error occurred"),
-        #                       None, [], [], mem_log, log_content)
-        # Re-raise exceptions for apport
-        # raise
-
-
-def mark_pkgs_to_upgrade(cache, pkgs_to_upgrade):
-    # type
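# NOTE (illustrative sketch, not part of this patch): the wrapper methods above
# forward to a D-Bus proxy (self.update_interface) and block on a GLib main
# loop until the updater service answers with a signal. A minimal version of
# that wiring; the bus name, object path, interface and signal names below are
# placeholders, not the real kylin-system-updater values:
import dbus
from dbus.mainloop.glib import DBusGMainLoop
from gi.repository import GLib

DBusGMainLoop(set_as_default=True)       # must be set before connecting signals

class UpdaterClient:
    def __init__(self):
        bus = dbus.SystemBus()
        proxy = bus.get_object("org.example.Updater", "/org/example/Updater")
        self.update_interface = dbus.Interface(proxy, "org.example.Updater")
        self.loop = GLib.MainLoop()
        self.update_detect_status = False
        self.update_list = []
        # record the service's answer, then stop waiting
        self.update_interface.connect_to_signal("UpdateDetectFinished",
                                                self._on_detect_finished)

    def _on_detect_finished(self, success, pkgs):
        self.update_detect_status = bool(success)
        self.update_list = [str(p) for p in pkgs]
        self.loop.quit()

    def UpdateDetect(self):
        return self.update_interface.UpdateDetect()

    def RunMainLoop(self):
        self.loop.run()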
(apt.Cache, List[str]) -> None - for pkg_name in pkgs_to_upgrade: - try: - pkg = cache[pkg_name] - except KeyError: - continue - - if pkg.is_upgradable \ - or (pkg.is_installed - and pkg.candidate.version != pkg.installed.version): - cache.mark_upgrade_adjusted(pkg, - from_user=not pkg.is_auto_installed) - # pkg.mark_upgrade() - - elif not pkg.is_installed: - cache.mark_install_adjusted(pkg, from_user=True) - # pkg.mark_install() - else: - pass - - -def adjust_candidate_with_version(cache,namelistwithversion): - for pkgname in namelistwithversion: - try: - pkg = cache[pkgname[0]] - except KeyError: - continue - for v in pkg.versions: - if v.version == pkgname[1] and is_in_allowed_origin(v,cache.allowed_origins): - logging.info("package:%s , candidate version:%s"%(pkgname[0],pkgname[1])) - pkg.candidate = v - # if str(v) in versionlist: - # pkg.candidate = v - ''' - dep_list = [] - dep = pkg.candidate.get_dependencies("PreDepends") - for d in dep: - dep_list.append(d.or_dependencies[0].name) - dep = pkg.candidate.get_dependencies("Depends") - for d in dep: - dep_list.append(d.or_dependencies[0].name) - package_deps.update({pkg:dep_list}) - ''' - -def run(options, # type: Options - rootdir, # type: str - # mem_log, # type: StringIO - logfile_dpkg, # type: str - # install_start_time, # type: datetime.datetime - ): - # type: (...) -> UnattendedUpgradesResult - - # check if today is a patch day - # if not is_update_day(): - # return UnattendedUpgradesResult(True) - logging.info(_("Starting unattended upgrades script")) - reload_options_config() - # check if u-u should be stopped already - if should_stop(): - return UnattendedUpgradesResult(False) - - #global os_release_info - # check to see if want to auto-upgrade the devel release - ''' - if apt_pkg.config.find("Unattended-Upgrade::DevRelease") == "auto": - try: - if DISTRO_ID.lower() == 'ubuntu': - devel = (distro_info.UbuntuDistroInfo() . - devel(result="object")) - elif DISTRO_ID.lower() == 'debian': - devel = (distro_info.DebianDistroInfo() . - devel(result="object")) - else: - devel = (distro_info.DistroInfo(DISTRO_ID) . 
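# NOTE (illustrative sketch, not part of this patch): adjust_candidate_with_version()
# above pins each whitelisted package to the exact version reported by the
# updater by assigning pkg.candidate. The core python-apt idiom in isolation;
# the package name and version string are examples only:
import apt

cache = apt.Cache()
pkg = cache["bash"]
wanted = "5.1-6ubuntu1"                  # version requested by the whitelist
for version in pkg.versions:
    if version.version == wanted:
        pkg.candidate = version          # later mark_upgrade()/commit() use it
        break
else:
    print("requested version not available from any configured origin")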
- devel(result="object")) - except Exception as e: - logging.warning("Could not figure out development release: %s" % e) - else: - if ((devel.series == DISTRO_CODENAME - and devel.release is not None - and devel.release - date.today() > DEVEL_UNTIL_RELEASE)): - syslog.syslog((_("Not running on this development " - "release before %s") % - (devel.release - DEVEL_UNTIL_RELEASE - - datetime.timedelta(days=1)))) - logging.warning(_("Not running on this development " - "release before %s") % - (devel.release - DEVEL_UNTIL_RELEASE - - datetime.timedelta(days=1))) - return UnattendedUpgradesResult(True) - - logging.debug("Running on the development release") - elif "(development branch)" in DISTRO_DESC and not\ - apt_pkg.config.find_b("Unattended-Upgrade::DevRelease", True): - syslog.syslog(_("Not running on the development release.")) - logging.info(_("Not running on the development release.")) - return UnattendedUpgradesResult(True) - ''' - - #kylin_system_updater = KylinSystemUpdater() - ''' - if kylin_system_updater.GetUnattendedUpgradeValue: - pass - else: - return UnattendedUpgradesResult(False) - kylin_system_updater.ConnectToSignals() - kylin_system_updater.GetWhiteList() - kylin_system_updater.RunMainloop() - ''' - # check and get lock - try: - apt_pkg.pkgsystem_lock() - except SystemError: - logging.error(_("Lock could not be acquired (another package " - "manager running?)")) - #print(_("Cache lock can not be acquired, exiting")) - return UnattendedUpgradesResult( - False, _("Lock could not be acquired")) - - # check if the journal is dirty and if so, take emergceny action - # the alternative is to leave the system potentially unsecure until - # the user comes in and fixes - ''' - if is_dpkg_journal_dirty() and \ - apt_pkg.config.find_b("Unattended-Upgrade::AutoFixInterruptedDpkg", - False): - logging.warning( - _("Unclean dpkg state detected, trying to correct")) - print(_("Unclean dpkg state detected, trying to correct")) - env = copy.copy(os.environ) - env["DPKG_FRONTEND_LOCKED"] = "1" - try: - with Unlocked(): - output = subprocess.check_output( - ["dpkg", "--force-confold", "--configure", "-a"], - env=env, - universal_newlines=True) - except subprocess.CalledProcessError as e: - output = e.output - logging.warning(_("dpkg --configure -a output:\n%s"), output) - ''' - - white_list_with_version = kylin_system_updater.whitelist_with_candidate_version#config_manager.ReadListFromFile(WHITE_LIST_FILE_PATH,'AutoUpgrade','upgradelist') - logging.info("upgrade list from kylin system updater:") - logging.debug(white_list_with_version) - ''' - for w in white_list_with_version: - whitelistwithversion.append('-'.join(w)) - logging.debug("whitelist from kylin system updater:%s"%("\n".join(whitelistwithversion))) - ''' - # namelist = [] - # namelist_with_version = [] - - # get_white_list_with_version(white_list_with_version,namelist_with_version,namelist) - # get a cache - try: - cache = UnattendedUpgradesCache(rootdir=rootdir,whitelist_with_version=white_list_with_version,blacklist=[]) - #cache.whitelist=white_list - except SystemError as error: - # print(_("Apt returned an error, exiting")) - # print(_("error message: %s") % error) - logging.error(_("Apt returned an error, exiting")) - logging.error(_("error message: %s"), error) - return UnattendedUpgradesResult( - False, _("Apt returned an error, exiting")) - ''' - if cache._depcache.broken_count > 0: - print(_("Cache has broken packages, exiting")) - logging.error(_("Cache has broken packages, exiting")) - return UnattendedUpgradesResult( - 
False, _("Cache has broken packages, exiting")) - ''' - # FIXME: make this into a ContextManager - # be nice when calculating the upgrade as its pretty CPU intensive - ''' - old_priority = os.nice(0) - try: - # Check that we will be able to restore the priority - os.nice(-1) - os.nice(20) - except OSError as e: - if e.errno in (errno.EPERM, errno.EACCES): - pass - else: - raise - ''' - #auto_removable = get_auto_removable(cache) - - - # find out about the packages that are upgradable (in an allowed_origin) - pkgs_to_upgrade = calculate_upgradable_pkgs(cache, options,white_list_with_version) - if options.install_only or options.download_and_install: - # if (len(pkgs_to_upgrade)0: - # logging.warning("there're pkgs to download") - # try: - # apt_pkg.pkgsystem_unlock() - # except SystemError: - # logging.error(_("lock release failed")) - # return UnattendedUpgradesResult(False,_("there're pkgs to download")) - # else: - #only write the pkg list when dpkg journal is clean - # if not is_dpkg_journal_dirty(): - # configfilemanager.WriteListToFile(pkgs,"OTA_PKGS_TO_INSTALL_LIST") - try: - res = fetcher.run() - logging.debug("fetch.run() result: %s", res) - except SystemError as e: - logging.error("fetch.run() result: %s", e) - - # if cache.get_changes(): - # cache.clear() - - pkg_install_success = True - install_result = '' - if len(pkgs_to_upgrade) > 0: - if 'PROJECT_CODENAME' in os_release_info: - if os_release_info['PROJECT_CODENAME']=='V10SP1-edu': - if 'SUB_PROJECT_CODENAME' in os_release_info: - if os_release_info['SUB_PROJECT_CODENAME']=='mavis': - pass - else: - logging.info("need backup") - backup_result = False - backup_result = Backup() - if (backup_result): - pass - else: - logging.debug("backup failed...") - return UnattendedUpgradesResult(False,"backup failed") - - # do install - WriteValueToFile(UNATTENDED_UPGRADE_CONFIG_FILE_PATH,"UNATTENDED_UPGRADE","autoupdate_run_status","install") - #send install start msg to notify - # if os.path.exists(NOTIFICATION_PIPE): - # with open(NOTIFICATION_PIPE,'w') as p: - # p.write('install start') - with open(PROGRESS_LOG,'w+') as f: - f.write('0') - subprocess.Popen('dbus-send --system --type=signal / com.kylin.install.notification.InstallStart',shell=True) - inhibitshutdownlock.lock() - # if LockedPreventShutdown(): - # pass - # else: - # logging.error("cannot get shutdown lock,exiting...") - # WriteValueToFile(UNATTENDED_UPGRADE_CONFIG_FILE_PATH,"UNATTENDED_UPGRADE","autoupdate_run_status","idle") - # sys.exit(1) - - logging.debug("InstCount=%i DelCount=%i BrokenCount=%i" - % (cache._depcache.inst_count, - cache._depcache.del_count, - cache._depcache.broken_count)) - logging.info("shutdown safe manager") - - pkg_install_success = do_install(cache, - pkgs, - options, - logfile_dpkg) - logging.info("reset safe manager") - - # unLockedEnableShutdown() - inhibitshutdownlock.unlock() - subprocess.Popen('dbus-send --system --type=signal / com.kylin.install.notification.InstallFinish',shell=True) - if pkg_install_success: - clean_downloaded_packages(fetcher) - kylin_system_updater.CheckRebootRequired("unattended-upgrades") - - logging.debug("pkg number:%d,pkg in whitelist number:%d"%(len(pkgs),len(white_list_with_version))) - if len(pkgs) == len(white_list_with_version): - install_result = "total_install" - else: - install_result = "partial_install" - logging.debug("install result:%s"%install_result) - try: - apt_pkg.pkgsystem_unlock() - except SystemError: - logging.error(_("lock release failed")) - return 
UnattendedUpgradesResult(pkg_install_success, - install_result, - pkgs) - - elif options.download_only: - - if fetcher_statistics.remote_pkg_amount>0: - pass - else: - logging.info("no pkgs need to download") - #return UnattendedUpgradesResult(True,_("there're no pkgs to download")) - retry_times=10 - if retry_times<0: - retry_times = 1 - while retry_times >0: - try: - res = fetcher.run() - logging.debug("fetch.run() result: %s", res) - except SystemError as e: - logging.error("fetch.run() result: %s", e) - fetcher_statistics.ResetFetcher(fetcher) - fetcher_statistics.GetAquireStatisticsOfPkgs() - logging.debug("incomplete download pkg number:%d"%fetcher_statistics.incomplete_pkg_amount) - retry_times-=1 - if fetcher_statistics.incomplete_pkg_amount >0: - logging.debug("%d incomplete pkgs,%d try times left") - fetcher.shutdown() - try: - pm.get_archives(fetcher, list, recs) - except SystemError as e: - logging.error(_("GetArchives() failed: %s"), e) - else: - break - #fetcher_statistics.ResetFetcher(fetcher) - #fetcher_statistics.GetAquireStatisticsOfPkgs() - insmod = ReadValueFromFile(UNATTENDED_UPGRADE_POLICY_FILE_PATH,"autoUpgradePolicy","installmode") - if fetcher_statistics.incomplete_pkg_amount == 0 and len(pkgs_to_upgrade) > 0: - if os_release_info['PROJECT_CODENAME'] == 'V10SP1-edu' and os_release_info['SUB_PROJECT_CODENAME']=='mavis': - docker_image_fetch_result = 0 - #docker image fetch for mavis and laika - if os.path.exists("/usr/bin/service_runtime_ota.sh"): - docker_image_fetch_result = subprocess.run(["/usr/bin/service_runtime_ota.sh"], shell=True) - if docker_image_fetch_result.returncode == 0: - logging.info("all pkgs downloaded") - else: - return UnattendedUpgradesResult(False,_("docker fetch failed")) - logging.info("pkg number:%d"%(len(pkgs))) - login_manager.SetExtraInhibitShutdownDelaySec(1800) - configfilemanager.AddFileName("OTA_PKGS_TO_INSTALL") - subprocess.Popen('dbus-send --system --type=signal / com.kylin.update.notification.DownloadFinish', shell=True) - kylin_system_updater.SetConfigValue('InstallMode','auto_install','True') - elif insmod == 'bshutdown': - logging.info("pkg number:%d"%(len(pkgs))) - login_manager.SetExtraInhibitShutdownDelaySec(1800) - configfilemanager.AddFileName("OTA_PKGS_TO_INSTALL") - kylin_system_updater.SetConfigValue('InstallMode','auto_install','True') - elif insmod == 'timing': - pass - else: - pass - try: - apt_pkg.pkgsystem_unlock() - except SystemError: - logging.error(_("lock release failed")) - return UnattendedUpgradesResult(True,_("all pkgs downloaded")) - elif fetcher_statistics.incomplete_pkg_amount > 0 and len(pkgs_to_upgrade) > 0: - try: - apt_pkg.pkgsystem_unlock() - except SystemError: - logging.error(_("lock release failed")) - return UnattendedUpgradesResult(False,_("some pkgs incompletely fetched")) - else: - try: - apt_pkg.pkgsystem_unlock() - except SystemError: - logging.error(_("lock release failed")) - return UnattendedUpgradesResult(True,_("all pkgs downloaded")) - elif options.download_and_install: - if len(pkgs)==0: - logging.info("no pkgs to install") - return UnattendedUpgradesResult(True,_("there're no pkgs to install")) - if fetcher_statistics.remote_pkg_amount>0: - pass - else: - logging.info("no pkgs need to download") - #return UnattendedUpgradesResult(True,_("there're no pkgs to download")) - retry_times=10 - if retry_times<0: - retry_times = 1 - while retry_times >0: - try: - res = fetcher.run() - logging.debug("fetch.run() result: %s", res) - except SystemError as e: - logging.error("fetch.run() 
result: %s", e) - fetcher_statistics.ResetFetcher(fetcher) - fetcher_statistics.GetAquireStatisticsOfPkgs() - logging.debug("incomplete download pkg number:%d"%fetcher_statistics.incomplete_pkg_amount) - retry_times-=1 - if fetcher_statistics.incomplete_pkg_amount >0: - logging.debug("%d incomplete pkgs,%d try times left") - fetcher.shutdown() - try: - pm.get_archives(fetcher, list, recs) - except SystemError as e: - logging.error(_("GetArchives() failed: %s"), e) - else: - break - pkg_install_success = True - install_result = '' - backup_result = False - backup_result = Backup() - if (backup_result): - pass - else: - logging.debug("backup failed...") - return UnattendedUpgradesResult(False,"backup failed") - WriteValueToFile(UNATTENDED_UPGRADE_CONFIG_FILE_PATH,"UNATTENDED_UPGRADE","autoupdate_run_status","install") - inhibitshutdownlock.lock() - logging.debug("InstCount=%i DelCount=%i BrokenCount=%i" - % (cache._depcache.inst_count, - cache._depcache.del_count, - cache._depcache.broken_count)) - logging.info("shutdown safe manager") - pkg_install_success = do_install(cache, - pkgs, - options, - logfile_dpkg) - logging.info("reset safe manager") - # unLockedEnableShutdown() - inhibitshutdownlock.unlock() - subprocess.Popen('dbus-send --system --type=signal / com.kylin.install.notification.InstallFinish',shell=True) - if pkg_install_success: - clean_downloaded_packages(fetcher) - kylin_system_updater.CheckRebootRequired("unattended-upgrades") - logging.debug("pkg number:%d,pkg in whitelist number:%d"%(len(pkgs),len(white_list_with_version))) - if len(pkgs) == len(white_list_with_version): - install_result = "total_install" - else: - install_result = "partial_install" - logging.debug("install result:%s"%install_result) - try: - apt_pkg.pkgsystem_unlock() - except SystemError: - logging.error(_("lock release failed")) - return UnattendedUpgradesResult(pkg_install_success,install_result,pkgs) - else: - try: - apt_pkg.pkgsystem_unlock() - except SystemError: - logging.error(_("lock release failed")) - logging.debug(_("option is not install-only or download-only")) - try: - res = fetcher.run() - logging.debug("fetch.run() result: %s", res) - except SystemError as e: - logging.error("fetch.run() result: %s", e) - return UnattendedUpgradesResult(False,_("option is not install-only or download-only")) - - - ''' - pkg_conffile_prompt = False - if dpkg_conffile_prompt(): - # now check the downloaded debs for conffile conflicts and build - # a blacklist - conffile_blacklist = [] # type: List[str] - for item in fetcher.items: - logging.debug("%s" % item) - if item.status == item.STAT_ERROR: - print(_("An error occurred: %s") % item.error_text) - logging.error(_("An error occurred: %s"), item.error_text) - if not item.complete: - print(_("The URI %s failed to download, aborting") % - item.desc_uri) - logging.error(_("The URI %s failed to download, aborting"), - item.desc_uri) - return UnattendedUpgradesResult( - False, (_("The URI %s failed to download, aborting") % - item.desc_uri)) - if not os.path.exists(item.destfile): - print(_("Download finished, but file %s not there?!?") % - item.destfile) - logging.error("Download finished, but file %s not " - "there?!?", item.destfile) - return UnattendedUpgradesResult( - False, (_("Download finished, but file %s not there?!?") % - item.destfile)) - if not item.is_trusted and not apt_pkg.config.find_b( - "APT::Get::AllowUnauthenticated", False): - logging.debug("%s is blacklisted because it is not trusted") - pkg_name = pkgname_from_deb(item.destfile) - if not 
is_pkgname_in_blacklist(pkg_name, cache.blacklist): - conffile_blacklist.append("%s$" % re.escape(pkg_name)) - if not is_deb(item.destfile): - logging.debug("%s is not a .deb file" % item) - continue - if conffile_prompt(item.destfile): - # skip package (means to re-run the whole marking again - # and making sure that the package will not be pulled in by - # some other package again!) - # - # print to stdout to ensure that this message is part of - # the cron mail (only if no summary mail is requested) - email = apt_pkg.config.find("Unattended-Upgrade::Mail", "") - if not email: - print(_("Package %s has conffile prompt and needs " - "to be upgraded manually") % - pkgname_from_deb(item.destfile)) - # log to the logfile - logging.warning(_("Package %s has conffile prompt and " - "needs to be upgraded manually"), - pkgname_from_deb(item.destfile)) - pkg_name = pkgname_from_deb(item.destfile) - if not is_pkgname_in_blacklist(pkg_name, cache.blacklist): - conffile_blacklist.append("%s$" % re.escape(pkg_name)) - pkg_conffile_prompt = True - - # redo the selection about the packages to upgrade based on the new - # blacklist - logging.debug("Packages blacklist due to conffile prompts: %s" - % conffile_blacklist) - # find out about the packages that are upgradable (in a allowed_origin) - if len(conffile_blacklist) > 0: - for regex in conffile_blacklist: - cache.blacklist.append(regex) - cache.apply_pinning(cache.pinning_from_regex_list( - conffile_blacklist, NEVER_PIN)) # type: ignore - old_pkgs_to_upgrade = pkgs_to_upgrade[:] - pkgs_to_upgrade = [] - for pkg in old_pkgs_to_upgrade: - logging.debug("Checking the black and whitelist: %s" % - (pkg.name)) - cache.mark_upgrade_adjusted( - pkg, from_user=not pkg.is_auto_installed) - if check_changes_for_sanity(cache): - pkgs_to_upgrade.append(pkg) - else: - logging.info(_("package %s not upgraded"), pkg.name) - cache.clear() - for pkg2 in pkgs_to_upgrade: - cache.call_adjusted( - apt.package.Package.mark_upgrade, pkg2, - from_user=not pkg2.is_auto_installed) - if cache.get_changes(): - cache.clear() - - else: - logging.debug("dpkg is configured not to cause conffile prompts") - - # auto-removals - kernel_pkgs_remove_success = True # type: bool - kernel_pkgs_removed = [] # type: List[str] - kernel_pkgs_kept_installed = [] # type: List[str] - if (auto_removable and apt_pkg.config.find_b( - "Unattended-Upgrade::Remove-Unused-Kernel-Packages", True)): - # remove unused kernels before installing new ones because the newly - # installed ones may fill up /boot and break the system right before - # removing old ones could take place - # - # this step may also remove _auto-removable_ reverse dependencies - # of kernel packages - auto_removable_kernel_pkgs = { - p for p in auto_removable - if (cache.versioned_kernel_pkgs_regexp - and cache.versioned_kernel_pkgs_regexp.match(p) - and not cache.running_kernel_pkgs_regexp.match(p))} - if auto_removable_kernel_pkgs: - logging.info(_("Removing unused kernel packages: %s"), - " ".join(auto_removable_kernel_pkgs)) - (kernel_pkgs_remove_success, - kernel_pkgs_removed, - kernel_pkgs_kept_installed) = do_auto_remove( - cache, auto_removable_kernel_pkgs, logfile_dpkg, - options.minimal_upgrade_steps, - options.verbose or options.debug, options.dry_run) - auto_removable = get_auto_removable(cache) - - previous_autoremovals = auto_removable - if apt_pkg.config.find_b( - "Unattended-Upgrade::Remove-Unused-Dependencies", False): - pending_autoremovals = previous_autoremovals - else: - pending_autoremovals = set() - - # exit if 
there is nothing to do and nothing to report - if (len(pending_autoremovals) == 0 - and len(pkgs_to_upgrade) == 0): - logging.info(_("No packages found that can be upgraded unattended " - "and no pending auto-removals")) - - pkgs_kept_back = cache.find_kept_packages(options.dry_run) - return UnattendedUpgradesResult( - kernel_pkgs_remove_success, - _("No packages found that can be upgraded unattended and no " - "pending auto-removals"), - pkgs_removed=kernel_pkgs_removed, - pkgs_kept_back=pkgs_kept_back, - pkgs_kept_installed=kernel_pkgs_kept_installed, - update_stamp=True) - - # check if its configured for install on shutdown, if so, the - # environment UNATTENDED_UPGRADES_FORCE_INSTALL_ON_SHUTDOWN will - # be set by the unatteded-upgrades-shutdown script - if ("UNATTENDED_UPGRADES_FORCE_INSTALL_ON_SHUTDOWN" not in os.environ - and apt_pkg.config.find_b( - "Unattended-Upgrade::InstallOnShutdown", False)): - logger = logging.getLogger() - logger.debug("Configured to install on shutdown, so exiting now") - return UnattendedUpgradesResult(True) - - # check if we are in dry-run mode - if options.dry_run: - logging.info("Option --dry-run given, *not* performing real actions") - apt_pkg.config.set("Debug::pkgDPkgPM", "1") - - # do the install based on the new list of pkgs - pkgs = [pkg.name for pkg in pkgs_to_upgrade] - logging.info(_("Packages that will be upgraded: %s"), " ".join(pkgs)) - - # only perform install step if we actually have packages to install - pkg_install_success = True - if len(pkgs_to_upgrade) > 0: - if 'PROJECT_CODENAME' in os_release_info: - if os_release_info['PROJECT_CODENAME']=='V10SP1-edu': - if 'SUB_PROJECT_CODENAME' in os_release_info: - if os_release_info['SUB_PROJECT_CODENAME']=='mavis': - pass - else: - Backup() - # do install - WriteValueToFile(UNATTENDED_UPGRADE_CONFIG_FILE_PATH,"UNATTENDED_UPGRADE","autoupdate_run_status","install") - #send install start msg to notify - # if os.path.exists(NOTIFICATION_PIPE): - # with open(NOTIFICATION_PIPE,'w') as p: - # p.write('install start') - with open(PROGRESS_LOG,'w+') as f: - f.write('0') - subprocess.Popen('dbus-send --system --type=signal / com.kylin.install.notification.InstallStart',shell=True) - if LockedPreventShutdown(): - pass - else: - logging.error("cannot get shutdown lock,exiting...") - WriteValueToFile(UNATTENDED_UPGRADE_CONFIG_FILE_PATH,"UNATTENDED_UPGRADE","autoupdate_run_status","idle") - sys.exit(1) - - logging.debug("InstCount=%i DelCount=%i BrokenCount=%i" - % (cache._depcache.inst_count, - cache._depcache.del_count, - cache._depcache.broken_count)) - - pkg_install_success = do_install(cache, - pkgs, - options, - logfile_dpkg) - unLockedEnableShutdown() - subprocess.Popen('dbus-send --system --type=signal / com.kylin.install.notification.InstallFinish',shell=True) - if pkg_install_success: - kylin_system_updater.CheckRebootRequired("unattended-upgrades") - # Was the overall run succesful: only if everything installed - # fine and nothing was held back because of a conffile prompt. 
- # successful_run = (kernel_pkgs_remove_success and pkg_install_success - # and not pkg_conffile_prompt) - #send install finish msg to notify - # if successful_run and os.path.exists(NOTIFICATION_PIPE): - # with open(NOTIFICATION_PIPE,'w') as p: - # p.write('install finish') - # now check if any auto-removing needs to be done - - if cache._depcache.broken_count > 0: - print(_("Cache has broken packages, exiting")) - logging.error(_("Cache has broken packages, exiting")) - return UnattendedUpgradesResult( - False, _("Cache has broken packages, exiting"), pkgs=pkgs) - - # make sure we start autoremovals with a clear cache - # if cache.get_changes(): - # cache.clear() - - - # the user wants *all* auto-removals to be removed - # (unless u-u got signalled to stop gracefully quickly) - pkgs_removed = [] # type: List[str] - pkgs_kept_installed = [] # type: List[str] - if ((apt_pkg.config.find_b( - "Unattended-Upgrade::Remove-Unused-Dependencies", False) - and not SIGNAL_STOP_REQUEST)): - auto_removals = get_auto_removable(cache) - (pkg_remove_success, - pkgs_removed, - pkgs_kept_installed) = do_auto_remove( - cache, auto_removals, logfile_dpkg, options.minimal_upgrade_steps, - options.verbose or options.debug, - options.dry_run) - successful_run = successful_run and pkg_remove_success - # the user wants *only new* auto-removals to be removed - elif apt_pkg.config.find_b( - "Unattended-Upgrade::Remove-New-Unused-Dependencies", False): - # calculate the new auto-removals - new_pending_autoremovals = get_auto_removable(cache) - auto_removals = new_pending_autoremovals - previous_autoremovals - (pkg_remove_success, - pkgs_removed, - pkgs_kept_installed) = do_auto_remove( - cache, auto_removals, logfile_dpkg, options.minimal_upgrade_steps, - options.verbose or options.debug, - options.dry_run) - successful_run = successful_run and pkg_remove_success - - logging.debug("InstCount=%i DelCount=%i BrokenCount=%i" - % (cache._depcache.inst_count, - cache._depcache.del_count, - cache._depcache.broken_count)) - - clean after success install (if needed) - keep_key = "Unattended-Upgrade::Keep-Debs-After-Install" - - if (not apt_pkg.config.find_b(keep_key, False) - and not options.dry_run - and pkg_install_success): - clean_downloaded_packages(fetcher) - - pkgs_kept_back = cache.find_kept_packages(options.dry_run) - return UnattendedUpgradesResult( - successful_run, _("All upgrades installed"), pkgs, - pkgs_kept_back, - kernel_pkgs_removed + pkgs_removed, - kernel_pkgs_kept_installed + pkgs_kept_installed, - update_stamp=True) - - install_result = '' - logging.debug("pkg number:%d,pkg in whitelist number:%d"%(len(pkgs),len(namelist_with_version))) - if len(pkgs) == len(namelist_with_version): - install_result = "total_install" - else: - install_result = "partial_install" - logging.debug("install result:%s"%install_result) - return UnattendedUpgradesResult(pkg_install_success, - install_result, - pkgs) - ''' - -class Options: - def __init__(self): - self.download_only = False - self.install_only = False - self.download_and_install = False - self.dry_run = False - self.debug = False - self.apt_debug = False - self.verbose = False - self.minimal_upgrade_steps = False - self.mode = None - - -shutdown_lock = -1 - if __name__ == "__main__": - WriteValueToFile(UNATTENDED_UPGRADE_CONFIG_FILE_PATH,"UNATTENDED_UPGRADE","autoupdate_run_status","idle") - # lock for the shutdown check - shutdown_lock = apt_pkg.get_lock(LOCK_FILE) - if shutdown_lock < 0: - logging.error("Lock file is already taken, exiting") - 
#WriteValueToFile(UNATTENDED_UPGRADE_CONFIG_FILE_PATH,"UNATTENDED_UPGRADE","autoupdate_run_status","idle") - sys.exit(1) - localesApp = "unattended-upgrades" - localesDir = "/usr/share/locale" - gettext.bindtextdomain(localesApp, localesDir) - gettext.textdomain(localesApp) - - # set debconf to NON_INTERACTIVE - os.environ["DEBIAN_FRONTEND"] = "noninteractive" - apt_pkg.init_config() - #remove sources in sources.list.d from indexes search - apt_pkg.config.set("Dir::Etc::sourceparts", "") - # this ensures the commandline is logged in /var/log/apt/history.log - apt_pkg.config.set("Commandline::AsString", " ".join(sys.argv)) - - # COMPAT with the mispelling - # minimal_steps_default = ( - # apt_pkg.config.find_b("Unattended-Upgrades::MinimalSteps", False) - # and apt_pkg.config.find_b("Unattended-Upgrade::MinimalSteps", False)) - minimal_steps_default = False - # init the options + gettext.bindtextdomain("unattended-upgrades","/usr/share/locale") + gettext.textdomain("unattended-upgrades") parser = OptionParser() - parser.add_option("-d", "--debug", - action="store_true", - default=True, - # default=apt_pkg.config.find_b( - # "Unattended-Upgrade::Debug", False), - help=_("print debug messages")) - parser.add_option("", "--apt-debug", - action="store_true", default=False, - help=_("make apt/libapt print verbose debug messages")) - parser.add_option("-v", "--verbose", - action="store_true", - default=apt_pkg.config.find_b( - "Unattended-Upgrade::Verbose", False), - help=_("print info messages")) - parser.add_option("", "--dry-run", - action="store_true", default=False, - help=_("Simulation, download but do not install")) parser.add_option("", "--download-only", - action="store_true", default=False, - help=_("Only download, do not even try to install.")) + action="store_true", dest="download_only", + default=False,help="only download without install") parser.add_option("", "--install-only", - action="store_true", default=False, - help=_("Only install, do not even try to download.")) - parser.add_option("", "--download-and-install", - action="store_true", default=False, - help=_("Download and Install.")) - parser.add_option("", "--minimal-upgrade-steps", - action="store_true", default=minimal_steps_default, - help=_("Upgrade in minimal steps (and allow " - "interrupting with SIGTERM) (default)")) - parser.add_option("", "--no-minimal-upgrade-steps", - action="store_false", default=minimal_steps_default, - dest="minimal_upgrade_steps", - help=_("Upgrade all packages together instead of in " - "smaller sets")) - parser.add_option("", "--minimal_upgrade_steps", - action="store_true", - help=SUPPRESS_HELP, - default=minimal_steps_default) - parser.add_option(""''"","--mode",action = "store",type = "string" , - dest = "mode",help="start mode.") - options = cast(Options, (parser.parse_args())[0]) - - if os.getuid() != 0: - print(_("You need to be root to run this application")) - sys.exit(1) - - # ensure that we are not killed when the terminal goes away e.g. 
on - # shutdown - signal.signal(signal.SIGHUP, signal.SIG_IGN) - - # setup signal handler for graceful stopping - signal.signal(signal.SIGTERM, signal_handler) - - signal.signal(signal.SIGINT,signal_handler_int) - - signal.signal(signal.SIGUSR1,signal_handler_usr1) - # write pid to let other processes find this one - - # pidf = os.path.join(apt_pkg.config.find_dir("Dir"), - # "var", "run", "unattended-upgrades.pid") - # clean up pid file on exit - with open(PID_FILE, "w") as fp: - fp.write("%s" % os.getpid()) - atexit.register(os.remove, PID_FILE) - #setup log dir - # logdir = os.path.join("var", "log", "kylin-unattended-upgrades") - logdir = "/var/log/kylin-unattended-upgrades" - if not os.path.exists(logdir): - os.makedirs(logdir) - #setup log - logfile = os.path.join(logdir, 'unattended-upgrades.log') - if not os.path.exists(logfile): - with open(logfile, 'w'): - pass - logfile_fd = open(logfile,'a+') - #setup dpkg log - logfile_dpkg = os.path.join(logdir, 'unattended-upgrades-dpkg.log') - if not os.path.exists(logfile_dpkg): - with open(logfile_dpkg, 'w'): - pass - - # setup logging - # _setup_logging(options,logfile) - logging.basicConfig(format='%(asctime)s-%(name)s-%(levelname)s-%(message)s',level=logging.DEBUG,filename=logfile) - - # logging.basicConfig() - # file_handler = logging.FileHandler(filename=logfile) - stdout_handler = logging.StreamHandler(sys.stdout) - stdout_handler.setLevel(logging.INFO) - # formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') - # file_handler.setFormatter(formatter) - - logger=logging.getLogger() - logger.setLevel(logging.DEBUG) - # logger.addHandler(file_handler) - logger.addHandler(stdout_handler) - #get os release info - os_release_info = ReadOsRelease('/etc/os-release') - #print(os_release_info) - config_manager = ConfigFileManager(CONFIG_FILE_ROOT_PATH) - login_manager = LoginManager() - kylin_system_updater = KylinSystemUpdater() - - ''' - if os_release_info['PROJECT_CODENAME'] == 'V10SP1-edu' and os_release_info['SUB_PROJECT_CODENAME']=='mavis': - pass - else: - allow_autoupdate = kylin_system_updater.GetDatabaseInfo("display","autoupdate_allow") - if allow_autoupdate == "true": - pass - else: - logging.info("auto upgrade not allow, exit") - sys.exit(0) - ''' - #check control center lock - ''' - if os.path.exists(CONTROL_PANEL_LOCK_FILE): - file_lock = FILE_LOCK(CONTROL_PANEL_LOCK_FILE) - if file_lock.get_lock(): - logging.debug("control center not running") - file_lock.unlock() - else: - logging.warning("control center running ,exiting...") - sys.exit(1) - ''' - # package_deps = {} - kylin_system_updater.ConnectToSignals() - kylin_system_updater.UpdateDetect() - kylin_system_updater.RunMainloop() - inhibitshutdownlock = InhibitShutdownLock() - if options.download_only: - WriteValueToFile(UNATTENDED_UPGRADE_CONFIG_FILE_PATH,"UNATTENDED_UPGRADE","autoupdate_run_status","download") + action="store_true", dest="install_only", + default=False,help="only install without download") + (options, args) = parser.parse_args() + print(options,args) + systemupdater = SystemUpdater() + if options.download_only: + systemupdater.download() elif options.install_only: - WriteValueToFile(UNATTENDED_UPGRADE_CONFIG_FILE_PATH,"UNATTENDED_UPGRADE","autoupdate_run_status","preinstall") - elif options.download_and_install: - WriteValueToFile(UNATTENDED_UPGRADE_CONFIG_FILE_PATH,"UNATTENDED_UPGRADE","autoupdate_run_status","preinstall") - - # run the main code - install_start_time = datetime.datetime.now().replace(microsecond=0) - 
logging.info("unattended-upgrades start time:%s"%install_start_time) - # get log - ''' - dpkg_journal_dirty = is_dpkg_journal_dirty() - abnormal_pkg_count = get_abnormally_installed_pkg_count() - logging.info("abnormal pkg count:%s,dpkg dirty:%s"%(abnormal_pkg_count,dpkg_journal_dirty)) - if os_release_info['PROJECT_CODENAME'] == 'V10SP1-edu' and os_release_info['SUB_PROJECT_CODENAME']=='mavis': - if dpkg_journal_dirty or abnormal_pkg_count != '0': - ret = subprocess.run("dpkg --configure -a",shell=True,stdout=open(logfile,'a+'),stderr=open(logfile,'a+')) - logging.info("dpkg fix return :%s"%ret.returncode) - ''' - - sys.exit(main(options)) + systemupdater.install() + else: + pass \ No newline at end of file diff --git a/unattended-upgrades/kylin-unattended-upgrade-shutdown b/unattended-upgrades/kylin-unattended-upgrade-shutdown index 8a3f55a..5fffe1e 100644 --- a/unattended-upgrades/kylin-unattended-upgrade-shutdown +++ b/unattended-upgrades/kylin-unattended-upgrade-shutdown @@ -25,139 +25,51 @@ # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA # -import copy -# from multiprocessing.connection import wait -# from concurrent.futures import ThreadPoolExecutor -# from stat import filemode +from cmath import atan +from time import timezone import dbus import signal -import sys -import time import datetime import logging - -import gettext -import subprocess +import logging.config import os.path import os +import sys +import getpass import configparser -import psutil -# for dbus signal handling -try: - from dbus.mainloop.glib import DBusGMainLoop - from gi.repository import GLib -except ImportError: - pass - +from dbus.mainloop.glib import DBusGMainLoop +from gi.repository import GLib from optparse import OptionParser, Values -Values # pyflakes -from gettext import gettext as _, install -from threading import Event -from enum import IntEnum, Enum -from apscheduler.schedulers.blocking import BlockingScheduler +from threading import Event from apscheduler.schedulers.background import BackgroundScheduler +from apscheduler.jobstores.memory import MemoryJobStore +from apscheduler.executors.pool import ThreadPoolExecutor,ProcessPoolExecutor +from apt.progress.base import AcquireProgress import random -import threading -import re - -try: - import apt_pkg -except Exception: - # if there is no python-apt no unattended-upgrades can run so not - # need to stop the shutdown - logging.exception("importing of apt_pkg failed, exiting") - sys.exit(0) - -# progress information is written here -PROGRESS_LOG = "/var/run/unattended-upgrades.progress" -PID_FILE = "/var/run/unattended-upgrades.pid" -LOCK_FILE = "/var/run/kylin-unattended-upgrades.lock" -PKGS_TO_INSTALL_FLAG_FILE="/var/lib/unattended-upgrades/OTA_PKGS_TO_INSTALL" -TIME_STAMP = "/var/lib/unattended-upgrades/unattended-upgrades-timestamp" -OTA_PKGS_TO_INSTALL_LIST="/var/lib/unattended-upgrades/ota_pkgs_to_install_list" - -## analytic unattended-upgrades-policy.conf start -POLICY_CONF_SECTION_AUTO_UPGRADE_POLICY = "autoUpgradePolicy" -AUTO_UPGRADE_POLICY_OPTION_PREDOWNLOAD = "preDownload" -AUTO_UPGRADE_POLICY_OPTION_AUTOUPGRADE = "autoUpgradeState" -AUTO_UPGRADE_POLICY_OPTION_DOWNLOAD_MODE = "downloadMode" -AUTO_UPGRADE_POLICY_OPTION_DOWNLOAD_TIME = "downloadTime" -AUTO_UPGRADE_POLICY_OPTION_INSTALL_MODE = "installMode" -AUTO_UPGRADE_POLICY_OPTION_UPGRADE_INTERVAL = "upgradeInverval" -INTERVAL_DOWN_INSTALL = 120 # 下载安装的间隔 分钟 -INSTALL_RANDOM = 5 # 安装时间随机数范围0-INSTALL_RANDOM 分钟 -DOWNLOAD_RANDOM = 180 # 下载时间随机数范围0-DOWNLOAD_RANDOM 分钟 
- -class FeatureSwitch(Enum): - ON = 'on' - OFF = 'off' - -class DownloadMode(Enum): # 下载模式 - TIMING_DOWNLOAD = 'timing' # 定时下载 - MANUAL_DOWNLOAD = 'manual' # 手动下载 - -class InstallMode(Enum): # 安装模式 - TIMING_INSTALL = 'timing' # 定时安装 - MANUAL_INSTALL = 'manual' # 手动安装 - BEFORE_SHUTDOWN_INSTALL = 'bshutdown' # 关机前安装 - -class TimeElement(IntEnum): - TIME_HOUR = 0 - TIME_MINUTE = 1 - TIME_NUM = 2 - -## analytic unattended-upgrades-policy.conf end - +import subprocess +# from pytz import utc +from gettext import gettext as _ +import gettext +import apt +import apt_pkg +import json +#deprecated UNATTENDED_UPGRADE_CONFIG_FILE_PATH="/var/lib/unattended-upgrades/unattended-upgrade.conf" -UNATTENDED_UPGRADE_POLICY_FILE_PATH="/var/lib/unattended-upgrades/unattended-upgrades-policy.conf" -NOTIFICATION_PIPE = '/tmp/notification.pipe' UNATTENDED_UPGRADE_TIMESTAMP = "/var/lib/unattended-upgrades/unattended-upgrades-timestamp" -flag_file_list = ["/var/lib/unattended-upgrades/OTA_PKGS_TO_INSTALL",\ - "/var/lib/kylin-auto-upgrade/kylin-autoupgrade.conf","/tmp/notify.pid"] -def _setup_logging(options,logfile): - # ensure this is run only once - if len(logging.root.handlers) > 0: - return None - - # init the logging - # logdir = _get_logdir() - # logfile = os.path.join( - # logdir, - # apt_pkg.config.find( - # "Unattended-Upgrade::LogFile", - # # COMPAT only - # apt_pkg.config.find("APT::UnattendedUpgrades::LogFile", - # "unattended-upgrades.log"))) - # if not options.dry_run and not os.path.exists(logdir): - # os.makedirs(logdir) - - logging.basicConfig(level=logging.INFO, - format='%(asctime)s %(levelname)s %(message)s', - filename=logfile) - - # additional logging - logger = logging.getLogger() - # mem_log = StringIO() - # if options.apt_debug: - # apt_pkg.config.set("Debug::pkgProblemResolver", "1") - # apt_pkg.config.set("Debug::pkgDepCache::AutoInstall", "1") - if options.debug: - logger.setLevel(logging.DEBUG) - # stdout_handler = logging.StreamHandler(sys.stdout) - # logger.addHandler(stdout_handler) - elif options.verbose: - logger.setLevel(logging.INFO) - logging.getLogger('apscheduler').setLevel(logging.DEBUG) - stdout_handler = logging.StreamHandler(sys.stdout) - logger.addHandler(stdout_handler) - +UNATTENDED_UPGRADE_POLICY_FILE_PATH="/var/lib/unattended-upgrades/unattended-upgrades-policy.conf" +LOG_PATH = "/var/log/kylin-unattended-upgrades/unattended-upgrades-shutdown.log" +TIMESTAMP_PATH="/var/lib/kylin-software-properties/template/kylin-source-status" +ACTION_INSTALL = 1 +ACTION_CHECK_RESOLVER = 3 +ACTION_DOWNLOADONLY = 4 + def get_random_time(time_interval): + now = datetime.datetime.now() try: start_time = datetime.datetime.strptime(time_interval.split("-")[0],"%H:%M") end_time = datetime.datetime.strptime(time_interval.split("-")[1],"%H:%M") - now = datetime.datetime.now() start=datetime.datetime(now.year,now.month,now.day,start_time.hour,start_time.minute,0,0) end=datetime.datetime(now.year,now.month,now.day,end_time.hour,end_time.minute,0,0) time_diff = int((end-start).total_seconds()) @@ -165,100 +77,22 @@ def get_random_time(time_interval): time_diff=time_diff+86400 delta = random.randint(0,time_diff) actual_time = start+datetime.timedelta(seconds=delta) + time_diff = int((actual_time - now).total_seconds()) + if time_diff<0: + return actual_time+datetime.timedelta(seconds=86400) return actual_time except Exception as e: - logging.error(e) - - -def reload_options_config(): - #添加默认保留旧配置 - apt_pkg.config["DPkg::Options::"] = "--force-confold" - options_new = 
list(set(apt_pkg.config.value_list("DPkg::Options"))) - for option in ("--force-confnew","--force-confdef"): - if option in options_new: - options_new.remove(option) - #清除所有配置重新加载 - apt_pkg.config.clear("DPkg::Options") - for option in options_new: - apt_pkg.config["DPkg::Options::"] = option - #去除安装推荐和建议的软件包 - if apt_pkg.config.find_b("APT::Install-Recommends",False) == True: - apt_pkg.config.clear("APT::Install-Recommends") - if apt_pkg.config.find_b("APT::Install-Suggests",False) == True: - apt_pkg.config.clear("APT::Install-Suggests") - if apt_pkg.config.find("Dir::Etc::sourceparts","")!="": - apt_pkg.config["Dir::Etc::sourceparts"]="" - apt_pkg.init_system() - -def get_mavis_capacity(): - batterycapacity = 100 - try: - with open("/sys/class/power_supply/BAT1/capacity", 'r') as f: - batterycapacity = int(f.readline()) - except: - logging.error("no battery device") - return batterycapacity - - -def is_dpkg_journal_dirty(): - # type: () -> bool - """ - Return True if the dpkg journal is dirty - (similar to debSystem::CheckUpdates) - """ - logging.debug("checking whether dpkg journal is dirty") - d = os.path.join("/var/lib/dpkg/", - #os.path.dirname(apt_pkg.config.find_file("Dir::State::status")), - "updates") - for f in os.listdir(d): - if re.match("[0-9]+", f) or re.match("tmp.i",f): - return True - return False - -def get_abnormally_installed_pkg_count(): - output = subprocess.check_output('dpkg -l|grep ^i[^i]|wc -l',shell=True) - return output.decode().strip() - - -def GetDateTime(): - return datetime.datetime.now().replace(microsecond=0) - -def ReadOsRelease(file): - osreleasedict = {} - try: - with open(file) as f: - lines = f.readlines() - for line in lines: - ls = line.strip().split('=',1) - osreleasedict.update({ls[0]:ls[1].strip('"')}) - except Exception as e: - pass - if 'PROJECT_CODENAME' not in osreleasedict.keys(): - osreleasedict.update({'PROJECT_CODENAME':''}) - if 'SUB_PROJECT_CODENAME' not in osreleasedict.keys(): - osreleasedict.update({'SUB_PROJECT_CODENAME':''}) - return osreleasedict - -def FindRuningUnattendedUpgrades(): - if os.path.exists(PID_FILE): - pid = open(PID_FILE).readline().strip() - logging.info("runing unattended-upgrades pid:%s"%pid) - try: - ps = psutil.Process(int(pid)) - logging.debug("process name:%s,process status:%s"%(ps.name(),ps.status())) - return ps.is_running() - except Exception as e: - logging.error(e) - return False + logging.error(_("illegal time format:%s")%e) + return now+datetime.timedelta(seconds=random.randint(10,86400)) def ReadValueFromFile(file,section,option): config=configparser.ConfigParser(allow_no_value=True) config.optionxform = str - value = '' try: config.read(file) value = config[section][option] except Exception as e: + logging.error(_("read config file error:%s")%e) return '' return value @@ -268,246 +102,75 @@ def WriteValueToFile(file,section,option,value): config.add_section(section) config.set(section,option,value) config.write(open(file,"w")) - -def clean_flag_files(filelist): - WriteValueToFile(UNATTENDED_UPGRADE_CONFIG_FILE_PATH,"UNATTENDED_UPGRADE","autoupdate_run_status","idle") - for file in filelist: - if os.path.exists(file): - os.remove(file) -def init(): - if not os.path.exists(NOTIFICATION_PIPE): - os.mkfifo(NOTIFICATION_PIPE) -''' -def do_usplash(msg): - # type: (str) -> None - if os.path.exists("/sbin/usplash_write"): - logging.debug("Running usplash_write") - subprocess.call(["/sbin/usplash_write", "TEXT", msg]) - subprocess.call(["/sbin/usplash_write", "PULSATE"]) -''' - -def do_plymouth(msg): - # 
type: (str) -> None - if os.path.exists("/bin/plymouth"): - for line in msg.split("\n"): - logging.debug("Running plymouth --text") - subprocess.call(["/bin/plymouth", "message", "--text", line]) - -def do_plymouth_splash(): - if os.path.exists("/bin/plymouth"): - logging.debug("Running plymouth --splash") - subprocess.run(["/sbin/plymouthd", "--mode=shutdown","--attach-to-session"]) - #subprocess.run(["/sbin/plymouthd", "--mode=update","--attach-to-session"]) - #subprocess.run(["/bin/plymouth","--update=kylin update"]) - subprocess.Popen(["/bin/plymouth", "show-splash","--wait"]) - subprocess.call(["/bin/plymouth","system-update","--progress=0"]) - -def log_msg(msg, level=logging.WARN): - # type: (str, int) -> None - """ helper that will print msg to usplash, plymouth, console """ - logging.log(level, msg) - #do_plymouth(msg) - #do_usplash(msg) - - -def log_progress(): - # type: () -> None - """ helper to log the install progress (if any) """ - # wait a some seconds and try again - ''' - msg = _("Unattended-upgrade in progress during shutdown, " - "please don't turn off the computer") - ''' - # progress info - progress_file = PROGRESS_LOG - if os.path.exists(progress_file): - progress_text = open(progress_file).read() - logging.debug("progress text content %s"%progress_text) - if len(progress_text): - progress = int(float(progress_text)) - subprocess.call(["/bin/plymouth","system-update","--progress=%d"%progress]) - msg = "upgrage progress %s"%progress_text - log_msg(msg) - + def signal_term_handler(signal,frame): # type: (int, object) -> None logging.warning("SIGTERM received, will stop") os._exit(1) -def signal_stop_unattended_upgrade(): - """ send SIGTERM to running unattended-upgrade if there is any """ - pidfile = PID_FILE#"/var/run/unattended-upgrades.pid" - if os.path.exists(pidfile): - pid = int(open(pidfile).read()) - logging.debug("found running unattended-upgrades pid %s" % pid) - try: - os.kill(pid, signal.SIGTERM) - except ProcessLookupError: - logging.debug("sending SIGTERM failed because unattended-upgrades " - "already stopped") - -def exit_log_result(success): - if os.path.exists(PKGS_TO_INSTALL_FLAG_FILE): - os.remove(PKGS_TO_INSTALL_FLAG_FILE) - subprocess.call(["/bin/plymouth","system-update","--progress=100"]) - time.sleep(3) - subprocess.run(["/bin/plymouth","quit","--retain-splash"]) - if success: - logging.debug("All upgrades installed") - #log_msg(_("All upgrades installed"), logging.INFO) - os._exit(0) - #sys.exit(0) +def Predownload(): + logging.info(_("predownload task start")) + if unattended_upgrades_shutdown.Download(): + now = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") + unattended_upgrades_shutdown.update_timestamp('predownload',now) + return 0 else: - log_msg(_("Unattended-upgrades stopped. 
There may be upgrades" - " left to be installed in the next run."), logging.INFO) - os._exit(1) - #sys.exit(1) - -# 检查时间安全性 -- minute > 59; hour > 23; -def check_time_safety(inTime): - if inTime['m'] > 59 : - inTime['h'] = inTime['h'] + inTime['m']//60 - inTime['m'] = inTime['m']%60 - - if inTime['h'] > 23 : - inTime['h'] = inTime['h'] - 24 - - outTime = inTime - return outTime - -# 时间添加随机数 -def convert_time_by_random(inTime, inRandom): - diff = random.randint(0,inRandom) - inTime['h']=inTime['h'] + diff // 60 - inTime['m']=inTime['m'] + diff % 60 - outTime = check_time_safety(inTime) - return outTime - -class TimerThread(threading.Thread): - def __init__(self, scheduler): - threading.Thread.__init__(self) - self.scheduler = scheduler - def run(self): - self.scheduler.start() - -def empty_task(): - pass + return 1 -def task(task): - env = copy.copy(os.environ) - last_run_time = '2022-01-01 00:00:00' - config=configparser.ConfigParser(allow_no_value=True) - config.optionxform = str - config.read(UNATTENDED_UPGRADE_TIMESTAMP) - last_run_time = config['timestamp'][task] - logging.debug("%s timestamp:%s,"%(task,last_run_time)) - last_run_date = datetime.datetime.strptime(last_run_time,"%Y-%m-%d %H:%M:%S") - now = datetime.datetime.now() - duration = (now - last_run_date).days - update_interval = int(autoupgradepolicy.GetOptionValue('updateDays')) - cmd = "date" - if task in ["predownload","download"]: - cmd = "kylin-unattended-upgrade --download-only" - elif task == "install": - cmd = "kylin-unattended-upgrade --install-only --mode=timing" - elif task == "download_and_install": - cmd = "kylin-unattended-upgrade --download-and-install" - #do not check updgrade period when download and install - update_interval = 0 +def Download(): + logging.info(_("download task start")) + if unattended_upgrades_shutdown.Download(): + now = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") + unattended_upgrades_shutdown.update_timestamp('download',now) + return 0 else: - pass - if duration < update_interval: - logging.info("not long enough from last run") - return - ret = subprocess.run([cmd], shell=True,env=env) - logging.debug("task:%s return code:%d"%(task,ret.returncode)) - if ret.returncode == 0: - now = datetime.datetime.now() - config['timestamp'][task] = now.strftime("%Y-%m-%d %H:%M:%S") - with open(UNATTENDED_UPGRADE_TIMESTAMP,"w") as f: - config.write(f) - logging.info("successful run,new time stamp:%s"%now.strftime("%Y-%m-%d %H:%M:%S")) - return ret.returncode - -def timing_predownload(): - env = copy.copy(os.environ) - logging.debug("starting unattended-upgrades in pre-download mode") - pre_download_ret = subprocess.run(["kylin-unattended-upgrade","--download-only"], env=env) - if pre_download_ret.returncode == 0: - logging.debug("kylin-unattended-upgrade download success.") + return 1 + +def Install(): + logging.info(_("install task start")) + if unattended_upgrades_shutdown.Install(): + now = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") + unattended_upgrades_shutdown.update_timestamp('install',now) + return 0 else: - logging.debug("kylin-unattended-upgrade download %d .",pre_download_ret.returncode) - -def timing_download(): - env = copy.copy(os.environ) - logging.debug("starting unattended-upgrades in timing download mode") - timing_download_ret = subprocess.run(["kylin-unattended-upgrade","--download-only"], env=env) - if timing_download_ret.returncode == 0: - logging.debug("kylin-unattended-upgrade download success.") + return 1 + +def Upgrade(): + logging.info(_("upgrade task 
start")) + if unattended_upgrades_shutdown.Install(): + return 0 else: - logging.debug("kylin-unattended-upgrade download %d .",timing_download_ret.returncode) + return 1 -def timing_install(): - env = copy.copy(os.environ) - logging.debug("starting unattended-upgrades in timing install mode") - timing_install_ret = subprocess.run(["kylin-unattended-upgrade","--install-only","--mode=timing"], env=env) - if timing_install_ret.returncode == 0: - logging.debug("kylin-unattended-upgrade install success.") - else: - logging.debug("kylin-unattended-upgrade install %d .",timing_install_ret.returncode) - -def background_scheduler_init(background_scheduler): - - background_scheduler.start() - - random_time = get_random_time(autoupgradepolicy.GetOptionValue('downloadTime')) - background_scheduler.add_job(task,'cron', args=['download'],id='download', \ - hour = random_time.hour,minute = random_time.minute,replace_existing=True) - - instime = autoupgradepolicy.GetOptionValue('installTime') - # random_time = get_random_time("%s-%s"%(instime,instime)) - random_time = get_random_time(instime) - background_scheduler.add_job(task,'cron', args=['install'],id='install', \ - hour = random_time.hour,minute = random_time.minute,replace_existing=True) - - random_time = get_random_time(autoupgradepolicy.GetOptionValue('preDownloadTime')) - background_scheduler.add_job(task,'cron', args=['predownload'],id='predownload', \ - hour = random_time.hour,minute = random_time.minute,replace_existing=True) - - if autoupgradepolicy.GetOptionValue('autoUpgradeState') == 'on': - if autoupgradepolicy.GetOptionValue('downloadMode') != 'timing': - background_scheduler.pause_job('download') - if autoupgradepolicy.GetOptionValue('installMode') != 'timing': - background_scheduler.pause_job('install') - else: - background_scheduler.pause_job('download') - background_scheduler.pause_job('install') - - if autoupgradepolicy.GetOptionValue('preDownload') != 'on': - background_scheduler.pause_job('predownload') - - - joblist = background_scheduler.get_jobs() - - for job in joblist: - logging.debug("job:%s,next run time:%s"%(job.id,job.next_run_time)) - - -class KylinSystemUpdater: +class FetchProgress(AcquireProgress): def __init__(self) -> None: - DBusGMainLoop(set_as_default=True) - self.loop = GLib.MainLoop() - self.system_bus = dbus.SystemBus() - self.update_proxy = self.system_bus.get_object('com.kylin.systemupgrade','/com/kylin/systemupgrade',follow_name_owner_changes=True) - self.update_interface = dbus.Interface(self.update_proxy,dbus_interface='com.kylin.systemupgrade.interface') - - def GetConfigValue(self,section,value): - return self.update_interface.GetConfigValue(section,value) - - def SetConfigValue(self,section,option,value): - return self.update_interface.SetConfigValue(section,option,value) - - + super().__init__() + + def fetch(self, item: apt_pkg.AcquireItemDesc) -> None: + logging.debug("%s [%d%%]"%(item.description,self.current_bytes*100/self.total_bytes)) + return super().fetch(item) + + def fail(self, item: apt_pkg.AcquireItemDesc) -> None: + logging.error("package fetch failed:%s"%item.description) + return super().fail(item) + + def ims_hit(self, item: apt_pkg.AcquireItemDesc) -> None: + return super().ims_hit(item) + + def media_change(self, media: str, drive: str) -> bool: + return super().media_change(media, drive) + + def pulse(self, owner: apt_pkg.Acquire) -> bool: + return super().pulse(owner) + + def start(self) -> None: + logging.info("download start") + return super().start() + + def stop(self) -> None: 
+ logging.info("download finished") + return super().stop() + class AutoUpgradePolicy(): def __init__(self) -> None: self.autoupgradepolicy = {} @@ -517,9 +180,51 @@ class AutoUpgradePolicy(): config.read(UNATTENDED_UPGRADE_POLICY_FILE_PATH) for option in config.options('autoUpgradePolicy'): self.autoupgradepolicy.update({option:config['autoUpgradePolicy'][option]}) + logging.info(_("auto upgrade policy:")) for key in self.autoupgradepolicy.keys(): - logging.debug("%s:%s"%(key,self.autoupgradepolicy[key])) - + logging.debug("%s:%s"%(key,self.autoupgradepolicy[key])) + self.timestamp = {} + if os.path.exists(UNATTENDED_UPGRADE_TIMESTAMP): + logging.info(_("loading update time stamp")) + config=configparser.ConfigParser(allow_no_value=True) + config.optionxform = str + config.read(UNATTENDED_UPGRADE_TIMESTAMP) + if 'TimeStamp' in config.sections(): + for option in config.options('TimeStamp'): + self.timestamp.update({option:config.get('TimeStamp',option)}) + logging.info(_("last run time:")) + for key in self.timestamp: + logging.info("%s:%s"%(key,self.timestamp[key])) + + def UpdatePolicy(self): + if os.path.exists(UNATTENDED_UPGRADE_POLICY_FILE_PATH): + config=configparser.ConfigParser(allow_no_value=True) + config.optionxform = str + config.read(UNATTENDED_UPGRADE_POLICY_FILE_PATH) + for option in config.options('autoUpgradePolicy'): + self.autoupgradepolicy.update({option:config['autoUpgradePolicy'][option]}) + logging.info(_("auto upgrade policy:")) + for key in self.autoupgradepolicy.keys(): + logging.debug("%s:%s"%(key,self.autoupgradepolicy[key])) + + def UpdateTimeStamp(self,task_id,last_run_time): + logging.debug(_("update timestamp:%s %s")%(task_id,last_run_time)) + self.timestamp.update({task_id:last_run_time}) + if os.path.exists(UNATTENDED_UPGRADE_TIMESTAMP): + config=configparser.ConfigParser(allow_no_value=True) + config.optionxform = str + config.read(UNATTENDED_UPGRADE_TIMESTAMP) + if 'TimeStamp' in config.sections(): + config.set('TimeStamp',task_id,last_run_time) + with open(UNATTENDED_UPGRADE_TIMESTAMP,'w') as f: + config.write(f) + + def GetTimeStamp(self,task_id): + if task_id in self.timestamp.keys(): + return self.timestamp[task_id] + else: + return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") + def SetOptionValue(self,option,value): self.autoupgradepolicy.update({option:value}) @@ -528,890 +233,543 @@ class AutoUpgradePolicy(): return self.autoupgradepolicy[option] except Exception: return '' - - def ExecutePolicy(self,property,value): - if property == 'autoUpgradeState': - if value == 'off': - background_scheduler.pause_job('download') - background_scheduler.pause_job('install') - elif value == 'on': - if self.autoupgradepolicy['downloadMode'] == 'timing': - background_scheduler.resume_job('download') - if self.autoupgradepolicy['installMode'] == 'timing': - background_scheduler.resume_job('install') - else: - pass - elif property == 'downloadMode': - if value == 'timing': - if self.autoupgradepolicy['autoUpgradeState'] == 'on': - background_scheduler.resume_job('download') - elif value == 'manual': - background_scheduler.pause_job('download') - else: - pass - elif property == 'downloadTime': - if self.autoupgradepolicy['autoUpgradeState'] == 'on' and \ - self.autoupgradepolicy['downloadMode'] == 'timing': - random_time = get_random_time(value) - background_scheduler.reschedule_job('download',trigger='cron',hour=random_time.hour,minute = random_time.minute) - elif property == 'installMode': - if value == 'timing': - if 
self.autoupgradepolicy['autoUpgradeState'] == 'on': - background_scheduler.resume_job('install') - elif value == 'manual': - background_scheduler.pause_job('install') - elif value == 'bshutdown': - background_scheduler.pause_job('install') - else: - pass - elif property == 'installTime': - if self.autoupgradepolicy['autoUpgradeState'] == 'on' and \ - self.autoupgradepolicy['installMode'] == 'timing': - random_time = get_random_time(value) - background_scheduler.reschedule_job('install',trigger='cron',hour=random_time.hour,minute = random_time.minute) - elif property == 'preDownload': - if value == 'off': - background_scheduler.pause_job('predownload') - elif value == 'on': - background_scheduler.resume_job('predownload') - else: - pass - elif property == 'preDownloadTime': - if self.autoupgradepolicy['preDownload'] == 'on': - random_time = get_random_time(value) - background_scheduler.reschedule_job('predownload',trigger='cron',hour=random_time.hour,minute = random_time.minute) - else: - pass - - + class UnattendedUpgradesShutdown(): - # 加载配置文件 unattended-upgrades-policy.conf - def loadcfg(self): - if os.path.isfile(UNATTENDED_UPGRADE_POLICY_FILE_PATH): - self.preDownload = ReadValueFromFile(UNATTENDED_UPGRADE_POLICY_FILE_PATH, POLICY_CONF_SECTION_AUTO_UPGRADE_POLICY, AUTO_UPGRADE_POLICY_OPTION_PREDOWNLOAD) - self.autoUpgrade = ReadValueFromFile(UNATTENDED_UPGRADE_POLICY_FILE_PATH, POLICY_CONF_SECTION_AUTO_UPGRADE_POLICY, AUTO_UPGRADE_POLICY_OPTION_AUTOUPGRADE) - self.download_mode = ReadValueFromFile(UNATTENDED_UPGRADE_POLICY_FILE_PATH, POLICY_CONF_SECTION_AUTO_UPGRADE_POLICY, AUTO_UPGRADE_POLICY_OPTION_DOWNLOAD_MODE) - self.install_mode = ReadValueFromFile(UNATTENDED_UPGRADE_POLICY_FILE_PATH, POLICY_CONF_SECTION_AUTO_UPGRADE_POLICY, AUTO_UPGRADE_POLICY_OPTION_INSTALL_MODE) - download_time = ReadValueFromFile(UNATTENDED_UPGRADE_POLICY_FILE_PATH, POLICY_CONF_SECTION_AUTO_UPGRADE_POLICY, AUTO_UPGRADE_POLICY_OPTION_DOWNLOAD_TIME) - self.download_random = int(kylin_system_updater.GetConfigValue('AutoUpgradeConfig','downloadRandom')[1]) - self.upgrade_interval = int(kylin_system_updater.GetConfigValue('AutoUpgradeConfig','upgradeInterval')[1]) - logging.info("download random:%s,upgrade interval:%s"%(self.download_random,self.upgrade_interval)) - # upgradeInterval = int(ReadValueFromFile(UNATTENDED_UPGRADE_POLICY_FILE_PATH, POLICY_CONF_SECTION_AUTO_UPGRADE_POLICY, 'upgradeInverval')) - ''' - if os_release_info['PROJECT_CODENAME'] == 'V10SP1-edu' and os_release_info['SUB_PROJECT_CODENAME']=='mavis': - self.download_time['h'] = 10 - self.download_time['m'] = 0 - self.download_time_r = convert_time_by_random(self.download_time, 5) - logging.debug("upgrade time: [%d] [%d] predown[%s] autoupgrade[%s] d-mode[%s] i-mode[%s]", - self.download_time_r['h'], self.download_time_r['m'],self.preDownload, self.autoUpgrade, \ - self.download_mode, self.install_mode) - return - ''' - timelist = download_time.strip().split(':') - if len(timelist) != TimeElement.TIME_NUM: - logging.debug("unattended-upgrades-policy.conf time err %s",download_time) - return - # 检查 传入时间 安全性 - try: - tmphour = int(timelist[TimeElement.TIME_HOUR]) - except ValueError: - logging.debug("unattended-upgrades-policy.conf download_time h error") - return - try: - tmpminute = int(timelist[TimeElement.TIME_MINUTE]) - except ValueError: - logging.debug("unattended-upgrades-policy.conf download_time m error") - return - - self.download_time['h'] = tmphour - self.download_time['m'] = tmpminute - self.download_time_r = 
convert_time_by_random(self.download_time, self.download_random) - self.install_time['h'] = self.download_time_r['h'] - self.install_time['m'] = self.download_time_r['m'] + INTERVAL_DOWN_INSTALL - self.install_time_r = convert_time_by_random(self.install_time, INSTALL_RANDOM) - logging.debug("upgrade time: [%d:%d] [%d:%d] predown[%s] autoupgrade[%s] d-mode[%s] i-mode[%s]", - self.download_time_r['h'], self.download_time_r['m'],self.install_time_r['h'],self.install_time_r['m'], - self.preDownload, self.autoUpgrade, self.download_mode, self.install_mode) - else: - logging.debug("unattended-upgrades-policy.conf not exist") - def __init__(self, options): # type: (Values) -> None self.options = options - self.max_delay = options.delay * 60 - self.mainloop = GLib.MainLoop() - self.iter_timer_set = False - self.apt_pkg_reinit_done = None - self.shutdown_pending = False - self.on_shutdown_mode = None - self.on_shutdown_mode_uu_proc = None - self.start_time = None - self.lock_was_taken = False - self.signal_sent = False - self.stop_signal_received = Event() - ''' - self.download_mode = DownloadMode.TIMING_DOWNLOAD.value #下载模式 - self.install_mode = InstallMode.TIMING_INSTALL.value #安装模式 - self.download_time = {'h':9, 'm':0} #定时下载时间 09:00 - self.install_time = {'h':12, 'm':0} #定时安装时间 12:00 - self.download_time_r = convert_time_by_random(self.download_time, DOWNLOAD_RANDOM) #随机化定时下载时间 - self.install_time_r = convert_time_by_random(self.install_time, INSTALL_RANDOM) #随机化定时安装时间 - self.preDownload = 'off' #预下载开关 - self.autoUpgrade = 'off' #自动更新开关 - self.download_job = None - self.install_job = None - self.startup_download_job = None - self.scheduler = BlockingScheduler() - ''' - + self.init_events_flags() + self.init_policy_config() + self.init_scheduler() + self.init_dbus_connections() + + def init_events_flags(self): + logging.info(_("init events and flags")) + self.update_detect_status = False + self.update_detect_event = Event() + self.update_list = [] + self.resolve_depend_status = False + self.resolve_depend_status_event = Event() + self.remove_pkgs = [] + self.install_finish_status = False + self.install_finish_status_event = Event() + self.install_finish_group = [] + self.backup_start_result = False + self.backup_start_event = Event() + self.backup_finish_result = False + self.backup_finish_event = Event() + + def init_policy_config(self): + logging.info(_("init policy config")) + self.autoupgradepolicy = AutoUpgradePolicy() + + def init_scheduler(self): + jobstores = {'default': MemoryJobStore()} + executors = {'default':ThreadPoolExecutor(1),'processpool':ProcessPoolExecutor(1)} + job_defaults = {'misfire_grace_time':3600,'coalesce':True,'max_instances':1} + self.background_scheduler = BackgroundScheduler(jobstores=jobstores,executors=executors,job_defaults=job_defaults,timezone="Asia/Shanghai") + updatedays = 1 try: - hasattr(GLib, "MainLoop") - DBusGMainLoop(set_as_default=True) - except NameError: - logging.error("DBusGMainLoop error") - pass - + updatedays = int(self.autoupgradepolicy.GetOptionValue('updateDays')) + except Exception as e: + logging.error(_("get update days error:%s")%e) try: - self.inhibit_lock = self.get_inhibit_shutdown_lock() - except dbus.exceptions.DBusException: - logging.warning("Could not get delay inhibitor lock") - self.inhibit_lock = None - self.logind_proxy = None - self.update_proxy = None - self.upgrade_strategy_proxy = None - self.wait_period = min(3, self.get_inhibit_max_delay() / 3) - self.preparing_for_shutdown = False - #self.loadcfg() - - def 
get_upgrade_strategy_proxy(self): - if not self.upgrade_strategy_proxy: - bus = dbus.SystemBus() - self.upgrade_strategy_proxy = bus.get_object('com.kylin.UpgradeStrategies','/com/kylin/UpgradeStrategies',follow_name_owner_changes=True) - return self.upgrade_strategy_proxy - - def get_upgrade_strategy_interface(self): - self.upgrade_strategy_interface = dbus.Interface(self.upgrade_strategy_proxy,dbus_interface='com.kylin.UpgradeStrategies.interface') - - def get_update_proxy(self): - if not self.update_proxy: - bus = dbus.SystemBus() - self.update_proxy = bus.get_object('com.kylin.systemupgrade','/com/kylin/systemupgrade',follow_name_owner_changes=True) - return self.update_proxy - - - def get_update_interface(self): + if self.autoupgradepolicy.GetOptionValue('autoUpgradeState') == 'on': + if self.autoupgradepolicy.GetOptionValue('downloadMode') == 'timing': + random_time = self.get_next_run_time('download') + self.background_scheduler.add_job(Download,trigger='interval',days=updatedays,\ + start_date=random_time,id='download',replace_existing=True) + if self.autoupgradepolicy.GetOptionValue('installMode') == 'timing': + random_time = self.get_next_run_time('install') + self.background_scheduler.add_job(Install,trigger='interval',days=updatedays,\ + start_date=random_time,id='install',replace_existing=True) + if self.autoupgradepolicy.GetOptionValue('preDownload') == 'on': + random_time = self.get_next_run_time('predownload') + self.background_scheduler.add_job(Predownload,trigger='interval',days=updatedays,\ + start_date=random_time,id='predownload',replace_existing=True) + except Exception as e: + logging.error(_("job initial error:%s")%e) + self.background_scheduler.start() + with open(LOG_PATH,'a+') as f: + self.background_scheduler.print_jobs(out=f) + joblist = self.background_scheduler.get_jobs() + for job in joblist: + logging.debug(_("initial job:%s,next run time:%s")%(job.id,job.next_run_time)) + + def load_systemupgrade_dbus_config(self): + logging.debug(_("init system upgrade dbus connections")) + self.update_proxy = self.system_bus.get_object('com.kylin.systemupgrade','/com/kylin/systemupgrade',follow_name_owner_changes=True) self.update_interface = dbus.Interface(self.update_proxy,dbus_interface='com.kylin.systemupgrade.interface') - return self.update_interface - ''' - def set_max_inhibit_time(self,time): - login_proxy = self.get_logind_proxy() - #首先设置systemd默认延长时间为1800 + self.update_proxy.connect_to_signal('UpdateDetectFinished',self.update_detect_finished_handler) + self.update_proxy.connect_to_signal('UpdateFixBrokenStatus',self.update_fix_broken_status) + self.update_proxy.connect_to_signal('UpdateDependResloveStatus',self.update_depend_resolve_status) + self.update_proxy.connect_to_signal('UpdateDloadAndInstStaChanged',self.update_download_install_status) + self.update_proxy.connect_to_signal('UpdateInstallFinished',self.update_install_finished) + self.update_proxy.connect_to_signal('ChangeUpgradePolicy',self.change_upgrade_policy) + + def load_strategy_dbus_config(self): + logging.debug(_("init strategy dbus connections")) + self.upgrade_strategy_proxy = self.system_bus.get_object('com.kylin.UpgradeStrategies','/com/kylin/UpgradeStrategies',follow_name_owner_changes=True) + self.upgrade_strategy_interface = dbus.Interface(self.upgrade_strategy_proxy,dbus_interface='com.kylin.UpgradeStrategies.interface') + self.upgrade_strategy_proxy.connect_to_signal("PropertyChanged",self.property_changed_handler) + 
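# Illustrative sketch (not part of the patch): the load_*_dbus_config helpers in this
# class all follow the same dbus-python recipe -- install the GLib main loop before the
# bus connection is created, grab a proxy with follow_name_owner_changes=True so a
# restarted daemon keeps delivering signals, and hook handlers up with
# connect_to_signal().  The snippet below shows that recipe against
# org.freedesktop.login1, a service present on any systemd machine, purely as a
# stand-in for the com.kylin.* services used here.
import dbus
from dbus.mainloop.glib import DBusGMainLoop
from gi.repository import GLib

def on_prepare_for_shutdown(active):
    # dbus-python hands the signal arguments to the handler positionally, exactly like
    # the update_*_handler methods in this class.
    print("PrepareForShutdown(%s)" % active)

DBusGMainLoop(set_as_default=True)          # must be set before dbus.SystemBus()
bus = dbus.SystemBus()
proxy = bus.get_object('org.freedesktop.login1', '/org/freedesktop/login1',
                       follow_name_owner_changes=True)
proxy.connect_to_signal('PrepareForShutdown', on_prepare_for_shutdown,
                        dbus_interface='org.freedesktop.login1.Manager')
GLib.MainLoop().run()                       # signals are only delivered while a loop runs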
self.upgrade_strategy_proxy.connect_to_signal("UpgradeAllNow",self.upgrade_all_now_handler)
+
+    def load_backup_dbus_config(self):
+        logging.debug(_("init backup dbus connections"))
+        self.backup_proxy = self.system_bus.get_object('com.kylin.backup','/',follow_name_owner_changes=True)
+        self.backup_interface = dbus.Interface(self.backup_proxy,dbus_interface='com.kylin.backup.manager')
+        self.backup_proxy.connect_to_signal('sendStartBackupResult',self.backup_start_handler)
+        self.backup_proxy.connect_to_signal('sendBackupResult',self.backup_result_handler)
+        self.backup_proxy.connect_to_signal('sendRate',self.send_rate_handler)
+
+    def load_sys_dbus_config(self):
+        logging.debug(_("load sys dbus config"))
+        self.sys_proxy = self.system_bus.get_object('org.freedesktop.DBus','/org/freedesktop/DBus')
+        self.sys_proxy.connect_to_signal('NameOwnerChanged',self.name_owner_changed)
+
+    def init_dbus_connections(self):
+        logging.debug(_("loading dbus configuration..."))
+        DBusGMainLoop(set_as_default=True)
+        self.loop = GLib.MainLoop()
+        self.system_bus = dbus.SystemBus()
+        self.load_sys_dbus_config()
+        self.load_systemupgrade_dbus_config()
+        self.load_strategy_dbus_config()
+        self.load_backup_dbus_config()
+
+    def name_owner_changed(self,busname,oldname,newname):
+        if (busname == 'com.kylin.UpgradeStrategies'):
+            logging.debug("name owner changed:%s,%s,%s"%(busname,oldname,newname))
+        elif(busname == 'com.kylin.systemupgrade'):
+            logging.debug("name owner changed:%s,%s,%s"%(busname,oldname,newname))
+        elif(busname == 'com.kylin.backup'):
+            logging.debug("name owner changed:%s,%s,%s"%(busname,oldname,newname))
+        else:
+            pass
+
+    def change_upgrade_policy(self):
+        logging.debug("change upgrade policy")
+        self.autoupgradepolicy.UpdatePolicy()
+        autoupgradestate = self.autoupgradepolicy.GetOptionValue('autoUpgradeState')
+        self.property_changed_handler('autoUpgradeState',autoupgradestate)
+
+    def property_changed_handler(self,property, value):
+        logging.info(_("property change:%s:%s")%(property,value))
+        self.autoupgradepolicy.SetOptionValue(property,value)
+        self.ExecutePolicy(property,value)
+        with open(LOG_PATH,'a+') as f:
+            self.background_scheduler.print_jobs(out=f)
+        joblist = self.background_scheduler.get_jobs()
+        for job in joblist:
+            logging.debug(_("job:%s,next run time:%s")%(job.id,job.next_run_time))
+
+    def upgrade_all_now_handler(self):
+        logging.info(_("upgrade all now signal received"))
+        delta = random.randint(0,int(self.autoupgradepolicy.GetOptionValue('randomRange')))
+        run_date = datetime.datetime.now() + datetime.timedelta(minutes=delta)
+        self.background_scheduler.add_job(Upgrade,'date', run_date = run_date,id="upgrade",\
+            max_instances=1,replace_existing=True)
+        with open(LOG_PATH,'a+') as f:
+            self.background_scheduler.print_jobs(out=f)
+        joblist = self.background_scheduler.get_jobs()
+        for job in joblist:
+            logging.debug(_("job:%s,next run time:%s")%(job.id,job.next_run_time))
+
+    def update_detect_finished_handler(self,success,updatelist,error_status,error_cause):
+        logging.info(_("update detect finished:success:%s,updatelist:%s,error_status:%s,error_cause:%s")\
+            %(success,",".join(updatelist),error_status,error_cause))
+        self.update_detect_status = success
+        self.update_list = []
+        if success:
+            try:
+                for update_group in updatelist:
+                    json_file_path = ("/var/lib/kylin-system-updater/json/%s.json"%(update_group))
+                    if os.path.exists(json_file_path):
+                        with open(json_file_path,'r') as f:
+                            data = json.load(f)
+                            for key in data['upgrade_list'].keys():
+                                if key in ["total_download_size","total_install_size"]:
+                                    pass
+                                else:
+                                    self.update_list.append(key)
+                            for key in data['install_list'].keys():
+                                if key in ["total_download_size","total_install_size"]:
+                                    pass
+                                else:
+                                    self.update_list.append(key)
+            except Exception as e:
+                logging.error(e)
+        self.update_detect_event.set()
+
+    def update_fix_broken_status(self,resolver_status,remove_status,remove_pkgs,pkg_raw_description,delete_desc,error_string,error_desc):
+        logging.info(_("update fix broken status:resolver_status:%s,remove_status:%s,error_string:%s,error_desc:%s")%(resolver_status,remove_status,error_string,error_desc))
+        #logging.info(remove_pkgs,pkg_raw_description,delete_desc)
+        self.update_detect_status = False
+        self.update_list = []
+        self.update_detect_event.set()
+
+    def update_depend_resolve_status(self,resolver_status,remove_status,remove_pkgs,pkg_raw_description,delete_description,error_string,error_desc):
+        logging.info(_("update depend resolve status:%s,remove status:%s,remove pkgs:%s,pkg raw description:%s,delete_description:%s,error string:%s,error desc:%s")\
+            %(resolver_status,remove_status,",".join(remove_pkgs),",".join(pkg_raw_description),",".join(delete_description),error_string,error_desc))
+        self.resolve_depend_status = resolver_status
+        self.remove_pkgs = remove_pkgs
+        self.resolve_depend_status_event.set()
+
+    def update_download_install_status(self,group,progress,status,details):
+        logging.debug(_("%s update progress:%d,status:%s,details:%s")%(",".join(group),progress,status,details))
+
+    def update_install_finished(self,success,group,error_string,error_desc):
+        logging.info(_("update install finish success:%s,group:%s,error string:%s,error desc:%s")\
+            %(success,",".join(group),error_string,error_desc))
+        self.install_finish_status = success
+        self.install_finish_group = group
+        self.install_finish_status_event.set()
+
+    def backup_start_handler(self,result):
+        logging.debug(_("backup start result:%d")%result)
+        if result == 30:
+            self.backup_start_result = True
+        else:
+            self.backup_start_result = False
+            self.backup_finish_event.set()
+        self.backup_start_event.set()
+
+    def backup_result_handler(self,result):
+        logging.debug(_("backup result:%s")%result)
+        if result:
+            self.backup_finish_result = True
+        else:
+            self.backup_finish_result = False
+        self.backup_finish_event.set()
+
+    def send_rate_handler(self,sta,pro):
+        logging.debug(_("backup status:%d,progress:%d")%(sta,pro))
+
+    def run(self):
+        if self.options.wait_for_signal:
+            logging.debug(_("Waiting for signal to start operation"))
+            self.loop.run()
+        elif self.options.download_only:
+            logging.debug(_("running a download job"))
+            return self.Download()
+        elif self.options.install_only:
+            logging.debug(_("running an install job"))
+            return self.Install()
+        elif self.options.upgrade:
+            logging.debug(_("running an upgrade job"))
+            return self.Install()
+        else:
+            logging.info(_("illegal options"))
+            return True
+
+    def print_jobs(self):
+        with open(LOG_PATH,'a+') as f:
+            self.background_scheduler.print_jobs(out=f)
+
+    def list_jobs(self):
+        joblist = self.background_scheduler.get_jobs()
+        for job in joblist:
+            logging.debug(_("job:%s,next run time:%s")%(job.id,job.next_run_time))
+
+    def remove_job(self,job_id):
+        if self.background_scheduler.get_job(job_id):
+            self.background_scheduler.remove_job(job_id)
+
+    def update_timestamp(self,task_id,timestamp):
+        self.autoupgradepolicy.UpdateTimeStamp(task_id,timestamp)
+
+    def get_next_run_time(self,task_id):
+        now = datetime.datetime.now()
         try:
-            getter_interface = dbus.Interface(
-
login_proxy, - dbus_interface='org.freedesktop.login1.Manager') - ret = getter_interface.SetInhibitDelayMaxSec(time) + option = 'downloadTime' + if task_id == 'download': + option = 'downloadTime' + elif task_id == 'install': + option = 'installTime' + elif task_id == 'predownload': + option = 'preDownloadTime' + else: + pass + last_run_date=datetime.datetime.strptime(self.autoupgradepolicy.GetTimeStamp(task_id),"%Y-%m-%d %H:%M:%S") + next_run_date=last_run_date+datetime.timedelta(days=float(self.autoupgradepolicy.GetOptionValue('updateDays'))) + time_interval = self.autoupgradepolicy.GetOptionValue(option) + start_time = datetime.datetime.strptime(time_interval.split("-")[0],"%H:%M") + end_time = datetime.datetime.strptime(time_interval.split("-")[1],"%H:%M") + start=datetime.datetime(next_run_date.year,next_run_date.month,next_run_date.day,start_time.hour,start_time.minute,0,0) + end=datetime.datetime(next_run_date.year,next_run_date.month,next_run_date.day,end_time.hour,end_time.minute,0,0) + time_diff = int((end-start).total_seconds()) + delta=0 + if time_diff<0: + delta = random.randint(time_diff,0) + else: + delta = random.randint(0,time_diff) + nextdate = start+datetime.timedelta(seconds=delta) + if ((now-nextdate).total_seconds())>0: + return datetime.datetime(now.year,now.month,now.day,nextdate.hour,nextdate.minute,nextdate.second,0) + else: + return nextdate + except Exception as e: + logging.error(_("illegal time format:%s")%e) + return now+datetime.timedelta(seconds=random.randint(0,86400)) + + def ExecutePolicy(self,property,value): + updatedays = 1 + try: + updatedays = int(self.autoupgradepolicy.GetOptionValue('updateDays')) except Exception as e: logging.error(e) - ''' - - def get_logind_proxy(self): - """ Get logind dbus proxy object """ - if not self.logind_proxy: - bus = dbus.SystemBus() - if self.inhibit_lock is None: - # try to get inhibit_lock or throw exception quickly when - # logind is down - self.inhibit_lock = self.get_inhibit_shutdown_lock() - self.logind_proxy = bus.get_object( - 'org.freedesktop.login1', '/org/freedesktop/login1',follow_name_owner_changes=True) - return self.logind_proxy - - def get_inhibit_shutdown_lock(self): - """ Take delay inhibitor lock """ - bus = dbus.SystemBus() - return bus.call_blocking( - 'org.freedesktop.login1', '/org/freedesktop/login1', - 'org.freedesktop.login1.Manager', 'Inhibit', 'ssss', - ('shutdown', 'Unattended Upgrades Shutdown', - _('Stop ongoing upgrades or perform upgrades before shutdown'), - 'delay'), timeout=2.0) - - def get_inhibit_max_delay(self): - try: - logind_proxy = self.get_logind_proxy() - getter_interface = dbus.Interface( - logind_proxy, - dbus_interface='org.freedesktop.DBus.Properties') - return (getter_interface.Get( - "org.freedesktop.login1.Manager", "InhibitDelayMaxUSec") - / (1000 * 1000)) - except dbus.exceptions.DBusException: - return 3 - ''' - def is_preparing_for_shutdown(self): - if not self.shutdown_pending: - try: - logind_proxy = self.get_logind_proxy() - getter_interface = dbus.Interface( - logind_proxy, - dbus_interface='org.freedesktop.DBus.Properties') - self.shutdown_pending = getter_interface.Get( - "org.freedesktop.login1.Manager", "PreparingForShutdown") - except dbus.exceptions.DBusException: - return False - return self.shutdown_pending - ''' - def start_iterations(self): - while self.iter(): - time.sleep(1) - ''' - if not self.iter_timer_set: - try: - GLib.timeout_add(self.wait_period * 1000, self.iter) - # schedule first iteration immediately - GLib.timeout_add(0, lambda: 
self.iter() and False) - except NameError: - pass - ''' - return True - ''' - def run_polling(self, signal_handler): - logging.warning( - _("Unable to monitor PrepareForShutdown() signal, polling " - "instead.")) - logging.warning( - _("To enable monitoring the PrepareForShutdown() signal " - "instead of polling please install the python3-gi package")) - - signal.signal(signal.SIGTERM, signal_handler) - signal.signal(signal.SIGHUP, signal_handler) - - # poll for PrepareForShutdown then run final iterations - if self.options.wait_for_signal: - logging.debug("Waiting for signal to start operation ") - while (not self.stop_signal_received.is_set() - and not self.is_preparing_for_shutdown()): - self.stop_signal_received.wait(self.wait_period) - else: - logging.debug("Skip waiting for signals, starting operation " - "now") - while not self.iter(): - # TODO iter on sigterm and sighup, too - time.sleep(self.wait_period) - - # 定时下载 执行函数 - def timing_download(self): - env = copy.copy(os.environ) - logging.debug("starting unattended-upgrades in timing download mode") - timing_download_ret = subprocess.run(["kylin-unattended-upgrade","--download-only"], env=env) - if timing_download_ret.returncode == 0: - logging.debug("kylin-unattended-upgrade download success.") - else: - logging.debug("kylin-unattended-upgrade download %d .",timing_download_ret.returncode) - - # 定时安装 执行函数 - def timing_install(self): - env = copy.copy(os.environ) - logging.debug("starting unattended-upgrades in timing install mode") - timing_install_ret = subprocess.run(["kylin-unattended-upgrade","--install-only"], env=env) - if timing_install_ret.returncode == 0: - logging.debug("kylin-unattended-upgrade install success.") - else: - logging.debug("kylin-unattended-upgrade install %d .",timing_install_ret.returncode) - - - def _wait_for_unattended_upgrade_finish(self): - max_wait_time = 300 - wait_time = 0 - #read unattended-upgrade status - status = ReadValueFromFile(UNATTENDED_UPGRADE_CONFIG_FILE_PATH,"UNATTENDED_UPGRADE","autoupdate_run_status") - while (status != "idle"): - ReadValueFromFile(UNATTENDED_UPGRADE_CONFIG_FILE_PATH,"UNATTENDED_UPGRADE","autoupdate_run_status") - time.sleep(1) - wait_time += 1 - if wait_time >max_wait_time: - logging.info("wait for uu time out") - return - return 0 - - def _pause_timer(self): - if self.download_job is not None: - self.download_job.pause() - - if self.install_job is not None: - self.install_job.pause() - - def _resume_timer(self): - if self.download_job is not None: - self.download_job.resume() - - if self.install_job is not None: - self.install_job.resume() - ''' - def run(self): - """ delay shutdown and wait for PrepareForShutdown or other signals""" - # if os_release_info['PROJECT_CODENAME'] == 'V10SP1-edu' and os_release_info['SUB_PROJECT_CODENAME']=='mavis': - # pass - # elif time.time() - float(time_stamp) < float(upgrade_interval): - # time_str1 = time.strftime("%Y-%m-%d %H:%M:%S",time.localtime(float(time_stamp))) - # time_str2 = time.strftime("%Y-%m-%d %H:%M:%S",time.localtime(time.time())) - # logging.info("upgrade interval not satisfied:%s-%s"%(time_str1,time_str2)) - # return 0 - - # set signal handlers - ''' - def signal_handler(signum, frame): - - logging.warning( - "SIGTERM or SIGHUP received, stopping unattended-upgrades " - "only if it is running") - self.stop_signal_received.set() - #self.start_iterations() - ''' - # fall back to polling without GLib - try: - hasattr(GLib, "MainLoop") - except NameError: - logging.error("MainLoop Not Found") - 
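# Illustrative sketch (not part of the patch): get_next_run_time() above computes the
# next firing time by taking the task's last-run timestamp, adding updateDays, and then
# picking a uniformly random point inside the configured "HH:MM-HH:MM" window of that
# day.  The helper below reproduces that arithmetic in isolation; the sample values in
# the last line are made up for demonstration.
import datetime
import random

def random_time_in_window(last_run, update_days, window):
    """Pick a random datetime inside `window` on the day last_run + update_days."""
    next_day = last_run + datetime.timedelta(days=update_days)
    start_t = datetime.datetime.strptime(window.split("-")[0], "%H:%M")
    end_t = datetime.datetime.strptime(window.split("-")[1], "%H:%M")
    start = next_day.replace(hour=start_t.hour, minute=start_t.minute, second=0, microsecond=0)
    end = next_day.replace(hour=end_t.hour, minute=end_t.minute, second=0, microsecond=0)
    span = int((end - start).total_seconds())
    # tolerate a reversed window the same way the patch does: draw from the negative range
    delta = random.randint(span, 0) if span < 0 else random.randint(0, span)
    return start + datetime.timedelta(seconds=delta)

# e.g. last download on 2024-05-01 03:12, updateDays=1, downloadTime window "02:00-04:00"
print(random_time_in_window(datetime.datetime(2024, 5, 1, 3, 12), 1, "02:00-04:00"))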
#self.run_polling(signal_handler) - return - ''' - for sig in (signal.SIGTERM, signal.SIGHUP): - GLib.unix_signal_add(GLib.PRIORITY_DEFAULT, sig, - signal_handler, None, None) - ''' - if self.options.wait_for_signal: - def property_changed_handler(property, value): - logging.debug("property change:%s:%s"%(property,value)) - autoupgradepolicy.SetOptionValue(property,value) - autoupgradepolicy.ExecutePolicy(property,value) - joblist = background_scheduler.get_jobs() - for job in joblist: - logging.debug("job:%s,next run time:%s"%(job.id,job.next_run_time)) - ''' - def change_upgrade_policy_handler(): - if os.path.isfile(UNATTENDED_UPGRADE_POLICY_FILE_PATH): - self.download_mode = ReadValueFromFile(UNATTENDED_UPGRADE_POLICY_FILE_PATH, POLICY_CONF_SECTION_AUTO_UPGRADE_POLICY, AUTO_UPGRADE_POLICY_OPTION_DOWNLOAD_MODE) - self.install_mode = ReadValueFromFile(UNATTENDED_UPGRADE_POLICY_FILE_PATH, POLICY_CONF_SECTION_AUTO_UPGRADE_POLICY, AUTO_UPGRADE_POLICY_OPTION_INSTALL_MODE) - self.preDownload = ReadValueFromFile(UNATTENDED_UPGRADE_POLICY_FILE_PATH, POLICY_CONF_SECTION_AUTO_UPGRADE_POLICY, AUTO_UPGRADE_POLICY_OPTION_PREDOWNLOAD) - self.autoUpgrade = ReadValueFromFile(UNATTENDED_UPGRADE_POLICY_FILE_PATH, POLICY_CONF_SECTION_AUTO_UPGRADE_POLICY, AUTO_UPGRADE_POLICY_OPTION_AUTOUPGRADE) - self.download_random = int(kylin_system_updater.GetConfigValue('AutoUpgradeConfig','downloadRandom')[1]) - self.upgrade_interval = int(kylin_system_updater.GetConfigValue('AutoUpgradeConfig','upgradeInterval')[1]) - download_time = ReadValueFromFile(UNATTENDED_UPGRADE_POLICY_FILE_PATH, POLICY_CONF_SECTION_AUTO_UPGRADE_POLICY, AUTO_UPGRADE_POLICY_OPTION_DOWNLOAD_TIME) - timelist = download_time.strip().split(':') - if len(timelist) != TimeElement.TIME_NUM: - logging.debug("unattended-upgrades-policy.conf time err %s",download_time) - return - # 检查 传入时间 安全性 - try: - tmphour = int(timelist[TimeElement.TIME_HOUR]) - except ValueError: - logging.debug("unattended-upgrades-policy.conf download_time h error") - return - try: - tmpminute = int(timelist[TimeElement.TIME_MINUTE]) - except ValueError: - logging.debug("unattended-upgrades-policy.conf download_time m error") - return - - self.download_time['h'] = tmphour - self.download_time['m'] = tmpminute - self.download_time_r = convert_time_by_random(self.download_time, self.download_random) - self.install_time['h'] = self.download_time_r['h'] - self.install_time['m'] = self.download_time_r['m'] + INTERVAL_DOWN_INSTALL - self.install_time_r = convert_time_by_random(self.install_time, INSTALL_RANDOM) - - logging.info("download random:%d,upgrade interval:%d"%(self.download_random,self.upgrade_interval)) - - if self.preDownload == FeatureSwitch.ON.value: #open download timing - download_time_tmp = ReadValueFromFile(UNATTENDED_UPGRADE_POLICY_FILE_PATH, POLICY_CONF_SECTION_AUTO_UPGRADE_POLICY, AUTO_UPGRADE_POLICY_OPTION_DOWNLOAD_TIME) - timelist = download_time_tmp.split(':') - if len(timelist) != TimeElement.TIME_NUM: - logging.debug("unattended-upgrades-policy.conf time err %s",download_time_tmp) - return - # 检查 传入时间 安全性 - try: - tmphour = int(timelist[TimeElement.TIME_HOUR]) - except ValueError: - logging.debug("unattended-upgrades-policy.conf download_time h error") - else: - self.download_time['h'] = tmphour - try: - tmpminute = int(timelist[TimeElement.TIME_MINUTE]) - except ValueError: - logging.debug("unattended-upgrades-policy.conf download_time m error") - else: - self.download_time['m'] = tmpminute - self.download_time_r = convert_time_by_random(self.download_time, 
self.download_random) - try: - if self.download_job is not None: - self.download_job.remove() - except Exception as e: - logging.error(e) - logging.info("pre-download time:%d:%d"%(self.download_time_r['h'], self.download_time_r['m'])) - self.download_job = self.scheduler.add_job(self.timing_download, 'cron', hour=self.download_time_r['h'], minute=self.download_time_r['m']) - elif self.preDownload == FeatureSwitch.OFF.value: - pass - - else: #close download timing - try: - self.download_job.pause() - except Exception as e: - logging.error(e) - - - if self.autoUpgrade == FeatureSwitch.OFF.value: - logging.info("auto upgrade turned off,removing download and instal jobs...") - try: - if self.download_job.pending: - self.download_job.remove() - except Exception as e: - pass - # logging.error(e) - try: - if self.install_job.pending: - self.install_job.remove() - except Exception as e: - pass - # logging.error(e) - return - else: - if self.download_mode == DownloadMode.TIMING_DOWNLOAD.value: - try: - if self.download_job.pending: - self.download_job.remove() - except Exception as e: - pass - # logging.error(e) - logging.info("download time:%d:%d"%(self.download_time_r['h'], self.download_time_r['m'])) - self.download_job = self.scheduler.add_job(self.timing_download, 'cron', hour=self.download_time_r['h'], minute=self.download_time_r['m']) - else: - try: - if self.download_job.pending: - self.download_job.remove() - except Exception as e: - pass - # logging.error(e) - - if self.install_mode == InstallMode.TIMING_INSTALL.value: - try: - if self.install_job.pending: - self.install_job.remove() - except Exception as e: - pass - # logging.error(e) - logging.info("install time:%d:%d"%(self.install_time_r['h'], self.install_time_r['m'])) - self.install_job = self.scheduler.add_job(self.timing_install, 'cron', hour=self.install_time_r['h'], minute=self.install_time_r['m']) - elif self.install_mode == InstallMode.BEFORE_SHUTDOWN_INSTALL.value: - try: - if self.install_job.pending: - self.install_job.remove() - except Exception as e: - pass - # logging.error(e) - logging.debug("install job removed,installation will conduct before shutdown") - else: #close install timing - try: - if self.download_job.pending: - self.install_job.remove() - except Exception as e: - pass - # logging.error(e) - - - logging.info("upgrade time: [%d:%d] [%d:%d] predown[%s] autoupgrade[%s] d-mode[%s] i-mode[%s]", - self.download_time_r['h'], self.download_time_r['m'],self.install_time_r['h'],self.install_time_r['m'], - self.preDownload, self.autoUpgrade, self.download_mode, self.install_mode) - + try: + if property == 'autoUpgradeState': + if value == 'on': + if self.autoupgradepolicy.GetOptionValue('downloadMode') == 'timing': + random_time = self.get_next_run_time("download") + self.background_scheduler.add_job(Download,trigger='interval',days=updatedays,\ + start_date=random_time,id='download',replace_existing=True) + if self.autoupgradepolicy.GetOptionValue('installMode') == 'timing': + random_time = self.get_next_run_time("install") + self.background_scheduler.add_job(Install,trigger='interval',days=updatedays,\ + start_date=random_time,id='install',replace_existing=True) + elif value == 'off': + self.remove_job('download') + self.remove_job('install') else: - logging.debug("unattended-upgrades-policy.conf not exist") - ''' - - def change_upgrade_policy_handler(): - autoUpgrade = ReadValueFromFile(UNATTENDED_UPGRADE_POLICY_FILE_PATH, POLICY_CONF_SECTION_AUTO_UPGRADE_POLICY, AUTO_UPGRADE_POLICY_OPTION_AUTOUPGRADE) - if 
autoUpgrade == 'on': - background_scheduler.resume_job('download') - background_scheduler.resume_job('install') - elif autoUpgrade == 'off': - background_scheduler.pause_job('download') - background_scheduler.pause_job('install') - joblist = background_scheduler.get_jobs() - for job in joblist: - logging.debug("job:%s,next run time:%s"%(job.id,job.next_run_time)) - - def upgrade_all_now_handler(): - #self._wait_for_unattended_upgrade_finish() - logging.info("upgrade all now sinal received") - delta = random.randint(0,int(autoupgradepolicy.GetOptionValue('randomRange'))) - run_date = datetime.datetime.now() + datetime.timedelta(minutes=delta) - background_scheduler.add_job(task,'date', run_date = run_date,args=['download_and_install'],\ - id='download_and_install', replace_existing=True) - joblist = background_scheduler.get_jobs() - for job in joblist: - logging.debug("job:%s,next run time:%s"%(job.id,job.next_run_time)) - ''' - if FindRuningUnattendedUpgrades(): - logging.warning("find runing unattended-upgrades,please wait") - return False - else: - #self._pause_timer() - env = copy.copy(os.environ) - retdownload = subprocess.run(["kylin-unattended-upgrade","--download-only"], env=env) - retinstall = subprocess.run(["kylin-unattended-upgrade","--install-only"], env=env) - #self._resume_timer() - if retdownload == 0 and retinstall == 0: - return True - else: - return False - ''' - def prepare_for_shutdown_handler(active): - """ Handle PrepareForShutdown() """ - if not active: - logging.warning("PrepareForShutdown(false) received, " - "this should not happen") - # PrepareForShutdown arrived, starting final iterations - self.install_mode = ReadValueFromFile(UNATTENDED_UPGRADE_POLICY_FILE_PATH,POLICY_CONF_SECTION_AUTO_UPGRADE_POLICY,AUTO_UPGRADE_POLICY_OPTION_INSTALL_MODE) - autoUpgrade = ReadValueFromFile(UNATTENDED_UPGRADE_POLICY_FILE_PATH, POLICY_CONF_SECTION_AUTO_UPGRADE_POLICY, AUTO_UPGRADE_POLICY_OPTION_AUTOUPGRADE) - if os_release_info['PROJECT_CODENAME'] == 'V10SP1-edu' and os_release_info['SUB_PROJECT_CODENAME']=='mavis': - bat = get_mavis_capacity() - logging.info("battery capacity:%d"%bat) - if os.path.exists(PKGS_TO_INSTALL_FLAG_FILE) and bat >=25: - logging.info("mavis shutdown install") - do_plymouth_splash() - self.start_iterations() - logging.info("finished iteration") - elif autoUpgrade == FeatureSwitch.ON.value and self.install_mode == InstallMode.BEFORE_SHUTDOWN_INSTALL.value: - if self.update_interface.GetConfigValue('AutoUpgradeConfig','shutdown_install'): - #show plymouth splash if bsshutdown is set - if os.path.exists(PKGS_TO_INSTALL_FLAG_FILE): - do_plymouth_splash() - self.start_iterations() - logging.info("finished iteration") + pass + elif property == 'downloadMode': + if value == 'timing': + if self.autoupgradepolicy.GetOptionValue('autoUpgradeState') == 'on': + random_time = self.get_next_run_time("download") + self.background_scheduler.add_job(Download,trigger='interval',days=updatedays,\ + start_date=random_time,id='download',replace_existing=True) + elif value == 'manual': + self.remove_job('download') + else: + pass + elif property == 'downloadTime': + if self.autoupgradepolicy.GetOptionValue('autoUpgradeState') == 'on' and \ + self.autoupgradepolicy.GetOptionValue('downloadMode') == 'timing': + random_time = self.get_next_run_time("download") + self.background_scheduler.add_job(Download,trigger='interval',days=updatedays,\ + start_date=random_time,id='download',replace_existing=True) + elif property == 'installMode': + if value == 'timing': + if 
self.autoupgradepolicy.GetOptionValue('autoUpgradeState') == 'on': + random_time = self.get_next_run_time("install") + self.background_scheduler.add_job(Install,trigger='interval',days=updatedays,\ + start_date=random_time,id='install',replace_existing=True) + elif value == 'manual': + self.remove_job('install') + elif value == 'bshutdown': + self.remove_job('install') else: pass - self.mainloop.quit() - self.get_update_proxy() - self.get_update_interface() - self.get_upgrade_strategy_proxy() - self.get_upgrade_strategy_interface() - - self.upgrade_strategy_proxy.connect_to_signal("PropertyChanged",property_changed_handler) - self.upgrade_strategy_proxy.connect_to_signal("UpgradeAllNow",upgrade_all_now_handler) - self.update_proxy.connect_to_signal("ChangeUpgradePolicy",change_upgrade_policy_handler) - #self.update_proxy.connect_to_signal("UpgradeAllNow",upgrade_all_now_handler) - - try: - self.get_logind_proxy().connect_to_signal( - "PrepareForShutdown", prepare_for_shutdown_handler) - except dbus.exceptions.DBusException: - logging.warning( - _("Unable to monitor PrepareForShutdown() signal, polling " - "instead.")) - logging.warning( - _("Maybe systemd-logind service is not running.")) - # self.run_polling(signal_handler) - return - #self.set_max_inhibit_time(1800) - logging.debug("Waiting for signal to start operation ") - else: - # starting final iterations immediately - logging.debug("Skip waiting for signals, starting operation " - "now") - # self.start_iterations() - - if os_release_info['PROJECT_CODENAME'] == 'V10SP1-edu' and os_release_info['SUB_PROJECT_CODENAME']=='mavis': - logging.info("setting startup download timer") - GLib.timeout_add(300*1000, lambda: self.timing_download() and False) - #local_time =time.localtime(time.time()+300) - #self.startup_download_job = self.scheduler.add_job(self.timing_download,'cron',hour=self.download_time_r['h'],minute = self.download_time_r['m']) - else: - ''' - if self.autoUpgrade == FeatureSwitch.ON.value: - logging.debug("download time:[%d:%d] install time:[%d:%d]", self.download_time_r['h'], self.download_time_r['m'],self.install_time_r['h'],self.install_time_r['m']) - self.download_job = self.scheduler.add_job(self.timing_download, 'cron', hour=self.download_time_r['h'], minute=self.download_time_r['m']) - self.install_job = self.scheduler.add_job(self.timing_install, 'cron', hour=self.install_time_r['h'], minute=self.install_time_r['m']) - elif self.autoUpgrade == FeatureSwitch.OFF.value: - logging.info("auto upgrade turned off") - ''' + elif property == 'installTime': + if self.autoupgradepolicy.GetOptionValue('autoUpgradeState') == 'on' and \ + self.autoupgradepolicy.GetOptionValue('installMode') == 'timing': + random_time = self.get_next_run_time("install") + self.background_scheduler.add_job(Install,trigger='interval',days=updatedays,\ + start_date=random_time,id='install',replace_existing=True) + elif property == 'preDownload': + if value == 'on': + random_time = self.get_next_run_time("predownload") + self.background_scheduler.add_job(Predownload,trigger='interval',days=updatedays,\ + start_date=random_time,id='predownload',replace_existing=True) + elif value == 'off': + self.remove_job('predownload') + else: + pass + elif property == 'preDownloadTime': + if self.autoupgradepolicy.GetOptionValue('preDownload') == 'on': + random_time = self.get_next_run_time("predownload") + self.background_scheduler.add_job(Predownload,trigger='interval',days=updatedays,\ + start_date=random_time,id='predownload',replace_existing=True) + elif 
property == 'updateDays': + if self.autoupgradepolicy.GetOptionValue('preDownload') == 'on': + random_time = self.get_next_run_time("predownload") + self.background_scheduler.add_job(Predownload,trigger='interval',days=updatedays,\ + start_date=random_time,id='predownload',replace_existing=True) + if self.autoupgradepolicy.GetOptionValue('autoUpgradeState') == 'on': + if self.autoupgradepolicy.GetOptionValue('downloadMode') == 'timing': + random_time = self.get_next_run_time("download") + self.background_scheduler.add_job(Download,trigger='interval',days=updatedays,\ + start_date=random_time,id='download',replace_existing=True) + if self.autoupgradepolicy.GetOptionValue('installMode') == 'timing': + random_time = self.get_next_run_time("install") + self.background_scheduler.add_job(Install,trigger='interval',days=updatedays,\ + start_date=random_time,id='install',replace_existing=True) + else: + logging.info(_("other options:%s:%s")%(property,value)) + except Exception as e: + logging.error(_("policy execute error:%s")%e) + + def Backup(self): + logging.info(_("start backup")) + backup_partition_status = self.backup_interface.Mount_backup_partition() + logging.info(_("backup partition status:%d")%backup_partition_status) + if backup_partition_status not in [0,5]: + logging.error(_("backup partition error:%d")%backup_partition_status) + return False + status_code,result = self.backup_interface.getBackupState() + logging.debug(_("backup state code:%d,%s")%(status_code,result)) + if result == 0 and status_code == 99: pass - #TimerThread(self.scheduler).start() - self.mainloop.run() - logging.info("quit mainloop") - os._exit(0) - #GLib.MainLoop().run() - - def try_iter_on_shutdown(self): - # check if we need to run unattended-upgrades on shutdown and if - # so, run it - try: - if self.apt_pkg_reinit_done is None: - logging.debug("Initializing apt_pkg configuration") - apt_pkg.init_config() - self.apt_pkg_reinit_done = True - except apt_pkg.Error as error: - # apt may be in a transient state due to unattended-upgrades - # running, thus assuming non shutdown mode is reasonable - logging.error(_("Apt returned an error thus shutdown mode is " - "disabled")) - logging.error(_("error message: '%s'"), error) - self.apt_pkg_reinit_done = False - - if self.on_shutdown_mode is None: - self.on_shutdown_mode = True - #( - #not self.options.stop_only - #and not self.stop_signal_received.is_set() - #and self.apt_pkg_reinit_done - # and apt_pkg.config.find_b( - # "Unattended-Upgrade::InstallOnShutdown", False) - #) - if self.on_shutdown_mode: - env = copy.copy(os.environ) - #env["UNATTENDED_UPGRADES_FORCE_INSTALL_ON_SHUTDOWN"] = "1" - logging.info("starting unattended-upgrades in shutdown mode") - ''' - if FindRuningUnattendedUpgrades(): - logging.warning("another unattended-upgrade is running , quit") - return False - ''' - self.on_shutdown_mode_uu_proc = subprocess.Popen( - ["kylin-unattended-upgrade","--install-only","--mode=shutdown"], env=env) - #log_msg(_("Running unattended-upgrades in shutdown mode")) - # run u-u, but switch to stopping when receiving stop signal - # because it means shutdown progressed despite holding the lock - - if self.on_shutdown_mode: - log_progress() - if self.on_shutdown_mode_uu_proc.poll() is not None: - # unattended-upgrades stopped on its own - #exit_log_result(True) - if os.path.exists(PKGS_TO_INSTALL_FLAG_FILE): - os.remove(PKGS_TO_INSTALL_FLAG_FILE) - subprocess.call(["/bin/plymouth","system-update","--progress=100"]) - time.sleep(1) - 
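# Illustrative sketch (not part of the patch): Backup() above and the Download()/Install()
# methods further down all use the same handshake around an asynchronous D-Bus call --
# clear a threading.Event, fire the request, block in wait(), and let the corresponding
# signal handler store the result and set() the event.  The stand-alone demo below uses a
# worker thread in place of the remote daemon; every name in it is illustrative.  Note
# that the patch calls wait() without a timeout, so a daemon that never answers would
# block the job forever; a wait(timeout=...) as shown here is one way to bound that.
import threading
import time

class HandshakeDemo:
    def __init__(self):
        self.finished = threading.Event()
        self.result = None

    def _signal_handler(self, success):
        # plays the role of update_install_finished() / backup_result_handler()
        self.result = success
        self.finished.set()

    def request_and_wait(self):
        self.finished.clear()                                     # 1. arm the event
        threading.Thread(target=lambda: (time.sleep(0.1),         # 2. "remote daemon" works...
                                         self._signal_handler(True))).start()
        finished_in_time = self.finished.wait(timeout=5)          # 3. block for the "signal"
        return self.result if finished_in_time else None

print(HandshakeDemo().request_and_wait())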
subprocess.run(["/bin/plymouth","quit","--retain-splash"]) - return False - else: - return True - return False - - - def iter(self): - ''' - if self.start_time is None: - self.start_time = time.time() - logging.debug("Starting countdown of %s minutes", - self.max_delay / 60) + else: + return False + backup_name = _("unattended upgrades") + create_note = '' + inc_note = '' + userName=getpass.getuser() + uid=os.getuid() + self.backup_finish_event.clear() + self.backup_start_result =True + self.backup_finish_result=True + self.backup_interface.autoBackUpForSystemUpdate_noreturn(backup_name,create_note,inc_note,userName,uid) + self.backup_finish_event.wait() + logging.debug(_("backup start result:%s,backup result:%s")%(self.backup_start_result,self.backup_finish_result)) + if (self.backup_start_result and self.backup_finish_result): + return True else: - if (time.time() - self.start_time) > self.max_delay: - logging.warning(_( - "Giving up on lockfile after %s minutes of delay"), - self.max_delay / 60) - #os._exit(1) - #sys.exit(1) - return False + return False - if not self.stop_signal_received.is_set(): - if self.try_iter_on_shutdown(): - return True - else: - return False - - - # run monitoring and keep "UI" updated - res = apt_pkg.get_lock(self.options.lock_file) - logging.debug("get_lock returned %i" % res) - # exit here if there is no lock - if res > 0: - logging.debug("lock not taken") - #os._exit(0) - if self.lock_was_taken: - exit_log_result(self.signal_sent) - else: - sys.exit(0) - self.lock_was_taken = True - signal_stop_unattended_upgrade() - self.signal_sent = True - # show log - log_progress() - ''' - return self.try_iter_on_shutdown() - #return True - - -def main(): - # setup gettext - localesApp = "unattended-upgrades" - localesDir = "/usr/share/locale" - gettext.bindtextdomain(localesApp, localesDir) - gettext.textdomain(localesApp) - - # use a normal logfile instead of syslog too as on shutdown its too - # easy to get syslog killed - logdir = "/var/log/kylin-unattended-upgrades/" - try: - apt_pkg.init_config() - # logdir = apt_pkg.config.find_dir( - # "Unattended-Upgrade::LogDir", logdir) - except apt_pkg.Error as error: - logging.error(_("Apt returned an error when loading configuration, " - "using default values")) - logging.error(_("error message: '%s'"), error) - - parser = OptionParser() - parser.add_option("", "--debug", - action="store_true", dest="debug", - default=apt_pkg.config.find_b( - "Unattended-Upgrade::Debug", True), - help="print debug messages") - parser.add_option("", "--delay", default=25, type="int", - help="delay in minutes to wait for unattended-upgrades") - parser.add_option("", "--lock-file", - default="/var/run/kylin-unattended-upgrades.lock", - help="lock file location") - parser.add_option("", "--stop-only", - action="store_true", dest="stop_only", default=False, - help="only stop running unattended-upgrades, don't " - "start it even when " - "Unattended-Upgrade::InstallOnShutdown is true") - parser.add_option("", "--wait-for-signal", - action="store_true", dest="wait_for_signal", - default=False, - help="wait for TERM signal before starting operation") - (options, args) = parser.parse_args() - - # setup logging - level = logging.INFO - if options.debug: - level = logging.DEBUG - if not os.path.exists('/var/lib/unattended-upgrades'): - os.makedirs('/var/lib/unattended-upgrades') - if not os.path.exists(logdir): - os.makedirs(logdir) - logfile = os.path.join(logdir, "unattended-upgrades-shutdown.log") - logging.basicConfig(filename=logfile, - 
level=level, - format="%(asctime)s %(levelname)s - %(message)s") - clean_flag_files(flag_file_list) - signal.signal(signal.SIGTERM, signal_term_handler) - signal.signal(signal.SIGHUP, signal.SIG_IGN) - # init() - - UnattendedUpgradesShutdown(options).run() - - -if __name__ == "__main__": - parser = OptionParser() - parser.add_option("", "--debug", - action="store_true", dest="debug", - default=True,#apt_pkg.config.find_b( - #"Unattended-Upgrade::Debug", True), - help="print debug messages") - parser.add_option("", "--delay", default=25, type="int", - help="delay in minutes to wait for unattended-upgrades") - parser.add_option("", "--lock-file", - default="/var/run/kylin-unattended-upgrades.lock", - help="lock file location") - parser.add_option("", "--stop-only", - action="store_true", dest="stop_only", default=False, - help="only stop running unattended-upgrades, don't " - "start it even when " - "Unattended-Upgrade::InstallOnShutdown is true") - parser.add_option("", "--wait-for-signal", - action="store_true", dest="wait_for_signal", - default=False, - help="wait for TERM signal before starting operation") - (options, args) = parser.parse_args() - logdir = "/var/log/kylin-unattended-upgrades/" - # setup logging - level = logging.INFO - if options.debug: - level = logging.DEBUG - if not os.path.exists('/var/lib/unattended-upgrades'): - os.makedirs('/var/lib/unattended-upgrades') - if not os.path.exists(logdir): - os.makedirs(logdir) - logfile = os.path.join(logdir, "unattended-upgrades-shutdown.log") - #_setup_logging(options,logfile) - # logging.basicConfig(filename=logfile, - # level=level, - # format="%(asctime)s %(levelname)s - %(message)s") - logging.basicConfig(format='%(asctime)s-%(name)s-%(levelname)s-%(message)s',level=logging.DEBUG,filename=logfile) - # file_handler = logging.FileHandler(filename=logfile) - # file_handler.setLevel(logging.DEBUG) - # formatter = logging.Formatter() - # file_handler.setFormatter(formatter) - # stdout_handler = logging.StreamHandler(sys.stdout) - # stdout_handler.setLevel(logging.DEBUG) - - # logger=logging.getLogger() - # logger.setLevel(logging.DEBUG) - # logger.addHandler(file_handler) - # logger.addHandler(stdout_handler) - - # scheduler_logger = logging.getLogger('apscheduler') - # scheduler_logger.setLevel(logging.DEBUG) - # scheduler_logger.addHandler(file_handler) - # scheduler_logger.addHandler(stdout_handler) - os.environ['TZ']= "Asia/Shanghai" - os_release_info = ReadOsRelease('/etc/os-release') - logging.info("project id:%s,sub-project id:%s"%(os_release_info['PROJECT_CODENAME'],os_release_info['SUB_PROJECT_CODENAME'])) - ''' - time_stamp = "0" - if os.path.exists(TIME_STAMP): - with open(TIME_STAMP,'r') as f: - time_stamp = f.readline() - logging.info("time stamp:%s"%time_stamp) - ''' - # setup gettext - localesApp = "unattended-upgrades" - localesDir = "/usr/share/locale" - gettext.bindtextdomain(localesApp, localesDir) - gettext.textdomain(localesApp) - - # use a normal logfile instead of syslog too as on shutdown its too - # easy to get syslog killed - - try: - # apt_pkg.init_config() - reload_options_config() - # logdir = apt_pkg.config.find_dir( - # "Unattended-Upgrade::LogDir", logdir) - except apt_pkg.Error as error: - logging.error(_("Apt returned an error when loading configuration, " - "using default values")) - logging.error(_("error message: '%s'"), error) - - - clean_flag_files(flag_file_list) - signal.signal(signal.SIGTERM, signal_term_handler) - signal.signal(signal.SIGHUP, signal.SIG_IGN) - dpkg_fix=None - ''' - if 
os_release_info['PROJECT_CODENAME'] == 'V10SP1-edu' and os_release_info['SUB_PROJECT_CODENAME']=='mavis': - dpkg_journal_dirty = is_dpkg_journal_dirty() - logging.info("dpkg dirty:%s"%(dpkg_journal_dirty)) - if dpkg_journal_dirty: + def Download(self): + logging.debug(_("start download")) + self.update_detect_event.clear() + self.UpdateDetect() + self.update_detect_event.wait() + logging.debug(_("update detect finish:%s,%s")%(self.update_detect_status,",".join(self.update_list))) + if self.update_detect_status and len(self.update_list)>0: + pass + # elif not self.update_detect_status and 'kylin-system-updater' in self.update_list: + # logging.info(_("self update finished")) + else: + logging.info(_("no pkgs to download")) + return False + cache = apt.Cache() + for pkg in self.update_list: try: - with open(OTA_PKGS_TO_INSTALL_LIST,'r') as f: - pkgs = f.read() - ret = subprocess.run(["dpkg -i %s"%pkgs],shell=True,stdout=open(logfile,'a+'),stderr=open(logfile,'a+')) + package = cache[pkg] + if not package.installed: + package.mark_install() + else: + package.mark_upgrade() except Exception as e: logging.error(e) - if ret.returncode == 0: - logging.info("dpkg fix success") - subprocess.Popen('dbus-send --system --type=signal / com.kylin.update.notification.FixFinish', shell=True) - # dpkg_fix = subprocess.run("dpkg --configure -a",shell=True,stdout=open(logfile,'a+'),stderr=open(logfile,'a+')) - abnormal_pkg_count = get_abnormally_installed_pkg_count() - logging.info("abnormal pkg count:%s"%(abnormal_pkg_count)) - if abnormal_pkg_count != '0': - apt_fix = subprocess.run("echo y|apt install -f",shell=True,stdout=open(logfile,'a+'),stderr=open(logfile,'a+')) - ''' - kylin_system_updater = KylinSystemUpdater() - autoupgradepolicy = AutoUpgradePolicy() - background_scheduler = BackgroundScheduler(timezone="Asia/Shanghai") - background_scheduler_init(background_scheduler) - #executor = ThreadPoolExecutor(max_workers=1) - UnattendedUpgradesShutdown(options).run() - #main() + return False + list = apt_pkg.SourceList() + list.read_main_list() + recs = cache._records + pm = apt_pkg.PackageManager(cache._depcache) + fetcher = apt_pkg.Acquire(FetchProgress()) + try: + pm.get_archives(fetcher, list, recs) + except Exception as e: + logging.error(e) + res = fetcher.run() + logging.debug("fetch.run() result: %s", res) + if res == 0: + return True + else: + return False + + def Install(self): + logging.debug(_("start install")) + self.update_detect_event.clear() + self.UpdateDetect() + self.update_detect_event.wait() + logging.debug(_("update detect finish:%s,%s")%(self.update_detect_status,",".join(self.update_list))) + if self.update_detect_status and len(self.update_list)>0: + pass + # elif not self.update_detect_status and 'kylin-system-updater' in self.update_list: + # logging.info(_("self update finished")) + else: + return False + self.resolve_depend_status_event.clear() + self.DistUpgradeAll(False) + self.resolve_depend_status_event.wait() + logging.debug(_("resolve dependency status:%s,%s")%(self.resolve_depend_status,",".join(self.remove_pkgs))) + if self.resolve_depend_status and len(self.remove_pkgs)==0: + pass + else: + return False + needbackup = self.autoupgradepolicy.GetOptionValue('backupbeforeinstall') + logging.debug(_("checking if need backup:%s")%needbackup) + if needbackup == 'on': + if self.Backup(): + logging.debug(_("backup success")) + else: + logging.debug(_("backup failed")) + return False + self.install_finish_status_event.clear() + self.DistUpgradeAll(True) + 
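# Illustrative sketch (not part of the patch): Download() above marks every detected
# package and then drives python-apt's low-level fetch path (SourceList ->
# PackageManager.get_archives -> Acquire.run) so the .debs land in the apt cache without
# being installed.  The condensed version below uses the apt.progress.text.AcquireProgress
# already imported at the top of this file in place of the FetchProgress helper defined
# elsewhere, and needs root to write into /var/cache/apt/archives.
import apt
import apt_pkg
from apt.progress.text import AcquireProgress

def fetch_upgrades_only():
    cache = apt.Cache()
    for pkg in cache:
        if pkg.is_upgradable:
            pkg.mark_upgrade()                 # queue the upgrade, do not commit it
    sources = apt_pkg.SourceList()
    sources.read_main_list()
    fetcher = apt_pkg.Acquire(AcquireProgress())
    pm = apt_pkg.PackageManager(cache._depcache)
    # same private handles (_depcache, _records) that Download() above relies on
    pm.get_archives(fetcher, sources, cache._records)
    return fetcher.run()                       # the patch treats a result of 0 as success

if __name__ == "__main__":
    print("fetch result:", fetch_upgrades_only())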
self.install_finish_status_event.wait() + logging.debug(_("install finish status:%s,%s")%(self.install_finish_status,",".join(self.install_finish_group))) + if self.install_finish_status and len(self.install_finish_group)>0: + self.reboot_if_required() + else: + return False + return True + + def UpdateDetect(self): + return self.update_interface.UpdateDetect() + + def DistUpgradeAll(self,is_install): + return self.update_interface.DistUpgradeAll(is_install) + + def mount_backup_partition(self): + return self.backup_interface.Mount_backup_partition() + + def get_backup_state(self): + return self.backup_interface.getBackupState() + + def get_backup_comment_for_systemupdate(self): + return self.backup_interface.getBackupCommentForSystemUpdate() + + def auto_backup_for_system_update_noreturn(self,timeStamp,create_note,inc_note,userName,uid): + self.backup_interface.autoBackUpForSystemUpdate_noreturn(timeStamp,create_note,inc_note,userName,uid) + return + + def reboot_if_required(self): + needreboot = self.autoupgradepolicy.GetOptionValue('automaticReboot') + when = self.autoupgradepolicy.GetOptionValue('automaticRebootTime') + logging.debug(_("check if need reboot:%s when:%s")%(needreboot,when)) + if needreboot == "on": + try: + output = subprocess.check_output(["/sbin/shutdown","-r",when],shell=False) + logging.debug(output) + except Exception as e: + logging.error(_("Failed to issue shutdown: %s")%e) + return False + +if __name__ == "__main__": + gettext.bindtextdomain("unattended-upgrades","/usr/share/locale") + gettext.textdomain("unattended-upgrades") + parser = OptionParser() + parser.add_option("", "--debug", + action="store_true", dest="debug", + default=False,help="print debug messages") + parser.add_option("", "--download-only", + action="store_true", dest="download_only", + default=False,help="only download without install") + parser.add_option("", "--install-only", + action="store_true", dest="install_only", + default=False,help="only install without download") + parser.add_option("", "--upgrade", + action="store_true", dest="upgrade", + default=False,help="upgrade all packages") + parser.add_option("", "--wait-for-signal", + action="store_true", dest="wait_for_signal", + default=False, + help="wait for TERM signal before starting operation") + (options, args) = parser.parse_args() + logdir = "/var/log/kylin-unattended-upgrades/" + if not os.path.exists(logdir): + os.makedirs(logdir) + logfile = os.path.join(logdir, "unattended-upgrades-shutdown.log") + if options.debug: + logging.basicConfig(format='%(asctime)s-%(name)s-%(levelname)s-%(message)s',datefmt='%Y-%m-%d,%H:%M:%S',level=logging.DEBUG,stream=sys.stdout) + else: + logging.basicConfig(format='%(asctime)s-%(name)s-%(levelname)s-%(message)s',datefmt='%Y-%m-%d,%H:%M:%S',level=logging.DEBUG,filename=logfile) + signal.signal(signal.SIGTERM, signal_term_handler) + signal.signal(signal.SIGHUP, signal.SIG_IGN) + logging.info(_("unattended upgrade start options:%s")%(" ".join(sys.argv))) + unattended_upgrades_shutdown = UnattendedUpgradesShutdown(options) + sys.exit(unattended_upgrades_shutdown.run()) \ No newline at end of file
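# Illustrative sketch (not part of the patch): init_scheduler() and ExecutePolicy() above
# both rely on APScheduler's replace_existing=True -- re-adding a job under the same id
# swaps its schedule in place, which is how a changed updateDays / downloadTime policy
# takes effect without tearing the scheduler down.  The job body and times below are
# placeholders.
import datetime

from apscheduler.schedulers.background import BackgroundScheduler

def download_job():
    print("the real Download() would run here")

scheduler = BackgroundScheduler(timezone="Asia/Shanghai")
scheduler.start()

# initial policy: run daily, first run ten seconds from now
scheduler.add_job(download_job, trigger='interval', days=1,
                  start_date=datetime.datetime.now() + datetime.timedelta(seconds=10),
                  id='download', replace_existing=True)

# policy change: same id + replace_existing=True reschedules the existing job in place
scheduler.add_job(download_job, trigger='interval', days=2,
                  start_date=datetime.datetime.now() + datetime.timedelta(hours=1),
                  id='download', replace_existing=True)

for job in scheduler.get_jobs():
    print(job.id, job.next_run_time)
scheduler.shutdown()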