Import Upstream version 2.0.2

Commit 0f4774d81a by openKylinBot, 2022-05-13 19:55:18 +08:00
761 changed files with 473426 additions and 0 deletions

.clang-format (Normal file, +25 lines)
@@ -0,0 +1,25 @@
Language: Cpp
# BasedOnStyle
TabWidth: 8
UseTab: Always
IndentWidth: 3
ContinuationIndentWidth: 3
ColumnLimit: 0
BreakBeforeBraces: Allman
AccessModifierOffset: 0
IncludeCategories:
  - Regex: 'apti18n.h'
    Priority: 9999
  - Regex: 'apt-[^/]*/'
    Priority: 20
  - Regex: '^"'
    Priority: 10
  - Regex: 'config.h'
    Priority: 0
  - Regex: '(zlib|bzlib|lzma|lz4frame|gtest/gtest|db|gnutls/.*)\.h'
    Priority: 30
  - Regex: '\.h'
    Priority: 100
  - Regex: '.*'
    Priority: 99

.gitlab-ci.yml (Normal file, +39 lines)
@@ -0,0 +1,39 @@
image: debian:unstable
variables:
  DEBIAN_FRONTEND: noninteractive
  CCACHE_DIR: $CI_PROJECT_DIR/.ccache
  CCACHE_BASEDIR: $CI_PROJECT_DIR
cache:
  paths:
    - .ccache

test as root:
  stage: test
  script:
    - adduser --home /home/travis travis --quiet --disabled-login --gecos "" --uid 1000
    - rm -f /etc/dpkg/dpkg.cfg.d/excludes
    - apt-get update
    - apt-get install -qq build-essential expect sudo ccache
    - chmod -R o+rwX $PWD
    - ./prepare-release travis-ci
    - sudo -u travis mkdir -p build .ccache
    - sudo -u travis env -C build cmake -DCMAKE_BUILD_TYPE=Release -DCMAKE_C_COMPILER_LAUNCHER=ccache -DCMAKE_CXX_COMPILER_LAUNCHER=ccache -G Ninja ..
    - sudo -u travis --preserve-env=CCACHE_DIR,CCACHE_BASEDIR ninja -C build
    - CTEST_OUTPUT_ON_FAILURE=1 ninja -C build test
    - unbuffer ./test/integration/run-tests -q -j 4

test as user:
  stage: test
  script:
    - adduser --home /home/travis travis --quiet --disabled-login --gecos "" --uid 1000
    - rm -f /etc/dpkg/dpkg.cfg.d/excludes
    - apt-get update
    - apt-get install -qq build-essential expect sudo ccache
    - chmod 755 /root
    - chmod -R o+rwX $PWD
    - ./prepare-release travis-ci
    - sudo -u travis mkdir -p build .ccache
    - sudo -u travis env -C build cmake -DCMAKE_BUILD_TYPE=Release -DCMAKE_C_COMPILER_LAUNCHER=ccache -DCMAKE_CXX_COMPILER_LAUNCHER=ccache -G Ninja ..
    - sudo -u travis --preserve-env=CCACHE_DIR,CCACHE_BASEDIR ninja -C build
    - sudo -u travis CTEST_OUTPUT_ON_FAILURE=1 ninja -C build test
    - sudo -u travis unbuffer ./test/integration/run-tests -q -j 4

.travis.yml (Normal file, +23 lines)
@@ -0,0 +1,23 @@
language: cpp
cache: ccache
sudo: required
services:
  - docker
env:
  global:
    - DEBIAN_FRONTEND=noninteractive
  matrix:
    - USER=travis CMAKE_FLAGS=
    - USER=root CMAKE_FLAGS=-DWITH_DOC=OFF
install:
  - sed -i -e "s#1000#$(id -u)#g" Dockerfile
  - docker build --tag=apt-ci .
before_script:
  - docker run --rm -w $PWD -v $HOME/.ccache:$HOME/.ccache -v $PWD:$PWD --user=travis apt-ci sh -e -c "mkdir build && cd build && env PATH=/usr/lib/ccache:\$PATH cmake -DCMAKE_BUILD_TYPE=Coverage -G Ninja $CMAKE_FLAGS .."
  - docker run --rm -w $PWD -v $HOME/.ccache:$HOME/.ccache -v $PWD:$PWD --user=travis apt-ci ninja -C build
script:
  - docker run --rm -w $PWD -v $PWD:$PWD --user=travis apt-ci env CTEST_OUTPUT_ON_FAILURE=1 ninja -C build test
  - docker run --rm -w $PWD -v $PWD:$PWD --user=travis apt-ci env DESTDIR=$PWD/rootdir chronic ninja -C build install
  - docker run --rm -w $PWD -v $PWD:$PWD --user=$USER --tmpfs /tmp:suid,exec apt-ci unbuffer ./test/integration/run-tests -qq -j 4
after_script:
  - docker run --rm -w $PWD/build -v $PWD:$PWD --user=$USER `bash <(curl -s https://codecov.io/env)` apt-ci bash -c 'bash <(curl -s https://codecov.io/bash)'

AUTHORS (Normal file, +63 lines)
@@ -0,0 +1,63 @@
The project contributors:
Michael Vogt <mvo@debian.org>
- Development, bug fixes
David Kalnischkies <kalnischkies+debian@gmail.com>
- Development, bug fixes
Julian Andres Klode
- Development, bug fixes
Past Contributors:
Robert Collins <robert.collins@canonical.com>
- Change the package index Info methods to allow apt-cache policy to be useful
when using several different archives on the same host.
Christian Perrier <bubulle@debian.org>
- Translations hero/coordinator
Eugene V. Lyubimkin
- Development, bug fixes
Otavio Salvador
- Development, bug fixes
Luca Bruno
- Development, bug fixes
CVS:jgg Jason Gunthorpe <jgg@debian.org>
- The Mad Cow incarnate
CVS:mdz Matt Zimmerman <mdz@debian.org>
- Ongoing maintenance and coordination of development
CVS:piefel Michael Piefel <piefel@debian.org>
- i18n and l10n
CVS:che Ben Gertzfield <che@debian.org>
- Packaging and Releases
CVS:bod Brendan O'Dea <bod@debian.org>
- Perl Bindings
CVS:tausq Randolph Chung <tausq@debian.org>
- Patches, Fixes, Debugging, GUIs and Releases
Isaac Jones <ijones@syntaxpolice.org> and Colin Walters <walters@debian.org>
Initial implementation of authentication support (Release.gpg)
Brian White <bcwhite@verisim.com> - Project originator
Tom Lees <tom@lpsg.demon.co.uk> - DPKG documentation and ideas
Behan Webster <behanw@verisim.com> - Original GUI design
Scott Ellis <storm@gate.net> - Original packaging and beta releases
Branden Robinson <branden@purdue.edu> - Man Page Documentation
Manoj Srivastava <srivasta@datasync.com> - 1st Generation FTP method and
dselect setup script
Adam Heath <doogie@debian.org> - 2nd Generation FTP method author
Ben Collins <bcollins@debian.org> - Initial RSH method
Many other bug reports through the Debian Bug system
NOTE: The ChangeLog generator will parse for names and email addresses. The
'CVS:<name>' tag should indicate who this pair refers to.

CMake/CheckCxxTarget.cmake (Normal file, +35 lines)
@@ -0,0 +1,35 @@
# CMake support for target-based function multiversioning
#
# Copyright (C) 2019 Canonical Ltd
#
# Author: Julian Andres Klode <jak@debian.org>.
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
function(check_cxx_target var target code)
check_cxx_source_compiles(
"
__attribute__((target(\"${target}\"))) static int foo() { ${code} return 1; }
__attribute__((target(\"default\"))) static int foo() { ${code} return 0; }
int main() { return foo(); }
" ${var})
endfunction()
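# Example (mirroring how the top-level CMakeLists.txt in this tree calls it):
#   check_cxx_target(HAVE_FMV_SSE42_AND_CRC32 "sse4.2" "__builtin_ia32_crc32si(0, 1llu);")
# The result variable is only set to true if the compiler accepts the target
# attribute and both the "sse4.2" and "default" variants of foo() compile.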

CMake/Documentation.cmake (Normal file, +328 lines)
@@ -0,0 +1,328 @@
# po4a/docbook documentation support for CMake
# - see documentation of add_docbook()
#
# Copyright (C) 2016 Julian Andres Klode <jak@debian.org>.
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
find_path(DOCBOOK_XSL manpages/docbook.xsl
# Debian
/usr/share/xml/docbook/stylesheet/docbook-xsl
/usr/share/xml/docbook/stylesheet/nwalsh
# OpenSUSE
/usr/share/xml/docbook/stylesheet/nwalsh/current
# Arch
/usr/share/xml/docbook/xsl-stylesheets
# Fedora
/usr/share/sgml/docbook/xsl-stylesheets
# Fink
${CMAKE_INSTALL_PREFIX}/share/xml/xsl/docbook-xsl
# FreeBSD
${CMAKE_INSTALL_PREFIX}/share/xsl/docbook/
NO_DEFAULT_PATH)
if(NOT DOCBOOK_XSL)
message(FATAL_ERROR "Could not find docbook xsl")
endif()
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/docbook-text-style.xsl.cmake.in
${CMAKE_CURRENT_BINARY_DIR}/docbook-text-style.xsl)
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/docbook-html-style.xsl.cmake.in
${CMAKE_CURRENT_BINARY_DIR}/docbook-html-style.xsl)
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/manpage-style.xsl.cmake.in
${CMAKE_CURRENT_BINARY_DIR}/manpage-style.xsl)
# Split up a string of the form DOCUMENT[.DOCUMENT][.LANGUAGE][.SECTION].EXTENSION
#
# There might be up to two parts in the document name. The language must be
# a two char language code like de, or a 5 char code of the form de_DE.
function(po4a_components doc lang sec ext translated_full_document)
get_filename_component(name ${translated_full_document} NAME)
string(REPLACE "." ";" name "${name}") # Make it a list
list(GET name 0 _doc) # First element is always the document
list(GET name 1 _lang) # Second *might* be a language
list(GET name -2 _sec) # Second-last *might* be a section
list(GET name -1 _ext) # Last element is always the file type
# If the language code is neither a file type, nor a section, nor a language
# assume it is part of the file name and use the next component as the lang.
if(_lang AND NOT _lang MATCHES "^(xml|dbk|[0-9]|[a-z][a-z]|[a-z][a-z]_[A-Z][A-Z])$")
set(_doc "${_doc}.${_lang}")
list(GET name 2 _lang)
endif()
# If no language is present, we get a section; both not present => type
if(_lang MATCHES "xml|dbk|[0-9]")
set(_lang "")
endif()
if(NOT _sec MATCHES "^[0-9]$") # A (manpage) section must be a number
set(_sec "")
endif()
set(${doc} ${_doc} PARENT_SCOPE)
set(${lang} ${_lang} PARENT_SCOPE)
set(${sec} ${_sec} PARENT_SCOPE)
set(${ext} ${_ext} PARENT_SCOPE)
endfunction()
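# Illustrative decompositions (example file names, not taken from this tree):
#   guide.de.dbk        -> doc=guide    lang=de    sec=   ext=dbk
#   apt.conf.5.xml      -> doc=apt.conf lang=      sec=5  ext=xml
#   apt-get.pt_BR.8.xml -> doc=apt-get  lang=pt_BR sec=8  ext=xml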
# Process one document
function(po4a_one stamp_out out full_document language deps)
path_join(full_path "${CMAKE_CURRENT_SOURCE_DIR}" "${full_document}")
if (full_document MATCHES "\.ent$")
set(dest "${language}/${full_document}")
set(full_dest "${dest}")
else()
po4a_components(document _ section ext "${full_document}")
# Calculate target file name
set(dest "${language}/${document}.${language}")
if(section)
set(dest "${dest}.${section}")
endif()
set(full_dest "${dest}.${ext}")
endif()
# po4a might drop files not translated enough, so build a stamp file
set(stamp ${CMAKE_CURRENT_BINARY_DIR}/${dest}.po4a-stamp)
add_custom_command(
OUTPUT ${stamp}
COMMAND ${CMAKE_COMMAND} -E make_directory ${CMAKE_CURRENT_BINARY_DIR}/${language}
COMMAND po4a --previous --no-backups
--package-name='${PROJECT_NAME}-doc'
--package-version='${PACKAGE_VERSION}'
--msgid-bugs-address='${PACKAGE_MAIL}'
--translate-only ${full_dest}
--srcdir ${CMAKE_CURRENT_SOURCE_DIR}
--destdir ${CMAKE_CURRENT_BINARY_DIR}
${CMAKE_CURRENT_SOURCE_DIR}/po4a.conf
COMMAND ${CMAKE_COMMAND} -E touch ${stamp}
COMMENT "Generating ${full_dest} (or dropping it)"
DEPENDS ${full_document} ${deps} po/${language}.po
)
# Return result
set(${stamp_out} ${stamp} PARENT_SCOPE)
set(${out} ${CMAKE_CURRENT_BINARY_DIR}/${full_dest} PARENT_SCOPE)
endfunction()
function(xsltproc_one)
set(generated "")
set(options HTML TEXT MANPAGE)
set(oneValueArgs STAMP STAMP_OUT FULL_DOCUMENT)
set(multiValueArgs INSTALL DEPENDS)
cmake_parse_arguments(DOC "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})
po4a_components(document language section ext "${DOC_FULL_DOCUMENT}")
# Default parameters
set(params
--nonet
--xinclude
--stringparam chunk.quietly yes
--stringparam man.output.quietly yes
--path ${PROJECT_SOURCE_DIR}/vendor/${CURRENT_VENDOR}/
--path ${CMAKE_CURRENT_SOURCE_DIR}/
)
# Parameters if localized
if(language)
list(APPEND params -stringparam l10n.gentext.default.language ${language})
endif()
path_join(full_input_path ${CMAKE_CURRENT_SOURCE_DIR} ${DOC_FULL_DOCUMENT})
if (DOC_MANPAGE)
if (language)
set(manpage_output "${CMAKE_CURRENT_BINARY_DIR}/${language}/${document}.${section}")
else()
set(manpage_output "${CMAKE_CURRENT_BINARY_DIR}/${document}.${section}")
endif()
set(manpage_stylesheet "${CMAKE_CURRENT_BINARY_DIR}/manpage-style.xsl")
set(manpage_params)
install(FILES ${manpage_output}
DESTINATION ${CMAKE_INSTALL_MANDIR}/${language}/man${section}
OPTIONAL)
endif()
if (DOC_HTML)
if (language)
set(html_output "${CMAKE_CURRENT_BINARY_DIR}/${language}/${document}.${language}.html")
else()
set(html_output "${CMAKE_CURRENT_BINARY_DIR}/${document}.html")
endif()
set(html_params --stringparam base.dir ${html_output})
set(html_stylesheet "${CMAKE_CURRENT_BINARY_DIR}/docbook-html-style.xsl")
install(DIRECTORY ${html_output}
DESTINATION ${DOC_INSTALL}
OPTIONAL)
endif()
if (DOC_TEXT)
if (language)
set(text_output "${CMAKE_CURRENT_BINARY_DIR}/${language}/${document}.${language}.text")
else()
set(text_output "${CMAKE_CURRENT_BINARY_DIR}/${document}.text")
endif()
set(text_params --stringparam base.dir ${text_output})
set(text_stylesheet "${CMAKE_CURRENT_BINARY_DIR}/docbook-text-style.xsl")
file(RELATIVE_PATH text_output_relative ${CMAKE_CURRENT_BINARY_DIR} ${text_output})
add_custom_command(OUTPUT ${text_output}.w3m-stamp
COMMAND ${PROJECT_SOURCE_DIR}/CMake/run_if_exists.sh
--stdout ${text_output}
${text_output}.html
env LC_ALL=C.UTF-8 w3m -cols 78 -dump
-o display_charset=UTF-8
-no-graph -T text/html ${text_output}.html
COMMAND ${CMAKE_COMMAND} -E touch ${text_output}.w3m-stamp
COMMENT "Generating ${text_output_relative} (if not dropped by po4a)"
DEPENDS "${text_output}.html.xsltproc-stamp"
)
list(APPEND generated ${text_output}.w3m-stamp)
install(FILES ${text_output}
DESTINATION ${DOC_INSTALL}
OPTIONAL)
set(text_output "${text_output}.html")
endif()
foreach(type in manpage html text)
if (NOT ${type}_output)
continue()
endif()
set(output ${${type}_output})
set(stylesheet ${${type}_stylesheet})
set(type_params ${${type}_params})
file(RELATIVE_PATH output_relative ${CMAKE_CURRENT_BINARY_DIR} ${output})
add_custom_command(OUTPUT ${output}.xsltproc-stamp
COMMAND ${PROJECT_SOURCE_DIR}/CMake/run_if_exists.sh
${full_input_path}
xsltproc ${params} ${type_params} -o ${output}
${stylesheet}
${full_input_path}
COMMAND ${CMAKE_COMMAND} -E touch ${output}.xsltproc-stamp
COMMENT "Generating ${output_relative} (if not dropped by po4a)"
DEPENDS ${DOC_STAMP} ${DOC_DEPENDS})
list(APPEND generated ${output}.xsltproc-stamp)
endforeach()
set(${DOC_STAMP_OUT} ${generated} PARENT_SCOPE)
endfunction()
# add_docbook(Name [ALL] [HTML] [TEXT] [MANPAGE]
# [INSTALL install dir]
# [DEPENDS depend ...]
# [DOCUMENTS documents ...]
# [LINGUAS lingua ...])
#
# Generate a target called name with all the documents being converted to
# the chosen output formats and translated to the chosen languages using po4a.
#
# For the translation support, the po4a.conf must be written so that
# translations for a document guide.xml are written to LANG/guide.LANG.xml,
# and for a manual page man.5.xml to a file called LANG/man.LANG.5.xml.
#
# The guide and manual page names may also contain a second component separated
# by a dot; it must, however, not be a valid language code.
#
# Note that po4a might choose not to generate a translated manual page for a
# given language if the translation rate is not high enough. We deal with this
# by creating stamp files.
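# A hypothetical invocation could look like this (the document names, install
# directory and languages are placeholders, not taken from this tree):
#   add_docbook(docbook-guides ALL HTML TEXT
#               DOCUMENTS guide.dbk offline.dbk
#               INSTALL ${CMAKE_INSTALL_DOCDIR}
#               LINGUAS de fr
#               DEPENDS ${translated_entities})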
function(add_docbook target)
set(generated "")
set(options HTML TEXT MANPAGE ALL)
set(oneValueArgs)
set(multiValueArgs INSTALL DOCUMENTS LINGUAS TRANSLATED_ENTITIES DEPENDS)
cmake_parse_arguments(DOC "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})
if (DOC_HTML)
list(APPEND formats HTML)
endif()
if (DOC_TEXT)
list(APPEND formats TEXT)
endif()
if (DOC_MANPAGE)
list(APPEND formats MANPAGE)
endif()
foreach(document ${DOC_TRANSLATED_ENTITIES})
foreach(lang ${DOC_LINGUAS})
po4a_one(po4a_stamp po4a_out ${document} "${lang}" "")
list(APPEND DOC_DEPENDS ${po4a_stamp})
endforeach()
endforeach()
foreach(document ${DOC_DOCUMENTS})
foreach(lang ${DOC_LINGUAS})
po4a_one(po4a_stamp po4a_out ${document} "${lang}" "${DOC_DEPENDS}")
xsltproc_one(STAMP_OUT xslt_stamp
STAMP ${po4a_stamp}
FULL_DOCUMENT ${po4a_out}
INSTALL ${DOC_INSTALL}
${formats})
list(APPEND stamps ${xslt_stamp})
endforeach()
xsltproc_one(STAMP_OUT xslt_stamp
STAMP ${document}
FULL_DOCUMENT ${document}
INSTALL ${DOC_INSTALL}
${formats})
list(APPEND stamps ${xslt_stamp})
endforeach()
if (DOC_ALL)
add_custom_target(${target} ALL DEPENDS ${stamps})
else()
add_custom_target(${target} DEPENDS ${stamps})
endif()
endfunction()
# Add an update-po4a target
function(add_update_po4a target pot header)
set(WRITE_HEADER "")
if (header)
set(WRITE_HEADER
COMMAND sed -n "/^\#$/,$p" ${pot} > ${pot}.headerfree
COMMAND cat ${header} ${pot}.headerfree > ${pot}
COMMAND rm ${pot}.headerfree
)
endif()
add_custom_target(${target}
COMMAND po4a --previous --no-backups --force --no-translations
--msgmerge-opt --add-location=file
--porefs noline,wrap
--package-name=${PROJECT_NAME}-doc --package-version=${PACKAGE_VERSION}
--msgid-bugs-address=${PACKAGE_MAIL} po4a.conf
${WRITE_HEADER}
VERBATIM
WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
)
endfunction()

CMake/FindBerkeleyDB.cmake (Normal file, +59 lines)
@@ -0,0 +1,59 @@
# - Try to find Berkeley DB
# Once done this will define
#
# BERKELEY_DB_FOUND - system has Berkeley DB
# BERKELEY_DB_INCLUDE_DIRS - the Berkeley DB include directory
# BERKELEY_DB_LIBRARIES - Link these to use Berkeley DB
# BERKELEY_DB_DEFINITIONS - Compiler switches required for using Berkeley DB
# Copyright (c) 2006, Alexander Dymo, <adymo@kdevelop.org>
# Copyright (c) 2016, Julian Andres Klode <jak@debian.org>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# We need NO_DEFAULT_PATH here, otherwise CMake helpfully picks up the wrong
# db.h on BSD systems instead of the Berkeley DB one.
find_path(BERKELEY_DB_INCLUDE_DIRS db.h
${CMAKE_INSTALL_FULL_INCLUDEDIR}/db5
/usr/local/include/db5
/usr/include/db5
${CMAKE_INSTALL_FULL_INCLUDEDIR}/db4
/usr/local/include/db4
/usr/include/db4
${CMAKE_INSTALL_FULL_INCLUDEDIR}
/usr/local/include
/usr/include
NO_DEFAULT_PATH
)
find_library(BERKELEY_DB_LIBRARIES NAMES db db-5)
include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(Berkeley "Could not find Berkeley DB >= 4.1" BERKELEY_DB_INCLUDE_DIRS BERKELEY_DB_LIBRARIES)
# show the BERKELEY_DB_INCLUDE_DIRS and BERKELEY_DB_LIBRARIES variables only in the advanced view
mark_as_advanced(BERKELEY_DB_INCLUDE_DIRS BERKELEY_DB_LIBRARIES)
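# Sketch of a consumer (the target name is a placeholder); the top-level
# CMakeLists.txt in this tree calls find_package(BerkeleyDB REQUIRED):
#   include_directories(${BERKELEY_DB_INCLUDE_DIRS})
#   target_link_libraries(some-target ${BERKELEY_DB_LIBRARIES})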

CMake/FindGcrypt.cmake (Normal file, +25 lines)
@@ -0,0 +1,25 @@
# - Try to find GCRYPT
# Once done, this will define
#
# GCRYPT_FOUND - system has GCRYPT
# GCRYPT_INCLUDE_DIRS - the GCRYPT include directories
# GCRYPT_LIBRARIES - the GCRYPT library
find_package(PkgConfig)
pkg_check_modules(GCRYPT_PKGCONF libgcrypt)
find_path(GCRYPT_INCLUDE_DIRS
NAMES gcrypt.h
PATHS ${GCRYPT_PKGCONF_INCLUDE_DIRS}
)
find_library(GCRYPT_LIBRARIES
NAMES gcrypt
PATHS ${GCRYPT_PKGCONF_LIBRARY_DIRS}
)
include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(GCRYPT DEFAULT_MSG GCRYPT_INCLUDE_DIRS GCRYPT_LIBRARIES)
mark_as_advanced(GCRYPT_INCLUDE_DIRS GCRYPT_LIBRARIES)
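# The pkg-config based Find modules below (LZ4, LZMA, Seccomp, Systemd, Udev,
# Zstd) follow this same pattern. Consumer sketch (target name is a placeholder):
#   find_package(Gcrypt REQUIRED)
#   target_include_directories(some-target PRIVATE ${GCRYPT_INCLUDE_DIRS})
#   target_link_libraries(some-target PRIVATE ${GCRYPT_LIBRARIES})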

CMake/FindIconv.cmake (Normal file, +20 lines)
@@ -0,0 +1,20 @@
find_path(ICONV_INCLUDE_DIR NAMES iconv.h)
find_library(ICONV_LIBRARY NAMES iconv)
if (ICONV_LIBRARY)
set(ICONV_SYMBOL_FOUND "${ICONV_LIBRARY}")
else()
check_function_exists(iconv_open ICONV_SYMBOL_FOUND)
endif()
include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(Iconv DEFAULT_MSG ICONV_INCLUDE_DIR ICONV_SYMBOL_FOUND)
if(ICONV_LIBRARY)
set(ICONV_LIBRARIES "${ICONV_LIBRARY}")
else()
set(ICONV_LIBRARIES)
endif()
set(ICONV_INCLUDE_DIRS "${ICONV_INCLUDE_DIR}")
mark_as_advanced(ICONV_LIBRARY ICONV_INCLUDE_DIR)

CMake/FindLFS.cmake (Normal file, +148 lines)
@@ -0,0 +1,148 @@
# CMake support for large files
#
# Copyright (C) 2016 Julian Andres Klode <jak@debian.org>.
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# This defines the following variables
#
# LFS_DEFINITIONS - List of definitions to pass to add_definitions()
# LFS_COMPILE_OPTIONS - List of definitions to pass to add_compile_options()
# LFS_LIBRARIES - List of libraries and linker flags
# LFS_FOUND - If there is Large files support
#
include(CheckCXXSourceCompiles)
include(FindPackageHandleStandardArgs)
# Test program to check for LFS. Requires that off_t is at least 8 bytes large.
set(_lfs_test_source
"
#include <sys/types.h>
typedef char my_static_assert[sizeof(off_t) >= 8 ? 1 : -1];
int main(void) { return 0; }
"
)
# Check if the given options are needed
#
# This appends to the variables _lfs_cppflags, _lfs_cflags, and _lfs_ldflags;
# it also sets LFS_FOUND to 1 if it works.
function(_lfs_check_compiler_option var options definitions libraries)
set(CMAKE_REQUIRED_QUIET 1)
set(CMAKE_REQUIRED_FLAGS ${CMAKE_REQUIRED_FLAGS} ${options})
set(CMAKE_REQUIRED_DEFINITIONS ${CMAKE_REQUIRED_DEFINITIONS} ${definitions})
set(CMAKE_REQUIRED_LIBRARIES ${CMAKE_REQUIRED_LIBRARIES} ${libraries})
message(STATUS "Looking for LFS support using ${options} ${definitions} ${libraries}")
check_cxx_source_compiles("${_lfs_test_source}" ${var})
if(${var})
message(STATUS "Looking for LFS support using ${options} ${definitions} ${libraries} - found")
set(_lfs_cppflags ${_lfs_cppflags} ${definitions} PARENT_SCOPE)
set(_lfs_cflags ${_lfs_cflags} ${options} PARENT_SCOPE)
set(_lfs_ldflags ${_lfs_ldflags} ${libraries} PARENT_SCOPE)
set(LFS_FOUND TRUE PARENT_SCOPE)
else()
message(STATUS "Looking for LFS support using ${options} ${definitions} ${libraries} - not found")
endif()
endfunction()
# Check for the availability of LFS.
# The cases handled are:
#
# * Native LFS
# * Output of getconf LFS_CFLAGS; getconf LFS_LIBS; getconf LFS_LDFLAGS
# * Preprocessor flag -D_FILE_OFFSET_BITS=64
# * Preprocessor flag -D_LARGE_FILES
#
function(_lfs_check)
set(_lfs_cflags)
set(_lfs_cppflags)
set(_lfs_ldflags)
set(_lfs_libs)
set(CMAKE_REQUIRED_QUIET 1)
message(STATUS "Looking for native LFS support")
check_cxx_source_compiles("${_lfs_test_source}" lfs_native)
if (lfs_native)
message(STATUS "Looking for native LFS support - found")
set(LFS_FOUND TRUE)
else()
message(STATUS "Looking for native LFS support - not found")
endif()
if (NOT LFS_FOUND)
# Check using getconf. If getconf fails, don't worry, the check in
# _lfs_check_compiler_option will fail as well.
execute_process(COMMAND getconf LFS_CFLAGS
OUTPUT_VARIABLE _lfs_cflags_raw
OUTPUT_STRIP_TRAILING_WHITESPACE
ERROR_QUIET)
execute_process(COMMAND getconf LFS_LIBS
OUTPUT_VARIABLE _lfs_libs_tmp
OUTPUT_STRIP_TRAILING_WHITESPACE
ERROR_QUIET)
execute_process(COMMAND getconf LFS_LDFLAGS
OUTPUT_VARIABLE _lfs_ldflags_tmp
OUTPUT_STRIP_TRAILING_WHITESPACE
ERROR_QUIET)
separate_arguments(_lfs_cflags_raw)
separate_arguments(_lfs_ldflags_tmp)
separate_arguments(_lfs_libs_tmp)
# Move -D flags to the place they are supposed to be
foreach(flag ${_lfs_cflags_raw})
if (flag MATCHES "-D.*")
list(APPEND _lfs_cppflags_tmp ${flag})
else()
list(APPEND _lfs_cflags_tmp ${flag})
endif()
endforeach()
# Check if the flags we received (if any) produce working LFS support
_lfs_check_compiler_option(lfs_getconf_works
"${_lfs_cflags_tmp}"
"${_lfs_cppflags_tmp}"
"${_lfs_libs_tmp};${_lfs_ldflags_tmp}")
endif()
if(NOT LFS_FOUND) # IRIX stuff
_lfs_check_compiler_option(lfs_need_n32 "-n32" "" "")
endif()
if(NOT LFS_FOUND) # Linux and friends
_lfs_check_compiler_option(lfs_need_file_offset_bits "" "-D_FILE_OFFSET_BITS=64" "")
endif()
if(NOT LFS_FOUND) # AIX
_lfs_check_compiler_option(lfs_need_large_files "" "-D_LARGE_FILES=1" "")
endif()
set(LFS_DEFINITIONS ${_lfs_cppflags} CACHE STRING "Extra definitions for large file support")
set(LFS_COMPILE_OPTIONS ${_lfs_cflags} CACHE STRING "Extra definitions for large file support")
set(LFS_LIBRARIES ${_lfs_libs} ${_lfs_ldflags} CACHE STRING "Extra definitions for large file support")
set(LFS_FOUND ${LFS_FOUND} CACHE INTERNAL "Found LFS")
endfunction()
if (NOT LFS_FOUND)
_lfs_check()
endif()
find_package_handle_standard_args(LFS "Could not find LFS. Set LFS_DEFINITIONS, LFS_COMPILE_OPTIONS, LFS_LIBRARIES." LFS_FOUND)
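# Consumed by the top-level CMakeLists.txt in this tree roughly as:
#   find_package(LFS REQUIRED)
#   add_compile_options(${LFS_COMPILE_OPTIONS})
#   add_definitions(${LFS_DEFINITIONS})
#   link_libraries(${LFS_LIBRARIES})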

CMake/FindLZ4.cmake (Normal file, +25 lines)
@@ -0,0 +1,25 @@
# - Try to find LZ4
# Once done, this will define
#
# LZ4_FOUND - system has LZ4
# LZ4_INCLUDE_DIRS - the LZ4 include directories
# LZ4_LIBRARIES - the LZ4 library
find_package(PkgConfig)
pkg_check_modules(LZ4_PKGCONF liblz4)
find_path(LZ4_INCLUDE_DIRS
NAMES lz4frame.h
PATHS ${LZ4_PKGCONF_INCLUDE_DIRS}
)
find_library(LZ4_LIBRARIES
NAMES lz4
PATHS ${LZ4_PKGCONF_LIBRARY_DIRS}
)
include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(LZ4 DEFAULT_MSG LZ4_INCLUDE_DIRS LZ4_LIBRARIES)
mark_as_advanced(LZ4_INCLUDE_DIRS LZ4_LIBRARIES)

CMake/FindLZMA.cmake (Normal file, +25 lines)
@@ -0,0 +1,25 @@
# - Try to find LZMA
# Once done, this will define
#
# LZMA_FOUND - system has LZMA
# LZMA_INCLUDE_DIRS - the LZMA include directories
# LZMA_LIBRARIES - the LZMA library
find_package(PkgConfig)
pkg_check_modules(LZMA_PKGCONF liblzma)
find_path(LZMA_INCLUDE_DIRS
NAMES lzma.h
PATHS ${LZMA_PKGCONF_INCLUDE_DIRS}
)
find_library(LZMA_LIBRARIES
NAMES lzma
PATHS ${LZMA_PKGCONF_LIBRARY_DIRS}
)
include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(LZMA DEFAULT_MSG LZMA_INCLUDE_DIRS LZMA_LIBRARIES)
mark_as_advanced(LZMA_INCLUDE_DIRS LZMA_LIBRARIES)

CMake/FindSeccomp.cmake (Normal file, +25 lines)
@@ -0,0 +1,25 @@
# - Try to find SECCOMP
# Once done, this will define
#
# SECCOMP_FOUND - system has SECCOMP
# SECCOMP_INCLUDE_DIRS - the SECCOMP include directories
# SECCOMP_LIBRARIES - the SECCOMP library
find_package(PkgConfig)
pkg_check_modules(SECCOMP_PKGCONF libseccomp)
find_path(SECCOMP_INCLUDE_DIRS
NAMES seccomp.h
PATHS ${SECCOMP_PKGCONF_INCLUDE_DIRS}
)
find_library(SECCOMP_LIBRARIES
NAMES seccomp
PATHS ${SECCOMP_PKGCONF_LIBRARY_DIRS}
)
include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(SECCOMP DEFAULT_MSG SECCOMP_INCLUDE_DIRS SECCOMP_LIBRARIES)
mark_as_advanced(SECCOMP_INCLUDE_DIRS SECCOMP_LIBRARIES)

CMake/FindSystemd.cmake (Normal file, +24 lines)
@@ -0,0 +1,24 @@
# - Try to find SYSTEMD
# Once done, this will define
#
# SYSTEMD_FOUND - system has SYSTEMD
# SYSTEMD_INCLUDE_DIRS - the SYSTEMD include directories
# SYSTEMD_LIBRARIES - the SYSTEMD library
find_package(PkgConfig)
pkg_check_modules(SYSTEMD_PKGCONF libsystemd)
find_path(SYSTEMD_INCLUDE_DIRS
NAMES systemd/sd-bus.h
PATHS ${SYSTEMD_PKGCONF_INCLUDE_DIRS}
)
find_library(SYSTEMD_LIBRARIES
NAMES systemd
PATHS ${SYSTEMD_PKGCONF_LIBRARY_DIRS}
)
include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(Systemd DEFAULT_MSG SYSTEMD_INCLUDE_DIRS SYSTEMD_LIBRARIES)
mark_as_advanced(SYSTEMD_INCLUDE_DIRS SYSTEMD_LIBRARIES)

CMake/FindUdev.cmake (Normal file, +25 lines)
@@ -0,0 +1,25 @@
# - Try to find UDEV
# Once done, this will define
#
# UDEV_FOUND - system has UDEV
# UDEV_INCLUDE_DIRS - the UDEV include directories
# UDEV_LIBRARIES - the UDEV library
find_package(PkgConfig)
pkg_check_modules(UDEV_PKGCONF libudev)
find_path(UDEV_INCLUDE_DIRS
NAMES libudev.h
PATHS ${UDEV_PKGCONF_INCLUDE_DIRS}
)
find_library(UDEV_LIBRARIES
NAMES udev
PATHS ${UDEV_PKGCONF_LIBRARY_DIRS}
)
include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(Udev DEFAULT_MSG UDEV_INCLUDE_DIRS UDEV_LIBRARIES)
mark_as_advanced(UDEV_INCLUDE_DIRS UDEV_LIBRARIES)

CMake/FindZstd.cmake (Normal file, +25 lines)
@@ -0,0 +1,25 @@
# - Try to find ZSTD
# Once done, this will define
#
# ZSTD_FOUND - system has ZSTD
# ZSTD_INCLUDE_DIRS - the ZSTD include directories
# ZSTD_LIBRARIES - the ZSTD library
find_package(PkgConfig)
pkg_check_modules(ZSTD_PKGCONF libzstd)
find_path(ZSTD_INCLUDE_DIRS
NAMES zstd.h
PATHS ${ZSTD_PKGCONF_INCLUDE_DIRS}
)
find_library(ZSTD_LIBRARIES
NAMES zstd
PATHS ${ZSTD_PKGCONF_LIBRARY_DIRS}
)
include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(ZSTD DEFAULT_MSG ZSTD_INCLUDE_DIRS ZSTD_LIBRARIES)
mark_as_advanced(ZSTD_INCLUDE_DIRS ZSTD_LIBRARIES)

CMake/Misc.cmake (Normal file, +101 lines)
@@ -0,0 +1,101 @@
include(CheckCXXCompilerFlag)
# Flatten our header structure
function(flatify target headers)
foreach(header ${headers})
get_filename_component(tgt ${header} NAME)
configure_file(${header} ${target}/${tgt} COPYONLY)
endforeach(header ${headers})
endfunction()
function(add_optional_compile_options flags)
foreach(flag ${flags})
check_cxx_compiler_flag(-${flag} have-compiler-flag:-${flag})
if (have-compiler-flag:-${flag})
add_compile_options("-${flag}")
endif()
endforeach()
endfunction()
# Substitute vendor references in a file
function(add_vendor_file)
set(options)
set(oneValueArgs OUTPUT INPUT MODE)
set(multiValueArgs VARIABLES)
cmake_parse_arguments(AVF "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})
set(in ${CMAKE_CURRENT_SOURCE_DIR}/${AVF_INPUT})
set(out ${CMAKE_CURRENT_BINARY_DIR}/${AVF_OUTPUT})
add_custom_command(
OUTPUT ${out}
COMMAND ${CMAKE_COMMAND} -DPROJECT_SOURCE_DIR=${PROJECT_SOURCE_DIR}
"-DVARS=${AVF_VARIABLES}"
-DCURRENT_VENDOR=${CURRENT_VENDOR}
-DIN=${in}
-DOUT=${out}
-P ${PROJECT_SOURCE_DIR}/CMake/vendor_substitute.cmake
COMMAND chmod ${AVF_MODE} ${out}
DEPENDS ${in}
${PROJECT_SOURCE_DIR}/doc/apt-verbatim.ent
${PROJECT_SOURCE_DIR}/vendor/${CURRENT_VENDOR}/apt-vendor.ent
${PROJECT_SOURCE_DIR}/vendor/getinfo
${PROJECT_SOURCE_DIR}/CMake/vendor_substitute.cmake
VERBATIM
)
# Would like to use ${AVF_OUTPUT} as target name, but then ninja gets
# cycles.
add_custom_target(vendor-${AVF_OUTPUT} ALL DEPENDS ${out})
endfunction()
# Add symbolic links to a file
function(add_slaves destination master)
set(slaves "")
foreach(slave ${ARGN})
add_custom_command(OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/${slave}
COMMAND ${CMAKE_COMMAND} -E create_symlink ${master} ${CMAKE_CURRENT_BINARY_DIR}/${slave})
list(APPEND slaves ${CMAKE_CURRENT_BINARY_DIR}/${slave})
endforeach()
STRING(REPLACE "/" "-" master "${master}")
add_custom_target(${master}-slaves ALL DEPENDS ${slaves})
install(FILES ${slaves} DESTINATION ${destination})
endfunction()
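# Illustrative call (the file names are placeholders): installs foo-alias.8,
# a symlink pointing at foo.8, into the man8 directory:
#   add_slaves(${CMAKE_INSTALL_MANDIR}/man8 foo.8 foo-alias.8)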
# Generates a simple version script versioning everything with current SOVERSION
function(add_version_script target)
get_target_property(soversion ${target} SOVERSION)
set(script "${CMAKE_CURRENT_BINARY_DIR}/${target}.versionscript")
string(REPLACE "-" "" name "${target}_${soversion}")
string(TOUPPER "${name}" name)
add_custom_command(OUTPUT "${script}"
COMMAND echo "${name} {global: *; };" > "${script}"
VERBATIM )
add_custom_target(${target}-versionscript DEPENDS "${script}")
target_link_libraries(${target} PRIVATE -Wl,-version-script="${script}")
add_dependencies(${target} ${target}-versionscript)
endfunction()
function(path_join out path1 path2)
string(SUBSTRING ${path2} 0 1 init_char)
if ("${init_char}" STREQUAL "/")
set(${out} "${path2}" PARENT_SCOPE)
else()
set(${out} "${path1}/${path2}" PARENT_SCOPE)
endif()
endfunction()
# install_empty_directories(path ...)
#
# Creates empty directories in the install destination dir. Paths may be
# absolute or relative; in the latter case, the value of CMAKE_INSTALL_PREFIX
# is prepended.
function(install_empty_directories)
foreach(path ${ARGN})
path_join(full_path "${CMAKE_INSTALL_PREFIX}" "${path}")
INSTALL(CODE "MESSAGE(STATUS \"Creating directory: \$ENV{DESTDIR}${full_path}\")"
CODE "FILE(MAKE_DIRECTORY \$ENV{DESTDIR}${full_path})")
endforeach()
endfunction()

CMake/Translations.cmake (Normal file, +185 lines)
@@ -0,0 +1,185 @@
# translations.cmake - Translations using APT's translation system.
# Copyright (C) 2009, 2016 Julian Andres Klode <jak@debian.org>
function(apt_add_translation_domain)
set(options)
set(oneValueArgs DOMAIN)
set(multiValueArgs TARGETS SCRIPTS EXCLUDE_LANGUAGES)
cmake_parse_arguments(NLS "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})
# Build the list of source files of the target
set(files "")
set(abs_files "")
set(scripts "")
set(abs_scripts "")
set(mofiles)
set(targets ${NLS_TARGETS})
set(domain ${NLS_DOMAIN})
set(xgettext_params
--add-comments
--foreign
--package-name=${PROJECT_NAME}
--package-version=${PACKAGE_VERSION}
--msgid-bugs-address=${PACKAGE_MAIL}
)
foreach(source ${NLS_SCRIPTS})
path_join(file "${CMAKE_CURRENT_SOURCE_DIR}" "${source}")
file(RELATIVE_PATH relfile ${PROJECT_SOURCE_DIR} ${file})
list(APPEND scripts ${relfile})
list(APPEND abs_scripts ${file})
endforeach()
foreach(target ${targets})
get_target_property(source_dir ${target} SOURCE_DIR)
get_target_property(sources ${target} SOURCES)
foreach(source ${sources})
if (source MATCHES TARGET_OBJECTS)
continue()
endif()
path_join(file "${source_dir}" "${source}")
file(RELATIVE_PATH relfile ${PROJECT_SOURCE_DIR} ${file})
set(files ${files} ${relfile})
set(abs_files ${abs_files} ${file})
endforeach()
target_compile_definitions(${target} PRIVATE -DAPT_DOMAIN="${domain}")
endforeach()
if("${scripts}" STREQUAL "")
set(sh_pot "/dev/null")
else()
set(sh_pot ${CMAKE_CURRENT_BINARY_DIR}/${domain}.sh.pot)
# Create the template for this specific sub-domain
add_custom_command (OUTPUT ${sh_pot}
COMMAND xgettext ${xgettext_params} -L Shell
-o ${sh_pot} ${scripts}
DEPENDS ${abs_scripts}
VERBATIM
WORKING_DIRECTORY ${PROJECT_SOURCE_DIR}
)
endif()
add_custom_command (OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/${domain}.c.pot
COMMAND xgettext ${xgettext_params} -k_ -kN_
--keyword=P_:1,2
-o ${CMAKE_CURRENT_BINARY_DIR}/${domain}.c.pot ${files}
DEPENDS ${abs_files}
VERBATIM
WORKING_DIRECTORY ${PROJECT_SOURCE_DIR}
)
# We are building a ${domain}.pot with a header for launchpad, but we also
# build a ${domain}.pot-tmp as a byproduct. The msgfmt command then depends
# on the byproduct while its target depends on the output, so that msgfmt
# does not have to be rerun if nothing in the template changed.
#
# Make sure the .pot-tmp has no line numbers, to avoid useless rebuilding
# of .mo files.
add_custom_command (OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/${domain}.pot
BYPRODUCTS ${CMAKE_CURRENT_BINARY_DIR}/${domain}.pot-tmp
COMMAND msgcomm --more-than=0 --omit-header --sort-by-file --add-location=file
${sh_pot}
${CMAKE_CURRENT_BINARY_DIR}/${domain}.c.pot
--output=${CMAKE_CURRENT_BINARY_DIR}/${domain}.pot-tmp0
COMMAND msgcomm --more-than=0 --sort-by-file
${sh_pot}
${CMAKE_CURRENT_BINARY_DIR}/${domain}.c.pot
--output=${CMAKE_CURRENT_BINARY_DIR}/${domain}.pot
COMMAND cmake -E copy_if_different
${CMAKE_CURRENT_BINARY_DIR}/${domain}.pot-tmp0
${CMAKE_CURRENT_BINARY_DIR}/${domain}.pot-tmp
DEPENDS ${sh_pot}
${CMAKE_CURRENT_BINARY_DIR}/${domain}.c.pot
WORKING_DIRECTORY ${PROJECT_SOURCE_DIR}
)
# We need a target to depend on; otherwise, the msgmerge might not get called
# with the make generator.
add_custom_target(nls-${domain}-template DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/${domain}.pot)
# Build .mo files
file(GLOB translations "${PROJECT_SOURCE_DIR}/po/*.po")
list(SORT translations)
foreach(file ${translations})
get_filename_component(langcode ${file} NAME_WE)
if ("${langcode}" IN_LIST NLS_EXCLUDE_LANGUAGES)
continue()
endif()
set(outdir ${CMAKE_CURRENT_BINARY_DIR}/locale/${langcode}/LC_MESSAGES)
file(MAKE_DIRECTORY ${outdir})
# Command to merge and compile the messages. As explained in the custom
# command for msgcomm, this depends on byproduct to avoid reruns
add_custom_command(OUTPUT ${outdir}/${domain}.po
COMMAND msgmerge -qo ${outdir}/${domain}.po ${file} ${CMAKE_CURRENT_BINARY_DIR}/${domain}.pot-tmp
DEPENDS ${file} ${CMAKE_CURRENT_BINARY_DIR}/${domain}.pot-tmp
)
add_custom_command(OUTPUT ${outdir}/${domain}.mo
COMMAND msgfmt --statistics -o ${outdir}/${domain}.mo ${outdir}/${domain}.po
DEPENDS ${outdir}/${domain}.po
)
set(mofiles ${mofiles} ${outdir}/${domain}.mo)
install(FILES ${outdir}/${domain}.mo
DESTINATION "${CMAKE_INSTALL_LOCALEDIR}/${langcode}/LC_MESSAGES")
endforeach(file ${translations})
add_custom_target(nls-${domain} ALL DEPENDS ${mofiles} nls-${domain}-template)
endfunction()
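# Illustrative call (domain and target names are placeholders):
#   apt_add_translation_domain(DOMAIN libapt-pkg TARGETS apt-pkg)
# merges po/<lang>.po against the generated template and installs the compiled
# .mo files under ${CMAKE_INSTALL_LOCALEDIR}/<lang>/LC_MESSAGES/.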
# Usage: apt_add_update_po(TEMPLATE template DOMAINS domain [domain ...] [EXCLUDE_LANGUAGES lang ...])
function(apt_add_update_po)
set(options)
set(oneValueArgs TEMPLATE)
set(multiValueArgs DOMAINS EXCLUDE_LANGUAGES)
cmake_parse_arguments(NLS "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})
set(output ${CMAKE_CURRENT_SOURCE_DIR}/${NLS_TEMPLATE}.pot)
foreach(domain ${NLS_DOMAINS})
list(APPEND potfiles ${CMAKE_CURRENT_BINARY_DIR}/${domain}.pot)
endforeach()
get_filename_component(master_name ${output} NAME_WE)
add_custom_target(nls-${master_name}
COMMAND msgcomm --sort-by-file --add-location=file
--more-than=0 --output=${output}
${potfiles}
DEPENDS ${potfiles})
file(GLOB translations "${PROJECT_SOURCE_DIR}/po/*.po")
if (NOT TARGET update-po)
add_custom_target(update-po)
endif()
foreach(translation ${translations})
get_filename_component(langcode ${translation} NAME_WE)
if ("${langcode}" IN_LIST NLS_EXCLUDE_LANGUAGES)
continue()
endif()
add_custom_target(update-po-${langcode}
COMMAND msgmerge -q --previous --update --backup=none ${translation} ${output}
DEPENDS nls-${master_name}
)
add_dependencies(update-po update-po-${langcode})
endforeach()
add_dependencies(update-po nls-${master_name})
endfunction()
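# Illustrative call (names are placeholders): combines the per-domain templates
# into apt-all.pot and provides update-po / update-po-<lang> targets:
#   apt_add_update_po(TEMPLATE apt-all DOMAINS apt libapt-pkg)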
function(apt_add_po_statistics excluded)
add_custom_target(statistics)
file(GLOB translations "${PROJECT_SOURCE_DIR}/po/*.po")
foreach(translation ${translations})
get_filename_component(langcode ${translation} NAME_WE)
if ("${langcode}" IN_LIST excluded)
add_custom_command(
TARGET statistics PRE_BUILD
COMMAND printf "%-6s " "${langcode}:"
COMMAND echo "ignored"
VERBATIM
)
continue()
endif()
add_custom_command(
TARGET statistics PRE_BUILD
COMMAND printf "%-6s " "${langcode}:"
COMMAND msgfmt --statistics -o /dev/null ${translation}
VERBATIM
)
endforeach()
endfunction()

CMake/apti18n.h.in (Normal file, +29 lines)
@@ -0,0 +1,29 @@
// -*- mode: cpp; mode: fold -*-
/* Internationalization macros for apt. This header should be included last
in each C file. */
// Set by autoconf
#cmakedefine USE_NLS
#ifdef USE_NLS
// apt will use the gettext implementation of the C library
#include <libintl.h>
#include <locale.h>
# ifdef APT_DOMAIN
# define _(x) dgettext(APT_DOMAIN,x)
# define P_(msg,plural,n) dngettext(APT_DOMAIN,msg,plural,n)
# else
# define _(x) gettext(x)
# define P_(msg,plural,n) ngettext(msg,plural,n)
# endif
# define N_(x) x
#else
// apt will not use any gettext
# define setlocale(a, b)
# define textdomain(a)
# define bindtextdomain(a, b)
# define _(x) x
# define P_(msg,plural,n) (n == 1 ? msg : plural)
# define N_(x) x
# define dgettext(d, m) m
#endif

CMake/config.h.in (Normal file, +88 lines)
@@ -0,0 +1,88 @@
/* Define if your processor stores words with the most significant
byte first (like Motorola and SPARC, unlike Intel and VAX). */
#cmakedefine WORDS_BIGENDIAN
/* Define if we have the timegm() function */
#cmakedefine HAVE_TIMEGM
/* Define if we have the zlib library for gzip */
#cmakedefine HAVE_ZLIB
/* Define if we have the bz2 library for bzip2 */
#cmakedefine HAVE_BZ2
/* Define if we have the lzma library for lzma/xz */
#cmakedefine HAVE_LZMA
/* Define if we have the lz4 library for lz4 */
#cmakedefine HAVE_LZ4
/* Define if we have the zstd library for zstd */
#cmakedefine HAVE_ZSTD
/* Define if we have the systemd library */
#cmakedefine HAVE_SYSTEMD
/* Define if we have the udev library */
#cmakedefine HAVE_UDEV
/* Define if we have the seccomp library */
#cmakedefine HAVE_SECCOMP
/* These two are used by the statvfs shim for glibc2.0 and bsd */
/* Define if we have sys/vfs.h */
#cmakedefine HAVE_VFS_H
#cmakedefine HAVE_STRUCT_STATFS_F_TYPE
/* Define if we have sys/mount.h */
#cmakedefine HAVE_MOUNT_H
/* Define if we have sys/endian.h */
#cmakedefine HAVE_SYS_ENDIAN_H
/* Define if we have machine/endian.h */
#cmakedefine HAVE_MACHINE_ENDIAN_H
/* Check for getresuid() function and similar ones */
#cmakedefine HAVE_GETRESUID
#cmakedefine HAVE_GETRESGID
#cmakedefine HAVE_SETRESUID
#cmakedefine HAVE_SETRESGID
/* Check for ptsname_r() */
#cmakedefine HAVE_PTSNAME_R
/* Define the arch name string */
#define COMMON_ARCH "${COMMON_ARCH}"
/* The package name string */
#define PACKAGE "${PACKAGE}"
/* The version number string */
#define PACKAGE_VERSION "${PACKAGE_VERSION}"
/* The mail address to reach upstream */
#define PACKAGE_MAIL "${PACKAGE_MAIL}"
/* Guard for code that should only be emitted when compiling apt */
#define APT_COMPILING_APT
/* Various directories */
#cmakedefine CMAKE_INSTALL_FULL_BINDIR "${CMAKE_INSTALL_FULL_BINDIR}"
#cmakedefine STATE_DIR "${STATE_DIR}"
#cmakedefine CACHE_DIR "${CACHE_DIR}"
#cmakedefine LOG_DIR "${LOG_DIR}"
#cmakedefine CONF_DIR "${CONF_DIR}"
#cmakedefine LIBEXEC_DIR "${LIBEXEC_DIR}"
#cmakedefine BIN_DIR "${BIN_DIR}"
#cmakedefine DPKG_DATADIR "${DPKG_DATADIR}"
/* Group of the root user */
#cmakedefine ROOT_GROUP "${ROOT_GROUP}"
/* defined if __builtin_ia32_crc32{s,d}i() exists in an sse4.2 target */
#cmakedefine HAVE_FMV_SSE42_AND_CRC32
#cmakedefine HAVE_FMV_SSE42_AND_CRC32DI
/* unrolling is faster combined with an optimizing compiler */
#define SHA2_UNROLL_TRANSFORM

CMake/endian.h.in (Normal file, +9 lines)
@@ -0,0 +1,9 @@
#include <config.h>
#ifdef HAVE_MACHINE_ENDIAN_H
#include <machine/endian.h>
#endif
#ifdef HAVE_SYS_ENDIAN_H
#include <sys/types.h>
#include <sys/endian.h>
#endif

CMake/run_if_exists.sh (Executable file, +16 lines)
@@ -0,0 +1,16 @@
#!/bin/sh
# Small helper: run the given command only if the first (non-option) argument
# names an existing file; with --stdout FILE, redirect the command's output to FILE.
out=""
if [ "$1" = "--stdout" ]; then
    out="$2"
    shift 2
fi

if [ -e "$1" ]; then
    shift
    if [ "$out" ]; then
        exec "$@" > "$out"
    else
        exec "$@"
    fi
fi

CMake/statvfs.h.in (Normal file, +13 lines)
@@ -0,0 +1,13 @@
/* Compatibility for systems without the Single Unix Spec statvfs */
#include <config.h>
#ifdef HAVE_VFS_H
#include <sys/vfs.h>
#endif
#ifdef HAVE_MOUNT_H
#include <sys/param.h>
#include <sys/mount.h>
#endif
#define statvfs statfs

CMake/vendor_substitute.cmake (Normal file, +8 lines)
@@ -0,0 +1,8 @@
file(READ ${IN} input)
foreach(variable ${VARS})
execute_process(COMMAND ${PROJECT_SOURCE_DIR}/vendor/getinfo
--vendor ${CURRENT_VENDOR} ${variable}
OUTPUT_VARIABLE value OUTPUT_STRIP_TRAILING_WHITESPACE)
string(REPLACE "&${variable};" "${value}" input "${input}")
endforeach()
file(WRITE ${OUT} "${input}")

CMakeLists.txt (Normal file, +271 lines)
@@ -0,0 +1,271 @@
# Copyright (C) 2009, 2016 Julian Andres Klode <jak@debian.org>.
# Licensed under the same terms as APT; i.e. GPL 2 or later.
# set minimum version
project(apt)
cmake_minimum_required(VERSION 3.4.0)
# Generic header locations
include_directories(${PROJECT_BINARY_DIR}/include)
enable_testing()
option(WITH_DOC "Build documentation." ON)
option(WITH_TESTS "Build tests" ON)
option(USE_NLS "Localisation support." ON)
set(CMAKE_MODULE_PATH "${PROJECT_SOURCE_DIR}/CMake")
# Add coverage target
set(CMAKE_CXX_FLAGS_COVERAGE "-g -fprofile-arcs -ftest-coverage")
set(CMAKE_EXE_LINKER_FLAGS_COVERAGE "-lgcov")
set(CMAKE_SHARED_LINKER_FLAGS_COVERAGE "-lgcov")
# Work around bug in GNUInstallDirs
if (EXISTS "/etc/debian_version")
set(CMAKE_INSTALL_LIBEXECDIR "lib")
endif()
# Include stuff
include(Misc)
include(CheckIncludeFiles)
include(CheckFunctionExists)
include(CheckStructHasMember)
include(GNUInstallDirs)
include(TestBigEndian)
find_package(Threads REQUIRED)
find_package(LFS REQUIRED)
find_package(Iconv REQUIRED)
find_package(Perl REQUIRED)
find_program(TRIEHASH_EXECUTABLE NAMES triehash)
if (NOT TRIEHASH_EXECUTABLE)
message(FATAL_ERROR "Could not find triehash executable")
endif()
if(USE_NLS)
find_package(Intl REQUIRED)
link_libraries(${Intl_LIBRARIES})
include_directories(${Intl_INCLUDE_DIRS})
endif()
# Add large file support
add_compile_options(${LFS_COMPILE_OPTIONS})
add_definitions(${LFS_DEFINITIONS})
link_libraries(${LFS_LIBRARIES})
# Set compiler flags
set(CMAKE_CXX_STANDARD 14)
set(CMAKE_CXX_STANDARD_REQUIRED ON)
set(CMAKE_VISIBILITY_INLINES_HIDDEN 1)
add_optional_compile_options(Wall)
add_optional_compile_options(Wextra)
add_optional_compile_options(Wcast-align)
add_optional_compile_options(Wlogical-op)
add_optional_compile_options(Wredundant-decls)
add_optional_compile_options(Wmissing-declarations)
add_optional_compile_options(Wunsafe-loop-optimizations)
add_optional_compile_options(Wctor-dtor-privacy)
add_optional_compile_options(Wdisabled-optimization)
add_optional_compile_options(Winit-self)
add_optional_compile_options(Wmissing-include-dirs)
add_optional_compile_options(Wnoexcept)
add_optional_compile_options(Wsign-promo)
add_optional_compile_options(Wundef)
add_optional_compile_options(Wdouble-promotion)
add_optional_compile_options(Wsuggest-override)
add_optional_compile_options(Werror=suggest-override)
add_optional_compile_options(Werror=return-type)
# apt-ftparchive dependencies
find_package(BerkeleyDB REQUIRED)
if (BERKELEY_DB_FOUND)
set(HAVE_BDB 1)
endif()
find_package(GnuTLS REQUIRED)
if (GNUTLS_FOUND)
set(HAVE_GNUTLS 1)
endif()
# (De)Compressor libraries
find_package(ZLIB REQUIRED)
if (ZLIB_FOUND)
set(HAVE_ZLIB 1)
endif()
find_package(BZip2 REQUIRED)
if (BZIP2_FOUND)
set(HAVE_BZ2 1)
endif()
find_package(LZMA REQUIRED)
if (LZMA_FOUND)
set(HAVE_LZMA 1)
endif()
find_package(LZ4 REQUIRED)
if (LZ4_FOUND)
set(HAVE_LZ4 1)
endif()
find_package(Zstd)
if (ZSTD_FOUND)
set(HAVE_ZSTD 1)
endif()
find_package(Udev)
if (UDEV_FOUND)
set(HAVE_UDEV 1)
endif()
find_package(Systemd)
if (SYSTEMD_FOUND)
set(HAVE_SYSTEMD 1)
endif()
find_package(Seccomp)
if (SECCOMP_FOUND)
set(HAVE_SECCOMP 1)
endif()
find_package(Gcrypt REQUIRED)
# Mount()ing and stat()ing and friends
check_symbol_exists(statfs sys/vfs.h HAVE_VFS_H)
check_include_files(sys/params.h HAVE_PARAMS_H)
check_symbol_exists(statfs sys/mount.h HAVE_MOUNT_H)
if (NOT HAVE_VFS_H AND NOT HAVE_MOUNT_H)
message(FATAL_ERROR "Can find neither statvfs() nor statfs()")
endif()
check_function_exists(statvfs HAVE_STATVFS)
if (NOT HAVE_STATVFS)
configure_file(CMake/statvfs.h.in ${PROJECT_BINARY_DIR}/include/sys/statvfs.h COPYONLY)
endif()
CHECK_STRUCT_HAS_MEMBER("struct statfs" f_type sys/vfs.h HAVE_STRUCT_STATFS_F_TYPE)
# Other checks
check_function_exists(getresuid HAVE_GETRESUID)
check_function_exists(getresgid HAVE_GETRESGID)
check_function_exists(setresuid HAVE_SETRESUID)
check_function_exists(setresgid HAVE_SETRESGID)
check_function_exists(ptsname_r HAVE_PTSNAME_R)
check_function_exists(timegm HAVE_TIMEGM)
test_big_endian(WORDS_BIGENDIAN)
# FreeBSD
add_definitions(-D_WITH_GETLINE=1)
CHECK_INCLUDE_FILES(machine/endian.h HAVE_MACHINE_ENDIAN_H)
CHECK_INCLUDE_FILES(sys/endian.h HAVE_SYS_ENDIAN_H)
CHECK_INCLUDE_FILES(endian.h HAVE_ENDIAN_H)
if (NOT HAVE_ENDIAN_H)
if (HAVE_MACHINE_ENDIAN_H OR HAVE_SYS_ENDIAN_H)
configure_file(CMake/endian.h.in ${PROJECT_BINARY_DIR}/include/endian.h)
else()
message(FATAL_ERROR "Cannot find endian.h")
endif()
endif()
include(CheckTypeSize)
set(CMAKE_EXTRA_INCLUDE_FILES "signal.h")
check_type_size("sig_t" SIG_T LANGUAGE "CXX")
check_type_size("sighandler_t" SIGHANDLER_T LANGUAGE "CXX")
set(CMAKE_EXTRA_INCLUDE_FILES)
if (NOT HAVE_SIGHANDLER_T)
if (HAVE_SIG_T)
add_definitions(-Dsighandler_t=sig_t)
else()
message(FATAL_ERROR "Platform defines neither sig_t nor sighandler_t")
endif()
endif()
# Handle resolving
check_function_exists(res_ninit HAVE_LIBC_RESOLV)
if(HAVE_LIBC_RESOLV)
set(RESOLV_LIBRARIES)
else()
set(RESOLV_LIBRARIES -lresolv)
endif()
# Check multiversioning
include(CheckCxxTarget)
check_cxx_target(HAVE_FMV_SSE42_AND_CRC32 "sse4.2" "__builtin_ia32_crc32si(0, 1llu);")
check_cxx_target(HAVE_FMV_SSE42_AND_CRC32DI "sse4.2" "__builtin_ia32_crc32di(0, 1llu);")
# Configure some variables like package, version and architecture.
set(PACKAGE ${PROJECT_NAME})
set(PACKAGE_MAIL "APT Development Team <deity@lists.debian.org>")
set(PACKAGE_VERSION "2.0.2")
string(REGEX MATCH "^[0-9.]+" PROJECT_VERSION ${PACKAGE_VERSION})
if (NOT DEFINED DPKG_DATADIR)
execute_process(COMMAND ${PERL_EXECUTABLE} -MDpkg -e "print $Dpkg::DATADIR;"
OUTPUT_VARIABLE DPKG_DATADIR_CMD OUTPUT_STRIP_TRAILING_WHITESPACE)
message(STATUS "Found dpkg data dir: ${DPKG_DATADIR_CMD}")
set(DPKG_DATADIR "${DPKG_DATADIR_CMD}" CACHE PATH "dpkg data directory")
endif()
if (NOT DEFINED COMMON_ARCH)
execute_process(COMMAND dpkg-architecture -qDEB_HOST_ARCH
OUTPUT_VARIABLE COMMON_ARCH OUTPUT_STRIP_TRAILING_WHITESPACE)
endif()
if (NOT DEFINED ROOT_GROUP)
execute_process(COMMAND id -gn root
OUTPUT_VARIABLE ROOT_GROUP OUTPUT_STRIP_TRAILING_WHITESPACE)
message(STATUS "Found root group: ${ROOT_GROUP}")
endif()
set(ROOT_GROUP "${ROOT_GROUP}" CACHE STRING "Group of root (e.g.: wheel or root)")
# Set various directories
set(STATE_DIR "${CMAKE_INSTALL_FULL_LOCALSTATEDIR}/lib/apt" CACHE PATH "Your /var/lib/apt")
set(CACHE_DIR "${CMAKE_INSTALL_FULL_LOCALSTATEDIR}/cache/apt" CACHE PATH "Your /var/cache/apt")
set(LOG_DIR "${CMAKE_INSTALL_FULL_LOCALSTATEDIR}/log/apt" CACHE PATH "Your /var/log/apt")
set(CONF_DIR "${CMAKE_INSTALL_FULL_SYSCONFDIR}/apt" CACHE PATH "Your /etc/apt")
set(LIBEXEC_DIR "${CMAKE_INSTALL_FULL_LIBEXECDIR}/apt" CACHE PATH "Your /usr/libexec/apt")
set(BIN_DIR "${CMAKE_INSTALL_FULL_BINDIR}")
# Configure our configuration headers (config.h and apti18n.h)
configure_file(CMake/config.h.in ${PROJECT_BINARY_DIR}/include/config.h)
configure_file(CMake/apti18n.h.in ${PROJECT_BINARY_DIR}/include/apti18n.h)
# Add our subdirectories
add_subdirectory(vendor)
add_subdirectory(apt-pkg)
add_subdirectory(apt-private)
add_subdirectory(cmdline)
add_subdirectory(completions)
add_subdirectory(doc)
add_subdirectory(dselect)
add_subdirectory(ftparchive)
add_subdirectory(methods)
add_subdirectory(test)
if (USE_NLS)
add_subdirectory(po)
# Link update-po4a into the update-po target
add_dependencies(update-po update-po4a)
endif()
# Create our directories.
install_empty_directories(
${CONF_DIR}/apt.conf.d
${CONF_DIR}/auth.conf.d
${CONF_DIR}/preferences.d
${CONF_DIR}/sources.list.d
${CONF_DIR}/trusted.gpg.d
${CACHE_DIR}/archives/partial
${STATE_DIR}/lists/partial
${STATE_DIR}/mirrors/partial
${STATE_DIR}/periodic
${LOG_DIR}
)

COPYING (Normal file, +22 lines)
@@ -0,0 +1,22 @@
Apt is copyright 1997, 1998, 1999 Jason Gunthorpe and others.
Apt is currently developed by APT Development Team <deity@lists.debian.org>.
License: GPLv2+
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
See /usr/share/common-licenses/GPL-2, or
<http://www.gnu.org/copyleft/gpl.txt> for the terms of the latest version
of the GNU General Public License.

COPYING.GPL (Normal file, +339 lines)
@@ -0,0 +1,339 @@
GNU GENERAL PUBLIC LICENSE
Version 2, June 1991
Copyright (C) 1989, 1991 Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
The licenses for most software are designed to take away your
freedom to share and change it. By contrast, the GNU General Public
License is intended to guarantee your freedom to share and change free
software--to make sure the software is free for all its users. This
General Public License applies to most of the Free Software
Foundation's software and to any other program whose authors commit to
using it. (Some other Free Software Foundation software is covered by
the GNU Lesser General Public License instead.) You can apply it to
your programs, too.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
this service if you wish), that you receive source code or can get it
if you want it, that you can change the software or use pieces of it
in new free programs; and that you know you can do these things.
To protect your rights, we need to make restrictions that forbid
anyone to deny you these rights or to ask you to surrender the rights.
These restrictions translate to certain responsibilities for you if you
distribute copies of the software, or if you modify it.
For example, if you distribute copies of such a program, whether
gratis or for a fee, you must give the recipients all the rights that
you have. You must make sure that they, too, receive or can get the
source code. And you must show them these terms so they know their
rights.
We protect your rights with two steps: (1) copyright the software, and
(2) offer you this license which gives you legal permission to copy,
distribute and/or modify the software.
Also, for each author's protection and ours, we want to make certain
that everyone understands that there is no warranty for this free
software. If the software is modified by someone else and passed on, we
want its recipients to know that what they have is not the original, so
that any problems introduced by others will not reflect on the original
authors' reputations.
Finally, any free program is threatened constantly by software
patents. We wish to avoid the danger that redistributors of a free
program will individually obtain patent licenses, in effect making the
program proprietary. To prevent this, we have made it clear that any
patent must be licensed for everyone's free use or not licensed at all.
The precise terms and conditions for copying, distribution and
modification follow.
GNU GENERAL PUBLIC LICENSE
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
0. This License applies to any program or other work which contains
a notice placed by the copyright holder saying it may be distributed
under the terms of this General Public License. The "Program", below,
refers to any such program or work, and a "work based on the Program"
means either the Program or any derivative work under copyright law:
that is to say, a work containing the Program or a portion of it,
either verbatim or with modifications and/or translated into another
language. (Hereinafter, translation is included without limitation in
the term "modification".) Each licensee is addressed as "you".
Activities other than copying, distribution and modification are not
covered by this License; they are outside its scope. The act of
running the Program is not restricted, and the output from the Program
is covered only if its contents constitute a work based on the
Program (independent of having been made by running the Program).
Whether that is true depends on what the Program does.
1. You may copy and distribute verbatim copies of the Program's
source code as you receive it, in any medium, provided that you
conspicuously and appropriately publish on each copy an appropriate
copyright notice and disclaimer of warranty; keep intact all the
notices that refer to this License and to the absence of any warranty;
and give any other recipients of the Program a copy of this License
along with the Program.
You may charge a fee for the physical act of transferring a copy, and
you may at your option offer warranty protection in exchange for a fee.
2. You may modify your copy or copies of the Program or any portion
of it, thus forming a work based on the Program, and copy and
distribute such modifications or work under the terms of Section 1
above, provided that you also meet all of these conditions:
a) You must cause the modified files to carry prominent notices
stating that you changed the files and the date of any change.
b) You must cause any work that you distribute or publish, that in
whole or in part contains or is derived from the Program or any
part thereof, to be licensed as a whole at no charge to all third
parties under the terms of this License.
c) If the modified program normally reads commands interactively
when run, you must cause it, when started running for such
interactive use in the most ordinary way, to print or display an
announcement including an appropriate copyright notice and a
notice that there is no warranty (or else, saying that you provide
a warranty) and that users may redistribute the program under
these conditions, and telling the user how to view a copy of this
License. (Exception: if the Program itself is interactive but
does not normally print such an announcement, your work based on
the Program is not required to print an announcement.)
These requirements apply to the modified work as a whole. If
identifiable sections of that work are not derived from the Program,
and can be reasonably considered independent and separate works in
themselves, then this License, and its terms, do not apply to those
sections when you distribute them as separate works. But when you
distribute the same sections as part of a whole which is a work based
on the Program, the distribution of the whole must be on the terms of
this License, whose permissions for other licensees extend to the
entire whole, and thus to each and every part regardless of who wrote it.
Thus, it is not the intent of this section to claim rights or contest
your rights to work written entirely by you; rather, the intent is to
exercise the right to control the distribution of derivative or
collective works based on the Program.
In addition, mere aggregation of another work not based on the Program
with the Program (or with a work based on the Program) on a volume of
a storage or distribution medium does not bring the other work under
the scope of this License.
3. You may copy and distribute the Program (or a work based on it,
under Section 2) in object code or executable form under the terms of
Sections 1 and 2 above provided that you also do one of the following:
a) Accompany it with the complete corresponding machine-readable
source code, which must be distributed under the terms of Sections
1 and 2 above on a medium customarily used for software interchange; or,
b) Accompany it with a written offer, valid for at least three
years, to give any third party, for a charge no more than your
cost of physically performing source distribution, a complete
machine-readable copy of the corresponding source code, to be
distributed under the terms of Sections 1 and 2 above on a medium
customarily used for software interchange; or,
c) Accompany it with the information you received as to the offer
to distribute corresponding source code. (This alternative is
allowed only for noncommercial distribution and only if you
received the program in object code or executable form with such
an offer, in accord with Subsection b above.)
The source code for a work means the preferred form of the work for
making modifications to it. For an executable work, complete source
code means all the source code for all modules it contains, plus any
associated interface definition files, plus the scripts used to
control compilation and installation of the executable. However, as a
special exception, the source code distributed need not include
anything that is normally distributed (in either source or binary
form) with the major components (compiler, kernel, and so on) of the
operating system on which the executable runs, unless that component
itself accompanies the executable.
If distribution of executable or object code is made by offering
access to copy from a designated place, then offering equivalent
access to copy the source code from the same place counts as
distribution of the source code, even though third parties are not
compelled to copy the source along with the object code.
4. You may not copy, modify, sublicense, or distribute the Program
except as expressly provided under this License. Any attempt
otherwise to copy, modify, sublicense or distribute the Program is
void, and will automatically terminate your rights under this License.
However, parties who have received copies, or rights, from you under
this License will not have their licenses terminated so long as such
parties remain in full compliance.
5. You are not required to accept this License, since you have not
signed it. However, nothing else grants you permission to modify or
distribute the Program or its derivative works. These actions are
prohibited by law if you do not accept this License. Therefore, by
modifying or distributing the Program (or any work based on the
Program), you indicate your acceptance of this License to do so, and
all its terms and conditions for copying, distributing or modifying
the Program or works based on it.
6. Each time you redistribute the Program (or any work based on the
Program), the recipient automatically receives a license from the
original licensor to copy, distribute or modify the Program subject to
these terms and conditions. You may not impose any further
restrictions on the recipients' exercise of the rights granted herein.
You are not responsible for enforcing compliance by third parties to
this License.
7. If, as a consequence of a court judgment or allegation of patent
infringement or for any other reason (not limited to patent issues),
conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot
distribute so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you
may not distribute the Program at all. For example, if a patent
license would not permit royalty-free redistribution of the Program by
all those who receive copies directly or indirectly through you, then
the only way you could satisfy both it and this License would be to
refrain entirely from distribution of the Program.
If any portion of this section is held invalid or unenforceable under
any particular circumstance, the balance of the section is intended to
apply and the section as a whole is intended to apply in other
circumstances.
It is not the purpose of this section to induce you to infringe any
patents or other property right claims or to contest validity of any
such claims; this section has the sole purpose of protecting the
integrity of the free software distribution system, which is
implemented by public license practices. Many people have made
generous contributions to the wide range of software distributed
through that system in reliance on consistent application of that
system; it is up to the author/donor to decide if he or she is willing
to distribute software through any other system and a licensee cannot
impose that choice.
This section is intended to make thoroughly clear what is believed to
be a consequence of the rest of this License.
8. If the distribution and/or use of the Program is restricted in
certain countries either by patents or by copyrighted interfaces, the
original copyright holder who places the Program under this License
may add an explicit geographical distribution limitation excluding
those countries, so that distribution is permitted only in or among
countries not thus excluded. In such case, this License incorporates
the limitation as if written in the body of this License.
9. The Free Software Foundation may publish revised and/or new versions
of the General Public License from time to time. Such new versions will
be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the Program
specifies a version number of this License which applies to it and "any
later version", you have the option of following the terms and conditions
either of that version or of any later version published by the Free
Software Foundation. If the Program does not specify a version number of
this License, you may choose any version ever published by the Free Software
Foundation.
10. If you wish to incorporate parts of the Program into other free
programs whose distribution conditions are different, write to the author
to ask for permission. For software which is copyrighted by the Free
Software Foundation, write to the Free Software Foundation; we sometimes
make exceptions for this. Our decision will be guided by the two goals
of preserving the free status of all derivatives of our free software and
of promoting the sharing and reuse of software generally.
NO WARRANTY
11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS
TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE
PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
REPAIR OR CORRECTION.
12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
POSSIBILITY OF SUCH DAMAGES.
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Programs
If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.
To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
convey the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.
<one line to give the program's name and a brief idea of what it does.>
Copyright (C) <year> <name of author>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
Also add information on how to contact you by electronic and paper mail.
If the program is interactive, make it output a short notice like this
when it starts in an interactive mode:
Gnomovision version 69, Copyright (C) year name of author
Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
This is free software, and you are welcome to redistribute it
under certain conditions; type `show c' for details.
The hypothetical commands `show w' and `show c' should show the appropriate
parts of the General Public License. Of course, the commands you use may
be called something other than `show w' and `show c'; they could even be
mouse-clicks or menu items--whatever suits your program.
You should also get your employer (if you work as a programmer) or your
school, if any, to sign a "copyright disclaimer" for the program, if
necessary. Here is a sample; alter the names:
Yoyodyne, Inc., hereby disclaims all copyright interest in the program
`Gnomovision' (which makes passes at compilers) written by James Hacker.
<signature of Ty Coon>, 1 April 1989
Ty Coon, President of Vice
This General Public License does not permit incorporating your program into
proprietary programs. If your program is a subroutine library, you may
consider it more useful to permit linking proprietary applications with the
library. If this is what you want to do, use the GNU Lesser General
Public License instead of this License.

12
Dockerfile Normal file
View File

@ -0,0 +1,12 @@
FROM debian:unstable
COPY . /tmp
WORKDIR /tmp
RUN sed -i s#://deb.debian.org#://cdn-fastly.deb.debian.org# /etc/apt/sources.list \
&& apt-get update \
&& adduser --home /home/travis travis --quiet --disabled-login --gecos "" --uid 1000 \
&& env DEBIAN_FRONTEND=noninteractive apt-get install build-essential ccache ninja-build expect curl git -q -y \
&& env DEBIAN_FRONTEND=noninteractive ./prepare-release travis-ci \
&& dpkg-reconfigure ccache \
&& rm -f /etc/dpkg/dpkg.cfg.d/excludes \
&& rm -r /tmp/* \
&& apt-get clean

215
README.md Normal file
View File

@ -0,0 +1,215 @@
APT
===
apt is the main command-line package manager for Debian and its derivatives.
It provides command-line tools for searching, managing, and querying
information about packages, as well as low-level access to all features
provided by the libapt-pkg and libapt-inst libraries, which higher-level
package managers can depend upon.
Included tools are:
* **apt-get** for retrieval of packages and information about them
from authenticated sources and for installation, upgrade and
removal of packages together with their dependencies
* **apt-cache** for querying available information about installed
as well as available packages
* **apt-cdrom** to use removable media as a source for packages
* **apt-config** as an interface to the configuration settings
* **apt-key** as an interface to manage authentication keys
* **apt-extracttemplates** to be used by debconf to prompt for configuration
questions before installation
* **apt-ftparchive** creates Packages and other index files
needed to publish an archive of deb packages
* **apt-sortpkgs** is a Packages/Sources file normalizer
* **apt** is a high-level command-line interface for better interactive usage
The libraries libapt-pkg and libapt-inst are also maintained as part of this project,
alongside various additional binaries like the acquire methods used by them.
Bindings for Python ([python-apt](https://tracker.debian.org/pkg/python-apt)) and
Perl ([libapt-pkg-perl](https://tracker.debian.org/pkg/libapt-pkg-perl)) are available as separate projects.
Discussion happens mostly on [the mailing list](mailto:deity@lists.debian.org) ([archive](https://lists.debian.org/deity/)) and on [IRC](irc://irc.oftc.net/debian-apt).
Our bug tracker as well as a general overview can be found at the [Debian Tracker page](https://tracker.debian.org/pkg/apt).
Contributing
------------
APT is maintained in git, the official repository being located at
[https://salsa.debian.org/apt-team/apt](https://salsa.debian.org/apt-team/apt),
but also available at other locations like [GitHub](https://github.com/Debian/apt).
The default branch is `master`; other branches targeting different
derivatives and releases are used as needed. Various topic branches in
different stages of completion might be branched off from those, which you
are encouraged to do as well.
### Coding
APT uses CMake. To start building, you need to run
cmake <path to source directory>
from a build directory. For example, if you want to build in the source tree,
run:
cmake .
Then you can use make as you normally would (pass `-j <count>` to perform `<count>`
jobs in parallel).
You can also use the Ninja generator of CMake; to do that, pass
-G Ninja
to the cmake invocation and then use ninja instead of make.
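For example, a plain out-of-tree build with Ninja might look like this (the `build` directory name is just a convention, not a requirement):
$ mkdir build && cd build
$ cmake -G Ninja ..
$ ninja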
The source code mostly uses a relatively uncommon indentation convention,
namely 3 spaces with an 8-space tab (see [doc/style.txt](./doc/style.txt) for more on this).
Adhering to it avoids unnecessary code churn that destroys history (aka: `git blame`),
and you are therefore encouraged to write patches in this style.
Your editor can surely help you with this; for vim the settings would be
`setlocal shiftwidth=3 noexpandtab tabstop=8`
(the latter two are the default configuration and could therefore be omitted).
### Translations
While we welcome contributions here, we highly encourage you to contact the [Debian Internationalization (i18n) team](https://wiki.debian.org/Teams/I18n).
Various language teams have formed which can help you create, maintain
and improve a translation, while we could only do a basic syntax check of the
file format…
Furthermore, translating APT is split into two independent parts:
The program translation, meaning the messages printed by the tools,
as well as the manual pages and other documentation shipped with APT.
### Bug triage
Software tools like APT, which are used by thousands of users every
day, have a steady flow of incoming bug reports. Not all of them are really
bugs in APT: Some are packaging bugs, like failing maintainer scripts, that a
user reports against apt because apt was the command they executed that led
to the failure; others are various wishlist items for new features. Given enough time
the occasional duplicate enters the system as well.
Our bug tracker is therefore full of open bug reports which are waiting for you! ;)
Testing
-------
### Manual execution
When you make changes and want to run them manually, you can just do so. CMake
automatically inserts an rpath so the binaries find the correct libraries.
Note that you have to invoke CMake with the right install prefix set (e.g.
`-DCMAKE_INSTALL_PREFIX=/usr`) to have your build find and use the right files
by default or alternatively set the locations at run-time via an `APT_CONFIG`
configuration file.
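As a rough sketch (the location of the binary inside the build tree and the config file name are assumptions, adjust them to your setup), running a freshly built tool against a custom configuration could look like:
$ APT_CONFIG=./my-apt.conf ./build/cmdline/apt-get --version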
### Integration tests
There is an extensive integration test suite available which can be run via:
$ ./test/integration/run-tests
Each test can also be run individually. The tests are very noisy by
default, especially so while running all of them; it might be beneficial to
enable quiet (`-q`) or very quiet (`-qq`) mode. The tests can also be run in
parallel via `-j X` where `X` is the number of jobs to run.
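For example, a quiet parallel run of the whole suite, or a single test script (the test name below is only a hypothetical example):
$ ./test/integration/run-tests -q -j 4
$ ./test/integration/test-apt-get-update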
While these tests are not executed at package build-time as they require
additional dependencies, the repository contains the configuration needed to
run them on [Travis CI](https://travis-ci.org/) and
[Shippable](https://shippable.com/) as well as via autopkgtests e.g. on
[Debian Continuous Integration](https://ci.debian.net/packages/a/apt/).
A test case here is a shell script embedded in a framework that creates an environment in which
apt tools can be used naturally without root rights to test every aspect of their behavior
on their own as well as in conjunction with dpkg and other tools while working with packages.
### Unit tests
These tests are gtest-dev based, executed by ctest, reside in `./test/libapt`
and can be run with `make test`. They are executed at package build-time, but
not by `make`. CTest by default does not show the output of tests, even if they
failed, so to see more details you can also run them with `ctest --verbose`.
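For example, assuming an out-of-tree build directory named `build`, the standard ctest flag `--output-on-failure` limits the noise to failing tests:
$ cd build && ctest --output-on-failure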
Debugging
---------
APT does many things, so there is no central debug mode which could be
activated. Instead, it uses various configuration options to activate debug output
in certain areas. The following describes some common scenarios and generally
useful options, but is in no way exhaustive.
Note that, to avoid accidents, you should *NEVER* use these settings as root.
Simulation mode (`-s`) is usually sufficient to help you run apt as a non-root user.
### Using different state files
If a dependency solver bug is reported, but can't easily be reproduced by the
triager, it is beneficial to ask the reporter for the
`/var/lib/dpkg/status` file, which lists the packages installed on the
system and their versions. Such a file can then be used via the option
`dir::state::status`. Beware of different architecture settings!
Bug reports usually include this information in the template. Assuming you
already have the `Packages` files for the architecture (see `sources.list`
manpage for the `arch=` option) you can change to a different architecture
with a configuration file like:
APT::Architecture "arch1";
#clear APT::Architectures;
APT::Architectures { "arch1"; "arch2"; }
If a certain mirror state is needed, see if you can reproduce it with [snapshot.debian.org](http://snapshot.debian.org/).
Your sources.list file (`dir::etc::sourcelist`) has to correctly mention the repository,
but if it does, you can use different downloaded archive state files via `dir::state::lists`.
In case manually vs. automatically installed matters, you can ask the reporter for
the `/var/lib/apt/extended_states` file and use it with `dir::state::extended_states`.
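Putting this together, a sketch of replaying a reporter's state could look like the following (using the architecture configuration from above as `./arch.conf`; all file names and the package name `foo` are placeholders):
$ apt-get -s -c ./arch.conf \
    -o dir::state::status=./reporter-status \
    -o dir::state::lists=./reporter-lists install foo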
### Dependency resolution
APT's internal resolver works in two stages: First all packages are visited
and marked for installation, keeping back, or removal. Option `Debug::pkgDepCache::Marker`
shows this. This also decides which packages are to be installed to satisfy dependencies,
which can be seen by `Debug::pkgDepCache::AutoInstall`. After this is done, we might
be in a situation in which two packages want to be installed, but only one of them can be.
It is the job of the `pkgProblemResolver` to decide which of two packages 'wins' and can
therefore decide what has to happen. You can see the contenders as well as their fight and
the resulting resolution with `Debug::pkgProblemResolver`.
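For example, a simulated installation with the resolver debug options mentioned above enabled (the package name `foo` is a placeholder):
$ apt-get -s -o Debug::pkgDepCache::Marker=yes -o Debug::pkgDepCache::AutoInstall=yes -o Debug::pkgProblemResolver=yes install foo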
### Downloading files
Various binaries (called 'methods') are tasked with downloading files. The Acquire system
talks to them via a simple text protocol. Depending on which side you want to see, either
`Debug::pkgAcquire::Worker` or `Debug::Acquire::http` (or similar) will show the messages.
The integration tests use a simple self-built web server (`webserver`) which also logs. If you find that
the http(s) methods do not behave as they should, try to implement this behavior in
webserver for simpler and more controlled testing.
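For example, to watch both sides of the conversation during an update (enable only the method side you are interested in):
$ apt-get update -o Debug::pkgAcquire::Worker=yes -o Debug::Acquire::http=yes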
### Installation order
Dependencies are solved, packages downloaded: Everything is ready for the installation!
The last step in the chain is often forgotten, but still very important:
Packages have to be installed in a particular order so that their dependencies are
satisfied, but at the same time you want to avoid installing very important and optional
packages together if possible, so that a broken optional package does not
block the correct installation of very important packages. Which option to use depends on
whether you are interested in the topological sorting (`Debug::pkgOrderList`), the dependency-aware
cycle and unconfigured prevention (`Debug::pkgPackageManager`) or the actual calls
to dpkg (`Debug::pkgDpkgPm`).
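For example, a simulated upgrade with the ordering-related debug output enabled:
$ apt-get -s -o Debug::pkgOrderList=yes -o Debug::pkgPackageManager=yes -o Debug::pkgDpkgPm=yes dist-upgrade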
Additional documentation
------------------------
Many more things could and should be said about APT and its usage but are more
targeted at developers of related programs or only of special interest.
* [Protocol specification of APT's communication with external dependency solvers (EDSP)](./doc/external-dependency-solver-protocol.md)
* [Protocol specification of APT's communication with external installation planners (EIPP)](./doc/external-installation-planner-protocol.md)
* [How to use and configure APT to acquire additional files in 'update' operations](./doc/acquire-additional-files.md)
* [Download and package installation progress reporting details](./doc/progress-reporting.md)
* [Remarks on DNS SRV record support in APT](./doc/srv-records-support.md)
* [Protocol specification of APT interfacing with external hooks via JSON](./doc/json-hooks-protocol.md)

11
abicheck/apt_build.xml.in Normal file
View File

@ -0,0 +1,11 @@
<version>
build-branch
</version>
<headers>
@build_path@/include/apt-pkg
</headers>
<libs>
@build_path@/apt-pkg/
</libs>

11
abicheck/apt_installed.xml.in Normal file
View File

@ -0,0 +1,11 @@
<version>
installed
</version>
<headers>
/usr/include/apt-pkg/
</headers>
<libs>
@installed_libapt@
</libs>

22
abicheck/run_abi_test Executable file
View File

@ -0,0 +1,22 @@
#!/bin/sh
# ensure we are in the abicheck subdirectory
cd "$(readlink -f $(dirname $0))"
if [ ! -d ../build ]; then
echo "../build missing, did you run make?"
exit 1
fi
if ! command -v abi-compliance-checker 2>/dev/null >&2; then
echo "Please install the 'abi-compliance-checker' package"
exit 1
fi
LIBPATH=$(find /usr/lib/$(dpkg-architecture -qDEB_HOST_MULTIARCH) -type f -regex '.*/libapt-\(pkg\|inst\)\.so\..*' -printf %p\\\\n)
sed s#@installed_libapt@#$LIBPATH# apt_installed.xml.in > apt_installed.xml
BUILDPATH=$(readlink -f ../build)
sed s#@build_path@#$BUILDPATH# apt_build.xml.in > apt_build.xml
abi-compliance-checker -l apt -d1 apt_installed.xml -d2 apt_build.xml $@

80
apt-pkg/CMakeLists.txt Normal file
View File

@ -0,0 +1,80 @@
# Include apt-pkg directly, as some files have #include <system.h>
include_directories(${PROJECT_BINARY_DIR}/include/apt-pkg)
file(MAKE_DIRECTORY ${PROJECT_BINARY_DIR}/include/apt-pkg/)
execute_process(COMMAND ${TRIEHASH_EXECUTABLE}
--ignore-case
--header ${PROJECT_BINARY_DIR}/include/apt-pkg/tagfile-keys.h
--code ${CMAKE_CURRENT_BINARY_DIR}/tagfile-keys.cc
--enum-class
--enum-name pkgTagSection::Key
--function-name pkgTagHash
--include "<apt-pkg/tagfile.h>"
${CMAKE_CURRENT_SOURCE_DIR}/tagfile-keys.list)
set_property(DIRECTORY APPEND PROPERTY CMAKE_CONFIGURE_DEPENDS "tagfile-keys.list")
# Set the version of the library
execute_process(COMMAND awk -v ORS=. "/^\#define APT_PKG_M/ {print \$3}"
COMMAND sed "s/\\.\$//"
INPUT_FILE ${CMAKE_CURRENT_SOURCE_DIR}/contrib/macros.h
OUTPUT_VARIABLE MAJOR OUTPUT_STRIP_TRAILING_WHITESPACE)
execute_process(COMMAND grep "^#define APT_PKG_RELEASE"
COMMAND cut -d " " -f 3
INPUT_FILE ${CMAKE_CURRENT_SOURCE_DIR}/contrib/macros.h
OUTPUT_VARIABLE MINOR OUTPUT_STRIP_TRAILING_WHITESPACE)
message(STATUS "Building libapt-pkg ${MAJOR} (release ${MINOR})")
set(APT_PKG_MAJOR ${MAJOR} PARENT_SCOPE) # exporting for methods/CMakeLists.txt
configure_file(apt-pkg.pc.in ${CMAKE_CURRENT_BINARY_DIR}/apt-pkg.pc @ONLY)
# Definition of the C++ files used to build the library - note that this
# is expanded at CMake time, so you have to rerun cmake if you add or remove
# a file (you can just run cmake . in the build directory)
file(GLOB_RECURSE library "*.cc" "${CMAKE_CURRENT_BINARY_DIR}/tagfile-keys.cc")
file(GLOB_RECURSE headers "*.h")
# Create a library using the C++ files
add_library(apt-pkg SHARED ${library})
add_dependencies(apt-pkg apt-pkg-versionscript)
# Link the library and set the SONAME
target_include_directories(apt-pkg
PRIVATE ${ZLIB_INCLUDE_DIRS}
${BZIP2_INCLUDE_DIR}
${LZMA_INCLUDE_DIRS}
${LZ4_INCLUDE_DIRS}
$<$<BOOL:${ZSTD_FOUND}>:${ZSTD_INCLUDE_DIRS}>
$<$<BOOL:${UDEV_FOUND}>:${UDEV_INCLUDE_DIRS}>
$<$<BOOL:${SYSTEMD_FOUND}>:${SYSTEMD_INCLUDE_DIRS}>
${ICONV_INCLUDE_DIRS}
$<$<BOOL:${GCRYPT_FOUND}>:${GCRYPT_INCLUDE_DIRS}>
)
target_link_libraries(apt-pkg
PRIVATE -lutil ${CMAKE_DL_LIBS} ${RESOLV_LIBRARIES}
${CMAKE_THREAD_LIBS_INIT}
${ZLIB_LIBRARIES}
${BZIP2_LIBRARIES}
${LZMA_LIBRARIES}
${LZ4_LIBRARIES}
$<$<BOOL:${ZSTD_FOUND}>:${ZSTD_LIBRARIES}>
$<$<BOOL:${UDEV_FOUND}>:${UDEV_LIBRARIES}>
$<$<BOOL:${SYSTEMD_FOUND}>:${SYSTEMD_LIBRARIES}>
${ICONV_LIBRARIES}
$<$<BOOL:${GCRYPT_FOUND}>:${GCRYPT_LIBRARIES}>
)
set_target_properties(apt-pkg PROPERTIES VERSION ${MAJOR}.${MINOR})
set_target_properties(apt-pkg PROPERTIES SOVERSION ${MAJOR})
set_target_properties(apt-pkg PROPERTIES CXX_VISIBILITY_PRESET hidden)
add_version_script(apt-pkg)
# Install the library and the header files
install(TARGETS apt-pkg LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR})
install(FILES ${headers} DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/apt-pkg)
install(FILES ${CMAKE_CURRENT_BINARY_DIR}/apt-pkg.pc DESTINATION ${CMAKE_INSTALL_LIBDIR}/pkgconfig)
flatify(${PROJECT_BINARY_DIR}/include/apt-pkg/ "${headers}")
if(CMAKE_BUILD_TYPE STREQUAL "Coverage")
target_link_libraries(apt-pkg PUBLIC noprofile)
endif()

4051
apt-pkg/acquire-item.cc Normal file

File diff suppressed because it is too large Load Diff

1214
apt-pkg/acquire-item.h Normal file

File diff suppressed because it is too large Load Diff

562
apt-pkg/acquire-method.cc Normal file
View File

@ -0,0 +1,562 @@
// -*- mode: cpp; mode: fold -*-
// Description /*{{{*/
/* ######################################################################
Acquire Method
This is a skeleton class that implements most of the functionality
of a method and some useful functions to make method implementation
simpler. The methods all derive this and specialize it. The most
complex implementation is the http method which needs to provide
pipelining, it runs the message engine at the same time it is
downloading files..
##################################################################### */
/*}}}*/
// Include Files /*{{{*/
#include <config.h>
#include <apt-pkg/acquire-method.h>
#include <apt-pkg/configuration.h>
#include <apt-pkg/error.h>
#include <apt-pkg/fileutl.h>
#include <apt-pkg/hashes.h>
#include <apt-pkg/strutl.h>
#include <algorithm>
#include <iostream>
#include <iterator>
#include <sstream>
#include <string>
#include <vector>
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
/*}}}*/
using namespace std;
// poor mans unordered_map::try_emplace for C++11 as it is a C++17 feature /*{{{*/
template <typename Arg>
static void try_emplace(std::unordered_map<std::string, std::string> &fields, std::string &&name, Arg &&value)
{
if (fields.find(name) == fields.end())
fields.emplace(std::move(name), std::forward<Arg>(value));
}
/*}}}*/
// AcqMethod::pkgAcqMethod - Constructor /*{{{*/
// ---------------------------------------------------------------------
/* This constructs the initialization text */
pkgAcqMethod::pkgAcqMethod(const char *Ver,unsigned long Flags)
{
std::unordered_map<std::string, std::string> fields;
try_emplace(fields, "Version", Ver);
if ((Flags & SingleInstance) == SingleInstance)
try_emplace(fields, "Single-Instance", "true");
if ((Flags & Pipeline) == Pipeline)
try_emplace(fields, "Pipeline", "true");
if ((Flags & SendConfig) == SendConfig)
try_emplace(fields, "Send-Config", "true");
if ((Flags & LocalOnly) == LocalOnly)
try_emplace(fields, "Local-Only", "true");
if ((Flags & NeedsCleanup) == NeedsCleanup)
try_emplace(fields, "Needs-Cleanup", "true");
if ((Flags & Removable) == Removable)
try_emplace(fields, "Removable", "true");
if ((Flags & AuxRequests) == AuxRequests)
try_emplace(fields, "AuxRequests", "true");
SendMessage("100 Capabilities", std::move(fields));
SetNonBlock(STDIN_FILENO,true);
Queue = 0;
QueueBack = 0;
}
/*}}}*/
void pkgAcqMethod::SendMessage(std::string const &header, std::unordered_map<std::string, std::string> &&fields) /*{{{*/
{
auto CheckKey = [](std::string const &str) {
// Space, hyphen-minus, and alphanum are allowed for keys/headers.
return str.find_first_not_of(" -0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz") == std::string::npos;
};
auto CheckValue = [](std::string const &str) {
return std::all_of(str.begin(), str.end(), [](unsigned char c) -> bool {
return c > 127 // unicode
|| (c > 31 && c < 127) // printable chars
|| c == '\n' || c == '\t'; // special whitespace
});
};
auto Error = [this]() {
_error->Error("SECURITY: Message contains control characters, rejecting.");
_error->DumpErrors();
SendMessage("400 URI Failure", {{"URI", "<UNKNOWN>"}, {"Message", "SECURITY: Message contains control characters, rejecting."}});
abort();
};
if (!CheckKey(header))
return Error();
for (auto const &f : fields)
{
if (!CheckKey(f.first))
return Error();
if (!CheckValue(f.second))
return Error();
}
std::cout << header << '\n';
for (auto const &f : fields)
{
if (f.second.empty())
continue;
std::cout << f.first << ": ";
auto const lines = VectorizeString(f.second, '\n');
if (likely(lines.empty() == false))
{
std::copy(lines.begin(), std::prev(lines.end()), std::ostream_iterator<std::string>(std::cout, "\n "));
std::cout << *lines.rbegin();
}
std::cout << '\n';
}
std::cout << '\n'
<< std::flush;
}
/*}}}*/
// AcqMethod::Fail - A fetch has failed /*{{{*/
// ---------------------------------------------------------------------
/* */
void pkgAcqMethod::Fail(bool Transient)
{
string Err = "Undetermined Error";
if (_error->empty() == false)
{
Err.clear();
while (_error->empty() == false)
{
std::string msg;
if (_error->PopMessage(msg))
{
if (Err.empty() == false)
Err.append("\n");
Err.append(msg);
}
}
}
Fail(Err, Transient);
}
/*}}}*/
// AcqMethod::Fail - A fetch has failed /*{{{*/
void pkgAcqMethod::Fail(string Err,bool Transient)
{
// Strip out junk from the error messages
std::transform(Err.begin(), Err.end(), Err.begin(), [](char const c) {
if (c == '\r' || c == '\n')
return ' ';
return c;
});
if (IP.empty() == false && _config->FindB("Acquire::Failure::ShowIP", true) == true)
Err.append(" ").append(IP);
std::unordered_map<std::string, std::string> fields;
if (Queue != nullptr)
try_emplace(fields, "URI", Queue->Uri);
else
try_emplace(fields, "URI", "<UNKNOWN>");
try_emplace(fields, "Message", Err);
if(FailReason.empty() == false)
try_emplace(fields, "FailReason", FailReason);
if (UsedMirror.empty() == false)
try_emplace(fields, "UsedMirror", UsedMirror);
if (Transient == true)
try_emplace(fields, "Transient-Failure", "true");
SendMessage("400 URI Failure", std::move(fields));
if (Queue != nullptr)
Dequeue();
}
/*}}}*/
// AcqMethod::DropPrivsOrDie - Drop privileges or die /*{{{*/
// ---------------------------------------------------------------------
/* */
void pkgAcqMethod::DropPrivsOrDie()
{
if (!DropPrivileges()) {
Fail(false);
exit(112); /* call the european emergency number */
}
}
/*}}}*/
// AcqMethod::URIStart - Indicate a download is starting /*{{{*/
void pkgAcqMethod::URIStart(FetchResult &Res)
{
if (Queue == 0)
abort();
std::unordered_map<std::string, std::string> fields;
try_emplace(fields, "URI", Queue->Uri);
if (Res.Size != 0)
try_emplace(fields, "Size", std::to_string(Res.Size));
if (Res.LastModified != 0)
try_emplace(fields, "Last-Modified", TimeRFC1123(Res.LastModified, true));
if (Res.ResumePoint != 0)
try_emplace(fields, "Resume-Point", std::to_string(Res.ResumePoint));
if (UsedMirror.empty() == false)
try_emplace(fields, "UsedMirror", UsedMirror);
SendMessage("200 URI Start", std::move(fields));
}
/*}}}*/
// AcqMethod::URIDone - A URI is finished /*{{{*/
static void printHashStringList(std::unordered_map<std::string, std::string> &fields, std::string const &Prefix, HashStringList const &list)
{
for (auto const &hash : list)
{
// very old compatibility name for MD5Sum
if (hash.HashType() == "MD5Sum")
try_emplace(fields, Prefix + "MD5-Hash", hash.HashValue());
try_emplace(fields, Prefix + hash.HashType() + "-Hash", hash.HashValue());
}
}
void pkgAcqMethod::URIDone(FetchResult &Res, FetchResult *Alt)
{
if (Queue == 0)
abort();
std::unordered_map<std::string, std::string> fields;
try_emplace(fields, "URI", Queue->Uri);
if (Res.Filename.empty() == false)
try_emplace(fields, "Filename", Res.Filename);
if (Res.Size != 0)
try_emplace(fields, "Size", std::to_string(Res.Size));
if (Res.LastModified != 0)
try_emplace(fields, "Last-Modified", TimeRFC1123(Res.LastModified, true));
printHashStringList(fields, "", Res.Hashes);
if (UsedMirror.empty() == false)
try_emplace(fields, "UsedMirror", UsedMirror);
if (Res.GPGVOutput.empty() == false)
{
std::ostringstream os;
std::copy(Res.GPGVOutput.begin(), Res.GPGVOutput.end() - 1, std::ostream_iterator<std::string>(os, "\n"));
os << *Res.GPGVOutput.rbegin();
try_emplace(fields, "GPGVOutput", os.str());
}
if (Res.ResumePoint != 0)
try_emplace(fields, "Resume-Point", std::to_string(Res.ResumePoint));
if (Res.IMSHit == true)
try_emplace(fields, "IMS-Hit", "true");
if (Alt != nullptr)
{
if (Alt->Filename.empty() == false)
try_emplace(fields, "Alt-Filename", Alt->Filename);
if (Alt->Size != 0)
try_emplace(fields, "Alt-Size", std::to_string(Alt->Size));
if (Alt->LastModified != 0)
try_emplace(fields, "Alt-Last-Modified", TimeRFC1123(Alt->LastModified, true));
if (Alt->IMSHit == true)
try_emplace(fields, "Alt-IMS-Hit", "true");
printHashStringList(fields, "Alt-", Alt->Hashes);
}
SendMessage("201 URI Done", std::move(fields));
Dequeue();
}
/*}}}*/
// AcqMethod::MediaFail - Synchronous request for new media /*{{{*/
// ---------------------------------------------------------------------
/* This sends a 403 Media Failure message to the APT and waits for it
to be ackd */
bool pkgAcqMethod::MediaFail(string Required,string Drive)
{
fprintf(stdout, "403 Media Failure\nMedia: %s\nDrive: %s\n",
Required.c_str(),Drive.c_str());
std::cout << "\n" << std::flush;
vector<string> MyMessages;
/* Here we read messages until we find a 603, each non 603 message is
appended to the main message list for later processing */
while (1)
{
if (WaitFd(STDIN_FILENO) == false)
return false;
if (ReadMessages(STDIN_FILENO,MyMessages) == false)
return false;
string Message = MyMessages.front();
MyMessages.erase(MyMessages.begin());
// Fetch the message number
char *End;
int Number = strtol(Message.c_str(),&End,10);
if (End == Message.c_str())
{
cerr << "Malformed message!" << endl;
exit(100);
}
// Change ack
if (Number == 603)
{
while (MyMessages.empty() == false)
{
Messages.push_back(MyMessages.front());
MyMessages.erase(MyMessages.begin());
}
return !StringToBool(LookupTag(Message,"Failed"),false);
}
Messages.push_back(Message);
}
}
/*}}}*/
// AcqMethod::Configuration - Handle the configuration message /*{{{*/
// ---------------------------------------------------------------------
/* This parses each configuration entry and puts it into the _config
Configuration class. */
bool pkgAcqMethod::Configuration(string Message)
{
::Configuration &Cnf = *_config;
const char *I = Message.c_str();
const char *MsgEnd = I + Message.length();
unsigned int Length = strlen("Config-Item");
for (; I + Length < MsgEnd; I++)
{
// Not a config item
if (I[Length] != ':' || stringcasecmp(I,I+Length,"Config-Item") != 0)
continue;
I += Length + 1;
for (; I < MsgEnd && *I == ' '; I++);
const char *Equals = (const char*) memchr(I, '=', MsgEnd - I);
if (Equals == NULL)
return false;
const char *End = (const char*) memchr(Equals, '\n', MsgEnd - Equals);
if (End == NULL)
End = MsgEnd;
Cnf.Set(DeQuoteString(string(I,Equals-I)),
DeQuoteString(string(Equals+1,End-Equals-1)));
I = End;
}
return true;
}
/*}}}*/
// AcqMethod::Run - Run the message engine /*{{{*/
// ---------------------------------------------------------------------
/* Fetch any messages and execute them. In single mode it returns 1 if
there are no more available messages - any other result is a
fatal failure code! */
int pkgAcqMethod::Run(bool Single)
{
while (1)
{
// Block if the message queue is empty
if (Messages.empty() == true)
{
if (Single == false)
if (WaitFd(STDIN_FILENO) == false)
break;
if (ReadMessages(STDIN_FILENO,Messages) == false)
break;
}
// Single mode exits if the message queue is empty
if (Single == true && Messages.empty() == true)
return -1;
string Message = Messages.front();
Messages.erase(Messages.begin());
// Fetch the message number
char *End;
int Number = strtol(Message.c_str(),&End,10);
if (End == Message.c_str())
{
cerr << "Malformed message!" << endl;
return 100;
}
switch (Number)
{
case 601:
if (Configuration(Message) == false)
return 100;
break;
case 600:
{
FetchItem *Tmp = new FetchItem;
Tmp->Uri = LookupTag(Message,"URI");
Tmp->Proxy(LookupTag(Message, "Proxy"));
Tmp->DestFile = LookupTag(Message,"FileName");
if (RFC1123StrToTime(LookupTag(Message,"Last-Modified"),Tmp->LastModified) == false)
Tmp->LastModified = 0;
Tmp->IndexFile = StringToBool(LookupTag(Message,"Index-File"),false);
Tmp->FailIgnore = StringToBool(LookupTag(Message,"Fail-Ignore"),false);
Tmp->ExpectedHashes = HashStringList();
for (char const * const * t = HashString::SupportedHashes(); *t != NULL; ++t)
{
std::string tag = "Expected-";
tag.append(*t);
std::string const hash = LookupTag(Message, tag.c_str());
if (hash.empty() == false)
Tmp->ExpectedHashes.push_back(HashString(*t, hash));
}
char *End;
if (Tmp->ExpectedHashes.FileSize() > 0)
Tmp->MaximumSize = Tmp->ExpectedHashes.FileSize();
else
Tmp->MaximumSize = strtoll(LookupTag(Message, "Maximum-Size", "0").c_str(), &End, 10);
Tmp->Next = 0;
// Append it to the list
FetchItem **I = &Queue;
for (; *I != 0; I = &(*I)->Next);
*I = Tmp;
if (QueueBack == 0)
QueueBack = Tmp;
// Notify that this item is to be fetched.
if (URIAcquire(Message, Tmp) == false)
Fail();
break;
}
}
}
Exit();
return 0;
}
/*}}}*/
// AcqMethod::PrintStatus - privately really send a log/status message /*{{{*/
void pkgAcqMethod::PrintStatus(char const * const header, const char* Format,
va_list &args) const
{
string CurrentURI = "<UNKNOWN>";
if (Queue != 0)
CurrentURI = Queue->Uri;
if (UsedMirror.empty() == true)
fprintf(stdout, "%s\nURI: %s\nMessage: ",
header, CurrentURI.c_str());
else
fprintf(stdout, "%s\nURI: %s\nUsedMirror: %s\nMessage: ",
header, CurrentURI.c_str(), UsedMirror.c_str());
vfprintf(stdout,Format,args);
std::cout << "\n\n" << std::flush;
}
/*}}}*/
// AcqMethod::Log - Send a log message /*{{{*/
// ---------------------------------------------------------------------
/* */
void pkgAcqMethod::Log(const char *Format,...)
{
va_list args;
va_start(args,Format);
PrintStatus("101 Log", Format, args);
va_end(args);
}
/*}}}*/
// AcqMethod::Status - Send a status message /*{{{*/
// ---------------------------------------------------------------------
/* */
void pkgAcqMethod::Status(const char *Format,...)
{
va_list args;
va_start(args,Format);
PrintStatus("102 Status", Format, args);
va_end(args);
}
/*}}}*/
// AcqMethod::Redirect - Send a redirect message /*{{{*/
// ---------------------------------------------------------------------
/* This method sends the redirect message and dequeues the item as
* the worker will enqueue again later on to the right queue */
void pkgAcqMethod::Redirect(const string &NewURI)
{
if (NewURI.find_first_not_of(" !\"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~") != std::string::npos)
{
_error->Error("SECURITY: URL redirect target contains control characters, rejecting.");
Fail();
return;
}
std::unordered_map<std::string, std::string> fields;
try_emplace(fields, "URI", Queue->Uri);
try_emplace(fields, "New-URI", NewURI);
SendMessage("103 Redirect", std::move(fields));
Dequeue();
}
/*}}}*/
// AcqMethod::FetchResult::FetchResult - Constructor /*{{{*/
// ---------------------------------------------------------------------
/* */
pkgAcqMethod::FetchResult::FetchResult() : LastModified(0),
IMSHit(false), Size(0), ResumePoint(0), d(NULL)
{
}
/*}}}*/
// AcqMethod::FetchResult::TakeHashes - Load hashes /*{{{*/
// ---------------------------------------------------------------------
/* This hides the number of hashes we are supporting from the caller.
It just deals with the hash class. */
void pkgAcqMethod::FetchResult::TakeHashes(class Hashes &Hash)
{
Hashes = Hash.GetHashStringList();
}
/*}}}*/
void pkgAcqMethod::Dequeue() { /*{{{*/
FetchItem const * const Tmp = Queue;
Queue = Queue->Next;
if (Tmp == QueueBack)
QueueBack = Queue;
delete Tmp;
}
/*}}}*/
pkgAcqMethod::~pkgAcqMethod() {}
struct pkgAcqMethod::FetchItem::Private
{
std::string Proxy;
};
pkgAcqMethod::FetchItem::FetchItem() : Next(nullptr), DestFileFd(-1), LastModified(0), IndexFile(false),
FailIgnore(false), MaximumSize(0), d(new Private)
{}
std::string pkgAcqMethod::FetchItem::Proxy()
{
return d->Proxy;
}
void pkgAcqMethod::FetchItem::Proxy(std::string const &Proxy)
{
d->Proxy = Proxy;
}
pkgAcqMethod::FetchItem::~FetchItem() { delete d; }
pkgAcqMethod::FetchResult::~FetchResult() {}

136
apt-pkg/acquire-method.h Normal file
View File

@ -0,0 +1,136 @@
// -*- mode: cpp; mode: fold -*-
// Description /*{{{*/
/* ######################################################################
Acquire Method - Method helper class + functions
These functions are designed to be used within the method task to
ease communication with APT.
##################################################################### */
/*}}}*/
/** \addtogroup acquire
* @{
*
* \file acquire-method.h
*/
#ifndef PKGLIB_ACQUIRE_METHOD_H
#define PKGLIB_ACQUIRE_METHOD_H
#include <apt-pkg/hashes.h>
#include <apt-pkg/macros.h>
#include <stdarg.h>
#include <time.h>
#include <string>
#include <unordered_map>
#include <vector>
class APT_PUBLIC pkgAcqMethod
{
protected:
struct FetchItem
{
FetchItem *Next;
std::string Uri;
std::string DestFile;
int DestFileFd;
time_t LastModified;
bool IndexFile;
bool FailIgnore;
HashStringList ExpectedHashes;
// a maximum size we will download; this can be the exact filesize
// for when we know it or an arbitrary limit when we don't know the
// filesize (like an InRelease file)
unsigned long long MaximumSize;
FetchItem();
virtual ~FetchItem();
std::string Proxy(); // For internal use only.
void Proxy(std::string const &Proxy) APT_HIDDEN;
private:
struct Private;
Private *const d;
};
struct FetchResult
{
HashStringList Hashes;
std::vector<std::string> GPGVOutput;
time_t LastModified;
bool IMSHit;
std::string Filename;
unsigned long long Size;
unsigned long long ResumePoint;
void TakeHashes(class Hashes &Hash);
FetchResult();
virtual ~FetchResult();
private:
void * const d;
};
// State
std::vector<std::string> Messages;
FetchItem *Queue;
FetchItem *QueueBack;
std::string FailReason;
std::string UsedMirror;
std::string IP;
// Handlers for messages
virtual bool Configuration(std::string Message);
virtual bool Fetch(FetchItem * /*Item*/) {return true;};
virtual bool URIAcquire(std::string const &/*Message*/, FetchItem *Itm) { return Fetch(Itm); };
// Outgoing messages
void Fail(bool Transient = false);
inline void Fail(const char *Why, bool Transient = false) {Fail(std::string(Why),Transient);};
virtual void Fail(std::string Why, bool Transient = false);
virtual void URIStart(FetchResult &Res);
virtual void URIDone(FetchResult &Res,FetchResult *Alt = 0);
void SendMessage(std::string const &header, std::unordered_map<std::string, std::string> &&fields);
bool MediaFail(std::string Required,std::string Drive);
virtual void Exit() {};
void PrintStatus(char const * const header, const char* Format, va_list &args) const;
public:
enum CnfFlags
{
SingleInstance = (1 << 0),
Pipeline = (1 << 1),
SendConfig = (1 << 2),
LocalOnly = (1 << 3),
NeedsCleanup = (1 << 4),
Removable = (1 << 5),
AuxRequests = (1 << 6)
};
void Log(const char *Format,...);
void Status(const char *Format,...);
void Redirect(const std::string &NewURI);
int Run(bool Single = false);
inline void SetFailReason(std::string Msg) {FailReason = Msg;};
inline void SetIP(std::string aIP) {IP = aIP;};
pkgAcqMethod(const char *Ver,unsigned long Flags = 0);
virtual ~pkgAcqMethod();
void DropPrivsOrDie();
private:
APT_HIDDEN void Dequeue();
};
/** @} */
#endif

962
apt-pkg/acquire-worker.cc Normal file
View File

@ -0,0 +1,962 @@
// -*- mode: cpp; mode: fold -*-
// Description /*{{{*/
/* ######################################################################
Acquire Worker
The worker process can startup either as a Configuration prober
or as a queue runner. As a configuration prober it only reads the
configuration message and
##################################################################### */
/*}}}*/
// Include Files /*{{{*/
#include <config.h>
#include <apt-pkg/acquire-item.h>
#include <apt-pkg/acquire-worker.h>
#include <apt-pkg/acquire.h>
#include <apt-pkg/configuration.h>
#include <apt-pkg/error.h>
#include <apt-pkg/fileutl.h>
#include <apt-pkg/hashes.h>
#include <apt-pkg/proxy.h>
#include <apt-pkg/strutl.h>
#include <algorithm>
#include <iostream>
#include <string>
#include <vector>
#include <sstream>
#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/stat.h>
#include <unistd.h>
#include <apti18n.h>
/*}}}*/
using namespace std;
// Worker::Worker - Constructor for Queue startup /*{{{*/
pkgAcquire::Worker::Worker(Queue *Q, MethodConfig *Cnf, pkgAcquireStatus *log) :
d(NULL), OwnerQ(Q), Log(log), Config(Cnf), Access(Cnf->Access),
CurrentItem(nullptr)
{
Construct();
}
/*}}}*/
// Worker::Worker - Constructor for method config startup /*{{{*/
pkgAcquire::Worker::Worker(MethodConfig *Cnf) : Worker(nullptr, Cnf, nullptr)
{
}
/*}}}*/
// Worker::Construct - Constructor helper /*{{{*/
// ---------------------------------------------------------------------
/* */
void pkgAcquire::Worker::Construct()
{
NextQueue = 0;
NextAcquire = 0;
Process = -1;
InFd = -1;
OutFd = -1;
OutReady = false;
InReady = false;
Debug = _config->FindB("Debug::pkgAcquire::Worker",false);
}
/*}}}*/
// Worker::~Worker - Destructor /*{{{*/
// ---------------------------------------------------------------------
/* */
pkgAcquire::Worker::~Worker()
{
close(InFd);
close(OutFd);
if (Process > 0)
{
/* Closing of stdin is the signal to exit and die when the process
indicates it needs cleanup */
if (Config->NeedsCleanup == false)
kill(Process,SIGINT);
ExecWait(Process,Access.c_str(),true);
}
}
/*}}}*/
// Worker::Start - Start the worker process /*{{{*/
// ---------------------------------------------------------------------
/* This forks the method and inits the communication channel */
bool pkgAcquire::Worker::Start()
{
// Get the method path
constexpr char const * const methodsDir = "Dir::Bin::Methods";
std::string const confItem = std::string(methodsDir) + "::" + Access;
std::string Method;
if (_config->Exists(confItem))
Method = _config->FindFile(confItem.c_str());
else if (Access == "ftp" || Access == "rsh" || Access == "ssh")
return _error->Error(_("The method '%s' is unsupported and disabled by default. Consider switching to http(s). Set Dir::Bin::Methods::%s to \"%s\" to enable it again."), Access.c_str(), Access.c_str(), Access.c_str());
else
Method = _config->FindDir(methodsDir) + Access;
if (FileExists(Method) == false)
{
if (flNotDir(Method) == "false")
{
_error->Error(_("The method '%s' is explicitly disabled via configuration."), Access.c_str());
if (Access == "http" || Access == "https")
_error->Notice(_("If you meant to use Tor remember to use %s instead of %s."), ("tor+" + Access).c_str(), Access.c_str());
return false;
}
_error->Error(_("The method driver %s could not be found."),Method.c_str());
std::string const A(Access.cbegin(), std::find(Access.cbegin(), Access.cend(), '+'));
std::string pkg;
strprintf(pkg, "apt-transport-%s", A.c_str());
_error->Notice(_("Is the package %s installed?"), pkg.c_str());
return false;
}
std::string const Calling = _config->FindDir(methodsDir) + Access;
if (Debug == true)
{
std::clog << "Starting method '" << Calling << "'";
if (Calling != Method)
std::clog << " ( via " << Method << " )";
std::clog << endl;
}
// Create the pipes
int Pipes[4] = {-1,-1,-1,-1};
if (pipe(Pipes) != 0 || pipe(Pipes+2) != 0)
{
_error->Errno("pipe","Failed to create IPC pipe to subprocess");
for (int I = 0; I != 4; I++)
close(Pipes[I]);
return false;
}
for (int I = 0; I != 4; I++)
SetCloseExec(Pipes[I],true);
// Fork off the process
Process = ExecFork();
if (Process == 0)
{
// Setup the FDs
dup2(Pipes[1],STDOUT_FILENO);
dup2(Pipes[2],STDIN_FILENO);
SetCloseExec(STDOUT_FILENO,false);
SetCloseExec(STDIN_FILENO,false);
SetCloseExec(STDERR_FILENO,false);
const char * const Args[] = { Calling.c_str(), nullptr };
execv(Method.c_str() ,const_cast<char **>(Args));
std::cerr << "Failed to exec method " << Calling << " ( via " << Method << ")" << endl;
_exit(100);
}
// Fix up our FDs
InFd = Pipes[0];
OutFd = Pipes[3];
SetNonBlock(Pipes[0],true);
SetNonBlock(Pipes[3],true);
close(Pipes[1]);
close(Pipes[2]);
OutReady = false;
InReady = true;
// Read the configuration data
if (WaitFd(InFd) == false ||
ReadMessages() == false)
return _error->Error(_("Method %s did not start correctly"),Method.c_str());
RunMessages();
if (OwnerQ != 0)
SendConfiguration();
return true;
}
/*}}}*/
// Worker::ReadMessages - Read all pending messages into the list /*{{{*/
// ---------------------------------------------------------------------
/* */
bool pkgAcquire::Worker::ReadMessages()
{
if (::ReadMessages(InFd,MessageQueue) == false)
return MethodFailure();
return true;
}
/*}}}*/
// Worker::RunMessage - Empty the message queue /*{{{*/
// ---------------------------------------------------------------------
/* This takes the messages from the message queue and runs them through
the parsers in order. */
enum class APT_HIDDEN MessageType
{
CAPABILITIES = 100,
LOG = 101,
STATUS = 102,
REDIRECT = 103,
WARNING = 104,
URI_START = 200,
URI_DONE = 201,
AUX_REQUEST = 351,
URI_FAILURE = 400,
GENERAL_FAILURE = 401,
MEDIA_CHANGE = 403
};
static bool isDoomedItem(pkgAcquire::Item const * const Itm)
{
auto const TransItm = dynamic_cast<pkgAcqTransactionItem const * const>(Itm);
if (TransItm == nullptr)
return false;
return TransItm->TransactionManager->State != pkgAcqTransactionItem::TransactionStarted;
}
static HashStringList GetHashesFromMessage(std::string const &Prefix, std::string const &Message)
{
HashStringList hsl;
for (char const *const *type = HashString::SupportedHashes(); *type != NULL; ++type)
{
std::string const tagname = Prefix + *type + "-Hash";
std::string const hashsum = LookupTag(Message, tagname.c_str());
if (hashsum.empty() == false)
hsl.push_back(HashString(*type, hashsum));
}
return hsl;
}
static void APT_NONNULL(3) ChangeSiteIsMirrorChange(std::string const &NewURI, pkgAcquire::ItemDesc &desc, pkgAcquire::Item *const Owner)
{
if (URI::SiteOnly(NewURI) == URI::SiteOnly(desc.URI))
return;
auto const firstSpace = desc.Description.find(" ");
if (firstSpace != std::string::npos)
{
std::string const OldSite = desc.Description.substr(0, firstSpace);
if (likely(APT::String::Startswith(desc.URI, OldSite)))
{
std::string const OldExtra = desc.URI.substr(OldSite.length() + 1);
if (likely(APT::String::Endswith(NewURI, OldExtra)))
{
std::string const NewSite = NewURI.substr(0, NewURI.length() - OldExtra.length());
Owner->UsedMirror = URI::ArchiveOnly(NewSite);
desc.Description.replace(0, firstSpace, Owner->UsedMirror);
}
}
}
}
bool pkgAcquire::Worker::RunMessages()
{
while (MessageQueue.empty() == false)
{
string Message = MessageQueue.front();
MessageQueue.erase(MessageQueue.begin());
if (Debug == true)
clog << " <- " << Access << ':' << QuoteString(Message,"\n") << endl;
// Fetch the message number
char *End;
MessageType const Number = static_cast<MessageType>(strtoul(Message.c_str(),&End,10));
if (End == Message.c_str())
return _error->Error("Invalid message from method %s: %s",Access.c_str(),Message.c_str());
string URI = LookupTag(Message,"URI");
pkgAcquire::Queue::QItem *Itm = NULL;
if (URI.empty() == false)
Itm = OwnerQ->FindItem(URI,this);
if (Itm != NULL)
{
// update used mirror
string UsedMirror = LookupTag(Message,"UsedMirror", "");
if (UsedMirror.empty() == false)
{
for (pkgAcquire::Queue::QItem::owner_iterator O = Itm->Owners.begin(); O != Itm->Owners.end(); ++O)
(*O)->UsedMirror = UsedMirror;
if (Itm->Description.find(" ") != string::npos)
Itm->Description.replace(0, Itm->Description.find(" "), UsedMirror);
}
}
// Determine the message number and dispatch
switch (Number)
{
case MessageType::CAPABILITIES:
if (Capabilities(Message) == false)
return _error->Error("Unable to process Capabilities message from %s",Access.c_str());
break;
case MessageType::LOG:
if (Debug == true)
clog << " <- (log) " << LookupTag(Message,"Message") << endl;
break;
case MessageType::STATUS:
Status = LookupTag(Message,"Message");
break;
case MessageType::REDIRECT:
{
if (Itm == nullptr)
{
_error->Error("Method gave invalid 103 Redirect message");
break;
}
std::string const NewURI = LookupTag(Message,"New-URI",URI.c_str());
Itm->URI = NewURI;
auto const AltUris = VectorizeString(LookupTag(Message, "Alternate-URIs"), '\n');
ItemDone();
// Change the status so that it can be dequeued
for (auto const &O: Itm->Owners)
O->Status = pkgAcquire::Item::StatIdle;
// Mark the item as done (taking care of all queues)
// and then put it in the main queue again
std::vector<Item*> const ItmOwners = Itm->Owners;
OwnerQ->ItemDone(Itm);
Itm = nullptr;
for (auto const &Owner: ItmOwners)
{
for (auto alt = AltUris.crbegin(); alt != AltUris.crend(); ++alt)
Owner->PushAlternativeURI(std::string(*alt), {}, false);
pkgAcquire::ItemDesc &desc = Owner->GetItemDesc();
// for a simplified retry a method might redirect without URI change
// see also IsRedirectionLoop implementation
if (desc.URI != NewURI)
{
auto newuri = NewURI;
if (Owner->IsGoodAlternativeURI(newuri) == false && Owner->PopAlternativeURI(newuri) == false)
newuri.clear();
if (newuri.empty() || Owner->IsRedirectionLoop(newuri))
{
std::string msg = Message;
msg.append("\nFailReason: RedirectionLoop");
Owner->Failed(msg, Config);
if (Log != nullptr)
Log->Fail(Owner->GetItemDesc());
continue;
}
if (Log != nullptr)
Log->Done(desc);
ChangeSiteIsMirrorChange(NewURI, desc, Owner);
desc.URI = NewURI;
}
if (isDoomedItem(Owner) == false)
OwnerQ->Owner->Enqueue(desc);
}
break;
}
case MessageType::WARNING:
_error->Warning("%s: %s", Itm ? Itm->Owner ? Itm->Owner->DescURI().c_str() : Access.c_str() : Access.c_str(), LookupTag(Message, "Message").c_str());
break;
case MessageType::URI_START:
{
if (Itm == nullptr)
{
_error->Error("Method gave invalid 200 URI Start message");
break;
}
CurrentItem = Itm;
Itm->CurrentSize = 0;
Itm->TotalSize = strtoull(LookupTag(Message,"Size","0").c_str(), NULL, 10);
Itm->ResumePoint = strtoull(LookupTag(Message,"Resume-Point","0").c_str(), NULL, 10);
for (auto const Owner: Itm->Owners)
{
Owner->Start(Message, Itm->TotalSize);
// Display update before completion
if (Log != nullptr)
{
if (Log->MorePulses == true)
Log->Pulse(Owner->GetOwner());
Log->Fetch(Owner->GetItemDesc());
}
}
break;
}
case MessageType::URI_DONE:
{
if (Itm == nullptr)
{
_error->Error("Method gave invalid 201 URI Done message");
break;
}
PrepareFiles("201::URIDone", Itm);
// Display update before completion
if (Log != 0 && Log->MorePulses == true)
for (pkgAcquire::Queue::QItem::owner_iterator O = Itm->Owners.begin(); O != Itm->Owners.end(); ++O)
Log->Pulse((*O)->GetOwner());
HashStringList ReceivedHashes;
{
std::string const givenfilename = LookupTag(Message, "Filename");
std::string const filename = givenfilename.empty() ? Itm->Owner->DestFile : givenfilename;
// see if we got hashes to verify
ReceivedHashes = GetHashesFromMessage("", Message);
// not all methods always send Hashes our way
if (ReceivedHashes.usable() == false)
{
HashStringList const ExpectedHashes = Itm->GetExpectedHashes();
if (ExpectedHashes.usable() == true && RealFileExists(filename))
{
Hashes calc(ExpectedHashes);
FileFd file(filename, FileFd::ReadOnly, FileFd::None);
calc.AddFD(file);
ReceivedHashes = calc.GetHashStringList();
}
}
// only local files can refer to other filenames and counting them as fetched would be unfair
if (Log != NULL && Itm->Owner->Complete == false && Itm->Owner->Local == false && givenfilename == filename)
Log->Fetched(ReceivedHashes.FileSize(),atoi(LookupTag(Message,"Resume-Point","0").c_str()));
}
std::vector<Item*> const ItmOwners = Itm->Owners;
OwnerQ->ItemDone(Itm);
Itm = NULL;
bool const isIMSHit = StringToBool(LookupTag(Message,"IMS-Hit"),false) ||
StringToBool(LookupTag(Message,"Alt-IMS-Hit"),false);
auto const forcedHash = _config->Find("Acquire::ForceHash");
for (auto const Owner: ItmOwners)
{
HashStringList const ExpectedHashes = Owner->GetExpectedHashes();
if(_config->FindB("Debug::pkgAcquire::Auth", false) == true)
{
std::clog << "201 URI Done: " << Owner->DescURI() << endl
<< "ReceivedHash:" << endl;
for (HashStringList::const_iterator hs = ReceivedHashes.begin(); hs != ReceivedHashes.end(); ++hs)
std::clog << "\t- " << hs->toStr() << std::endl;
std::clog << "ExpectedHash:" << endl;
for (HashStringList::const_iterator hs = ExpectedHashes.begin(); hs != ExpectedHashes.end(); ++hs)
std::clog << "\t- " << hs->toStr() << std::endl;
std::clog << endl;
}
// decide if what we got is what we expected
bool consideredOkay = false;
if ((forcedHash.empty() && ExpectedHashes.empty() == false) ||
(forcedHash.empty() == false && ExpectedHashes.usable()))
{
if (ReceivedHashes.empty())
{
/* IMS-Hits can't be checked here as we will have the uncompressed file,
but the hashes for the compressed file. What we have was good though,
so all we have to ensure later is that we are not stalled. */
consideredOkay = isIMSHit;
}
else if (ReceivedHashes == ExpectedHashes)
consideredOkay = true;
else
consideredOkay = false;
}
else
consideredOkay = !Owner->HashesRequired();
if (consideredOkay == true)
consideredOkay = Owner->VerifyDone(Message, Config);
else // hashsum mismatch
Owner->Status = pkgAcquire::Item::StatAuthError;
if (consideredOkay == true)
{
if (isDoomedItem(Owner) == false)
Owner->Done(Message, ReceivedHashes, Config);
if (Log != nullptr)
{
if (isIMSHit)
Log->IMSHit(Owner->GetItemDesc());
else
Log->Done(Owner->GetItemDesc());
}
}
else
{
auto SavedDesc = Owner->GetItemDesc();
if (isDoomedItem(Owner) == false)
{
if (Message.find("\nFailReason:") == std::string::npos)
{
if (ReceivedHashes != ExpectedHashes)
Message.append("\nFailReason: HashSumMismatch");
else
Message.append("\nFailReason: WeakHashSums");
}
Owner->Failed(Message,Config);
}
if (Log != nullptr)
Log->Fail(SavedDesc);
}
}
ItemDone();
break;
}
case MessageType::AUX_REQUEST:
{
if (Itm == nullptr)
{
_error->Error("Method gave invalid Aux Request message");
break;
}
else if (Config->GetAuxRequests() == false)
{
std::vector<Item *> const ItmOwners = Itm->Owners;
Message.append("\nMessage: Method tried to make an Aux Request while not being allowed to do them");
OwnerQ->ItemDone(Itm);
Itm = nullptr;
HandleFailure(ItmOwners, Config, Log, Message, false, false);
ItemDone();
std::string Msg = "600 URI Acquire\n";
Msg.reserve(200);
Msg += "URI: " + LookupTag(Message, "Aux-URI", "");
Msg += "\nFilename: /nonexistent/auxrequest.blocked";
Msg += "\n\n";
if (Debug == true)
clog << " -> " << Access << ':' << QuoteString(Msg, "\n") << endl;
OutQueue += Msg;
OutReady = true;
break;
}
auto maxsizestr = LookupTag(Message, "MaximumSize", "");
unsigned long long const MaxSize = maxsizestr.empty() ? 0 : strtoull(maxsizestr.c_str(), nullptr, 10);
new pkgAcqAuxFile(Itm->Owner, this, LookupTag(Message, "Aux-ShortDesc", ""),
LookupTag(Message, "Aux-Description", ""), LookupTag(Message, "Aux-URI", ""),
GetHashesFromMessage("Aux-", Message), MaxSize);
break;
}
case MessageType::URI_FAILURE:
{
if (Itm == nullptr)
{
std::string const msg = LookupTag(Message,"Message");
_error->Error("Method gave invalid 400 URI Failure message: %s", msg.c_str());
break;
}
PrepareFiles("400::URIFailure", Itm);
// Display update before completion
if (Log != nullptr && Log->MorePulses == true)
for (pkgAcquire::Queue::QItem::owner_iterator O = Itm->Owners.begin(); O != Itm->Owners.end(); ++O)
Log->Pulse((*O)->GetOwner());
std::vector<Item*> const ItmOwners = Itm->Owners;
OwnerQ->ItemDone(Itm);
Itm = nullptr;
bool errTransient = false, errAuthErr = false;
if (StringToBool(LookupTag(Message, "Transient-Failure"), false) == true)
errTransient = true;
else
{
std::string const failReason = LookupTag(Message, "FailReason");
{
auto const reasons = { "Timeout", "ConnectionRefused",
"ConnectionTimedOut", "ResolveFailure", "TmpResolveFailure" };
errTransient = std::find(std::begin(reasons), std::end(reasons), failReason) != std::end(reasons);
}
if (errTransient == false)
{
auto const reasons = { "HashSumMismatch", "WeakHashSums", "MaximumSizeExceeded" };
errAuthErr = std::find(std::begin(reasons), std::end(reasons), failReason) != std::end(reasons);
}
}
HandleFailure(ItmOwners, Config, Log, Message, errTransient, errAuthErr);
ItemDone();
break;
}
case MessageType::GENERAL_FAILURE:
_error->Error("Method %s General failure: %s",Access.c_str(),LookupTag(Message,"Message").c_str());
break;
case MessageType::MEDIA_CHANGE:
MediaChange(Message);
break;
}
}
return true;
}
/*}}}*/
void pkgAcquire::Worker::HandleFailure(std::vector<pkgAcquire::Item *> const &ItmOwners, /*{{{*/
pkgAcquire::MethodConfig *const Config, pkgAcquireStatus *const Log,
std::string const &Message, bool const errTransient, bool const errAuthErr)
{
for (auto const Owner : ItmOwners)
{
std::string NewURI;
if (errTransient == true && Config->LocalOnly == false && Owner->Retries != 0)
{
--Owner->Retries;
Owner->FailMessage(Message);
auto SavedDesc = Owner->GetItemDesc();
if (Log != nullptr)
Log->Fail(SavedDesc);
if (isDoomedItem(Owner) == false)
OwnerQ->Owner->Enqueue(SavedDesc);
}
else
{
if (errAuthErr)
Owner->RemoveAlternativeSite(URI::SiteOnly(Owner->GetItemDesc().URI));
if (Owner->PopAlternativeURI(NewURI))
{
Owner->FailMessage(Message);
auto &desc = Owner->GetItemDesc();
if (Log != nullptr)
Log->Fail(desc);
ChangeSiteIsMirrorChange(NewURI, desc, Owner);
desc.URI = NewURI;
if (isDoomedItem(Owner) == false)
OwnerQ->Owner->Enqueue(desc);
}
else
{
if (errAuthErr && Owner->GetExpectedHashes().empty() == false)
Owner->Status = pkgAcquire::Item::StatAuthError;
else if (errTransient)
Owner->Status = pkgAcquire::Item::StatTransientNetworkError;
auto SavedDesc = Owner->GetItemDesc();
if (isDoomedItem(Owner) == false)
Owner->Failed(Message, Config);
if (Log != nullptr)
Log->Fail(SavedDesc);
}
}
}
}
/*}}}*/
// Worker::Capabilities - 100 Capabilities handler /*{{{*/
// ---------------------------------------------------------------------
/* This parses the capabilities message and dumps it into the configuration
structure. */
bool pkgAcquire::Worker::Capabilities(string Message)
{
if (Config == 0)
return true;
Config->Version = LookupTag(Message,"Version");
Config->SingleInstance = StringToBool(LookupTag(Message,"Single-Instance"),false);
Config->Pipeline = StringToBool(LookupTag(Message,"Pipeline"),false);
Config->SendConfig = StringToBool(LookupTag(Message,"Send-Config"),false);
Config->LocalOnly = StringToBool(LookupTag(Message,"Local-Only"),false);
Config->NeedsCleanup = StringToBool(LookupTag(Message,"Needs-Cleanup"),false);
Config->Removable = StringToBool(LookupTag(Message,"Removable"),false);
Config->SetAuxRequests(StringToBool(LookupTag(Message, "AuxRequests"), false));
// Some debug text
if (Debug == true)
{
clog << "Configured access method " << Config->Access << endl;
clog << "Version:" << Config->Version << " SingleInstance:" << Config->SingleInstance << " Pipeline:" << Config->Pipeline << " SendConfig:" << Config->SendConfig << " LocalOnly: " << Config->LocalOnly << " NeedsCleanup: " << Config->NeedsCleanup << " Removable: " << Config->Removable << " AuxRequests: " << Config->GetAuxRequests() << endl;
}
return true;
}
/*}}}*/
// Worker::MediaChange - Request a media change /*{{{*/
// ---------------------------------------------------------------------
/* */
bool pkgAcquire::Worker::MediaChange(string Message)
{
int status_fd = _config->FindI("APT::Status-Fd",-1);
if(status_fd > 0)
{
string Media = LookupTag(Message,"Media");
string Drive = LookupTag(Message,"Drive");
ostringstream msg,status;
ioprintf(msg,_("Please insert the disc labeled: "
"'%s' "
"in the drive '%s' and press [Enter]."),
Media.c_str(),Drive.c_str());
status << "media-change: " // message
<< Media << ":" // media
<< Drive << ":" // drive
<< msg.str() // l10n message
<< endl;
std::string const dlstatus = status.str();
FileFd::Write(status_fd, dlstatus.c_str(), dlstatus.size());
}
if (Log == 0 || Log->MediaChange(LookupTag(Message,"Media"),
LookupTag(Message,"Drive")) == false)
{
char S[300];
snprintf(S,sizeof(S),"603 Media Changed\nFailed: true\n\n");
if (Debug == true)
clog << " -> " << Access << ':' << QuoteString(S,"\n") << endl;
OutQueue += S;
OutReady = true;
return true;
}
char S[300];
snprintf(S,sizeof(S),"603 Media Changed\n\n");
if (Debug == true)
clog << " -> " << Access << ':' << QuoteString(S,"\n") << endl;
OutQueue += S;
OutReady = true;
return true;
}
/*}}}*/
// Worker::SendConfiguration - Send the config to the method /*{{{*/
// ---------------------------------------------------------------------
/* */
bool pkgAcquire::Worker::SendConfiguration()
{
if (Config->SendConfig == false)
return true;
if (OutFd == -1)
return false;
/* Write out all of the configuration directives by walking the
configuration tree */
std::ostringstream Message;
Message << "601 Configuration\n";
_config->Dump(Message, NULL, "Config-Item: %F=%V\n", false);
Message << '\n';
if (Debug == true)
clog << " -> " << Access << ':' << QuoteString(Message.str(),"\n") << endl;
OutQueue += Message.str();
OutReady = true;
return true;
}
/*}}}*/
// Worker::QueueItem - Add an item to the outbound queue /*{{{*/
// ---------------------------------------------------------------------
/* Send a URI Acquire message to the method */
bool pkgAcquire::Worker::QueueItem(pkgAcquire::Queue::QItem *Item)
{
if (OutFd == -1)
return false;
if (isDoomedItem(Item->Owner))
return true;
Item->SyncDestinationFiles();
string Message = "600 URI Acquire\n";
Message.reserve(300);
Message += "URI: " + Item->URI;
Message += "\nFilename: " + Item->Owner->DestFile;
URI URL(Item->URI);
// FIXME: We should not hard code proxy protocols here.
if (URL.Access == "http" || URL.Access == "https")
{
AutoDetectProxy(URL);
if (_config->Exists("Acquire::" + URL.Access + "::proxy::" + URL.Host))
{
Message += "\nProxy: " + _config->Find("Acquire::" + URL.Access + "::proxy::" + URL.Host);
}
}
HashStringList const hsl = Item->GetExpectedHashes();
for (HashStringList::const_iterator hs = hsl.begin(); hs != hsl.end(); ++hs)
Message += "\nExpected-" + hs->HashType() + ": " + hs->HashValue();
Message += Item->Custom600Headers();
if (hsl.FileSize() == 0 && Message.find("\nMaximum-Size: ") == std::string::npos)
{
unsigned long long FileSize = Item->GetMaximumSize();
if(FileSize > 0)
{
string MaximumSize;
strprintf(MaximumSize, "%llu", FileSize);
Message += "\nMaximum-Size: " + MaximumSize;
}
}
Message += "\n\n";
if (RealFileExists(Item->Owner->DestFile))
{
std::string const SandboxUser = _config->Find("APT::Sandbox::User");
ChangeOwnerAndPermissionOfFile("Item::QueueURI", Item->Owner->DestFile.c_str(),
SandboxUser.c_str(), ROOT_GROUP, 0600);
}
if (Debug == true)
clog << " -> " << Access << ':' << QuoteString(Message,"\n") << endl;
OutQueue += Message;
OutReady = true;
return true;
}
/*}}}*/
// Worker::ReplyAux - reply to an aux request from this worker /*{{{*/
bool pkgAcquire::Worker::ReplyAux(pkgAcquire::ItemDesc const &Item)
{
if (OutFd == -1)
return false;
if (isDoomedItem(Item.Owner))
return true;
string Message = "600 URI Acquire\n";
Message.reserve(200);
Message += "URI: " + Item.URI;
if (RealFileExists(Item.Owner->DestFile))
{
if (Item.Owner->Status == pkgAcquire::Item::StatDone)
{
std::string const SandboxUser = _config->Find("APT::Sandbox::User");
ChangeOwnerAndPermissionOfFile("Worker::ReplyAux", Item.Owner->DestFile.c_str(),
SandboxUser.c_str(), ROOT_GROUP, 0600);
Message += "\nFilename: " + Item.Owner->DestFile;
}
else
{
// we end up here in case we would need root-rights to delete a file,
// but we run the command as non-root… (yes, it is unlikely)
Message += "\nFilename: " + flCombine("/nonexistent", Item.Owner->DestFile);
}
}
else
Message += "\nFilename: " + Item.Owner->DestFile;
Message += "\n\n";
if (Debug == true)
clog << " -> " << Access << ':' << QuoteString(Message, "\n") << endl;
OutQueue += Message;
OutReady = true;
return true;
}
/*}}}*/
// Worker::OutFdRead - Out bound FD is ready /*{{{*/
// ---------------------------------------------------------------------
/* */
bool pkgAcquire::Worker::OutFdReady()
{
int Res;
do
{
Res = write(OutFd,OutQueue.c_str(),OutQueue.length());
}
while (Res < 0 && errno == EINTR);
if (Res <= 0)
return MethodFailure();
OutQueue.erase(0,Res);
if (OutQueue.empty() == true)
OutReady = false;
return true;
}
/*}}}*/
// Worker::InFdRead - In bound FD is ready /*{{{*/
// ---------------------------------------------------------------------
/* */
bool pkgAcquire::Worker::InFdReady()
{
if (ReadMessages() == false)
return false;
RunMessages();
return true;
}
/*}}}*/
// Worker::MethodFailure - Called when the method fails /*{{{*/
// ---------------------------------------------------------------------
/* This is called when the method is believed to have failed, probably because
read returned -1. */
bool pkgAcquire::Worker::MethodFailure()
{
_error->Error("Method %s has died unexpectedly!",Access.c_str());
// do not reap the child here to show meaningful error to the user
ExecWait(Process,Access.c_str(),false);
Process = -1;
close(InFd);
close(OutFd);
InFd = -1;
OutFd = -1;
OutReady = false;
InReady = false;
OutQueue = string();
MessageQueue.erase(MessageQueue.begin(),MessageQueue.end());
return false;
}
/*}}}*/
// Worker::Pulse - Called periodically /*{{{*/
// ---------------------------------------------------------------------
/* */
void pkgAcquire::Worker::Pulse()
{
if (CurrentItem == 0)
return;
struct stat Buf;
if (stat(CurrentItem->Owner->DestFile.c_str(),&Buf) != 0)
return;
CurrentItem->CurrentSize = Buf.st_size;
}
/*}}}*/
// Worker::ItemDone - Called when the current item is finished /*{{{*/
// ---------------------------------------------------------------------
/* */
void pkgAcquire::Worker::ItemDone()
{
CurrentItem = nullptr;
Status = string();
}
/*}}}*/
void pkgAcquire::Worker::PrepareFiles(char const * const caller, pkgAcquire::Queue::QItem const * const Itm)/*{{{*/
{
if (RealFileExists(Itm->Owner->DestFile))
{
ChangeOwnerAndPermissionOfFile(caller, Itm->Owner->DestFile.c_str(), "root", ROOT_GROUP, 0644);
std::string const filename = Itm->Owner->DestFile;
for (pkgAcquire::Queue::QItem::owner_iterator O = Itm->Owners.begin(); O != Itm->Owners.end(); ++O)
{
pkgAcquire::Item const * const Owner = *O;
if (Owner->DestFile == filename || filename == "/dev/null")
continue;
RemoveFile("PrepareFiles", Owner->DestFile);
if (link(filename.c_str(), Owner->DestFile.c_str()) != 0)
{
// different mounts can't happen for us as we download to lists/ by default,
// but if the system is reused by others the locations can potentially be on
// different disks, so use a symlink as a poor man's replacement.
// FIXME: Real copying as a last fallback, but that is costly, so offloading it to a method would be preferable
if (symlink(filename.c_str(), Owner->DestFile.c_str()) != 0)
_error->Error("Can't create (sym)link of file %s to %s", filename.c_str(), Owner->DestFile.c_str());
}
}
}
else
{
for (pkgAcquire::Queue::QItem::owner_iterator O = Itm->Owners.begin(); O != Itm->Owners.end(); ++O)
RemoveFile("PrepareFiles", (*O)->DestFile);
}
}
/*}}}*/

324
apt-pkg/acquire-worker.h Normal file

@ -0,0 +1,324 @@
// -*- mode: cpp; mode: fold -*-
// Description /*{{{*/
/* ######################################################################
Acquire Worker - Worker process manager
Each worker class is associated with exactly one subprocess.
##################################################################### */
/*}}}*/
/** \addtogroup acquire
* @{
*
* \file acquire-worker.h
*/
#ifndef PKGLIB_ACQUIRE_WORKER_H
#define PKGLIB_ACQUIRE_WORKER_H
#include <apt-pkg/acquire.h>
#include <apt-pkg/weakptr.h>
#include <string>
#include <vector>
#include <sys/types.h>
/** \brief A fetch subprocess.
*
* A worker process is responsible for one stage of the fetch. This
* class encapsulates the communications protocol between the master
* process and the worker, from the master end.
*
* Each worker is intrinsically placed on two linked lists. The
* Queue list (maintained in the #NextQueue variable) is maintained
* by the pkgAcquire::Queue class; it represents the set of workers
* assigned to a particular queue. The Acquire list (maintained in
* the #NextAcquire variable) is maintained by the pkgAcquire class;
* it represents the set of active workers for a particular
* pkgAcquire object.
*
* \todo Like everything else in the Acquire system, this has way too
* many protected items.
*
* \sa pkgAcqMethod, pkgAcquire::Item, pkgAcquire
*/
class APT_PUBLIC pkgAcquire::Worker : public WeakPointable
{
/** \brief dpointer placeholder (for later in case we need it) */
void * const d;
friend class pkgAcquire;
protected:
friend class Queue;
/** \brief The next link on the Queue list.
*
* \todo This is always NULL; is it just for future use?
*/
Worker *NextQueue;
/** \brief The next link on the Acquire list. */
Worker *NextAcquire;
/** \brief The Queue with which this worker is associated. */
Queue *OwnerQ;
/** \brief The download progress indicator to which progress
* messages should be sent.
*/
pkgAcquireStatus *Log;
/** \brief The configuration of this method. On startup, the
* target of this pointer is filled in with basic data about the
* method, as reported by the worker.
*/
MethodConfig *Config;
/** \brief The access method to be used by this worker.
*
* \todo Doesn't this duplicate Config->Access?
*/
std::string Access;
/** \brief The PID of the subprocess. */
pid_t Process;
/** \brief A file descriptor connected to the standard output of
* the subprocess.
*
* Used to read messages and data from the subprocess.
*/
int InFd;
/** \brief A file descriptor connected to the standard input of the
* subprocess.
*
* Used to send commands and configuration data to the subprocess.
*/
int OutFd;
/** \brief The socket to send SCM_RIGHTS message through
*/
int PrivSepSocketFd;
int PrivSepSocketFdChild;
/** \brief Set to \b true if the worker is in a state in which it
* might generate data or command responses.
*
* \todo Is this right? It's a guess.
*/
bool InReady;
/** \brief Set to \b true if the worker is in a state in which it
* is legal to send commands to it.
*
* \todo Is this right?
*/
bool OutReady;
/** If \b true, debugging output will be sent to std::clog. */
bool Debug;
/** \brief The raw text values of messages received from the
* worker, in sequence.
*/
std::vector<std::string> MessageQueue;
/** \brief Buffers pending writes to the subprocess.
*
* \todo Wouldn't a std::deque be more appropriate?
*/
std::string OutQueue;
/** \brief Common code for the constructor.
*
* Initializes NextQueue and NextAcquire to NULL; Process, InFd,
* and OutFd to -1, OutReady and InReady to \b false, and Debug
* from _config.
*/
void Construct();
/** \brief Retrieve any available messages from the subprocess.
*
* The messages are retrieved as in \link strutl.h ReadMessages()\endlink, and
* #MethodFailure() is invoked if an error occurs; in particular,
* if the pipe to the subprocess dies unexpectedly while a message
* is being read.
*
* \return \b true if the messages were successfully read, \b
* false otherwise.
*/
bool ReadMessages();
/** \brief Parse and dispatch pending messages.
*
* This dispatches the message in a manner appropriate for its
* type.
*
* \todo Several message types lack separate handlers.
*
* \sa Capabilities(), SendConfiguration(), MediaChange()
*/
bool RunMessages();
/** \brief Read and dispatch any pending messages from the
* subprocess.
*
* \return \b false if the subprocess died unexpectedly while a
* message was being transmitted.
*/
bool InFdReady();
/** \brief Send any pending commands to the subprocess.
*
* This method will fail if there is no pending output.
*
* \return \b true if all commands succeeded, \b false if an
* error occurred (in which case MethodFailure() will be invoked).
*/
bool OutFdReady();
/** \brief Handle a 100 Capabilities response from the subprocess.
*
* \param Message the raw text of the message from the subprocess.
*
* The message will be parsed and its contents used to fill
* #Config. If #Config is NULL, this routine is a NOP.
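*
* A typical capability announcement (version and flag values are
* illustrative) looks like:
*
* <pre>
* 100 Capabilities
* Version: 2.0
* Single-Instance: true
* Send-Config: true
* Pipeline: true
* </pre>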
*
* \return \b true.
*/
bool Capabilities(std::string Message);
/** \brief Send a 601 Configuration message (containing the APT
* configuration) to the subprocess.
*
* The APT configuration will be sent to the subprocess in a
* message of the following form:
*
* <pre>
* 601 Configuration
* Config-Item: Fully-Qualified-Item=Val
* Config-Item: Fully-Qualified-Item=Val
* ...
* </pre>
*
* \return \b true if the command was successfully sent, \b false
* otherwise.
*/
bool SendConfiguration();
/** \brief Handle a 403 Media Change message.
*
* \param Message the raw text of the message; the Media field
* indicates what type of media should be changed, and the Drive
* field indicates where the media is located.
*
* Invokes pkgAcquireStatus::MediaChange(Media, Drive) to ask the
* user to swap disks; informs the subprocess of the result (via
* 603 Media Changed, with the Failed field set to \b true if the
* user cancelled the media change).
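*
* The reply sent back to the subprocess when the user cancels is:
*
* <pre>
* 603 Media Changed
* Failed: true
* </pre>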
*/
bool MediaChange(std::string Message);
/** \brief Invoked when the worker process dies unexpectedly.
*
* Waits for the subprocess to terminate and generates an error if
* it terminated abnormally, then closes and blanks out all file
* descriptors. Discards all pending messages from the
* subprocess.
*
* \return \b false.
*/
bool MethodFailure();
/** \brief Invoked when a fetch job is completed, either
* successfully or unsuccessfully.
*
* Resets the status information for the worker process.
*/
void ItemDone();
public:
/** \brief The queue entry that is currently being downloaded. */
pkgAcquire::Queue::QItem *CurrentItem;
/** \brief The most recent status string received from the
* subprocess.
*/
std::string Status;
/** \brief Tell the subprocess to download the given item.
*
* \param Item the item to queue up.
* \return \b true if the item was successfully enqueued.
*
* Queues up a 600 URI Acquire message for the given item to be
* sent at the next possible moment. Does \e not flush the output
* queue.
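*
* The queued request resembles the following (URI, filename and hash are
* invented for illustration):
*
* <pre>
* 600 URI Acquire
* URI: http://deb.example.org/debian/dists/unstable/InRelease
* Filename: /var/lib/apt/lists/partial/deb.example.org_dists_unstable_InRelease
* Expected-SHA256: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef
* </pre>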
*/
bool QueueItem(pkgAcquire::Queue::QItem *Item);
APT_HIDDEN bool ReplyAux(pkgAcquire::ItemDesc const &Item);
/** \brief Start up the worker and fill in #Config.
*
* Reads the first message from the worker, which is assumed to be
* a 100 Capabilities message.
*
* \return \b true if all operations completed successfully.
*/
bool Start();
/** \brief Update the worker statistics (CurrentSize, TotalSize,
* etc).
*/
void Pulse();
/** \return The fetch method configuration. */
inline const MethodConfig *GetConf() const {return Config;};
/** \brief Create a new Worker to download files.
*
* \param OwnerQ The queue into which this worker should be
* placed.
*
* \param Config A location in which to store information about
* the fetch method.
*
* \param Log The download progress indicator that should be used
* to report the progress of this worker.
*/
Worker(Queue *OwnerQ,MethodConfig *Config,pkgAcquireStatus *Log);
/** \brief Create a new Worker that should just retrieve
* information about the fetch method.
*
* Nothing in particular forces you to refrain from actually
* downloading stuff, but the various status callbacks won't be
* invoked.
*
* \param Config A location in which to store information about
* the fetch method.
*/
explicit Worker(MethodConfig *Config);
/** \brief Clean up this worker.
*
* Closes the file descriptors; if MethodConfig::NeedsCleanup is
* \b false, also rudely interrupts the worker with a SIGINT.
*/
virtual ~Worker();
private:
APT_HIDDEN void PrepareFiles(char const * const caller, pkgAcquire::Queue::QItem const * const Itm);
APT_HIDDEN void HandleFailure(std::vector<pkgAcquire::Item *> const &ItmOwners,
pkgAcquire::MethodConfig *const Config, pkgAcquireStatus *const Log,
std::string const &Message, bool const errTransient, bool const errAuthErr);
};
/** @} */
#endif

1478
apt-pkg/acquire.cc Normal file

File diff suppressed because it is too large

863
apt-pkg/acquire.h Normal file

@ -0,0 +1,863 @@
// -*- mode: cpp; mode: fold -*-
// Description /*{{{*/
/* ######################################################################
Acquire - File Acquisition
This module contains the Acquire system. It is responsible for bringing
files into the local pathname space. It deals with URIs for files and
URI handlers responsible for downloading or finding the URIs.
Each file to download is represented by an Acquire::Item class subclassed
into a specialization. The Item class can add itself to several URI
acquire queues each prioritized by the download scheduler. When the
system is run the proper URI handlers are spawned and the acquire
queues are fed into the handlers by the scheduler until the queues are
empty. This allows for an Item to be downloaded from an alternate source
if the first try turns out to fail. It also allows concurrent downloading
of multiple items from multiple sources as well as dynamic balancing
of load between the sources.
Scheduling of downloads is done on a first-ask, first-get basis. This
preserves the order of the downloads as much as possible and means that
the fastest source will tend to process the largest number of files.
Internal methods and queues for performing gzip decompression,
md5sum hashing and file copying are provided to allow items to apply
a number of transformations to the data files they are working with.
##################################################################### */
/*}}}*/
/** \defgroup acquire Acquire system {{{
*
* \brief The Acquire system is responsible for retrieving files from
* local or remote URIs and postprocessing them (for instance,
* verifying their authenticity). The core class in this system is
* pkgAcquire, which is responsible for managing the download queues
* during the download. There is at least one download queue for
* each supported protocol; protocols such as http may provide one
* queue per host.
*
* Each file to download is represented by a subclass of
* pkgAcquire::Item. The files add themselves to the download
* queue(s) by providing their URI information to
* pkgAcquire::Item::QueueURI, which calls pkgAcquire::Enqueue.
*
* Once the system is set up, the Run method will spawn subprocesses
* to handle the enqueued URIs; the scheduler will then take items
* from the queues and feed them into the handlers until the queues
* are empty.
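*
* A minimal (hypothetical) client-side sketch; concrete pkgAcquire::Item
* subclasses enqueue themselves with their owning pkgAcquire from their
* constructors, so none are spelled out here:
*
* <pre>
* pkgAcquireStatus *Progress = CreateStatusObject(); // user-provided monitor
* pkgAcquire Fetcher(Progress);
* // ... construct one or more pkgAcquire::Item subclasses owned by Fetcher ...
* if (Fetcher.Run() != pkgAcquire::Continue)
*    return false; // something failed or the user cancelled
* </pre>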
*
* \todo Acquire supports inserting an object into several queues at
* once, but it is not clear what its behavior in this case is, and
* no subclass of pkgAcquire::Item seems to actually use this
* capability.
*/ /*}}}*/
/** \addtogroup acquire
*
* @{
*
* \file acquire.h
*/
#ifndef PKGLIB_ACQUIRE_H
#define PKGLIB_ACQUIRE_H
#include <apt-pkg/hashes.h>
#include <apt-pkg/macros.h>
#include <apt-pkg/weakptr.h>
#include <string>
#include <vector>
#include <stddef.h>
#include <sys/select.h>
#include <sys/time.h>
class pkgAcquireStatus;
class metaIndex;
/** \brief The core download scheduler. {{{
*
* This class represents an ongoing download. It manages the lists
* of active and pending downloads and handles setting up and tearing
* down download-related structures.
*
* \todo Why all the protected data items and methods?
*/
class APT_PUBLIC pkgAcquire
{
private:
/** \brief FD of the Lock file we acquire in Setup (if any) */
int LockFD;
/** \brief dpointer placeholder (for later in case we need it) */
void * const d;
public:
class Item;
class Queue;
class Worker;
struct MethodConfig;
struct ItemDesc;
friend class Item;
friend class pkgAcqMetaBase;
friend class Queue;
typedef std::vector<Item *>::iterator ItemIterator;
typedef std::vector<Item *>::const_iterator ItemCIterator;
protected:
/** \brief A list of items to download.
*
* This is built monotonically as items are created and only
* emptied when the download shuts down.
*/
std::vector<Item *> Items;
/** \brief The head of the list of active queues.
*
* \todo why a hand-managed list of queues instead of std::list or
* std::set?
*/
Queue *Queues;
/** \brief The head of the list of active workers.
*
* \todo why a hand-managed list of workers instead of std::list
* or std::set?
*/
Worker *Workers;
/** \brief The head of the list of acquire method configurations.
*
* Each protocol (http, ftp, gzip, etc) via which files can be
* fetched can have a representation in this list. The
* configuration data is filled in by parsing the 100 Capabilities
* string output by a method on startup (see
* pkgAcqMethod::pkgAcqMethod and pkgAcquire::GetConfig).
*
* \todo why a hand-managed config dictionary instead of std::map?
*/
MethodConfig *Configs;
/** \brief The progress indicator for this download. */
pkgAcquireStatus *Log;
/** \brief The number of files which are to be fetched. */
unsigned long ToFetch;
// Configurable parameters for the scheduler
/** \brief Represents the queuing strategy for remote URIs. */
enum QueueStrategy {
/** \brief Generate one queue for each protocol/host combination; downloads from
* multiple hosts can proceed in parallel.
*/
QueueHost,
/** \brief Generate a single queue for each protocol; serialize
* downloads from multiple hosts.
*/
QueueAccess} QueueMode;
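// For illustration: with QueueHost, downloads from http://a.example.org and
// http://b.example.org (hypothetical hosts) proceed in two parallel queues,
// while QueueAccess serializes both through a single queue for "http".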
/** \brief If \b true, debugging information will be dumped to std::clog. */
bool const Debug;
/** \brief If \b true, a download is currently in progress. */
bool Running;
/** \brief Add the given item to the list of items. */
void Add(Item *Item);
/** \brief Remove the given item from the list of items. */
void Remove(Item *Item);
/** \brief Add the given worker to the list of workers. */
void Add(Worker *Work);
/** \brief Remove the given worker from the list of workers. */
void Remove(Worker *Work);
/** \brief Insert the given fetch request into the appropriate queue.
*
* \param Item The URI to download and the item to download it
* for. Copied by value into the queue; no reference to Item is
* retained.
*/
void Enqueue(ItemDesc &Item);
/** \brief Remove all fetch requests for this item from all queues. */
void Dequeue(Item *Item);
/** \brief Determine the fetch method and queue of a URI.
*
* \param URI The URI to fetch.
*
* \param[out] Config A location in which to place the method via
* which the URI is to be fetched.
*
* \return the string-name of the queue in which a fetch request
* for the given URI should be placed.
*/
std::string QueueName(std::string URI,MethodConfig const *&Config);
/** \brief Build up the set of file descriptors upon which select() should
* block.
*
* The default implementation inserts the file descriptors
* corresponding to active downloads.
*
* \param[out] Fd The largest file descriptor in the generated sets.
*
* \param[out] RSet The set of file descriptors that should be
* watched for input.
*
* \param[out] WSet The set of file descriptors that should be
* watched for output.
*/
virtual void SetFds(int &Fd,fd_set *RSet,fd_set *WSet);
/** Handle input from and output to file descriptors which select()
* has determined are ready. The default implementation
* dispatches to all active downloads.
*
* \param RSet The set of file descriptors that are ready for
* input.
*
* \param WSet The set of file descriptors that are ready for
* output.
*
* \return false if there is an error condition on one of the fds
*/
virtual bool RunFds(fd_set *RSet,fd_set *WSet);
/** \brief Check for idle queues with ready-to-fetch items.
*
* Called by pkgAcquire::Queue::Done each time an item is dequeued
* but remains on some queues; i.e., another queue should start
* fetching it.
*/
void Bump();
public:
/** \brief Retrieve information about a fetch method by name.
*
* \param Access The name of the method to look up.
*
* \return the method whose name is Access, or \b NULL if no such method exists.
*/
MethodConfig *GetConfig(std::string Access);
/** \brief Provides information on how a download terminated. */
enum RunResult {
/** \brief All files were fetched successfully. */
Continue,
/** \brief Some files failed to download. */
Failed,
/** \brief The download was cancelled by the user (i.e., #Log's
* pkgAcquireStatus::Pulse() method returned \b false).
*/
Cancelled};
/** \brief Download all the items that have been Add()ed to this
* download process.
*
* This method will block until the download completes, invoking
* methods on #Log to report on the progress of the download.
*
* \param PulseInterval The method pkgAcquireStatus::Pulse will be
* invoked on #Log at intervals of PulseInterval microseconds.
*
* \return the result of the download.
*/
RunResult Run(int PulseInterval=500000);
/** \brief Remove all items from this download process, terminate
* all download workers, and empty all queues.
*/
void Shutdown();
/** \brief Get the first Worker object.
*
* \return the first active worker in this download process.
*/
inline Worker *WorkersBegin() {return Workers;};
/** \brief Advance to the next Worker object.
*
* \return the worker immediately following I, or \b NULL if none
* exists.
*/
Worker *WorkerStep(Worker *I) APT_PURE;
/** \brief Get the head of the list of items. */
inline ItemIterator ItemsBegin() {return Items.begin();};
inline ItemCIterator ItemsBegin() const {return Items.begin();};
/** \brief Get the end iterator of the list of items. */
inline ItemIterator ItemsEnd() {return Items.end();};
inline ItemCIterator ItemsEnd() const {return Items.end();};
// Iterate over queued Item URIs
class UriIterator;
/** \brief Get the head of the list of enqueued item URIs.
*
* This iterator will step over every element of every active
* queue.
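*
* A typical traversal (Fetcher standing in for a pkgAcquire object):
*
* <pre>
* for (pkgAcquire::UriIterator I = Fetcher.UriBegin(); I != Fetcher.UriEnd(); ++I)
*    std::clog << I->URI << std::endl;
* </pre>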
*/
UriIterator UriBegin();
/** \brief Get the end iterator of the list of enqueued item URIs. */
UriIterator UriEnd();
/** Deletes each entry in the given directory that is not being
* downloaded by this object. For instance, when downloading new
* list files, calling Clean() will delete the old ones.
*
* \param Dir The directory to be cleaned out.
*
* \return \b true if the directory exists and is readable.
*/
bool Clean(std::string Dir);
/** \return the total size in bytes of all the items included in
* this download.
*/
unsigned long long TotalNeeded();
/** \return the size in bytes of all non-local items included in
* this download.
*/
unsigned long long FetchNeeded();
/** \return the amount of data to be fetched that is already
* present on the filesystem.
*/
unsigned long long PartialPresent();
void SetLog(pkgAcquireStatus *Progress) { Log = Progress; }
/** \brief acquire lock and perform directory setup
*
* \param Lock defines a lock file that should be acquired to ensure
* only one Acquire class is in action at a time, or an empty string
* if no lock file should be used. If set, all needed directories
* will also be created and set up.
*/
bool GetLock(std::string const &Lock);
/** \brief Construct a new pkgAcquire. */
explicit pkgAcquire(pkgAcquireStatus *Log);
pkgAcquire();
/** \brief Destroy this pkgAcquire object.
*
* Destroys all queue, method, and item objects associated with
* this download.
*/
virtual ~pkgAcquire();
private:
APT_HIDDEN void Initialize();
};
/** \brief Represents a single download source from which an item
* should be downloaded.
*
* An item may have several associated ItemDescs over its lifetime.
*/
struct APT_PUBLIC pkgAcquire::ItemDesc : public WeakPointable
{
/** \brief URI from which to download this item. */
std::string URI;
/** \brief description of this item. */
std::string Description;
/** \brief shorter description of this item. */
std::string ShortDesc;
/** \brief underlying item which is to be downloaded. */
Item *Owner;
};
/*}}}*/
/** \brief A single download queue in a pkgAcquire object. {{{
*
* \todo Why so many protected values?
*/
class APT_PUBLIC pkgAcquire::Queue
{
friend class pkgAcquire;
friend class pkgAcquire::UriIterator;
friend class pkgAcquire::Worker;
/** \brief dpointer placeholder (for later in case we need it) */
void * const d;
/** \brief The next queue in the pkgAcquire object's list of queues. */
Queue *Next;
protected:
/** \brief A single item placed in this queue. */
struct QItem : public ItemDesc
{
/** \brief The next item in the queue. */
QItem *Next;
/** \brief The worker associated with this item, if any. */
pkgAcquire::Worker *Worker;
/** \brief The underlying items interested in the download */
std::vector<Item*> Owners;
/** \brief How many bytes of the file have been downloaded. Zero
* if the current progress of the file cannot be determined.
*/
unsigned long long CurrentSize = 0;
/** \brief The total number of bytes to be downloaded. Zero if the
* total size of the file is unknown.
*/
unsigned long long TotalSize = 0;
/** \brief How much of the file was already downloaded prior to
* starting this worker.
*/
unsigned long long ResumePoint = 0;
typedef std::vector<Item*>::const_iterator owner_iterator;
/** \brief Assign the ItemDesc portion of this QItem from
* another ItemDesc
*/
void operator =(pkgAcquire::ItemDesc const &I)
{
URI = I.URI;
Description = I.Description;
ShortDesc = I.ShortDesc;
Owners.clear();
Owners.push_back(I.Owner);
Owner = I.Owner;
};
/** @return the sum of all expected hashes by all owners */
HashStringList GetExpectedHashes() const;
/** @return smallest maximum size of all owners */
unsigned long long GetMaximumSize() const;
/** \brief get partial files in order */
void SyncDestinationFiles() const;
/** @return the custom headers to use for this item */
std::string Custom600Headers() const;
/** @return the maximum priority of this item */
int APT_HIDDEN GetPriority() const;
};
/** \brief The name of this queue. */
std::string Name;
/** \brief The head of the list of items contained in this queue.
*
* \todo why a by-hand list instead of an STL structure?
*/
QItem *Items;
/** \brief The head of the list of workers associated with this queue.
*
* \todo This is plural because support exists in Queue for
* multiple workers. However, it does not appear that there is
* any way to actually associate more than one worker with a
* queue.
*
* \todo Why not just use a std::set?
*/
pkgAcquire::Worker *Workers;
/** \brief the download scheduler with which this queue is associated. */
pkgAcquire *Owner;
/** \brief The number of entries in this queue that are currently
* being downloaded.
*/
signed long PipeDepth;
/** \brief The maximum number of entries that this queue will
* attempt to download at once.
*/
unsigned long MaxPipeDepth;
public:
/** \brief Insert the given fetch request into this queue.
*
* \return \b true if the queuing was successful. May return
* \b false if the Item is already in the queue
*/
bool Enqueue(ItemDesc &Item);
/** \brief Remove all fetch requests for the given item from this queue.
*
* \return \b true if at least one request was removed from the queue.
*/
bool Dequeue(Item *Owner);
/** \brief Locate an item in this queue.
*
* \param URI A URI to match against.
* \param Owner A pkgAcquire::Worker to match against.
*
* \return the first item in the queue whose URI is #URI and that
* is being downloaded by #Owner.
*/
QItem *FindItem(std::string URI,pkgAcquire::Worker *Owner) APT_PURE;
/** Presumably this should start downloading an item?
*
* \todo Unimplemented. Implement it or remove?
*/
bool ItemStart(QItem *Itm,unsigned long long Size);
/** \brief Remove the given item from this queue and set its state
* to pkgAcquire::Item::StatDone.
*
* If this is the only queue containing the item, the item is also
* removed from the main queue by calling pkgAcquire::Dequeue.
*
* \param Itm The item to remove.
*
* \return \b true if no errors are encountered.
*/
bool ItemDone(QItem *Itm);
/** \brief Start the worker process associated with this queue.
*
* If a worker process is already associated with this queue,
* this is equivalent to calling Cycle().
*
* \return \b true if the startup was successful.
*/
bool Startup();
/** \brief Shut down the worker process associated with this queue.
*
* \param Final If \b true, then the process is stopped unconditionally.
* Otherwise, it is only stopped if it does not need cleanup
* as indicated by the pkgAcqMethod::NeedsCleanup member of
* its configuration.
*
* \return \b true.
*/
bool Shutdown(bool Final);
/** \brief Send idle items to the worker process.
*
* Fills up the pipeline by inserting idle items into the worker's queue.
*/
bool Cycle();
/** \brief Check for items that could be enqueued.
*
* Call this after an item placed in multiple queues has gone from
* the pkgAcquire::Item::StatFetching state to the
* pkgAcquire::Item::StatIdle state, to possibly refill an empty queue.
* This is an alias for Cycle().
*
* \todo Why both this and Cycle()? Are they expected to be
* different someday?
*/
void Bump();
/** \brief Create a new Queue.
*
* \param Name The name of the new queue.
* \param Owner The download process that owns the new queue.
*/
Queue(std::string const &Name,pkgAcquire * const Owner);
/** Shut down all the worker processes associated with this queue
* and empty the queue.
*/
virtual ~Queue();
};
/*}}}*/
/** \brief Iterates over all the URIs being fetched by a pkgAcquire object. {{{*/
class APT_PUBLIC pkgAcquire::UriIterator
{
/** \brief dpointer placeholder (for later in case we need it) */
void * const d;
/** The next queue to iterate over. */
pkgAcquire::Queue *CurQ;
/** The item that we currently point at. */
pkgAcquire::Queue::QItem *CurItem;
public:
inline void operator ++() {operator ++(0);};
void operator ++(int)
{
CurItem = CurItem->Next;
while (CurItem == 0 && CurQ != 0)
{
CurItem = CurQ->Items;
CurQ = CurQ->Next;
}
};
inline pkgAcquire::Queue::QItem const *operator ->() const {return CurItem;};
inline bool operator !=(UriIterator const &rhs) const {return rhs.CurQ != CurQ || rhs.CurItem != CurItem;};
inline bool operator ==(UriIterator const &rhs) const {return rhs.CurQ == CurQ && rhs.CurItem == CurItem;};
/** \brief Create a new UriIterator.
*
* \param Q The queue over which this UriIterator should iterate.
*/
explicit UriIterator(pkgAcquire::Queue *Q);
virtual ~UriIterator();
};
/*}}}*/
/** \brief Information about the properties of a single acquire method. {{{*/
struct APT_PUBLIC pkgAcquire::MethodConfig
{
class Private;
/** \brief dpointer placeholder (for later in case we need it) */
Private *const d;
/** \brief The next link on the acquire method list.
*
* \todo Why not an STL container?
*/
MethodConfig *Next;
/** \brief The name of this acquire method (e.g., http). */
std::string Access;
/** \brief The implementation version of this acquire method. */
std::string Version;
/** \brief If \b true, only one download queue should be created for this
* method.
*/
bool SingleInstance;
/** \brief If \b true, this method supports pipelined downloading. */
bool Pipeline;
/** \brief If \b true, the worker process should send the entire
* APT configuration tree to the fetch subprocess when it starts
* up.
*/
bool SendConfig;
/** \brief If \b true, this fetch method does not require network access;
* all files are to be acquired from the local disk.
*/
bool LocalOnly;
/** \brief If \b true, the subprocess has to carry out some cleanup
* actions before shutting down.
*
* For instance, the cdrom method needs to unmount the CD after it
* finishes.
*/
bool NeedsCleanup;
/** \brief If \b true, this fetch method acquires files from removable media. */
bool Removable;
/** \brief Set up the default method parameters.
*
* All fields are initialized to NULL, "", or \b false as
* appropriate.
*/
MethodConfig();
APT_HIDDEN bool GetAuxRequests() const;
APT_HIDDEN void SetAuxRequests(bool const value);
virtual ~MethodConfig();
};
/*}}}*/
/** \brief A monitor object for downloads controlled by the pkgAcquire class. {{{
*
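* A minimal (hypothetical) concrete monitor only has to provide
* MediaChange(), the sole pure virtual member; every other callback has a
* usable default implementation:
*
* <pre>
* class SilentStatus : public pkgAcquireStatus
* {
*    public:
*    virtual bool MediaChange(std::string, std::string) APT_OVERRIDE
*    { return false; }
* };
* </pre>
*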
* \todo Why protected members?
*/
class APT_PUBLIC pkgAcquireStatus
{
/** \brief dpointer placeholder (for later in case we need it) */
void * const d;
protected:
/** \brief The last time at which this monitor object was updated. */
struct timeval Time;
/** \brief The time at which the download started. */
struct timeval StartTime;
/** \brief The number of bytes fetched as of the previous call to
* pkgAcquireStatus::Pulse, including local items.
*/
unsigned long long LastBytes;
/** \brief The current rate of download as of the most recent call
* to pkgAcquireStatus::Pulse, in bytes per second.
*/
unsigned long long CurrentCPS;
/** \brief The number of bytes fetched as of the most recent call
* to pkgAcquireStatus::Pulse, including local items.
*/
unsigned long long CurrentBytes;
/** \brief The total number of bytes that need to be fetched.
*
* \warning This member is inaccurate, as new items might be
* enqueued while the download is in progress!
*/
unsigned long long TotalBytes;
/** \brief The total number of bytes accounted for by items that
* were successfully fetched.
*/
unsigned long long FetchedBytes;
/** \brief The amount of time that has elapsed since the download
* started.
*/
unsigned long long ElapsedTime;
/** \brief The total number of items that need to be fetched.
*
* \warning This member is inaccurate, as new items might be
* enqueued while the download is in progress!
*/
unsigned long TotalItems;
/** \brief The number of items that have been successfully downloaded. */
unsigned long CurrentItems;
/** \brief The estimated percentage of the download (0-100)
*/
double Percent;
public:
/** \brief If \b true, the download scheduler should call Pulse()
* at the next available opportunity.
*/
bool Update;
/** \brief If \b true, extra Pulse() invocations will be performed.
*
* With this option set, Pulse() will be called every time that a
* download item starts downloading, finishes downloading, or
* terminates with an error.
*/
bool MorePulses;
/** \brief Invoked when a local or remote file has been completely fetched.
*
* \param Size The size of the file fetched.
*
* \param ResumePoint How much of the file was already fetched.
*/
virtual void Fetched(unsigned long long Size,unsigned long long ResumePoint);
/** \brief Invoked when the user should be prompted to change the
* inserted removable media.
*
* This method should not return until the user has confirmed to
* the user interface that the media change is complete.
*
* \param Media The name of the media type that should be changed.
*
* \param Drive The identifying name of the drive whose media
* should be changed.
*
* \return \b true if the user confirms the media change, \b
* false if it is cancelled.
*
* \todo This is a horrible blocking monster; it should be CPSed
* with prejudice.
*/
virtual bool MediaChange(std::string Media,std::string Drive) = 0;
struct ReleaseInfoChange
{
std::string Type; /*!< Type of the change like "Origin", "Codename", "Version", … */
std::string From; /*!< old value */
std::string To; /*!< new value */
std::string Message; /*!< translated message describing the change */
bool DefaultAction; /*!< true if the change is informational, false if it must be explicitly confirmed */
};
/** \brief ask the user for confirmation of changes to infos about a repository
*
* This method should present the user with a choice of accepting the change
* or not and indicate the user's decision via the return value. If DefaultAction is true
* it is acceptable to only notify the user about the change, but to accept the change
* automatically on behalf of the user.
*
* The default implementation will fail if any Change has DefaultAction == false. Regardless of
* success it will print for each change the message attached to it via GlobalError either as an
* error (if DefaultAction == false) or as a notice otherwise.
*
* @param LastRelease can be used to extract further information from the previous Release file
* @param CurrentRelease can be used to extract further information from the current Release file
* @param Changes is an array of changes alongside explanatory messages
* which should be presented in some way to the user.
* @return \b true if all changes are accepted by the user; \b false otherwise or if the user can't be asked
*/
virtual bool ReleaseInfoChanges(metaIndex const * const LastRelease, metaIndex const * const CurrentRelease, std::vector<ReleaseInfoChange> &&Changes);
APT_HIDDEN static bool ReleaseInfoChangesAsGlobalErrors(std::vector<ReleaseInfoChange> &&Changes);
/** \brief Invoked when an item is confirmed to be up-to-date.
* For instance, when an HTTP download is informed that the file on
* the server was not modified.
*/
virtual void IMSHit(pkgAcquire::ItemDesc &/*Itm*/) {};
/** \brief Invoked when some of an item's data is fetched. */
virtual void Fetch(pkgAcquire::ItemDesc &/*Itm*/) {};
/** \brief Invoked when an item is successfully and completely fetched. */
virtual void Done(pkgAcquire::ItemDesc &/*Itm*/) {};
/** \brief Invoked when the process of fetching an item encounters
* a fatal error.
*/
virtual void Fail(pkgAcquire::ItemDesc &/*Itm*/) {};
/** \brief Periodically invoked while the Acquire process is underway.
*
* Subclasses should first call pkgAcquireStatus::Pulse(), then
* update their status output. The download process is blocked
* while Pulse() is being called.
*
* \return \b false if the user asked to cancel the whole Acquire process.
*
* \see pkgAcquire::Run
*/
virtual bool Pulse(pkgAcquire *Owner);
/** \brief Invoked when the Acquire process starts running. */
virtual void Start();
/** \brief Invoked when the Acquire process stops running. */
virtual void Stop();
/** \brief Initialize all counters to 0 and the time to the current time. */
pkgAcquireStatus();
virtual ~pkgAcquireStatus();
};
/*}}}*/
/** @} */
#endif

1392
apt-pkg/algorithms.cc Normal file

File diff suppressed because it is too large

150
apt-pkg/algorithms.h Normal file

@ -0,0 +1,150 @@
// -*- mode: cpp; mode: fold -*-
// Description /*{{{*/
/* ######################################################################
Algorithms - A set of misc algorithms
This simulate class displays what the ordering code has done and
analyses it with a fresh new dependency cache. In this way we can
see all of the effects of an upgrade run.
pkgDistUpgrade computes an upgrade that causes as many packages as
possible to move to the newest version.
pkgApplyStatus sets the target state based on the content of the status
field in the status file. This is important for proper crash recovery.
pkgFixBroken corrects a broken system so that it is in a sane state.
pkgAllUpgrade attempts to upgrade as many packages as possible but
without installing new packages.
The problem resolver class contains a number of complex algorithms
to try to best-guess an upgrade state. It solves the problem of
maximizing the number of install state packages while having no broken
packages.
##################################################################### */
/*}}}*/
#ifndef PKGLIB_ALGORITHMS_H
#define PKGLIB_ALGORITHMS_H
#include <apt-pkg/depcache.h>
#include <apt-pkg/packagemanager.h>
#include <apt-pkg/pkgcache.h>
#include <iostream>
#include <string>
#include <apt-pkg/macros.h>
class pkgSimulatePrivate;
class APT_PUBLIC pkgSimulate : public pkgPackageManager /*{{{*/
{
pkgSimulatePrivate * const d;
protected:
class APT_PUBLIC Policy : public pkgDepCache::Policy
{
pkgDepCache *Cache;
public:
virtual VerIterator GetCandidateVer(PkgIterator const &Pkg) APT_OVERRIDE
{
return (*Cache)[Pkg].CandidateVerIter(*Cache);
}
explicit Policy(pkgDepCache *Cache) : Cache(Cache) {};
};
unsigned char *Flags;
Policy iPolicy;
pkgDepCache Sim;
pkgDepCache::ActionGroup group;
// The Actual installation implementation
virtual bool Install(PkgIterator Pkg,std::string File) APT_OVERRIDE;
virtual bool Configure(PkgIterator Pkg) APT_OVERRIDE;
virtual bool Remove(PkgIterator Pkg,bool Purge) APT_OVERRIDE;
public:
bool Go(APT::Progress::PackageManager * progress) override;
private:
APT_HIDDEN void ShortBreaks();
APT_HIDDEN void Describe(PkgIterator iPkg,std::ostream &out,bool Current,bool Candidate);
APT_HIDDEN bool RealInstall(PkgIterator Pkg,std::string File);
APT_HIDDEN bool RealConfigure(PkgIterator Pkg);
APT_HIDDEN bool RealRemove(PkgIterator Pkg,bool Purge);
public:
explicit pkgSimulate(pkgDepCache *Cache);
virtual ~pkgSimulate();
};
/*}}}*/
class APT_PUBLIC pkgProblemResolver /*{{{*/
{
private:
/** \brief dpointer placeholder (for later in case we need it) */
void * const d;
pkgDepCache &Cache;
typedef pkgCache::PkgIterator PkgIterator;
typedef pkgCache::VerIterator VerIterator;
typedef pkgCache::DepIterator DepIterator;
typedef pkgCache::PrvIterator PrvIterator;
typedef pkgCache::Version Version;
typedef pkgCache::Package Package;
enum Flags {Protected = (1 << 0), PreInstalled = (1 << 1),
Upgradable = (1 << 2), ReInstateTried = (1 << 3),
ToRemove = (1 << 4)};
int *Scores;
unsigned char *Flags;
bool Debug;
// Sort stuff
APT_HIDDEN int ScoreSort(Package const *A, Package const *B) APT_PURE;
struct APT_PUBLIC PackageKill
{
PkgIterator Pkg;
DepIterator Dep;
};
APT_HIDDEN void MakeScores();
APT_HIDDEN bool DoUpgrade(pkgCache::PkgIterator Pkg);
protected:
bool InstOrNewPolicyBroken(pkgCache::PkgIterator Pkg);
public:
inline void Protect(pkgCache::PkgIterator Pkg) {Flags[Pkg->ID] |= Protected; Cache.MarkProtected(Pkg);};
inline void Remove(pkgCache::PkgIterator Pkg) {Flags[Pkg->ID] |= ToRemove;};
inline void Clear(pkgCache::PkgIterator Pkg) {Flags[Pkg->ID] &= ~(Protected | ToRemove);};
// Try to intelligently resolve problems by installing and removing packages
bool Resolve(bool BrokenFix = false, OpProgress * const Progress = NULL);
APT_HIDDEN bool ResolveInternal(bool const BrokenFix = false);
// Try to resolve problems only by using keep
bool ResolveByKeep(OpProgress * const Progress = NULL);
APT_HIDDEN bool ResolveByKeepInternal();
explicit pkgProblemResolver(pkgDepCache *Cache);
virtual ~pkgProblemResolver();
};
/*}}}*/
APT_PUBLIC bool pkgApplyStatus(pkgDepCache &Cache);
APT_PUBLIC bool pkgFixBroken(pkgDepCache &Cache);
APT_PUBLIC void pkgPrioSortList(pkgCache &Cache,pkgCache::Version **List);
#endif
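A minimal usage sketch (editor's addition, not part of this header): driving pkgProblemResolver together with the helper functions declared above to repair a broken system. It assumes pkgCacheFile from apt-pkg/cachefile.h and pkgCache::FindPkg() from apt-pkg/pkgcache.h; protecting the "apt" package is purely illustrative.

#include <apt-pkg/algorithms.h>
#include <apt-pkg/cachefile.h>

bool ExampleFixBroken(pkgCacheFile &CacheFile)
{
   pkgDepCache *Dep = CacheFile.GetDepCache();
   if (Dep == nullptr)
      return false;
   // Recover from an interrupted run, then mark broken packages for repair.
   if (pkgApplyStatus(*Dep) == false || pkgFixBroken(*Dep) == false)
      return false;
   pkgProblemResolver Fix(Dep);
   // Keep apt itself untouched while resolving (illustrative choice).
   pkgCache::PkgIterator const Apt = Dep->GetCache().FindPkg("apt");
   if (Apt.end() == false)
      Fix.Protect(Apt);
   return Fix.Resolve(true);
}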

8
apt-pkg/apt-pkg.pc.in Normal file
View File

@ -0,0 +1,8 @@
libdir=@CMAKE_INSTALL_FULL_LIBDIR@
includedir=@CMAKE_INSTALL_FULL_INCLUDEDIR@
Name: apt-pkg
Description: package management runtime library
Version: @PROJECT_VERSION@
Libs: -L${libdir} -lapt-pkg -pthread
Cflags: -I${includedir}

483
apt-pkg/aptconfiguration.cc Normal file
View File

@ -0,0 +1,483 @@
// -*- mode: cpp; mode: fold -*-
// Description /*{{{*/
/* ######################################################################
Provide access methods to various configuration settings,
set up defaults and return validated settings.
##################################################################### */
/*}}}*/
// Include Files /*{{{*/
#include <config.h>
#include <apt-pkg/aptconfiguration.h>
#include <apt-pkg/configuration.h>
#include <apt-pkg/error.h>
#include <apt-pkg/fileutl.h>
#include <apt-pkg/macros.h>
#include <apt-pkg/pkgsystem.h>
#include <apt-pkg/strutl.h>
#include <algorithm>
#include <string>
#include <vector>
#include <ctype.h>
#include <dirent.h>
#include <fcntl.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
/*}}}*/
namespace APT {
// setDefaultConfigurationForCompressors /*{{{*/
static void setDefaultConfigurationForCompressors() {
// Set default application paths to check for optional compression types
_config->CndSet("Dir::Bin::gzip", "/bin/gzip");
_config->CndSet("Dir::Bin::bzip2", "/bin/bzip2");
_config->CndSet("Dir::Bin::xz", "/usr/bin/xz");
_config->CndSet("Dir::Bin::lz4", "/usr/bin/lz4");
_config->CndSet("Dir::Bin::zstd", "/usr/bin/zstd");
if (FileExists(_config->Find("Dir::Bin::xz")) == true) {
_config->Set("Dir::Bin::lzma", _config->Find("Dir::Bin::xz"));
_config->Set("APT::Compressor::lzma::Binary", "xz");
if (_config->Exists("APT::Compressor::lzma::CompressArg") == false) {
_config->Set("APT::Compressor::lzma::CompressArg::", "--format=lzma");
_config->Set("APT::Compressor::lzma::CompressArg::", "-6");
}
if (_config->Exists("APT::Compressor::lzma::UncompressArg") == false) {
_config->Set("APT::Compressor::lzma::UncompressArg::", "--format=lzma");
_config->Set("APT::Compressor::lzma::UncompressArg::", "-d");
}
} else {
_config->CndSet("Dir::Bin::lzma", "/usr/bin/lzma");
if (_config->Exists("APT::Compressor::lzma::CompressArg") == false) {
_config->Set("APT::Compressor::lzma::CompressArg::", "--suffix=");
_config->Set("APT::Compressor::lzma::CompressArg::", "-6");
}
if (_config->Exists("APT::Compressor::lzma::UncompressArg") == false) {
_config->Set("APT::Compressor::lzma::UncompressArg::", "--suffix=");
_config->Set("APT::Compressor::lzma::UncompressArg::", "-d");
}
}
// setup the defaults for the compressiontypes => method mapping
_config->CndSet("Acquire::CompressionTypes::xz","xz");
_config->CndSet("Acquire::CompressionTypes::bz2","bzip2");
_config->CndSet("Acquire::CompressionTypes::lzma","lzma");
_config->CndSet("Acquire::CompressionTypes::gz","gzip");
_config->CndSet("Acquire::CompressionTypes::lz4","lz4");
_config->CndSet("Acquire::CompressionTypes::zst", "zstd");
}
/*}}}*/
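// Editor's illustrative sketch (not upstream code): the trailing "::" in the
// Set() calls above appends another element to a configuration list, and
// FindVector() on the parent node returns those elements in order.  Under
// that assumption the lzma fallback configured above ends up invoking
// "xz --format=lzma -6".  Only headers already included in this file are used.
static std::vector<std::string> exampleLzmaCompressArgs()
{
   ::Configuration Conf;
   Conf.Set("APT::Compressor::lzma::CompressArg::", "--format=lzma");
   Conf.Set("APT::Compressor::lzma::CompressArg::", "-6");
   // Expected result: {"--format=lzma", "-6"}
   return Conf.FindVector("APT::Compressor::lzma::CompressArg");
}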
// getCompressionTypes - Return Vector of usable compressiontypes /*{{{*/
// ---------------------------------------------------------------------
/* return a vector of compression types in the preferred order. */
std::vector<std::string>
const Configuration::getCompressionTypes(bool const &Cached) {
static std::vector<std::string> types;
if (types.empty() == false) {
if (Cached == true)
return types;
else
types.clear();
}
std::vector<APT::Configuration::Compressor> const compressors = getCompressors();
// load the order setting into our vector
std::vector<std::string> const order = _config->FindVector("Acquire::CompressionTypes::Order");
for (std::vector<std::string>::const_iterator o = order.begin();
o != order.end(); ++o) {
if ((*o).empty() == true)
continue;
// ignore types we have no method ready to use
std::string const method = std::string("Acquire::CompressionTypes::").append(*o);
if (_config->Exists(method) == false)
continue;
// ignore types we have no app ready to use
std::string const app = _config->Find(method);
if (std::find_if(compressors.begin(), compressors.end(), [&app](APT::Configuration::Compressor const &c) {
return c.Name == app;
}) == compressors.end())
continue;
types.push_back(*o);
}
// move again over the option tree to add all missing compression types
::Configuration::Item const *Types = _config->Tree("Acquire::CompressionTypes");
if (Types != 0)
Types = Types->Child;
for (; Types != 0; Types = Types->Next) {
if (Types->Tag == "Order" || Types->Tag.empty() == true)
continue;
// ignore types we already have in the vector
if (std::find(types.begin(),types.end(),Types->Tag) != types.end())
continue;
// ignore types we have no app ready to use
if (std::find_if(compressors.begin(), compressors.end(), [&Types](APT::Configuration::Compressor const &c) {
return c.Name == Types->Value;
}) == compressors.end())
continue;
types.push_back(Types->Tag);
}
// add the special "uncompressed" type
if (std::find(types.begin(), types.end(), "uncompressed") == types.end())
{
std::string const uncompr = _config->Find("Dir::Bin::uncompressed", "");
if (uncompr.empty() == true || FileExists(uncompr) == true)
types.push_back("uncompressed");
}
return types;
}
/*}}}*/
// GetLanguages - Return Vector of Language Codes /*{{{*/
// ---------------------------------------------------------------------
/* return a vector of language codes in the preferred order.
the special word "environment" will be replaced with the long and the short
code of the locale settings, and it is ensured that this will not add
duplicates. So in a German locale the setting "environment, de_DE, en, de"
will result in "de_DE, de, en".
The special word "none" is the stopcode for the not-All code vector */
std::vector<std::string> const Configuration::getLanguages(bool const &All,
bool const &Cached, char const ** const Locale) {
using std::string;
// The detection is boring and has a lot of cornercases,
// so we cache the results to calculate them only once.
std::vector<string> static allCodes;
std::vector<string> static codes;
// we have something in the cache
if (codes.empty() == false || allCodes.empty() == false) {
if (Cached == true) {
if(All == true && allCodes.empty() == false)
return allCodes;
else
return codes;
} else {
allCodes.clear();
codes.clear();
}
}
// Include all Language codes we have a Translation file for in /var/lib/apt/lists
// so they will be all included in the Cache.
std::vector<string> builtin;
DIR *D = opendir(_config->FindDir("Dir::State::lists").c_str());
if (D != NULL) {
builtin.push_back("none");
for (struct dirent *Ent = readdir(D); Ent != 0; Ent = readdir(D)) {
string const name = SubstVar(Ent->d_name, "%5f", "_");
size_t const foundDash = name.rfind("-");
size_t const foundUnderscore = name.rfind("_", foundDash);
if (foundDash == string::npos || foundUnderscore == string::npos ||
foundDash <= foundUnderscore ||
name.substr(foundUnderscore+1, foundDash-(foundUnderscore+1)) != "Translation")
continue;
string const c = name.substr(foundDash+1);
if (unlikely(c.empty() == true) || c == "en")
continue;
// Skip unusual files, like backups or the like
string::const_iterator s = c.begin();
for (;s != c.end(); ++s) {
if (isalpha(*s) == 0 && *s != '_')
break;
}
if (s != c.end())
continue;
if (std::find(builtin.begin(), builtin.end(), c) != builtin.end())
continue;
builtin.push_back(c);
}
closedir(D);
}
// FIXME: Remove support for the old APT::Acquire::Translation
// it was undocumented and so it should not be very widely used
string const oldAcquire = _config->Find("APT::Acquire::Translation","");
if (oldAcquire.empty() == false && oldAcquire != "environment") {
// TRANSLATORS: the two %s are APT configuration options
_error->Notice("Option '%s' is deprecated. Please use '%s' instead, see 'man 5 apt.conf' for details.",
"APT::Acquire::Translation", "Acquire::Languages");
if (oldAcquire != "none")
codes.push_back(oldAcquire);
codes.push_back("en");
allCodes = codes;
for (std::vector<string>::const_iterator b = builtin.begin();
b != builtin.end(); ++b)
if (std::find(allCodes.begin(), allCodes.end(), *b) == allCodes.end())
allCodes.push_back(*b);
if (All == true)
return allCodes;
else
return codes;
}
// get the environment language codes: LC_MESSAGES (and later LANGUAGE)
// we extract both, a long and a short code and then we will
// check if we actually need both (rare) or if the short is enough
string const envMsg = string(Locale == 0 ? ::setlocale(LC_MESSAGES, NULL) : *Locale);
size_t const lenShort = (envMsg.find('_') != string::npos) ? envMsg.find('_') : 2;
size_t const lenLong = (envMsg.find_first_of(".@") != string::npos) ? envMsg.find_first_of(".@") : (lenShort + 3);
string const envLong = envMsg.substr(0,lenLong);
string const envShort = envLong.substr(0,lenShort);
// It is very likely we will need the environment codes later,
// so let us generate them now from LC_MESSAGES and LANGUAGE
std::vector<string> environment;
if (envShort != "C") {
// take care of LC_MESSAGES
if (envLong != envShort)
environment.push_back(envLong);
environment.push_back(envShort);
// take care of LANGUAGE
const char *language_env = getenv("LANGUAGE") == 0 ? "" : getenv("LANGUAGE");
string envLang = Locale == 0 ? language_env : *(Locale+1);
if (envLang.empty() == false) {
std::vector<string> env = VectorizeString(envLang,':');
short addedLangs = 0; // add a maximum of 3 fallbacks from the environment
for (std::vector<string>::const_iterator e = env.begin();
e != env.end() && addedLangs < 3; ++e) {
if (unlikely(e->empty() == true) || *e == "en")
continue;
if (*e == envLong || *e == envShort)
continue;
if (std::find(environment.begin(), environment.end(), *e) != environment.end())
continue;
++addedLangs;
environment.push_back(*e);
}
}
} else {
// cornercase: LANG=C, so we use only "en" Translation
environment.push_back("en");
}
std::vector<string> const lang = _config->FindVector("Acquire::Languages", "environment,en");
// the configs define the order, so add the environment
// when needed and ensure the codes are not listed twice.
bool noneSeen = false;
for (std::vector<string>::const_iterator l = lang.begin();
l != lang.end(); ++l) {
if (*l == "environment") {
for (std::vector<string>::const_iterator e = environment.begin();
e != environment.end(); ++e) {
if (std::find(allCodes.begin(), allCodes.end(), *e) != allCodes.end())
continue;
if (noneSeen == false)
codes.push_back(*e);
allCodes.push_back(*e);
}
continue;
} else if (*l == "none") {
noneSeen = true;
continue;
} else if (std::find(allCodes.begin(), allCodes.end(), *l) != allCodes.end())
continue;
if (noneSeen == false)
codes.push_back(*l);
allCodes.push_back(*l);
}
if (allCodes.empty() == false) {
for (std::vector<string>::const_iterator b = builtin.begin();
b != builtin.end(); ++b)
if (std::find(allCodes.begin(), allCodes.end(), *b) == allCodes.end())
allCodes.push_back(*b);
} else {
// "none" was forced
allCodes.push_back("none");
}
if (All == true)
return allCodes;
else
return codes;
}
/*}}}*/
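// Editor's sketch (not upstream code): exercising the Locale test hook of
// getLanguages() described above.  Index 0 stands in for LC_MESSAGES and
// index 1 for LANGUAGE, so with the default "Acquire::Languages" value of
// "environment,en" a German locale is expected to yield {"de_DE", "de", "en"}.
static std::vector<std::string> exampleGermanLanguages()
{
   char const *testLocale[] = {"de_DE.UTF-8", "de_DE:de:en"};
   return Configuration::getLanguages(false, false, testLocale);
}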
// checkLanguage - are we interested in the given Language? /*{{{*/
bool Configuration::checkLanguage(std::string Lang, bool const All) {
// the empty Language is always interesting as it is the original
if (Lang.empty() == true)
return true;
// filenames are encoded, so undo this
Lang = SubstVar(Lang, "%5f", "_");
std::vector<std::string> const langs = getLanguages(All, true);
return (std::find(langs.begin(), langs.end(), Lang) != langs.end());
}
/*}}}*/
// getArchitectures - Return Vector of preferred Architectures /*{{{*/
std::vector<std::string> const Configuration::getArchitectures(bool const &Cached) {
using std::string;
std::vector<string> static archs;
if (likely(Cached == true) && archs.empty() == false)
return archs;
string const arch = _config->Find("APT::Architecture");
archs = _config->FindVector("APT::Architectures");
if (archs.empty() == true && _system != nullptr)
archs = _system->ArchitecturesSupported();
if (archs.empty() == true ||
std::find(archs.begin(), archs.end(), arch) == archs.end())
archs.insert(archs.begin(), arch);
// erase duplicates and empty strings
for (std::vector<string>::reverse_iterator a = archs.rbegin();
a != archs.rend(); ++a) {
if (a->empty() == true || std::find(a + 1, archs.rend(), *a) != archs.rend())
archs.erase(a.base()-1);
if (a == archs.rend())
break;
}
return archs;
}
/*}}}*/
// checkArchitecture - are we interested in the given Architecture? /*{{{*/
bool Configuration::checkArchitecture(std::string const &Arch) {
if (Arch == "all")
return true;
std::vector<std::string> const archs = getArchitectures(true);
return (std::find(archs.begin(), archs.end(), Arch) != archs.end());
}
/*}}}*/
// getCompressors - Return Vector of usable compressors /*{{{*/
// ---------------------------------------------------------------------
/* return a vector of compressors used by apt-ftparchive in the
multicompress functionality or to detect data.tar files */
std::vector<APT::Configuration::Compressor>
const Configuration::getCompressors(bool const Cached) {
static std::vector<APT::Configuration::Compressor> compressors;
if (compressors.empty() == false) {
if (Cached == true)
return compressors;
else
compressors.clear();
}
setDefaultConfigurationForCompressors();
std::vector<std::string> CompressorsDone;
# define APT_ADD_COMPRESSOR(NAME, EXT, BINARY, ARG, DEARG, COST) \
{ CompressorsDone.push_back(NAME); compressors.emplace_back(NAME, EXT, BINARY, ARG, DEARG, COST); }
APT_ADD_COMPRESSOR(".", "", "", nullptr, nullptr, 0)
if (_config->Exists("Dir::Bin::zstd") == false || FileExists(_config->Find("Dir::Bin::zstd")) == true)
APT_ADD_COMPRESSOR("zstd", ".zst", "zstd", "-19", "-d", 60)
#ifdef HAVE_ZSTD
else
APT_ADD_COMPRESSOR("zstd", ".zst", "false", nullptr, nullptr, 60)
#endif
if (_config->Exists("Dir::Bin::lz4") == false || FileExists(_config->Find("Dir::Bin::lz4")) == true)
APT_ADD_COMPRESSOR("lz4",".lz4","lz4","-1","-d",50)
#ifdef HAVE_LZ4
else
APT_ADD_COMPRESSOR("lz4",".lz4","false", nullptr, nullptr, 50)
#endif
if (_config->Exists("Dir::Bin::gzip") == false || FileExists(_config->Find("Dir::Bin::gzip")) == true)
APT_ADD_COMPRESSOR("gzip",".gz","gzip","-6n","-d",100)
#ifdef HAVE_ZLIB
else
APT_ADD_COMPRESSOR("gzip",".gz","false", nullptr, nullptr, 100)
#endif
if (_config->Exists("Dir::Bin::xz") == false || FileExists(_config->Find("Dir::Bin::xz")) == true)
APT_ADD_COMPRESSOR("xz",".xz","xz","-6","-d",200)
#ifdef HAVE_LZMA
else
APT_ADD_COMPRESSOR("xz",".xz","false", nullptr, nullptr, 200)
#endif
if (_config->Exists("Dir::Bin::bzip2") == false || FileExists(_config->Find("Dir::Bin::bzip2")) == true)
APT_ADD_COMPRESSOR("bzip2",".bz2","bzip2","-6","-d",300)
#ifdef HAVE_BZ2
else
APT_ADD_COMPRESSOR("bzip2",".bz2","false", nullptr, nullptr, 300)
#endif
if (_config->Exists("Dir::Bin::lzma") == false || FileExists(_config->Find("Dir::Bin::lzma")) == true)
APT_ADD_COMPRESSOR("lzma",".lzma","lzma","-6","-d",400)
#ifdef HAVE_LZMA
else
APT_ADD_COMPRESSOR("lzma",".lzma","false", nullptr, nullptr, 400)
#endif
std::vector<std::string> const comp = _config->FindVector("APT::Compressor", "", true);
for (auto const &c: comp)
{
if (c.empty() || std::find(CompressorsDone.begin(), CompressorsDone.end(), c) != CompressorsDone.end())
continue;
compressors.push_back(Compressor(c.c_str(), std::string(".").append(c).c_str(), c.c_str(), nullptr, nullptr, 1000));
}
return compressors;
}
/*}}}*/
// getCompressorExtensions - supported data.tar extensions /*{{{*/
// ---------------------------------------------------------------------
/* */
std::vector<std::string> const Configuration::getCompressorExtensions() {
std::vector<APT::Configuration::Compressor> const compressors = getCompressors();
std::vector<std::string> ext;
for (std::vector<APT::Configuration::Compressor>::const_iterator c = compressors.begin();
c != compressors.end(); ++c)
if (c->Extension.empty() == false && c->Extension != ".")
ext.push_back(c->Extension);
return ext;
}
/*}}}*/
// Compressor constructor /*{{{*/
// ---------------------------------------------------------------------
/* */
Configuration::Compressor::Compressor(char const *name, char const *extension,
char const *binary,
char const *compressArg, char const *uncompressArg,
unsigned short const cost) {
std::string const config = std::string("APT::Compressor::").append(name).append("::");
Name = _config->Find(std::string(config).append("Name"), name);
Extension = _config->Find(std::string(config).append("Extension"), extension);
Binary = _config->Find(std::string(config).append("Binary"), binary);
Cost = _config->FindI(std::string(config).append("Cost"), cost);
std::string const compConf = std::string(config).append("CompressArg");
if (_config->Exists(compConf) == true)
CompressArgs = _config->FindVector(compConf);
else if (compressArg != NULL)
CompressArgs.push_back(compressArg);
std::string const uncompConf = std::string(config).append("UncompressArg");
if (_config->Exists(uncompConf) == true)
UncompressArgs = _config->FindVector(uncompConf);
else if (uncompressArg != NULL)
UncompressArgs.push_back(uncompressArg);
}
/*}}}*/
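// Editor's sketch (not upstream code): a compressor declared in the
// "APT::Compressor" tree is picked up by getCompressors() above, and its
// fields are filled in by the Compressor constructor.  The "mylzip" name,
// binary path and arguments are made up for illustration only.
static bool exampleCustomCompressor()
{
   _config->Set("APT::Compressor::mylzip::Name", "mylzip");
   _config->Set("APT::Compressor::mylzip::Extension", ".lz");
   _config->Set("APT::Compressor::mylzip::Binary", "/usr/bin/lzip");
   _config->Set("APT::Compressor::mylzip::Cost", 500);
   _config->Set("APT::Compressor::mylzip::CompressArg::", "-9");
   _config->Set("APT::Compressor::mylzip::UncompressArg::", "-d");
   auto const compressors = Configuration::getCompressors(false);
   return std::find_if(compressors.begin(), compressors.end(),
      [](Configuration::Compressor const &c) { return c.Name == "mylzip"; }) != compressors.end();
}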
// getBuildProfiles - return a vector of enabled build profiles /*{{{*/
std::vector<std::string> const Configuration::getBuildProfiles() {
// order is: override value (~= commandline), environment variable, list (~= config file)
std::string profiles_env = getenv("DEB_BUILD_PROFILES") == 0 ? "" : getenv("DEB_BUILD_PROFILES");
if (profiles_env.empty() == false) {
profiles_env = SubstVar(profiles_env, " ", ",");
std::string const bp = _config->Find("APT::Build-Profiles");
_config->Clear("APT::Build-Profiles");
if (bp.empty() == false)
_config->Set("APT::Build-Profiles", bp);
}
return _config->FindVector("APT::Build-Profiles", profiles_env);
}
std::string const Configuration::getBuildProfilesString() {
std::vector<std::string> profiles = getBuildProfiles();
if (profiles.empty() == true)
return "";
std::vector<std::string>::const_iterator p = profiles.begin();
std::string list = *p;
for (++p; p != profiles.end(); ++p)
list.append(",").append(*p);
return list;
}
/*}}}*/
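// Editor's sketch (not upstream code): with no APT::Build-Profiles option
// set, the environment variable wins and spaces become commas, so this is
// expected to return "nocheck,nodoc".  setenv() comes from <stdlib.h>,
// which is already included at the top of this file.
static std::string exampleBuildProfiles()
{
   setenv("DEB_BUILD_PROFILES", "nocheck nodoc", 1);
   return Configuration::getBuildProfilesString();
}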
}

129
apt-pkg/aptconfiguration.h Normal file
View File

@ -0,0 +1,129 @@
// -*- mode: cpp; mode: fold -*-
// Description /*{{{*/
/** \class APT::Configuration
* \brief Provide access methods to various configuration settings
*
* This class and its methods provide a layer around the usual access
* methods with _config to ensure that settings are correct and to be able
* to set defaults without the need to recheck them in every method again.
*/
/*}}}*/
#ifndef APT_CONFIGURATION_H
#define APT_CONFIGURATION_H
// Include Files /*{{{*/
#include <apt-pkg/macros.h>
#include <limits>
#include <string>
#include <vector>
/*}}}*/
namespace APT {
namespace Configuration { /*{{{*/
/** \brief Returns a vector of usable Compression Types
*
* Files can be compressed in various ways to decrease the size of the
* download. Therefore the Acquire methods support a few compression types,
* and some archives also provide a few different types. This option
* group exists to give the user the choice to prefer one type over the
* other (some compression types are very resource intensive - great if you
* have a limited download, bad if you have really low-powered hardware).
*
* This method ensures that the defaults are set and checks at runtime
* if the type can be used. E.g. the current default is to prefer bzip2
* over lzma and gz - if the bzip2 binary is not available there is not much
* sense in downloading the bz2 file, therefore we will not return bz2 as
* a usable compression type. The availability is checked with the settings
* in the Dir::Bin group.
*
* \param Cached saves the result so we need to calculate it only once;
* this parameter should only be used for testing purposes.
*
* \return a vector of the compression types in the preferred usage order
*/
APT_PUBLIC std::vector<std::string> const getCompressionTypes(bool const &Cached = true);
/** \brief Returns a vector of Language Codes
*
* Languages can be defined with their two- or five-character code.
* This method handles the various ways to set the preferred codes,
* honors the environment and ensures that the codes are not listed twice.
*
* The special word "environment" will be replaced with the long and the short
* code of the locale settings, and it is ensured that this will not add
* duplicates. So in a German locale the setting "environment, de_DE, en, de"
* will result in "de_DE, de, en".
*
* Another special word is "none" which separates the preferred from all codes
* in this setting. So this setting and method can be used to get the codes the user wants
* to see, or to get all language codes for which APT (should) have Translations available.
*
* \param All return all codes or only codes for languages we want to use
* \param Cached saves the result so we need to calculate it only once;
* this parameter should only be used for testing purposes.
* \param Locale don't get the locale from the system but use this one instead
* this parameter should only be used for testing purposes.
*
* \return a vector of (all) Language Codes in the preferred usage order
*/
APT_PUBLIC std::vector<std::string> const getLanguages(bool const &All = false,
bool const &Cached = true, char const ** const Locale = 0);
/** \brief Are we interested in the given Language?
*
* \param Lang is the language we want to check
* \param All defines if we check against all codes or only against used codes
* \return true if we are interested, false otherwise
*/
APT_PUBLIC bool checkLanguage(std::string Lang, bool const All = false);
/** \brief Returns a vector of Architectures we support
*
* \param Cached saves the result so we need to calculate it only once;
* this parameter should only be used for testing purposes.
*
* \return a vector of Architectures in preferred order
*/
APT_PUBLIC std::vector<std::string> const getArchitectures(bool const &Cached = true);
/** \brief Are we interested in the given Architecture?
*
* \param Arch we want to check
* \return true if we are interested, false otherwise
*/
APT_PUBLIC bool checkArchitecture(std::string const &Arch);
/** \brief Representation of supported compressors */
struct APT_PUBLIC Compressor {
std::string Name;
std::string Extension;
std::string Binary;
std::vector<std::string> CompressArgs;
std::vector<std::string> UncompressArgs;
unsigned short Cost;
Compressor(char const *name, char const *extension, char const *binary,
char const *compressArg, char const *uncompressArg,
unsigned short const cost);
Compressor() : Cost(std::numeric_limits<unsigned short>::max()) {};
};
/** \brief Return a vector of Compressors supported for data.tar's
*
* \param Cached saves the result so we need to calculate it only once;
* this parameter should only be used for testing purposes.
*
* \return a vector of Compressors
*/
APT_PUBLIC std::vector<Compressor> const getCompressors(bool const Cached = true);
/** \brief Return a vector of extensions supported for data.tar's */
APT_PUBLIC std::vector<std::string> const getCompressorExtensions();
/** \return Return a vector of enabled build profile specifications */
APT_PUBLIC std::vector<std::string> const getBuildProfiles();
/** \return Return a comma-separated list of enabled build profile specifications */
APT_PUBLIC std::string const getBuildProfilesString();
/*}}}*/
}
/*}}}*/
}
#endif
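A short consumer-side sketch (editor's addition, not part of this header): the preferred compression types returned by getCompressionTypes() can be expanded into candidate index file names, with the special "uncompressed" entry mapping to the bare name.

#include <apt-pkg/aptconfiguration.h>
#include <string>
#include <vector>

static std::vector<std::string> exampleIndexNames(std::string const &Base)
{
   std::vector<std::string> names;
   // e.g. Base = "Packages" -> "Packages.xz", "Packages.gz", ..., "Packages"
   for (auto const &type : APT::Configuration::getCompressionTypes())
      names.push_back(type == "uncompressed" ? Base : Base + "." + type);
   return names;
}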

363
apt-pkg/cachefile.cc Normal file
View File

@ -0,0 +1,363 @@
// -*- mode: cpp; mode: fold -*-
// Description /*{{{*/
/* ######################################################################
CacheFile - Simple wrapper class for opening, generating and whatnot
This class implements a simple 2 line mechanism to open various sorts
of caches. It can operate as root or as non-root, show progress and so on;
it transparently handles everything necessary.
##################################################################### */
/*}}}*/
// Include Files /*{{{*/
#include <config.h>
#include <apt-pkg/cachefile.h>
#include <apt-pkg/configuration.h>
#include <apt-pkg/depcache.h>
#include <apt-pkg/error.h>
#include <apt-pkg/fileutl.h>
#include <apt-pkg/indexfile.h>
#include <apt-pkg/mmap.h>
#include <apt-pkg/pkgcache.h>
#include <apt-pkg/pkgcachegen.h>
#include <apt-pkg/pkgsystem.h>
#include <apt-pkg/policy.h>
#include <apt-pkg/progress.h>
#include <apt-pkg/sourcelist.h>
#include <memory>
#include <string>
#include <vector>
#include <string.h>
#include <unistd.h>
#include <apti18n.h>
/*}}}*/
struct pkgCacheFile::Private
{
bool WithLock = false;
};
// CacheFile::CacheFile - Constructor /*{{{*/
pkgCacheFile::pkgCacheFile() : d(new Private()), ExternOwner(false), Map(NULL), Cache(NULL),
DCache(NULL), SrcList(NULL), Policy(NULL)
{
}
pkgCacheFile::pkgCacheFile(pkgDepCache * const Owner) : d(new Private()), ExternOwner(true),
Map(&Owner->GetCache().GetMap()), Cache(&Owner->GetCache()),
DCache(Owner), SrcList(NULL), Policy(NULL)
{
}
/*}}}*/
// CacheFile::~CacheFile - Destructor /*{{{*/
// ---------------------------------------------------------------------
/* */
pkgCacheFile::~pkgCacheFile()
{
if (ExternOwner == false)
{
delete DCache;
delete Cache;
delete Map;
}
delete Policy;
delete SrcList;
if (d->WithLock == true)
_system->UnLock(true);
delete d;
}
/*}}}*/
// CacheFile::BuildCaches - Open and build the cache files /*{{{*/
class APT_HIDDEN ScopedErrorMerge {
public:
ScopedErrorMerge() { _error->PushToStack(); }
~ScopedErrorMerge() { _error->MergeWithStack(); }
};
bool pkgCacheFile::BuildCaches(OpProgress *Progress, bool WithLock)
{
std::unique_ptr<pkgCache> Cache;
std::unique_ptr<MMap> Map;
if (this->Cache != NULL)
return true;
ScopedErrorMerge sem;
if (_config->FindB("pkgCacheFile::Generate", true) == false)
{
FileFd file(_config->FindFile("Dir::Cache::pkgcache"), FileFd::ReadOnly);
if (file.IsOpen() == false || file.Failed())
return false;
Map.reset(new MMap(file, MMap::Public|MMap::ReadOnly));
if (unlikely(Map->validData() == false))
return false;
Cache.reset(new pkgCache(Map.get()));
if (_error->PendingError() == true)
return false;
this->Cache = Cache.release();
this->Map = Map.release();
return true;
}
if (WithLock == true)
{
if (_system->Lock(Progress) == false)
return false;
d->WithLock = true;
}
if (_error->PendingError() == true)
return false;
if (BuildSourceList(Progress) == false)
return false;
// Read the caches
MMap *TmpMap = nullptr;
pkgCache *TmpCache = nullptr;
bool Res = pkgCacheGenerator::MakeStatusCache(*SrcList,Progress,&TmpMap, &TmpCache, true);
Map.reset(TmpMap);
Cache.reset(TmpCache);
if (Progress != NULL)
Progress->Done();
if (Res == false)
return _error->Error(_("The package lists or status file could not be parsed or opened."));
/* This sux, remove it someday */
if (_error->PendingError() == true)
_error->Warning(_("You may want to run apt-get update to correct these problems"));
if (Cache == nullptr)
Cache.reset(new pkgCache(Map.get()));
if (_error->PendingError() == true)
return false;
this->Map = Map.release();
this->Cache = Cache.release();
return true;
}
/*}}}*/
// CacheFile::BuildSourceList - Open and build all relevant sources.list/*{{{*/
// ---------------------------------------------------------------------
/* */
bool pkgCacheFile::BuildSourceList(OpProgress * /*Progress*/)
{
std::unique_ptr<pkgSourceList> SrcList;
if (this->SrcList != NULL)
return true;
SrcList.reset(new pkgSourceList());
if (SrcList->ReadMainList() == false)
return _error->Error(_("The list of sources could not be read."));
this->SrcList = SrcList.release();
return true;
}
/*}}}*/
// CacheFile::BuildPolicy - Open and build all relevant preferences /*{{{*/
// ---------------------------------------------------------------------
/* */
bool pkgCacheFile::BuildPolicy(OpProgress * /*Progress*/)
{
std::unique_ptr<pkgPolicy> Policy;
if (this->Policy != NULL)
return true;
Policy.reset(new pkgPolicy(Cache));
if (_error->PendingError() == true)
return false;
ReadPinFile(*Policy);
ReadPinDir(*Policy);
this->Policy = Policy.release();
return _error->PendingError() == false;
}
/*}}}*/
// CacheFile::BuildDepCache - Open and build the dependency cache /*{{{*/
// ---------------------------------------------------------------------
/* */
bool pkgCacheFile::BuildDepCache(OpProgress *Progress)
{
if (BuildCaches(Progress, false) == false)
return false;
std::unique_ptr<pkgDepCache> DCache;
if (this->DCache != NULL)
return true;
if (BuildPolicy(Progress) == false)
return false;
DCache.reset(new pkgDepCache(Cache,Policy));
if (_error->PendingError() == true)
return false;
if (DCache->Init(Progress) == false)
return false;
this->DCache = DCache.release();
return true;
}
/*}}}*/
// CacheFile::Open - Open the cache files, creating if necessary /*{{{*/
// ---------------------------------------------------------------------
/* */
bool pkgCacheFile::Open(OpProgress *Progress, bool WithLock)
{
if (BuildCaches(Progress,WithLock) == false)
return false;
if (BuildPolicy(Progress) == false)
return false;
if (BuildDepCache(Progress) == false)
return false;
if (Progress != NULL)
Progress->Done();
if (_error->PendingError() == true)
return false;
return true;
}
/*}}}*/
bool pkgCacheFile::AddIndexFile(pkgIndexFile * const File) /*{{{*/
{
if (SrcList == NULL)
if (BuildSourceList() == false)
return false;
SrcList->AddVolatileFile(File);
if (Cache == nullptr || File->HasPackages() == false || File->Exists() == false)
return true;
if (File->FindInCache(*Cache).end() == false)
return _error->Warning("Duplicate sources.list entry %s",
File->Describe().c_str());
if (ExternOwner == false)
{
delete DCache;
delete Cache;
}
delete Policy;
DCache = NULL;
Policy = NULL;
Cache = NULL;
if (ExternOwner == false)
{
// a dynamic mmap means that we have built at least parts of the cache
// in memory which we might or might not have written to disk.
// Throwing it away would therefore be a very costly operation we want to avoid
DynamicMMap * dynmmap = dynamic_cast<DynamicMMap*>(Map);
if (dynmmap != nullptr)
{
{
pkgCacheGenerator Gen(dynmmap, nullptr);
if (Gen.Start() == false || File->Merge(Gen, nullptr) == false)
return false;
}
Cache = new pkgCache(Map);
if (_error->PendingError() == true) {
delete Cache;
Cache = nullptr;
return false;
}
return true;
}
else
{
delete Map;
Map = NULL;
}
}
else
{
ExternOwner = false;
Map = NULL;
}
_system->UnLock(true);
return true;
}
/*}}}*/
// CacheFile::RemoveCaches - remove all cache files from disk /*{{{*/
// ---------------------------------------------------------------------
/* */
void pkgCacheFile::RemoveCaches()
{
std::string const pkgcache = _config->FindFile("Dir::cache::pkgcache");
std::string const srcpkgcache = _config->FindFile("Dir::cache::srcpkgcache");
if (pkgcache.empty() == false && RealFileExists(pkgcache) == true)
RemoveFile("RemoveCaches", pkgcache);
if (srcpkgcache.empty() == false && RealFileExists(srcpkgcache) == true)
RemoveFile("RemoveCaches", srcpkgcache);
if (pkgcache.empty() == false)
{
std::string cachedir = flNotFile(pkgcache);
std::string cachefile = flNotDir(pkgcache);
if (cachedir.empty() != true && cachefile.empty() != true && DirectoryExists(cachedir) == true)
{
cachefile.append(".");
std::vector<std::string> caches = GetListOfFilesInDir(cachedir, false);
for (std::vector<std::string>::const_iterator file = caches.begin(); file != caches.end(); ++file)
{
std::string nuke = flNotDir(*file);
if (strncmp(cachefile.c_str(), nuke.c_str(), cachefile.length()) != 0)
continue;
RemoveFile("RemoveCaches", *file);
}
}
}
if (srcpkgcache.empty() == true)
return;
std::string cachedir = flNotFile(srcpkgcache);
std::string cachefile = flNotDir(srcpkgcache);
if (cachedir.empty() == true || cachefile.empty() == true || DirectoryExists(cachedir) == false)
return;
cachefile.append(".");
std::vector<std::string> caches = GetListOfFilesInDir(cachedir, false);
for (std::vector<std::string>::const_iterator file = caches.begin(); file != caches.end(); ++file)
{
std::string nuke = flNotDir(*file);
if (strncmp(cachefile.c_str(), nuke.c_str(), cachefile.length()) != 0)
continue;
RemoveFile("RemoveCaches", *file);
}
}
/*}}}*/
// CacheFile::Close - close the cache files /*{{{*/
// ---------------------------------------------------------------------
/* */
void pkgCacheFile::Close()
{
if (ExternOwner == false)
{
delete DCache;
delete Cache;
delete Map;
}
else
ExternOwner = false;
delete Policy;
delete SrcList;
if (d->WithLock == true)
{
_system->UnLock(true);
d->WithLock = false;
}
Map = NULL;
DCache = NULL;
Policy = NULL;
Cache = NULL;
SrcList = NULL;
}
/*}}}*/

88
apt-pkg/cachefile.h Normal file
View File

@ -0,0 +1,88 @@
// -*- mode: cpp; mode: fold -*-
// Description /*{{{*/
/* ######################################################################
CacheFile - Simple wrapper class for opening, generating and whatnot
This class implements a simple 2 line mechanism to open various sorts
of caches. It can operate as root or as non-root, show progress and so on;
it transparently handles everything necessary (a usage sketch follows after
this header).
This means it can rebuild caches from the source list and instantiate
and prepare the standard policy mechanism.
##################################################################### */
/*}}}*/
#ifndef PKGLIB_CACHEFILE_H
#define PKGLIB_CACHEFILE_H
#include <stddef.h>
#include <apt-pkg/depcache.h>
#include <apt-pkg/macros.h>
#include <apt-pkg/pkgcache.h>
class MMap;
class pkgPolicy;
class pkgSourceList;
class pkgIndexFile;
class OpProgress;
class APT_PUBLIC pkgCacheFile
{
struct Private;
/** \brief dpointer placeholder (for later in case we need it) */
Private *const d;
bool ExternOwner;
protected:
MMap *Map;
pkgCache *Cache;
pkgDepCache *DCache;
pkgSourceList *SrcList;
public:
pkgPolicy *Policy;
// We look pretty much exactly like a pointer to a dep cache
inline operator pkgCache &() const {return *Cache;};
inline operator pkgCache *() const {return Cache;};
inline operator pkgDepCache &() const {return *DCache;};
inline operator pkgDepCache *() const {return DCache;};
inline operator pkgPolicy &() const {return *Policy;};
inline operator pkgPolicy *() const {return Policy;};
inline operator pkgSourceList &() const {return *SrcList;};
inline operator pkgSourceList *() const {return SrcList;};
inline pkgDepCache *operator ->() const {return DCache;};
inline pkgDepCache &operator *() const {return *DCache;};
inline pkgDepCache::StateCache &operator [](pkgCache::PkgIterator const &I) const {return (*DCache)[I];};
inline unsigned char &operator [](pkgCache::DepIterator const &I) const {return (*DCache)[I];};
bool BuildCaches(OpProgress *Progress = NULL,bool WithLock = true);
bool BuildSourceList(OpProgress *Progress = NULL);
bool BuildPolicy(OpProgress *Progress = NULL);
bool BuildDepCache(OpProgress *Progress = NULL);
bool Open(OpProgress *Progress = NULL, bool WithLock = true);
inline bool ReadOnlyOpen(OpProgress *Progress = NULL) { return Open(Progress, false); };
static void RemoveCaches();
void Close();
bool AddIndexFile(pkgIndexFile * const File);
inline pkgCache* GetPkgCache() { BuildCaches(NULL, false); return Cache; };
inline pkgDepCache* GetDepCache() { BuildDepCache(); return DCache; };
inline pkgPolicy* GetPolicy() { BuildPolicy(); return Policy; };
inline pkgSourceList* GetSourceList() { BuildSourceList(); return SrcList; };
inline bool IsPkgCacheBuilt() const { return (Cache != NULL); };
inline bool IsDepCacheBuilt() const { return (DCache != NULL); };
inline bool IsPolicyBuilt() const { return (Policy != NULL); };
inline bool IsSrcListBuilt() const { return (SrcList != NULL); };
pkgCacheFile();
explicit pkgCacheFile(pkgDepCache * const Owner);
virtual ~pkgCacheFile();
};
#endif
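A minimal sketch (editor's addition) of the "simple 2 line mechanism" described above: open the caches with progress reporting, then use the implicit conversions to reach the package and dependency caches. OpTextProgress from apt-pkg/progress.h is assumed here.

#include <apt-pkg/cachefile.h>
#include <apt-pkg/progress.h>

bool ExampleOpenCache()
{
   pkgCacheFile CacheFile;
   OpTextProgress Progress;
   if (CacheFile.Open(&Progress, false) == false) // false: do not take the system lock
      return false;
   // The wrapper converts implicitly to pkgCache& and pkgDepCache&:
   pkgCache &Cache = CacheFile;
   return Cache.Head().PackageCount > 0;
}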

549
apt-pkg/cachefilter-patterns.cc Normal file
View File

@ -0,0 +1,549 @@
/*
* cachefilter-patterns.cc - Parser for aptitude-style patterns
*
* Copyright (c) 2019 Canonical Ltd
*
* SPDX-License-Identifier: GPL-2.0+
*/
#include <config.h>
#include <apt-pkg/cachefilter-patterns.h>
#include <apti18n.h>
namespace APT
{
namespace Internal
{
static const constexpr struct
{
APT::StringView shortName;
APT::StringView longName;
bool takesArgument;
} shortPatterns[] = {
{"r"_sv, "?architecture"_sv, true},
{"A"_sv, "?archive"_sv, true},
{"M"_sv, "?automatic"_sv, false},
{"b"_sv, "?broken"_sv, false},
{"c"_sv, "?config-files"_sv, false},
{"E"_sv, "?essential"_sv, false},
{"F"_sv, "?false"_sv, false},
{"g"_sv, "?garbage"_sv, false},
{"i"_sv, "?installed"_sv, false},
{"n"_sv, "?name"_sv, true},
{"o"_sv, "?obsolete"_sv, false},
{"O"_sv, "?origin"_sv, true},
{"s"_sv, "?section"_sv, true},
{"e"_sv, "?source-package"_sv, true},
{"T"_sv, "?true"_sv, false},
{"U"_sv, "?upgradable"_sv, false},
{"V"_sv, "?version"_sv, true},
{"v"_sv, "?virtual"_sv, false},
};
template <class... Args>
std::string rstrprintf(Args... args)
{
std::string str;
strprintf(str, std::forward<Args>(args)...);
return str;
}
// Parse a complete pattern, make sure it's the entire input
std::unique_ptr<PatternTreeParser::Node> PatternTreeParser::parseTop()
{
skipSpace();
auto node = parse();
skipSpace();
if (node == nullptr)
throw Error{Node{0, sentence.size()}, "Expected pattern"};
if (node->end != sentence.size())
throw Error{Node{node->end, sentence.size()}, "Expected end of file"};
return node;
}
// Parse any pattern
std::unique_ptr<PatternTreeParser::Node> PatternTreeParser::parse()
{
return parseOr();
}
std::unique_ptr<PatternTreeParser::Node> PatternTreeParser::parseOr()
{
auto start = state.offset;
std::vector<std::unique_ptr<PatternTreeParser::Node>> nodes;
auto firstNode = parseAnd();
if (firstNode == nullptr)
return nullptr;
nodes.push_back(std::move(firstNode));
for (skipSpace(); sentence[state.offset] == '|'; skipSpace())
{
state.offset++;
skipSpace();
auto node = parseAnd();
if (node == nullptr)
throw Error{Node{state.offset, sentence.size()}, "Expected pattern after |"};
nodes.push_back(std::move(node));
}
if (nodes.size() == 0)
return nullptr;
if (nodes.size() == 1)
return std::move(nodes[0]);
auto node = std::make_unique<PatternNode>();
node->start = start;
node->end = nodes[nodes.size() - 1]->end;
node->term = "?or";
node->arguments = std::move(nodes);
node->haveArgumentList = true;
return node;
}
std::unique_ptr<PatternTreeParser::Node> PatternTreeParser::parseAnd()
{
auto start = state.offset;
std::vector<std::unique_ptr<PatternTreeParser::Node>> nodes;
for (skipSpace(); state.offset < sentence.size(); skipSpace())
{
auto node = parseUnary();
if (node == nullptr)
break;
nodes.push_back(std::move(node));
}
if (nodes.size() == 0)
return nullptr;
if (nodes.size() == 1)
return std::move(nodes[0]);
auto node = std::make_unique<PatternNode>();
node->start = start;
node->end = nodes[nodes.size() - 1]->end;
node->term = "?and";
node->arguments = std::move(nodes);
node->haveArgumentList = true;
return node;
}
std::unique_ptr<PatternTreeParser::Node> PatternTreeParser::parseUnary()
{
if (sentence[state.offset] != '!')
return parsePrimary();
auto start = ++state.offset;
auto primary = parsePrimary();
if (primary == nullptr)
throw Error{Node{start, sentence.size()}, "Expected pattern"};
auto node = std::make_unique<PatternNode>();
node->start = start;
node->end = primary->end;
node->term = "?not";
node->arguments.push_back(std::move(primary));
node->haveArgumentList = true;
return node;
}
std::unique_ptr<PatternTreeParser::Node> PatternTreeParser::parsePrimary()
{
std::unique_ptr<Node> node;
if ((node = parseShortPattern()) != nullptr)
return node;
if ((node = parsePattern()) != nullptr)
return node;
if ((node = parseGroup()) != nullptr)
return node;
return nullptr;
}
std::unique_ptr<PatternTreeParser::Node> PatternTreeParser::parseGroup()
{
if (sentence[state.offset] != '(')
return nullptr;
auto start = state.offset++;
skipSpace();
auto node = parse();
if (node == nullptr)
throw Error{Node{state.offset, sentence.size()},
"Expected pattern after '('"};
skipSpace();
if (sentence[state.offset] != ')')
throw Error{Node{state.offset, sentence.size()},
"Expected closing parenthesis"};
auto end = ++state.offset;
node->start = start;
node->end = end;
return node;
}
std::unique_ptr<PatternTreeParser::Node> PatternTreeParser::parseArgument(bool shrt)
{
std::unique_ptr<Node> node;
if ((node = parseQuotedWord()) != nullptr)
return node;
if ((node = parseWord(shrt)) != nullptr)
return node;
if ((node = parse()) != nullptr)
return node;
throw Error{Node{state.offset, sentence.size()},
"Expected pattern, quoted word, or word"};
}
// Parse a short pattern
std::unique_ptr<PatternTreeParser::Node> PatternTreeParser::parseShortPattern()
{
if (sentence[state.offset] != '~')
return nullptr;
for (auto &sp : shortPatterns)
{
if (sentence.substr(state.offset + 1, sp.shortName.size()) != sp.shortName)
continue;
auto node = std::make_unique<PatternNode>();
node->end = node->start = state.offset;
node->term = sp.longName;
state.offset += sp.shortName.size() + 1;
if (sp.takesArgument)
{
node->arguments.push_back(parseArgument(true));
node->haveArgumentList = true;
}
node->end = state.offset;
return node;
}
throw Error{Node{state.offset, sentence.size()}, "Unknown short pattern"};
}
// Parse a list pattern (or function call pattern)
std::unique_ptr<PatternTreeParser::Node> PatternTreeParser::parsePattern()
{
static constexpr auto CHARS = ("0123456789"
"abcdefghijklmnopqrstuvwxyz"
"ABCDEFGHIJKLMNOPQRSTUVWXYZ"
"-"_sv);
if (sentence[state.offset] != '?')
return nullptr;
auto node = std::make_unique<PatternNode>();
node->end = node->start = state.offset;
state.offset++;
while (CHARS.find(sentence[state.offset]) != APT::StringView::npos)
{
++state.offset;
}
node->term = sentence.substr(node->start, state.offset - node->start);
if (node->term.size() <= 1)
throw Error{*node, "Pattern must have a term/name"};
node->end = skipSpace();
// We don't have any arguments, return node;
if (sentence[state.offset] != '(')
return node;
node->end = ++state.offset;
skipSpace();
node->haveArgumentList = true;
// Empty argument list, return
if (sentence[state.offset] == ')')
{
node->end = ++state.offset;
return node;
}
node->arguments.push_back(parseArgument(false));
skipSpace();
while (sentence[state.offset] == ',')
{
++state.offset;
skipSpace();
// This was a trailing comma - allow it and break the loop
if (sentence[state.offset] == ')')
break;
node->arguments.push_back(parseArgument(false));
skipSpace();
}
node->end = state.offset;
if (sentence[state.offset] != ')')
throw Error{node->arguments.empty() ? *node : *node->arguments[node->arguments.size() - 1],
rstrprintf("Expected closing parenthesis or comma after last argument, received %c", sentence[state.offset])};
node->end = ++state.offset;
return node;
}
// Parse a quoted word atom
std::unique_ptr<PatternTreeParser::Node> PatternTreeParser::parseQuotedWord()
{
if (sentence[state.offset] != '"')
return nullptr;
auto node = std::make_unique<WordNode>();
node->start = state.offset;
// Eat beginning of string
state.offset++;
while (sentence[state.offset] != '"' && sentence[state.offset] != '\0')
state.offset++;
// End of string
if (sentence[state.offset] != '"')
throw Error{*node, "Could not find end of string"};
state.offset++;
node->end = state.offset;
node->word = sentence.substr(node->start + 1, node->end - node->start - 2);
return node;
}
// Parse a bare word atom
std::unique_ptr<PatternTreeParser::Node> PatternTreeParser::parseWord(bool shrt)
{
static const constexpr auto DISALLOWED_START = "!?~|,() \0"_sv;
static const constexpr auto DISALLOWED_LONG = "|,()\0"_sv;
static const constexpr auto DISALLOWED_SHRT = "|,() ?\0"_sv;
const auto DISALLOWED = shrt ? DISALLOWED_SHRT : DISALLOWED_LONG;
if (DISALLOWED_START.find(sentence[state.offset]) != APT::StringView::npos)
return nullptr;
auto node = std::make_unique<WordNode>();
node->start = state.offset;
while (DISALLOWED.find(sentence[state.offset]) == APT::StringView::npos)
state.offset++;
node->end = state.offset;
node->word = sentence.substr(node->start, node->end - node->start);
return node;
}
// Rendering of the tree in JSON for debugging
std::ostream &PatternTreeParser::PatternNode::render(std::ostream &os)
{
os << term.to_string();
if (haveArgumentList)
{
os << "(";
for (auto &node : arguments)
node->render(os) << ",";
os << ")";
}
return os;
}
std::ostream &PatternTreeParser::WordNode::render(std::ostream &os)
{
return quoted ? os << '"' << word.to_string() << '"' : os << word.to_string();
}
std::nullptr_t PatternTreeParser::Node::error(std::string message)
{
throw Error{*this, message};
}
bool PatternTreeParser::PatternNode::matches(APT::StringView name, int min, int max)
{
if (name != term)
return false;
if (max != 0 && !haveArgumentList)
error(rstrprintf("%s expects an argument list", term.to_string().c_str()));
if (max == 0 && haveArgumentList)
error(rstrprintf("%s does not expect an argument list", term.to_string().c_str()));
if (min >= 0 && min == max && (arguments.size() != size_t(min)))
error(rstrprintf("%s expects %d arguments, but received %d arguments", term.to_string().c_str(), min, arguments.size()));
if (min >= 0 && arguments.size() < size_t(min))
error(rstrprintf("%s expects at least %d arguments, but received %d arguments", term.to_string().c_str(), min, arguments.size()));
if (max >= 0 && arguments.size() > size_t(max))
error(rstrprintf("%s expects at most %d arguments, but received %d arguments", term.to_string().c_str(), max, arguments.size()));
return true;
}
std::unique_ptr<APT::CacheFilter::Matcher> PatternParser::aPattern(std::unique_ptr<PatternTreeParser::Node> &nodeP)
{
assert(nodeP != nullptr);
auto node = dynamic_cast<PatternTreeParser::PatternNode *>(nodeP.get());
if (node == nullptr)
nodeP->error("Expected a pattern");
if (node->matches("?architecture", 1, 1))
return std::make_unique<APT::CacheFilter::PackageArchitectureMatchesSpecification>(aWord(node->arguments[0]));
if (node->matches("?archive", 1, 1))
return std::make_unique<Patterns::VersionIsArchive>(aWord(node->arguments[0]));
if (node->matches("?all-versions", 1, 1))
return std::make_unique<Patterns::VersionIsAllVersions>(aPattern(node->arguments[0]));
if (node->matches("?any-version", 1, 1))
return std::make_unique<Patterns::VersionIsAnyVersion>(aPattern(node->arguments[0]));
if (node->matches("?automatic", 0, 0))
return std::make_unique<Patterns::PackageIsAutomatic>(file);
if (node->matches("?broken", 0, 0))
return std::make_unique<Patterns::PackageIsBroken>(file);
if (node->matches("?config-files", 0, 0))
return std::make_unique<Patterns::PackageIsConfigFiles>();
if (node->matches("?essential", 0, 0))
return std::make_unique<Patterns::PackageIsEssential>();
if (node->matches("?exact-name", 1, 1))
return std::make_unique<Patterns::PackageHasExactName>(aWord(node->arguments[0]));
if (node->matches("?false", 0, 0))
return std::make_unique<APT::CacheFilter::FalseMatcher>();
if (node->matches("?garbage", 0, 0))
return std::make_unique<Patterns::PackageIsGarbage>(file);
if (node->matches("?installed", 0, 0))
return std::make_unique<Patterns::PackageIsInstalled>(file);
if (node->matches("?name", 1, 1))
return std::make_unique<APT::CacheFilter::PackageNameMatchesRegEx>(aWord(node->arguments[0]));
if (node->matches("?not", 1, 1))
return std::make_unique<APT::CacheFilter::NOTMatcher>(aPattern(node->arguments[0]).release());
if (node->matches("?obsolete", 0, 0))
return std::make_unique<Patterns::PackageIsObsolete>();
if (node->matches("?origin", 1, 1))
return std::make_unique<Patterns::VersionIsOrigin>(aWord(node->arguments[0]));
if (node->matches("?section", 1, 1))
return std::make_unique<Patterns::VersionIsSection>(aWord(node->arguments[0]));
if (node->matches("?source-package", 1, 1))
return std::make_unique<Patterns::VersionIsSourcePackage>(aWord(node->arguments[0]));
if (node->matches("?source-version", 1, 1))
return std::make_unique<Patterns::VersionIsSourceVersion>(aWord(node->arguments[0]));
if (node->matches("?true", 0, 0))
return std::make_unique<APT::CacheFilter::TrueMatcher>();
if (node->matches("?upgradable", 0, 0))
return std::make_unique<Patterns::PackageIsUpgradable>(file);
if (node->matches("?version", 1, 1))
return std::make_unique<Patterns::VersionIsVersion>(aWord(node->arguments[0]));
if (node->matches("?virtual", 0, 0))
return std::make_unique<Patterns::PackageIsVirtual>();
if (node->matches("?x-name-fnmatch", 1, 1))
return std::make_unique<APT::CacheFilter::PackageNameMatchesFnmatch>(aWord(node->arguments[0]));
// Variable argument patterns
if (node->matches("?and", 0, -1) || node->matches("?narrow", 0, -1))
{
auto pattern = std::make_unique<APT::CacheFilter::ANDMatcher>();
for (auto &arg : node->arguments)
pattern->AND(aPattern(arg).release());
if (node->term == "?narrow")
return std::make_unique<Patterns::VersionIsAnyVersion>(std::move(pattern));
return pattern;
}
if (node->matches("?or", 0, -1))
{
auto pattern = std::make_unique<APT::CacheFilter::ORMatcher>();
for (auto &arg : node->arguments)
pattern->OR(aPattern(arg).release());
return pattern;
}
node->error(rstrprintf("Unrecognized pattern '%s'", node->term.to_string().c_str()));
return nullptr;
}
std::string PatternParser::aWord(std::unique_ptr<PatternTreeParser::Node> &nodeP)
{
assert(nodeP != nullptr);
auto node = dynamic_cast<PatternTreeParser::WordNode *>(nodeP.get());
if (node == nullptr)
nodeP->error("Expected a word");
return node->word.to_string();
}
namespace Patterns
{
BaseRegexMatcher::BaseRegexMatcher(std::string const &Pattern)
{
pattern = new regex_t;
int const Res = regcomp(pattern, Pattern.c_str(), REG_EXTENDED | REG_ICASE | REG_NOSUB);
if (Res == 0)
return;
delete pattern;
pattern = NULL;
char Error[300];
regerror(Res, pattern, Error, sizeof(Error));
_error->Error(_("Regex compilation error - %s"), Error);
}
bool BaseRegexMatcher::operator()(const char *string)
{
if (unlikely(pattern == nullptr) || string == nullptr)
return false;
else
return regexec(pattern, string, 0, 0, 0) == 0;
}
BaseRegexMatcher::~BaseRegexMatcher()
{
if (pattern == NULL)
return;
regfree(pattern);
delete pattern;
}
} // namespace Patterns
} // namespace Internal
// The bridge into the public world
std::unique_ptr<APT::CacheFilter::Matcher> APT::CacheFilter::ParsePattern(APT::StringView pattern, pkgCacheFile *file)
{
if (file != nullptr && !file->BuildDepCache())
return nullptr;
try
{
auto top = APT::Internal::PatternTreeParser(pattern).parseTop();
APT::Internal::PatternParser parser{file};
return parser.aPattern(top);
}
catch (APT::Internal::PatternTreeParser::Error &e)
{
std::stringstream ss;
ss << "input:" << e.location.start << "-" << e.location.end << ": error: " << e.message << "\n";
ss << pattern.to_string() << "\n";
for (size_t i = 0; i < e.location.start; i++)
ss << " ";
for (size_t i = e.location.start; i < e.location.end; i++)
ss << "^";
ss << "\n";
_error->Error("%s", ss.str().c_str());
return nullptr;
}
}
} // namespace APT
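A usage sketch (editor's addition, not upstream code) for the bridge function above: parse an aptitude-style pattern once and apply the resulting matcher to package iterators; on malformed input ParsePattern() reports through _error and returns nullptr, as shown above. The pattern string is illustrative.

#include <apt-pkg/cachefile.h>
#include <apt-pkg/cachefilter.h>

static bool ExampleMatchesPattern(pkgCacheFile &CacheFile, pkgCache::PkgIterator const &Pkg)
{
   auto matcher = APT::CacheFilter::ParsePattern("?installed ?name(^apt$)", &CacheFile);
   return matcher != nullptr && (*matcher)(Pkg);
}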

379
apt-pkg/cachefilter-patterns.h Normal file
View File

@ -0,0 +1,379 @@
/*
* cachefilter-patterns.h - Pattern parser and additional patterns as matchers
*
* Copyright (c) 2019 Canonical Ltd
*
* SPDX-License-Identifier: GPL-2.0+
*/
#ifndef APT_CACHEFILTER_PATTERNS_H
#define APT_CACHEFILTER_PATTERNS_H
#include <apt-pkg/cachefile.h>
#include <apt-pkg/cachefilter.h>
#include <apt-pkg/error.h>
#include <apt-pkg/string_view.h>
#include <apt-pkg/strutl.h>
#include <iostream>
#include <memory>
#include <sstream>
#include <string>
#include <vector>
#include <assert.h>
#ifndef APT_COMPILING_APT
#error Internal header
#endif
namespace APT
{
namespace Internal
{
/**
* \brief PatternTreeParser parses the given sentence into a parse tree.
*
* The parse tree consists of nodes:
* - Word nodes which contain words or quoted words
* - Patterns, which represent ?foo and ?foo(...) patterns
*/
struct APT_PUBLIC PatternTreeParser
{
struct Node
{
size_t start = 0;
size_t end = 0;
explicit Node(size_t start = 0, size_t end = 0) : start(start), end(end) {}
virtual std::ostream &render(std::ostream &os) { return os; };
std::nullptr_t error(std::string message);
};
struct Error : public std::exception
{
Node location;
std::string message;
Error(Node location, std::string message) : location(location), message(message) {}
const char *what() const throw() override { return message.c_str(); }
};
struct PatternNode : public Node
{
APT::StringView term;
std::vector<std::unique_ptr<Node>> arguments;
bool haveArgumentList = false;
APT_HIDDEN std::ostream &render(std::ostream &stream) override;
APT_HIDDEN bool matches(APT::StringView name, int min, int max);
};
struct WordNode : public Node
{
APT::StringView word;
bool quoted = false;
APT_HIDDEN std::ostream &render(std::ostream &stream) override;
};
struct State
{
size_t offset = 0;
};
APT::StringView sentence;
State state;
PatternTreeParser(APT::StringView sentence) : sentence(sentence){};
off_t skipSpace()
{
while (sentence[state.offset] == ' ' || sentence[state.offset] == '\t' || sentence[state.offset] == '\r' || sentence[state.offset] == '\n')
state.offset++;
return state.offset;
};
/// \brief Parse a complete pattern
///
/// There may not be anything before or after the pattern, except for
/// whitespace.
std::unique_ptr<Node> parseTop();
std::unique_ptr<Node> parse(); // public for test cases only
private:
APT_HIDDEN std::unique_ptr<Node> parseOr();
APT_HIDDEN std::unique_ptr<Node> parseAnd();
APT_HIDDEN std::unique_ptr<Node> parseUnary();
APT_HIDDEN std::unique_ptr<Node> parsePrimary();
APT_HIDDEN std::unique_ptr<Node> parseGroup();
APT_HIDDEN std::unique_ptr<Node> parsePattern();
APT_HIDDEN std::unique_ptr<Node> parseShortPattern();
APT_HIDDEN std::unique_ptr<Node> parseArgument(bool shrt);
APT_HIDDEN std::unique_ptr<Node> parseWord(bool shrt);
APT_HIDDEN std::unique_ptr<Node> parseQuotedWord();
};
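// Editor's sketch (not part of this header): driving the tree parser
// directly, as a test might do.  parseTop() throws PatternTreeParser::Error
// on malformed input, and render() prints the tree in its debug form,
// here "?and(?installed,?name(vim,),)".
inline void exampleRenderTree()
{
   PatternTreeParser parser("?installed ?name(vim)");
   auto tree = parser.parseTop();
   tree->render(std::cout) << std::endl;
}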
/**
* \brief PatternParser converts a parse tree into cache filter matchers.
*
* It walks the nodes produced by PatternTreeParser and builds the
* corresponding APT::CacheFilter::Matcher objects: word nodes become
* arguments, pattern nodes become matchers.
*/
struct APT_HIDDEN PatternParser
{
pkgCacheFile *file;
std::unique_ptr<APT::CacheFilter::Matcher> aPattern(std::unique_ptr<PatternTreeParser::Node> &nodeP);
std::string aWord(std::unique_ptr<PatternTreeParser::Node> &nodeP);
};
namespace Patterns
{
using namespace APT::CacheFilter;
/** \brief Basic helper class for matching regex */
class BaseRegexMatcher
{
regex_t *pattern;
public:
BaseRegexMatcher(std::string const &string);
~BaseRegexMatcher();
bool operator()(const char *cstring);
bool operator()(std::string const &string)
{
return (*this)(string.c_str());
}
};
struct APT_HIDDEN PackageIsAutomatic : public PackageMatcher
{
pkgCacheFile *Cache;
explicit PackageIsAutomatic(pkgCacheFile *Cache) : Cache(Cache) {}
bool operator()(pkgCache::PkgIterator const &Pkg) override
{
assert(Cache != nullptr);
return ((*Cache)[Pkg].Flags & pkgCache::Flag::Auto) != 0;
}
};
struct APT_HIDDEN PackageIsBroken : public PackageMatcher
{
pkgCacheFile *Cache;
explicit PackageIsBroken(pkgCacheFile *Cache) : Cache(Cache) {}
bool operator()(pkgCache::PkgIterator const &Pkg) override
{
assert(Cache != nullptr);
auto state = (*Cache)[Pkg];
return state.InstBroken() || state.NowBroken();
}
};
struct APT_HIDDEN PackageIsConfigFiles : public PackageMatcher
{
bool operator()(pkgCache::PkgIterator const &Pkg) override
{
return Pkg->CurrentState == pkgCache::State::ConfigFiles;
}
};
struct APT_HIDDEN PackageIsGarbage : public PackageMatcher
{
pkgCacheFile *Cache;
explicit PackageIsGarbage(pkgCacheFile *Cache) : Cache(Cache) {}
bool operator()(pkgCache::PkgIterator const &Pkg) override
{
assert(Cache != nullptr);
return (*Cache)[Pkg].Garbage;
}
};
struct APT_HIDDEN PackageIsEssential : public PackageMatcher
{
bool operator()(pkgCache::PkgIterator const &Pkg) override
{
return (Pkg->Flags & pkgCache::Flag::Essential) != 0;
}
};
struct APT_HIDDEN PackageHasExactName : public PackageMatcher
{
std::string name;
explicit PackageHasExactName(std::string name) : name(name) {}
bool operator()(pkgCache::PkgIterator const &Pkg) override
{
return Pkg.Name() == name;
}
};
struct APT_HIDDEN PackageIsInstalled : public PackageMatcher
{
pkgCacheFile *Cache;
explicit PackageIsInstalled(pkgCacheFile *Cache) : Cache(Cache) {}
bool operator()(pkgCache::PkgIterator const &Pkg) override
{
assert(Cache != nullptr);
return Pkg->CurrentVer != 0;
}
};
struct APT_HIDDEN PackageIsObsolete : public PackageMatcher
{
bool operator()(pkgCache::PkgIterator const &pkg) override
{
// This code can be written without loops, as aptitude does, but it
// is far less readable.
if (pkg.CurrentVer().end())
return false;
// See if there is any version that exists in a repository,
// if so return false
for (auto ver = pkg.VersionList(); !ver.end(); ver++)
{
for (auto file = ver.FileList(); !file.end(); file++)
{
if ((file.File()->Flags & pkgCache::Flag::NotSource) == 0)
return false;
}
}
return true;
}
};
struct APT_HIDDEN PackageIsUpgradable : public PackageMatcher
{
pkgCacheFile *Cache;
explicit PackageIsUpgradable(pkgCacheFile *Cache) : Cache(Cache) {}
bool operator()(pkgCache::PkgIterator const &Pkg) override
{
assert(Cache != nullptr);
return Pkg->CurrentVer != 0 && (*Cache)[Pkg].Upgradable();
}
};
struct APT_HIDDEN PackageIsVirtual : public PackageMatcher
{
bool operator()(pkgCache::PkgIterator const &Pkg) override
{
return Pkg->VersionList == 0;
}
};
struct APT_HIDDEN VersionAnyMatcher : public Matcher
{
bool operator()(pkgCache::GrpIterator const &) override { return false; }
bool operator()(pkgCache::VerIterator const &Ver) override = 0;
bool operator()(pkgCache::PkgIterator const &Pkg) override
{
for (auto Ver = Pkg.VersionList(); not Ver.end(); Ver++)
{
if ((*this)(Ver))
return true;
}
return false;
}
};
struct APT_HIDDEN VersionIsAllVersions : public Matcher
{
std::unique_ptr<APT::CacheFilter::Matcher> base;
VersionIsAllVersions(std::unique_ptr<APT::CacheFilter::Matcher> base) : base(std::move(base)) {}
bool operator()(pkgCache::GrpIterator const &) override { return false; }
bool operator()(pkgCache::VerIterator const &Ver) override
{
return (*base)(Ver);
}
bool operator()(pkgCache::PkgIterator const &Pkg) override
{
for (auto Ver = Pkg.VersionList(); not Ver.end(); Ver++)
{
if (not(*this)(Ver))
return false;
}
return true;
}
};
struct APT_HIDDEN VersionIsAnyVersion : public VersionAnyMatcher
{
std::unique_ptr<APT::CacheFilter::Matcher> base;
VersionIsAnyVersion(std::unique_ptr<APT::CacheFilter::Matcher> base) : base(std::move(base)) {}
bool operator()(pkgCache::VerIterator const &Ver) override
{
return (*base)(Ver);
}
};
struct APT_HIDDEN VersionIsArchive : public VersionAnyMatcher
{
BaseRegexMatcher matcher;
VersionIsArchive(std::string const &pattern) : matcher(pattern) {}
bool operator()(pkgCache::VerIterator const &Ver) override
{
for (auto VF = Ver.FileList(); not VF.end(); VF++)
{
if (VF.File().Archive() && matcher(VF.File().Archive()))
return true;
}
return false;
}
};
struct APT_HIDDEN VersionIsOrigin : public VersionAnyMatcher
{
BaseRegexMatcher matcher;
VersionIsOrigin(std::string const &pattern) : matcher(pattern) {}
bool operator()(pkgCache::VerIterator const &Ver) override
{
for (auto VF = Ver.FileList(); not VF.end(); VF++)
{
if (VF.File().Origin() && matcher(VF.File().Origin()))
return true;
}
return false;
}
};
struct APT_HIDDEN VersionIsSection : public VersionAnyMatcher
{
BaseRegexMatcher matcher;
VersionIsSection(std::string const &pattern) : matcher(pattern) {}
bool operator()(pkgCache::VerIterator const &Ver) override
{
return matcher(Ver.Section());
}
};
struct APT_HIDDEN VersionIsSourcePackage : public VersionAnyMatcher
{
BaseRegexMatcher matcher;
VersionIsSourcePackage(std::string const &pattern) : matcher(pattern) {}
bool operator()(pkgCache::VerIterator const &Ver) override
{
return matcher(Ver.SourcePkgName());
}
};
struct APT_HIDDEN VersionIsSourceVersion : public VersionAnyMatcher
{
BaseRegexMatcher matcher;
VersionIsSourceVersion(std::string const &pattern) : matcher(pattern) {}
bool operator()(pkgCache::VerIterator const &Ver) override
{
return matcher(Ver.SourceVerStr());
}
};
struct APT_HIDDEN VersionIsVersion : public VersionAnyMatcher
{
BaseRegexMatcher matcher;
VersionIsVersion(std::string const &pattern) : matcher(pattern) {}
bool operator()(pkgCache::VerIterator const &Ver) override
{
return matcher(Ver.VerStr());
}
};
} // namespace Patterns
} // namespace Internal
} // namespace APT
#endif
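// Illustrative sketch (editorial note, not part of upstream): inside apt these
// internal functors can also be composed directly in C++, bypassing the
// pattern language. Hypothetical helper; assumes a fully opened pkgCacheFile
// whose dep cache has already done its garbage marking (e.g. via
// pkgDepCache::MarkAndSweep), plus <iostream> for the output.
//
//   static void ShowRemovableGarbage(pkgCacheFile &Cache)
//   {
//      APT::Internal::Patterns::PackageIsGarbage garbage(&Cache);
//      APT::Internal::Patterns::PackageIsEssential essential;
//      for (auto Pkg = Cache->PkgBegin(); not Pkg.end(); ++Pkg)
//         if (garbage(Pkg) && not essential(Pkg))
//            std::cout << Pkg.FullName(true) << '\n';
//   }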

264
apt-pkg/cachefilter.cc Normal file
View File

@ -0,0 +1,264 @@
// -*- mode: cpp; mode: fold -*-
// Description /*{{{*/
/** \file cachefilter.cc
Collection of functor classes */
/*}}}*/
// Include Files /*{{{*/
#include <config.h>
#include <apt-pkg/cachefile.h>
#include <apt-pkg/cachefilter.h>
#include <apt-pkg/error.h>
#include <apt-pkg/macros.h>
#include <apt-pkg/pkgcache.h>
#include <apt-pkg/strutl.h>
#include <algorithm>
#include <string>
#include <unordered_map>
#include <fnmatch.h>
#include <regex.h>
#include <string.h>
#include <apti18n.h>
/*}}}*/
namespace APT {
APT_HIDDEN std::unordered_map<std::string, std::vector<std::string>> ArchToTupleMap;
namespace CacheFilter {
Matcher::~Matcher() {}
PackageMatcher::~PackageMatcher() {}
// Name matches RegEx /*{{{*/
PackageNameMatchesRegEx::PackageNameMatchesRegEx(std::string const &Pattern) {
pattern = new regex_t;
int const Res = regcomp(pattern, Pattern.c_str(), REG_EXTENDED | REG_ICASE | REG_NOSUB);
if (Res == 0)
return;
char Error[300];
regerror(Res, pattern, Error, sizeof(Error));
delete pattern;
pattern = NULL;
_error->Error(_("Regex compilation error - %s"), Error);
}
bool PackageNameMatchesRegEx::operator() (pkgCache::PkgIterator const &Pkg) {
if (unlikely(pattern == NULL))
return false;
else
return regexec(pattern, Pkg.Name(), 0, 0, 0) == 0;
}
bool PackageNameMatchesRegEx::operator() (pkgCache::GrpIterator const &Grp) {
if (unlikely(pattern == NULL))
return false;
else
return regexec(pattern, Grp.Name(), 0, 0, 0) == 0;
}
PackageNameMatchesRegEx::~PackageNameMatchesRegEx() {
if (pattern == NULL)
return;
regfree(pattern);
delete pattern;
}
/*}}}*/
// Name matches Fnmatch /*{{{*/
PackageNameMatchesFnmatch::PackageNameMatchesFnmatch(std::string const &Pattern) :
Pattern(Pattern) {}
bool PackageNameMatchesFnmatch::operator() (pkgCache::PkgIterator const &Pkg) {
return fnmatch(Pattern.c_str(), Pkg.Name(), FNM_CASEFOLD) == 0;
}
bool PackageNameMatchesFnmatch::operator() (pkgCache::GrpIterator const &Grp) {
return fnmatch(Pattern.c_str(), Grp.Name(), FNM_CASEFOLD) == 0;
}
/*}}}*/
// Architecture matches <abi>-<libc>-<kernel>-<cpu> specification /*{{{*/
//----------------------------------------------------------------------
static std::vector<std::string> ArchToTuple(std::string arch) {
// Strip leading linux- from arch if present
// dpkg says this may disappear in the future
if (APT::String::Startswith(arch, std::string("linux-")))
arch = arch.substr(6);
auto it = ArchToTupleMap.find(arch);
if (it != ArchToTupleMap.end())
{
std::vector<std::string> result = it->second;
// Hack in support for triplets
if (result.size() == 3)
result.emplace(result.begin(), "base");
return result;
} else
{
return {};
}
}
static std::vector<std::string> PatternToTuple(std::string const &arch) {
std::vector<std::string> tuple = VectorizeString(arch, '-');
if (std::find(tuple.begin(), tuple.end(), std::string("any")) != tuple.end() ||
std::find(arch.begin(), arch.end(), '*') != arch.end()) {
while (tuple.size() < 4) {
tuple.emplace(tuple.begin(), "any");
}
return tuple;
} else
return ArchToTuple(arch);
}
/* The complete architecture, consisting of <abi>-<libc>-<kernel>-<cpu>. */
static std::string CompleteArch(std::string const &arch, bool const isPattern) {
auto tuple = isPattern ? PatternToTuple(arch) : ArchToTuple(arch);
// Bah, the commandline will try and pass us stuff like amd64- -- we need
// that not to match an architecture, but the code below would turn it into
// a valid tuple. Let's just use an invalid tuple here.
if (APT::String::Endswith(arch, "-") || APT::String::Startswith(arch, "-"))
return "invalid-invalid-invalid-invalid";
if (tuple.empty()) {
// Fallback for unknown architectures
// Patterns never fail if they contain wildcards, so by this point, arch
// has no wildcards.
tuple = VectorizeString(arch, '-');
switch (tuple.size()) {
case 1:
tuple.emplace(tuple.begin(), "linux");
/* fall through */
case 2:
tuple.emplace(tuple.begin(), "gnu");
/* fall through */
case 3:
tuple.emplace(tuple.begin(), "base");
/* fall through */
break;
}
}
std::replace(tuple.begin(), tuple.end(), std::string("any"), std::string("*"));
return APT::String::Join(tuple, "-");
}
PackageArchitectureMatchesSpecification::PackageArchitectureMatchesSpecification(std::string const &pattern, bool const pisPattern) :
literal(pattern), complete(CompleteArch(pattern, pisPattern)), isPattern(pisPattern) {
}
bool PackageArchitectureMatchesSpecification::operator() (char const * const &arch) {
if (strcmp(literal.c_str(), arch) == 0 ||
strcmp(complete.c_str(), arch) == 0)
return true;
std::string const pkgarch = CompleteArch(arch, !isPattern);
if (isPattern == true)
return fnmatch(complete.c_str(), pkgarch.c_str(), 0) == 0;
return fnmatch(pkgarch.c_str(), complete.c_str(), 0) == 0;
}
bool PackageArchitectureMatchesSpecification::operator() (pkgCache::PkgIterator const &Pkg) {
return (*this)(Pkg.Arch());
}
PackageArchitectureMatchesSpecification::~PackageArchitectureMatchesSpecification() {
}
/*}}}*/
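// Illustrative usage sketch (editorial note, not part of upstream): a wildcard
// specification is expanded to the full tuple before matching, so "linux-any"
// becomes the pattern "*-*-linux-*" and therefore matches a concrete
// architecture such as "amd64" (tuple "base-gnu-linux-amd64"), while a
// different kernel does not match.
//
//   APT::CacheFilter::PackageArchitectureMatchesSpecification spec("linux-any");
//   bool const hit = spec("amd64");            // true
//   bool const miss = spec("kfreebsd-amd64");  // false, kernel differs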
// Package is new install /*{{{*/
PackageIsNewInstall::PackageIsNewInstall(pkgCacheFile * const Cache) : Cache(Cache) {}
APT_PURE bool PackageIsNewInstall::operator() (pkgCache::PkgIterator const &Pkg) {
return (*Cache)[Pkg].NewInstall();
}
PackageIsNewInstall::~PackageIsNewInstall() {}
/*}}}*/
// Generics like True, False, NOT, AND, OR /*{{{*/
APT_PURE bool TrueMatcher::operator() (pkgCache::PkgIterator const &) { return true; }
APT_PURE bool TrueMatcher::operator() (pkgCache::GrpIterator const &) { return true; }
APT_PURE bool TrueMatcher::operator() (pkgCache::VerIterator const &) { return true; }
APT_PURE bool FalseMatcher::operator() (pkgCache::PkgIterator const &) { return false; }
APT_PURE bool FalseMatcher::operator() (pkgCache::GrpIterator const &) { return false; }
APT_PURE bool FalseMatcher::operator() (pkgCache::VerIterator const &) { return false; }
NOTMatcher::NOTMatcher(Matcher * const matcher) : matcher(matcher) {}
bool NOTMatcher::operator() (pkgCache::PkgIterator const &Pkg) { return ! (*matcher)(Pkg); }
bool NOTMatcher::operator() (pkgCache::GrpIterator const &Grp) { return ! (*matcher)(Grp); }
bool NOTMatcher::operator() (pkgCache::VerIterator const &Ver) { return ! (*matcher)(Ver); }
NOTMatcher::~NOTMatcher() { delete matcher; }
ANDMatcher::ANDMatcher() {}
ANDMatcher::ANDMatcher(Matcher * const matcher1) {
AND(matcher1);
}
ANDMatcher::ANDMatcher(Matcher * const matcher1, Matcher * const matcher2) {
AND(matcher1).AND(matcher2);
}
ANDMatcher::ANDMatcher(Matcher * const matcher1, Matcher * const matcher2, Matcher * const matcher3) {
AND(matcher1).AND(matcher2).AND(matcher3);
}
ANDMatcher::ANDMatcher(Matcher * const matcher1, Matcher * const matcher2, Matcher * const matcher3, Matcher * const matcher4) {
AND(matcher1).AND(matcher2).AND(matcher3).AND(matcher4);
}
ANDMatcher::ANDMatcher(Matcher * const matcher1, Matcher * const matcher2, Matcher * const matcher3, Matcher * const matcher4, Matcher * const matcher5) {
AND(matcher1).AND(matcher2).AND(matcher3).AND(matcher4).AND(matcher5);
}
ANDMatcher& ANDMatcher::AND(Matcher * const matcher) { matchers.push_back(matcher); return *this; }
bool ANDMatcher::operator() (pkgCache::PkgIterator const &Pkg) {
for (std::vector<Matcher *>::const_iterator M = matchers.begin(); M != matchers.end(); ++M)
if ((**M)(Pkg) == false)
return false;
return true;
}
bool ANDMatcher::operator() (pkgCache::GrpIterator const &Grp) {
for (std::vector<Matcher *>::const_iterator M = matchers.begin(); M != matchers.end(); ++M)
if ((**M)(Grp) == false)
return false;
return true;
}
bool ANDMatcher::operator() (pkgCache::VerIterator const &Ver) {
for (std::vector<Matcher *>::const_iterator M = matchers.begin(); M != matchers.end(); ++M)
if ((**M)(Ver) == false)
return false;
return true;
}
ANDMatcher::~ANDMatcher() {
for (std::vector<Matcher *>::iterator M = matchers.begin(); M != matchers.end(); ++M)
delete *M;
}
ORMatcher::ORMatcher() {}
ORMatcher::ORMatcher(Matcher * const matcher1) {
OR(matcher1);
}
ORMatcher::ORMatcher(Matcher * const matcher1, Matcher * const matcher2) {
OR(matcher1).OR(matcher2);
}
ORMatcher::ORMatcher(Matcher * const matcher1, Matcher * const matcher2, Matcher * const matcher3) {
OR(matcher1).OR(matcher2).OR(matcher3);
}
ORMatcher::ORMatcher(Matcher * const matcher1, Matcher * const matcher2, Matcher * const matcher3, Matcher * const matcher4) {
OR(matcher1).OR(matcher2).OR(matcher3).OR(matcher4);
}
ORMatcher::ORMatcher(Matcher * const matcher1, Matcher * const matcher2, Matcher * const matcher3, Matcher * const matcher4, Matcher * const matcher5) {
OR(matcher1).OR(matcher2).OR(matcher3).OR(matcher4).OR(matcher5);
}
ORMatcher& ORMatcher::OR(Matcher * const matcher) { matchers.push_back(matcher); return *this; }
bool ORMatcher::operator() (pkgCache::PkgIterator const &Pkg) {
for (std::vector<Matcher *>::const_iterator M = matchers.begin(); M != matchers.end(); ++M)
if ((**M)(Pkg) == true)
return true;
return false;
}
bool ORMatcher::operator() (pkgCache::GrpIterator const &Grp) {
for (std::vector<Matcher *>::const_iterator M = matchers.begin(); M != matchers.end(); ++M)
if ((**M)(Grp) == true)
return true;
return false;
}
bool ORMatcher::operator() (pkgCache::VerIterator const &Ver) {
for (std::vector<Matcher *>::const_iterator M = matchers.begin(); M != matchers.end(); ++M)
if ((**M)(Ver) == true)
return true;
return false;
}
ORMatcher::~ORMatcher() {
for (std::vector<Matcher *>::iterator M = matchers.begin(); M != matchers.end(); ++M)
delete *M;
}
/*}}}*/
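// Illustrative sketch (editorial note, not part of upstream): the combinators
// take ownership of the raw Matcher pointers handed to them and delete them in
// their destructors, so a composed filter can be stack-allocated and applied
// directly. Hypothetical snippet; assumes an opened pkgCacheFile named Cache
// and <iostream>.
//
//   APT::CacheFilter::ANDMatcher filter(
//      new APT::CacheFilter::PackageNameMatchesFnmatch("lib*"),
//      new APT::CacheFilter::NOTMatcher(
//         new APT::CacheFilter::PackageNameMatchesFnmatch("*-dev")));
//   for (auto Pkg = Cache->PkgBegin(); not Pkg.end(); ++Pkg)
//      if (filter(Pkg))
//         std::cout << Pkg.FullName(true) << '\n';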
}
}

154
apt-pkg/cachefilter.h Normal file
View File

@ -0,0 +1,154 @@
// -*- mode: cpp; mode: fold -*-
// Description /*{{{*/
/** \file cachefilter.h
Collection of functor classes */
/*}}}*/
#ifndef APT_CACHEFILTER_H
#define APT_CACHEFILTER_H
// Include Files /*{{{*/
#include <apt-pkg/pkgcache.h>
#include <apt-pkg/string_view.h>
#include <memory>
#include <string>
#include <vector>
#include <regex.h>
class pkgCacheFile;
/*}}}*/
namespace APT {
namespace CacheFilter {
class APT_PUBLIC Matcher {
public:
virtual bool operator() (pkgCache::PkgIterator const &/*Pkg*/) = 0;
virtual bool operator() (pkgCache::GrpIterator const &/*Grp*/) = 0;
virtual bool operator() (pkgCache::VerIterator const &/*Ver*/) = 0;
virtual ~Matcher();
};
class APT_PUBLIC PackageMatcher : public Matcher {
public:
virtual bool operator() (pkgCache::PkgIterator const &Pkg) APT_OVERRIDE = 0;
virtual bool operator() (pkgCache::VerIterator const &Ver) APT_OVERRIDE { return (*this)(Ver.ParentPkg()); }
virtual bool operator() (pkgCache::GrpIterator const &/*Grp*/) APT_OVERRIDE { return false; }
virtual ~PackageMatcher();
};
// Generics like True, False, NOT, AND, OR /*{{{*/
class APT_PUBLIC TrueMatcher : public Matcher {
public:
virtual bool operator() (pkgCache::PkgIterator const &Pkg) APT_OVERRIDE;
virtual bool operator() (pkgCache::GrpIterator const &Grp) APT_OVERRIDE;
virtual bool operator() (pkgCache::VerIterator const &Ver) APT_OVERRIDE;
};
class APT_PUBLIC FalseMatcher : public Matcher {
public:
virtual bool operator() (pkgCache::PkgIterator const &Pkg) APT_OVERRIDE;
virtual bool operator() (pkgCache::GrpIterator const &Grp) APT_OVERRIDE;
virtual bool operator() (pkgCache::VerIterator const &Ver) APT_OVERRIDE;
};
class APT_PUBLIC NOTMatcher : public Matcher {
Matcher * const matcher;
public:
explicit NOTMatcher(Matcher * const matcher);
virtual bool operator() (pkgCache::PkgIterator const &Pkg) APT_OVERRIDE;
virtual bool operator() (pkgCache::GrpIterator const &Grp) APT_OVERRIDE;
virtual bool operator() (pkgCache::VerIterator const &Ver) APT_OVERRIDE;
virtual ~NOTMatcher();
};
class APT_PUBLIC ANDMatcher : public Matcher {
std::vector<Matcher *> matchers;
public:
// 5 ought to be enough for everybody… c++11 variadic templates would be nice
ANDMatcher();
explicit ANDMatcher(Matcher * const matcher1);
ANDMatcher(Matcher * const matcher1, Matcher * const matcher2);
ANDMatcher(Matcher * const matcher1, Matcher * const matcher2, Matcher * const matcher3);
ANDMatcher(Matcher * const matcher1, Matcher * const matcher2, Matcher * const matcher3, Matcher * const matcher4);
ANDMatcher(Matcher * const matcher1, Matcher * const matcher2, Matcher * const matcher3, Matcher * const matcher4, Matcher * const matcher5);
ANDMatcher& AND(Matcher * const matcher);
virtual bool operator() (pkgCache::PkgIterator const &Pkg) APT_OVERRIDE;
virtual bool operator() (pkgCache::GrpIterator const &Grp) APT_OVERRIDE;
virtual bool operator() (pkgCache::VerIterator const &Ver) APT_OVERRIDE;
virtual ~ANDMatcher();
};
class APT_PUBLIC ORMatcher : public Matcher {
std::vector<Matcher *> matchers;
public:
// 5 ought to be enough for everybody… c++11 variadic templates would be nice
ORMatcher();
explicit ORMatcher(Matcher * const matcher1);
ORMatcher(Matcher * const matcher1, Matcher * const matcher2);
ORMatcher(Matcher * const matcher1, Matcher * const matcher2, Matcher * const matcher3);
ORMatcher(Matcher * const matcher1, Matcher * const matcher2, Matcher * const matcher3, Matcher * const matcher4);
ORMatcher(Matcher * const matcher1, Matcher * const matcher2, Matcher * const matcher3, Matcher * const matcher4, Matcher * const matcher5);
ORMatcher& OR(Matcher * const matcher);
virtual bool operator() (pkgCache::PkgIterator const &Pkg) APT_OVERRIDE;
virtual bool operator() (pkgCache::GrpIterator const &Grp) APT_OVERRIDE;
virtual bool operator() (pkgCache::VerIterator const &Ver) APT_OVERRIDE;
virtual ~ORMatcher();
};
/*}}}*/
class APT_PUBLIC PackageNameMatchesRegEx : public PackageMatcher { /*{{{*/
regex_t* pattern;
public:
explicit PackageNameMatchesRegEx(std::string const &Pattern);
virtual bool operator() (pkgCache::PkgIterator const &Pkg) APT_OVERRIDE;
virtual bool operator() (pkgCache::GrpIterator const &Grp) APT_OVERRIDE;
virtual ~PackageNameMatchesRegEx();
};
/*}}}*/
class APT_PUBLIC PackageNameMatchesFnmatch : public PackageMatcher { /*{{{*/
const std::string Pattern;
public:
explicit PackageNameMatchesFnmatch(std::string const &Pattern);
virtual bool operator() (pkgCache::PkgIterator const &Pkg) APT_OVERRIDE;
virtual bool operator() (pkgCache::GrpIterator const &Grp) APT_OVERRIDE;
virtual ~PackageNameMatchesFnmatch() {};
};
/*}}}*/
class APT_PUBLIC PackageArchitectureMatchesSpecification : public PackageMatcher { /*{{{*/
/** \class PackageArchitectureMatchesSpecification
\brief matching against architecture specification strings
The strings are of the format <libc>-<kernel>-<cpu> where each component,
or the whole string, can be the wildcard "any" as defined in
debian-policy §11.1 "Architecture specification strings".
Examples: i386, mipsel, musl-linux-amd64, linux-any, any-amd64, any */
std::string literal;
std::string complete;
bool isPattern;
public:
/** \brief matching against architecture specification strings
*
* @param pattern is the architecture specification string
* @param isPattern defines whether the given \b pattern is an
* architecture specification pattern to match others against
* or a fixed string that is matched against such patterns
*/
PackageArchitectureMatchesSpecification(std::string const &pattern, bool const isPattern = true);
bool operator() (char const * const &arch);
virtual bool operator() (pkgCache::PkgIterator const &Pkg) APT_OVERRIDE;
virtual ~PackageArchitectureMatchesSpecification();
};
/*}}}*/
class APT_PUBLIC PackageIsNewInstall : public PackageMatcher { /*{{{*/
pkgCacheFile * const Cache;
public:
explicit PackageIsNewInstall(pkgCacheFile * const Cache);
virtual bool operator() (pkgCache::PkgIterator const &Pkg) APT_OVERRIDE;
virtual ~PackageIsNewInstall();
};
/*}}}*/
/// \brief Parse a pattern, return nullptr or pattern
APT_PUBLIC std::unique_ptr<APT::CacheFilter::Matcher> ParsePattern(APT::StringView pattern, pkgCacheFile *file);
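/// Illustrative sketch (editorial note, not part of upstream): a caller
/// typically hands a user-supplied pattern such as
/// "?and(?installed,?not(?automatic))" to ParsePattern() and, on success,
/// applies the matcher to every package; a nullptr return signals a parse
/// failure reported through apt's usual _error stack. Hypothetical snippet;
/// assumes a fully opened pkgCacheFile named CacheFile and <iostream>.
///
///   auto m = APT::CacheFilter::ParsePattern("?and(?installed,?not(?automatic))", &CacheFile);
///   if (m != nullptr)
///      for (auto Pkg = CacheFile->PkgBegin(); not Pkg.end(); ++Pkg)
///         if ((*m)(Pkg))
///            std::cout << Pkg.FullName(true) << '\n';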
}
}
#endif

529
apt-pkg/cacheiterators.h Normal file
View File

@ -0,0 +1,529 @@
// -*- mode: cpp; mode: fold -*-
// Description /*{{{*/
/* ######################################################################
Cache Iterators - Iterators for navigating the cache structure
The iterators all provide ++, ==, !=, ->, * and end for their type.
The end function can be used to tell if the list has been fully
traversed.
Unlike STL iterators these contain helper functions to access the data
that is being iterated over. This is because the data structures can't
be formed in a manner that is intuitive to use and also mmapable.
For each member of the target structure that needs a translation to be
accessed correctly, a translating function of the same name is present
in the iterator. If applicable the translating function will return an
iterator.
The DepIterator can iterate over two lists, a list of 'version depends'
or a list of 'package reverse depends'. The type is determined by the
structure passed to the constructor, which should be the structure
that has the depends pointer as a member. The provide iterator has the
same system.
This header is not user includable, please use apt-pkg/pkgcache.h
##################################################################### */
/*}}}*/
#ifndef PKGLIB_CACHEITERATORS_H
#define PKGLIB_CACHEITERATORS_H
#ifndef __PKGLIB_IN_PKGCACHE_H
#warning apt-pkg/cacheiterators.h should not be included directly, include apt-pkg/pkgcache.h instead
#endif
#include <apt-pkg/macros.h>
#include <iosfwd>
#include <iterator>
#include <string>
#include <apt-pkg/string_view.h>
#include <string.h>
// abstract Iterator template /*{{{*/
/* This template provides the very basic iterator methods we
need to have for doing some walk-over-the-cache magic */
template<typename Str, typename Itr> class APT_PUBLIC pkgCache::Iterator :
public std::iterator<std::forward_iterator_tag, Str> {
/** \brief Returns the Pointer for this struct in the owner
* The implementation of this method should be pretty short, as it only
* returns the pointer into the mmap stored in the owner; but the name of
* this pointer is different for each structure, so we want to abstract
* at least the basic methods away from the actual structure here.
* (See the derived iterators for the concrete pointers.)
* \return Pointer to the first structure of this type
*/
Str* OwnerPointer() const { return static_cast<Itr const*>(this)->OwnerPointer(); }
protected:
Str *S;
pkgCache *Owner;
public:
// Iteration
inline bool end() const {return Owner == 0 || S == OwnerPointer();}
// Comparison
inline bool operator ==(const Itr &B) const {return S == B.S;}
inline bool operator !=(const Itr &B) const {return S != B.S;}
// Accessors
inline Str *operator ->() {return S;}
inline Str const *operator ->() const {return S;}
inline operator Str *() {return S == OwnerPointer() ? 0 : S;}
inline operator Str const *() const {return S == OwnerPointer() ? 0 : S;}
inline Str &operator *() {return *S;}
inline Str const &operator *() const {return *S;}
inline pkgCache *Cache() const {return Owner;}
// Mixed stuff
inline bool IsGood() const { return S && Owner && ! end();}
inline unsigned long Index() const {return S - OwnerPointer();}
inline map_pointer<Str> MapPointer() const {return map_pointer<Str>(Index()) ;}
void ReMap(void const * const oldMap, void const * const newMap) {
if (Owner == 0 || S == 0)
return;
S += static_cast<Str const *>(newMap) - static_cast<Str const *>(oldMap);
}
// Constructors - look out for the variable assigning
inline Iterator() : S(0), Owner(0) {}
inline Iterator(pkgCache &Owner,Str *T = 0) : S(T), Owner(&Owner) {}
};
/*}}}*/
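// Illustrative sketch (editorial note, not part of upstream): the usual
// traversal idiom for these iterators; end() marks the past-the-end element
// instead of comparing against a container end() as the STL would.
//
//   // given an already built cache, e.g. pkgCache &Cache = *CacheFile.GetPkgCache();
//   for (pkgCache::PkgIterator P = Cache.PkgBegin(); not P.end(); ++P)
//      for (pkgCache::VerIterator V = P.VersionList(); not V.end(); ++V)
//         std::cout << P.FullName(true) << ' ' << V.VerStr() << '\n';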
// Group Iterator /*{{{*/
/* Packages with the same name are collected in a Group so someone only
interested in package names can iterate easily over the names, and the
different architectures can be treated as the "same" package
(apt internally treats them as totally different packages) */
class APT_PUBLIC pkgCache::GrpIterator: public Iterator<Group, GrpIterator> {
long HashIndex;
public:
inline Group* OwnerPointer() const {
return (Owner != 0) ? Owner->GrpP : 0;
}
// This constructor is the 'begin' constructor, never use it.
explicit inline GrpIterator(pkgCache &Owner) : Iterator<Group, GrpIterator>(Owner), HashIndex(-1) {
S = OwnerPointer();
operator++();
}
GrpIterator& operator++();
inline GrpIterator operator++(int) { GrpIterator const tmp(*this); operator++(); return tmp; }
inline const char *Name() const {return S->Name == 0?0:Owner->StrP + S->Name;}
inline PkgIterator PackageList() const;
inline VerIterator VersionsInSource() const;
PkgIterator FindPkg(APT::StringView Arch = APT::StringView("any", 3)) const;
/** \brief find the package with the "best" architecture
The best architecture is either the "native" or the first
in the list of Architectures which is not an end-Pointer
\param PreferNonVirtual tries to respond with a non-virtual package
and only if this fails returns the best virtual package */
PkgIterator FindPreferredPkg(bool const &PreferNonVirtual = true) const;
PkgIterator NextPkg(PkgIterator const &Pkg) const;
// Constructors
inline GrpIterator(pkgCache &Owner, Group *Trg) : Iterator<Group, GrpIterator>(Owner, Trg), HashIndex(0) {
if (S == 0)
S = OwnerPointer();
}
inline GrpIterator() : Iterator<Group, GrpIterator>(), HashIndex(0) {}
};
/*}}}*/
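// Illustrative sketch (editorial note, not part of upstream): resolving a
// plain name to a concrete package, either for an explicit architecture or
// via the "best architecture" fallback described above.
//
//   pkgCache::GrpIterator Grp = Cache.FindGrp("apt");
//   if (not Grp.end())
//   {
//      pkgCache::PkgIterator Pkg = Grp.FindPkg("amd64");  // a specific arch
//      if (Pkg.end())
//         Pkg = Grp.FindPreferredPkg();                   // native or best fallback
//   }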
// Package Iterator /*{{{*/
class APT_PUBLIC pkgCache::PkgIterator: public Iterator<Package, PkgIterator> {
long HashIndex;
public:
inline Package* OwnerPointer() const {
return (Owner != 0) ? Owner->PkgP : 0;
}
// This constructor is the 'begin' constructor, never use it.
explicit inline PkgIterator(pkgCache &Owner) : Iterator<Package, PkgIterator>(Owner), HashIndex(-1) {
S = OwnerPointer();
operator++();
}
PkgIterator& operator++();
inline PkgIterator operator++(int) { PkgIterator const tmp(*this); operator++(); return tmp; }
enum OkState {NeedsNothing,NeedsUnpack,NeedsConfigure};
// Accessors
inline const char *Name() const { return Group().Name(); }
inline bool Purge() const {return S->CurrentState == pkgCache::State::Purge ||
(S->CurrentVer == 0 && S->CurrentState == pkgCache::State::NotInstalled);}
inline const char *Arch() const {return S->Arch == 0?0:Owner->StrP + S->Arch;}
inline APT_PURE GrpIterator Group() const { return GrpIterator(*Owner, Owner->GrpP + S->Group);}
inline VerIterator VersionList() const APT_PURE;
inline VerIterator CurrentVer() const APT_PURE;
inline DepIterator RevDependsList() const APT_PURE;
inline PrvIterator ProvidesList() const APT_PURE;
OkState State() const APT_PURE;
const char *CurVersion() const APT_PURE;
//Nice printable representation
APT_DEPRECATED_MSG("Use APT::PrettyPkg instead") friend std::ostream& operator <<(std::ostream& out, PkgIterator i);
std::string FullName(bool const &Pretty = false) const;
// Constructors
inline PkgIterator(pkgCache &Owner,Package *Trg) : Iterator<Package, PkgIterator>(Owner, Trg), HashIndex(0) {
if (S == 0)
S = OwnerPointer();
}
inline PkgIterator() : Iterator<Package, PkgIterator>(), HashIndex(0) {}
};
/*}}}*/
// Version Iterator /*{{{*/
class APT_PUBLIC pkgCache::VerIterator : public Iterator<Version, VerIterator> {
public:
inline Version* OwnerPointer() const {
return (Owner != 0) ? Owner->VerP : 0;
}
// Iteration
inline VerIterator& operator++() {if (S != Owner->VerP) S = Owner->VerP + S->NextVer; return *this;}
inline VerIterator operator++(int) { VerIterator const tmp(*this); operator++(); return tmp; }
inline VerIterator NextInSource()
{
if (S != Owner->VerP)
S = Owner->VerP + S->NextInSource;
return *this;
}
// Comparison
int CompareVer(const VerIterator &B) const;
/** \brief compares two versions and returns whether they are similar
This method should be used to identify if two pseudo versions are
referring to the same "real" version */
inline bool SimilarVer(const VerIterator &B) const {
return (B.end() == false && S->Hash == B->Hash && strcmp(VerStr(), B.VerStr()) == 0);
}
// Accessors
inline const char *VerStr() const {return S->VerStr == 0?0:Owner->StrP + S->VerStr;}
inline const char *Section() const {return S->Section == 0?0:Owner->StrP + S->Section;}
/** \brief source package name this version comes from
Always contains the name, even if it is the same as the binary name */
inline const char *SourcePkgName() const {return Owner->StrP + S->SourcePkgName;}
/** \brief source version this version comes from
Always contains the version string, even if it is the same as the binary version */
inline const char *SourceVerStr() const {return Owner->StrP + S->SourceVerStr;}
inline const char *Arch() const {
if ((S->MultiArch & pkgCache::Version::All) == pkgCache::Version::All)
return "all";
return S->ParentPkg == 0?0:Owner->StrP + ParentPkg()->Arch;
}
inline PkgIterator ParentPkg() const {return PkgIterator(*Owner,Owner->PkgP + S->ParentPkg);}
inline DescIterator DescriptionList() const;
DescIterator TranslatedDescriptionForLanguage(APT::StringView lang) const;
DescIterator TranslatedDescription() const;
inline DepIterator DependsList() const;
inline PrvIterator ProvidesList() const;
inline VerFileIterator FileList() const;
bool Downloadable() const;
inline const char *PriorityType() const {return Owner->Priority(S->Priority);}
const char *MultiArchType() const APT_PURE;
std::string RelStr() const;
bool Automatic() const;
VerFileIterator NewestFile() const;
inline VerIterator(pkgCache &Owner,Version *Trg = 0) : Iterator<Version, VerIterator>(Owner, Trg) {
if (S == 0)
S = OwnerPointer();
}
inline VerIterator() : Iterator<Version, VerIterator>() {}
};
/*}}}*/
// Description Iterator /*{{{*/
class APT_PUBLIC pkgCache::DescIterator : public Iterator<Description, DescIterator> {
public:
inline Description* OwnerPointer() const {
return (Owner != 0) ? Owner->DescP : 0;
}
// Iteration
inline DescIterator& operator++() {if (S != Owner->DescP) S = Owner->DescP + S->NextDesc; return *this;}
inline DescIterator operator++(int) { DescIterator const tmp(*this); operator++(); return tmp; }
// Comparison
int CompareDesc(const DescIterator &B) const;
// Accessors
inline const char *LanguageCode() const {return Owner->StrP + S->language_code;}
inline const char *md5() const {return Owner->StrP + S->md5sum;}
inline DescFileIterator FileList() const;
inline DescIterator() : Iterator<Description, DescIterator>() {}
inline DescIterator(pkgCache &Owner,Description *Trg = 0) : Iterator<Description, DescIterator>(Owner, Trg) {
if (S == 0)
S = Owner.DescP;
}
};
/*}}}*/
// Dependency iterator /*{{{*/
class APT_PUBLIC pkgCache::DepIterator : public Iterator<Dependency, DepIterator> {
enum {DepVer, DepRev} Type;
DependencyData * S2;
public:
inline Dependency* OwnerPointer() const {
return (Owner != 0) ? Owner->DepP : 0;
}
// Iteration
DepIterator& operator++();
inline DepIterator operator++(int) { DepIterator const tmp(*this); operator++(); return tmp; }
// Accessors
inline const char *TargetVer() const {return S2->Version == 0?0:Owner->StrP + S2->Version;}
inline PkgIterator TargetPkg() const {return PkgIterator(*Owner,Owner->PkgP + S2->Package);}
inline PkgIterator SmartTargetPkg() const {PkgIterator R(*Owner,0);SmartTargetPkg(R);return R;}
inline VerIterator ParentVer() const {return VerIterator(*Owner,Owner->VerP + S->ParentVer);}
inline PkgIterator ParentPkg() const {return PkgIterator(*Owner,Owner->PkgP + Owner->VerP[uint32_t(S->ParentVer)].ParentPkg);}
inline bool Reverse() const {return Type == DepRev;}
bool IsCritical() const APT_PURE;
bool IsNegative() const APT_PURE;
bool IsIgnorable(PrvIterator const &Prv) const APT_PURE;
bool IsIgnorable(PkgIterator const &Pkg) const APT_PURE;
/* MultiArch can be translated to SingleArch for a resolver, and we do so
by adding dependencies to help the resolver understand the problem, but
sometimes it is necessary to identify these in order to ignore them */
inline bool IsMultiArchImplicit() const APT_PURE {
return (S2->CompareOp & pkgCache::Dep::MultiArchImplicit) == pkgCache::Dep::MultiArchImplicit;
}
/* This additionally covers negative dependencies, which aren't arch-specific
but change architecture nonetheless, as a Conflicts: foo applies to all archs */
bool IsImplicit() const APT_PURE;
bool IsSatisfied(VerIterator const &Ver) const APT_PURE;
bool IsSatisfied(PrvIterator const &Prv) const APT_PURE;
void GlobOr(DepIterator &Start,DepIterator &End);
Version **AllTargets() const;
bool SmartTargetPkg(PkgIterator &Result) const;
inline const char *CompType() const {return Owner->CompType(S2->CompareOp);}
inline const char *DepType() const {return Owner->DepType(S2->Type);}
// overrides because we are special
struct DependencyProxy
{
map_stringitem_t &Version;
map_pointer<pkgCache::Package> &Package;
map_id_t &ID;
unsigned char &Type;
unsigned char &CompareOp;
map_pointer<pkgCache::Version> &ParentVer;
map_pointer<pkgCache::DependencyData> &DependencyData;
map_pointer<Dependency> &NextRevDepends;
map_pointer<Dependency> &NextDepends;
map_pointer<pkgCache::DependencyData> &NextData;
DependencyProxy const * operator->() const { return this; }
DependencyProxy * operator->() { return this; }
};
inline DependencyProxy operator->() const {return (DependencyProxy) { S2->Version, S2->Package, S->ID, S2->Type, S2->CompareOp, S->ParentVer, S->DependencyData, S->NextRevDepends, S->NextDepends, S2->NextData };}
inline DependencyProxy operator->() {return (DependencyProxy) { S2->Version, S2->Package, S->ID, S2->Type, S2->CompareOp, S->ParentVer, S->DependencyData, S->NextRevDepends, S->NextDepends, S2->NextData };}
void ReMap(void const * const oldMap, void const * const newMap)
{
Iterator<Dependency, DepIterator>::ReMap(oldMap, newMap);
if (Owner == 0 || S == 0 || S2 == 0)
return;
S2 += static_cast<DependencyData const *>(newMap) - static_cast<DependencyData const *>(oldMap);
}
//Nice printable representation
APT_DEPRECATED_MSG("Use APT::PrettyDep instead") friend std::ostream& operator <<(std::ostream& out, DepIterator D);
inline DepIterator(pkgCache &Owner, Dependency *Trg, Version* = 0) :
Iterator<Dependency, DepIterator>(Owner, Trg), Type(DepVer), S2(Trg == 0 ? Owner.DepDataP : (Owner.DepDataP + Trg->DependencyData)) {
if (S == 0)
S = Owner.DepP;
}
inline DepIterator(pkgCache &Owner, Dependency *Trg, Package*) :
Iterator<Dependency, DepIterator>(Owner, Trg), Type(DepRev), S2(Trg == 0 ? Owner.DepDataP : (Owner.DepDataP + Trg->DependencyData)) {
if (S == 0)
S = Owner.DepP;
}
inline DepIterator() : Iterator<Dependency, DepIterator>(), Type(DepVer), S2(0) {}
};
/*}}}*/
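// Illustrative sketch (editorial note, not part of upstream): walking the
// 'version depends' list of a version Ver and printing each dependency in a
// dpkg-like form; or-groups are not folded here, see GlobOr() for that.
//
//   for (pkgCache::DepIterator D = Ver.DependsList(); not D.end(); ++D)
//   {
//      std::cout << D.DepType() << ": " << D.TargetPkg().FullName(true);
//      if (D.TargetVer() != 0)
//         std::cout << " (" << D.CompType() << ' ' << D.TargetVer() << ')';
//      std::cout << '\n';
//   }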
// Provides iterator /*{{{*/
class APT_PUBLIC pkgCache::PrvIterator : public Iterator<Provides, PrvIterator> {
enum {PrvVer, PrvPkg} Type;
public:
inline Provides* OwnerPointer() const {
return (Owner != 0) ? Owner->ProvideP : 0;
}
// Iteration
inline PrvIterator& operator ++() {if (S != Owner->ProvideP) S = Owner->ProvideP +
(Type == PrvVer?S->NextPkgProv:S->NextProvides); return *this;}
inline PrvIterator operator++(int) { PrvIterator const tmp(*this); operator++(); return tmp; }
// Accessors
inline const char *Name() const {return ParentPkg().Name();}
inline const char *ProvideVersion() const {return S->ProvideVersion == 0?0:Owner->StrP + S->ProvideVersion;}
inline PkgIterator ParentPkg() const {return PkgIterator(*Owner,Owner->PkgP + S->ParentPkg);}
inline VerIterator OwnerVer() const {return VerIterator(*Owner,Owner->VerP + S->Version);}
inline PkgIterator OwnerPkg() const {return PkgIterator(*Owner,Owner->PkgP + Owner->VerP[uint32_t(S->Version)].ParentPkg);}
/* MultiArch can be translated to SingleArch for a resolver, and we do so
by adding provides to help the resolver understand the problem, but
sometimes it is necessary to identify these in order to ignore them */
bool IsMultiArchImplicit() const APT_PURE
{ return (S->Flags & pkgCache::Flag::MultiArchImplicit) == pkgCache::Flag::MultiArchImplicit; }
inline PrvIterator() : Iterator<Provides, PrvIterator>(), Type(PrvVer) {}
inline PrvIterator(pkgCache &Owner, Provides *Trg, Version*) :
Iterator<Provides, PrvIterator>(Owner, Trg), Type(PrvVer) {
if (S == 0)
S = Owner.ProvideP;
}
inline PrvIterator(pkgCache &Owner, Provides *Trg, Package*) :
Iterator<Provides, PrvIterator>(Owner, Trg), Type(PrvPkg) {
if (S == 0)
S = Owner.ProvideP;
}
};
/*}}}*/
// Release file /*{{{*/
class APT_PUBLIC pkgCache::RlsFileIterator : public Iterator<ReleaseFile, RlsFileIterator> {
public:
inline ReleaseFile* OwnerPointer() const {
return (Owner != 0) ? Owner->RlsFileP : 0;
}
// Iteration
inline RlsFileIterator& operator++() {if (S != Owner->RlsFileP) S = Owner->RlsFileP + S->NextFile;return *this;}
inline RlsFileIterator operator++(int) { RlsFileIterator const tmp(*this); operator++(); return tmp; }
// Accessors
inline const char *FileName() const {return S->FileName == 0?0:Owner->StrP + S->FileName;}
inline const char *Archive() const {return S->Archive == 0?0:Owner->StrP + S->Archive;}
inline const char *Version() const {return S->Version == 0?0:Owner->StrP + S->Version;}
inline const char *Origin() const {return S->Origin == 0?0:Owner->StrP + S->Origin;}
inline const char *Codename() const {return S->Codename ==0?0:Owner->StrP + S->Codename;}
inline const char *Label() const {return S->Label == 0?0:Owner->StrP + S->Label;}
inline const char *Site() const {return S->Site == 0?0:Owner->StrP + S->Site;}
inline bool Flagged(pkgCache::Flag::ReleaseFileFlags const flag) const {return (S->Flags & flag) == flag; }
std::string RelStr();
// Constructors
inline RlsFileIterator() : Iterator<ReleaseFile, RlsFileIterator>() {}
explicit inline RlsFileIterator(pkgCache &Owner) : Iterator<ReleaseFile, RlsFileIterator>(Owner, Owner.RlsFileP) {}
inline RlsFileIterator(pkgCache &Owner,ReleaseFile *Trg) : Iterator<ReleaseFile, RlsFileIterator>(Owner, Trg) {}
};
/*}}}*/
// Package file /*{{{*/
class APT_PUBLIC pkgCache::PkgFileIterator : public Iterator<PackageFile, PkgFileIterator> {
public:
inline PackageFile* OwnerPointer() const {
return (Owner != 0) ? Owner->PkgFileP : 0;
}
// Iteration
inline PkgFileIterator& operator++() {if (S != Owner->PkgFileP) S = Owner->PkgFileP + S->NextFile; return *this;}
inline PkgFileIterator operator++(int) { PkgFileIterator const tmp(*this); operator++(); return tmp; }
// Accessors
inline const char *FileName() const {return S->FileName == 0?0:Owner->StrP + S->FileName;}
inline pkgCache::RlsFileIterator ReleaseFile() const {return RlsFileIterator(*Owner, Owner->RlsFileP + S->Release);}
inline const char *Archive() const {return S->Release == 0 ? Component() : ReleaseFile().Archive();}
inline const char *Version() const {return S->Release == 0 ? NULL : ReleaseFile().Version();}
inline const char *Origin() const {return S->Release == 0 ? NULL : ReleaseFile().Origin();}
inline const char *Codename() const {return S->Release == 0 ? NULL : ReleaseFile().Codename();}
inline const char *Label() const {return S->Release == 0 ? NULL : ReleaseFile().Label();}
inline const char *Site() const {return S->Release == 0 ? NULL : ReleaseFile().Site();}
inline bool Flagged(pkgCache::Flag::ReleaseFileFlags const flag) const {return S->Release== 0 ? false : ReleaseFile().Flagged(flag);}
inline bool Flagged(pkgCache::Flag::PkgFFlags const flag) const {return (S->Flags & flag) == flag;}
inline const char *Component() const {return S->Component == 0?0:Owner->StrP + S->Component;}
inline const char *Architecture() const {return S->Architecture == 0?0:Owner->StrP + S->Architecture;}
inline const char *IndexType() const {return S->IndexType == 0?0:Owner->StrP + S->IndexType;}
std::string RelStr();
// Constructors
inline PkgFileIterator() : Iterator<PackageFile, PkgFileIterator>() {}
explicit inline PkgFileIterator(pkgCache &Owner) : Iterator<PackageFile, PkgFileIterator>(Owner, Owner.PkgFileP) {}
inline PkgFileIterator(pkgCache &Owner,PackageFile *Trg) : Iterator<PackageFile, PkgFileIterator>(Owner, Trg) {}
};
/*}}}*/
// Version File /*{{{*/
class APT_PUBLIC pkgCache::VerFileIterator : public pkgCache::Iterator<VerFile, VerFileIterator> {
public:
inline VerFile* OwnerPointer() const {
return (Owner != 0) ? Owner->VerFileP : 0;
}
// Iteration
inline VerFileIterator& operator++() {if (S != Owner->VerFileP) S = Owner->VerFileP + S->NextFile; return *this;}
inline VerFileIterator operator++(int) { VerFileIterator const tmp(*this); operator++(); return tmp; }
// Accessors
inline PkgFileIterator File() const {return PkgFileIterator(*Owner, Owner->PkgFileP + S->File);}
inline VerFileIterator() : Iterator<VerFile, VerFileIterator>() {}
inline VerFileIterator(pkgCache &Owner,VerFile *Trg) : Iterator<VerFile, VerFileIterator>(Owner, Trg) {}
};
/*}}}*/
// Description File /*{{{*/
class APT_PUBLIC pkgCache::DescFileIterator : public Iterator<DescFile, DescFileIterator> {
public:
inline DescFile* OwnerPointer() const {
return (Owner != 0) ? Owner->DescFileP : 0;
}
// Iteration
inline DescFileIterator& operator++() {if (S != Owner->DescFileP) S = Owner->DescFileP + S->NextFile; return *this;}
inline DescFileIterator operator++(int) { DescFileIterator const tmp(*this); operator++(); return tmp; }
// Accessors
inline PkgFileIterator File() const {return PkgFileIterator(*Owner, Owner->PkgFileP + S->File);}
inline DescFileIterator() : Iterator<DescFile, DescFileIterator>() {}
inline DescFileIterator(pkgCache &Owner,DescFile *Trg) : Iterator<DescFile, DescFileIterator>(Owner, Trg) {}
};
/*}}}*/
// Inlined Begin functions can't be in the class because of order problems /*{{{*/
inline pkgCache::PkgIterator pkgCache::GrpIterator::PackageList() const
{return PkgIterator(*Owner,Owner->PkgP + S->FirstPackage);}
inline pkgCache::VerIterator pkgCache::GrpIterator::VersionsInSource() const
{
return VerIterator(*Owner, Owner->VerP + S->VersionsInSource);
}
inline pkgCache::VerIterator pkgCache::PkgIterator::VersionList() const
{return VerIterator(*Owner,Owner->VerP + S->VersionList);}
inline pkgCache::VerIterator pkgCache::PkgIterator::CurrentVer() const
{return VerIterator(*Owner,Owner->VerP + S->CurrentVer);}
inline pkgCache::DepIterator pkgCache::PkgIterator::RevDependsList() const
{return DepIterator(*Owner,Owner->DepP + S->RevDepends,S);}
inline pkgCache::PrvIterator pkgCache::PkgIterator::ProvidesList() const
{return PrvIterator(*Owner,Owner->ProvideP + S->ProvidesList,S);}
inline pkgCache::DescIterator pkgCache::VerIterator::DescriptionList() const
{return DescIterator(*Owner,Owner->DescP + S->DescriptionList);}
inline pkgCache::PrvIterator pkgCache::VerIterator::ProvidesList() const
{return PrvIterator(*Owner,Owner->ProvideP + S->ProvidesList,S);}
inline pkgCache::DepIterator pkgCache::VerIterator::DependsList() const
{return DepIterator(*Owner,Owner->DepP + S->DependsList,S);}
inline pkgCache::VerFileIterator pkgCache::VerIterator::FileList() const
{return VerFileIterator(*Owner,Owner->VerFileP + S->FileList);}
inline pkgCache::DescFileIterator pkgCache::DescIterator::FileList() const
{return DescFileIterator(*Owner,Owner->DescFileP + S->FileList);}
/*}}}*/
#endif

940
apt-pkg/cacheset.cc Normal file
View File

@ -0,0 +1,940 @@
// -*- mode: cpp; mode: fold -*-
// Description /*{{{*/
/* ######################################################################
Simple wrapper around a std::set to provide a similar interface to
a set of cache structures as to the complete set of all structures
in the pkgCache. Package and Version selections are supported.
##################################################################### */
/*}}}*/
// Include Files /*{{{*/
#include <config.h>
#include <apt-pkg/aptconfiguration.h>
#include <apt-pkg/cachefile.h>
#include <apt-pkg/cachefilter.h>
#include <apt-pkg/cacheset.h>
#include <apt-pkg/configuration.h>
#include <apt-pkg/depcache.h>
#include <apt-pkg/error.h>
#include <apt-pkg/fileutl.h>
#include <apt-pkg/macros.h>
#include <apt-pkg/pkgcache.h>
#include <apt-pkg/pkgrecords.h>
#include <apt-pkg/policy.h>
#include <apt-pkg/versionmatch.h>
#include <list>
#include <string>
#include <vector>
#include <regex.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>
#include <apti18n.h>
/*}}}*/
namespace APT {
// PackageFrom - selecting the appropriate method for package selection /*{{{*/
bool CacheSetHelper::PackageFrom(enum PkgSelector const select, PackageContainerInterface * const pci,
pkgCacheFile &Cache, std::string const &pattern) {
switch (select) {
case UNKNOWN: return false;
case REGEX: return PackageFromRegEx(pci, Cache, pattern);
case TASK: return PackageFromTask(pci, Cache, pattern);
case FNMATCH: return PackageFromFnmatch(pci, Cache, pattern);
case PACKAGENAME: return PackageFromPackageName(pci, Cache, pattern);
case STRING: return PackageFromString(pci, Cache, pattern);
case PATTERN: return PackageFromPattern(pci, Cache, pattern);
}
return false;
}
/*}}}*/
// PackageFromTask - Return all packages in the cache from a specific task /*{{{*/
bool CacheSetHelper::PackageFromTask(PackageContainerInterface * const pci, pkgCacheFile &Cache, std::string pattern) {
size_t const archfound = pattern.find_last_of(':');
std::string arch = "native";
if (archfound != std::string::npos) {
arch = pattern.substr(archfound+1);
pattern.erase(archfound);
}
if (pattern[pattern.length() -1] != '^')
return false;
pattern.erase(pattern.length()-1);
if (unlikely(Cache.GetPkgCache() == 0 || Cache.GetDepCache() == 0))
return false;
bool const wasEmpty = pci->empty();
if (wasEmpty == true)
pci->setConstructor(CacheSetHelper::TASK);
// get the records
pkgRecords Recs(Cache);
// build regexp for the task
regex_t Pattern;
char S[300];
snprintf(S, sizeof(S), "^Task:.*[, ]%s([, ]|$)", pattern.c_str());
if(regcomp(&Pattern,S, REG_EXTENDED | REG_NOSUB | REG_NEWLINE) != 0) {
_error->Error("Failed to compile task regexp");
return false;
}
bool found = false;
for (pkgCache::GrpIterator Grp = Cache->GrpBegin(); Grp.end() == false; ++Grp) {
pkgCache::PkgIterator Pkg = Grp.FindPkg(arch);
if (Pkg.end() == true)
continue;
pkgCache::VerIterator ver = Cache[Pkg].CandidateVerIter(Cache);
if(ver.end() == true)
continue;
pkgRecords::Parser &parser = Recs.Lookup(ver.FileList());
const char *start, *end;
parser.GetRec(start,end);
unsigned int const length = end - start;
if (unlikely(length == 0))
continue;
char buf[length];
strncpy(buf, start, length);
buf[length-1] = '\0';
if (regexec(&Pattern, buf, 0, 0, 0) != 0)
continue;
pci->insert(Pkg);
showPackageSelection(Pkg, CacheSetHelper::TASK, pattern);
found = true;
}
regfree(&Pattern);
if (found == false) {
canNotFindPackage(CacheSetHelper::TASK, pci, Cache, pattern);
pci->setConstructor(CacheSetHelper::UNKNOWN);
return false;
}
if (wasEmpty == false && pci->getConstructor() != CacheSetHelper::UNKNOWN)
pci->setConstructor(CacheSetHelper::UNKNOWN);
return true;
}
/*}}}*/
// PackageFromRegEx - Return all packages in the cache matching a pattern /*{{{*/
bool CacheSetHelper::PackageFromRegEx(PackageContainerInterface * const pci, pkgCacheFile &Cache, std::string pattern) {
static const char * const isregex = ".?+*|[^$";
if (_config->FindB("APT::Cmd::Pattern-Only", false))
{
// Only allow explicit regexp pattern.
if (pattern.size() == 0 || (pattern[0] != '^' && pattern[pattern.size() - 1] != '$'))
return false;
} else {
if (pattern.find_first_of(isregex) == std::string::npos)
return false;
}
bool const wasEmpty = pci->empty();
if (wasEmpty == true)
pci->setConstructor(CacheSetHelper::REGEX);
size_t archfound = pattern.find_last_of(':');
std::string arch = "native";
if (archfound != std::string::npos) {
arch = pattern.substr(archfound+1);
if (arch.find_first_of(isregex) == std::string::npos)
pattern.erase(archfound);
else
arch = "native";
}
if (unlikely(Cache.GetPkgCache() == 0))
return false;
APT::CacheFilter::PackageNameMatchesRegEx regexfilter(pattern);
bool found = false;
for (pkgCache::GrpIterator Grp = Cache.GetPkgCache()->GrpBegin(); Grp.end() == false; ++Grp) {
if (regexfilter(Grp) == false)
continue;
pkgCache::PkgIterator Pkg = Grp.FindPkg(arch);
if (Pkg.end() == true) {
if (archfound == std::string::npos)
Pkg = Grp.FindPreferredPkg(true);
if (Pkg.end() == true)
continue;
}
pci->insert(Pkg);
showPackageSelection(Pkg, CacheSetHelper::REGEX, pattern);
found = true;
}
if (found == false) {
canNotFindPackage(CacheSetHelper::REGEX, pci, Cache, pattern);
pci->setConstructor(CacheSetHelper::UNKNOWN);
return false;
}
if (wasEmpty == false && pci->getConstructor() != CacheSetHelper::UNKNOWN)
pci->setConstructor(CacheSetHelper::UNKNOWN);
return true;
}
/*}}}*/
// PackageFromFnmatch - Returns the package defined by this fnmatch /*{{{*/
bool CacheSetHelper::PackageFromFnmatch(PackageContainerInterface * const pci,
pkgCacheFile &Cache, std::string pattern)
{
static const char * const isfnmatch = ".?*[]!";
if (_config->FindB("APT::Cmd::Pattern-Only", false))
return false;
if (pattern.find_first_of(isfnmatch) == std::string::npos)
return false;
bool const wasEmpty = pci->empty();
if (wasEmpty == true)
pci->setConstructor(CacheSetHelper::FNMATCH);
size_t archfound = pattern.find_last_of(':');
std::string arch = "native";
if (archfound != std::string::npos) {
arch = pattern.substr(archfound+1);
if (arch.find_first_of(isfnmatch) == std::string::npos)
pattern.erase(archfound);
else
arch = "native";
}
if (unlikely(Cache.GetPkgCache() == 0))
return false;
APT::CacheFilter::PackageNameMatchesFnmatch filter(pattern);
bool found = false;
for (pkgCache::GrpIterator Grp = Cache.GetPkgCache()->GrpBegin(); Grp.end() == false; ++Grp) {
if (filter(Grp) == false)
continue;
pkgCache::PkgIterator Pkg = Grp.FindPkg(arch);
if (Pkg.end() == true) {
if (archfound == std::string::npos)
Pkg = Grp.FindPreferredPkg(true);
if (Pkg.end() == true)
continue;
}
pci->insert(Pkg);
showPackageSelection(Pkg, CacheSetHelper::FNMATCH, pattern);
found = true;
}
if (found == false) {
canNotFindPackage(CacheSetHelper::FNMATCH, pci, Cache, pattern);
pci->setConstructor(CacheSetHelper::UNKNOWN);
return false;
}
if (wasEmpty == false && pci->getConstructor() != CacheSetHelper::UNKNOWN)
pci->setConstructor(CacheSetHelper::UNKNOWN);
return true;
}
/*}}}*/
// PackageFromPackageName - Returns the package defined by this string /*{{{*/
bool CacheSetHelper::PackageFromPackageName(PackageContainerInterface * const pci, pkgCacheFile &Cache,
std::string pkg) {
if (unlikely(Cache.GetPkgCache() == 0))
return false;
std::string const pkgstring = pkg;
size_t const archfound = pkg.find_last_of(':');
std::string arch;
if (archfound != std::string::npos) {
arch = pkg.substr(archfound+1);
pkg.erase(archfound);
if (arch == "all" || arch == "native")
arch = _config->Find("APT::Architecture");
}
pkgCache::GrpIterator Grp = Cache.GetPkgCache()->FindGrp(pkg);
if (Grp.end() == false) {
if (arch.empty() == true) {
pkgCache::PkgIterator Pkg = Grp.FindPreferredPkg();
if (Pkg.end() == false)
{
pci->insert(Pkg);
return true;
}
} else {
bool found = false;
// for 'linux-any' return the first package matching, for 'linux-*' return all matches
bool const isGlobal = arch.find('*') != std::string::npos;
APT::CacheFilter::PackageArchitectureMatchesSpecification pams(arch);
for (pkgCache::PkgIterator Pkg = Grp.PackageList(); Pkg.end() == false; Pkg = Grp.NextPkg(Pkg)) {
if (pams(Pkg) == false)
continue;
pci->insert(Pkg);
found = true;
if (isGlobal == false)
break;
}
if (found == true)
return true;
}
}
pkgCache::PkgIterator Pkg = canNotFindPkgName(Cache, pkgstring);
if (Pkg.end() == true)
return false;
pci->insert(Pkg);
return true;
}
bool CacheSetHelper::PackageFromPattern(PackageContainerInterface *const pci, pkgCacheFile &Cache, std::string const &pattern)
{
if (pattern.size() < 1 || (pattern[0] != '?' && pattern[0] != '~'))
return false;
auto compiledPattern = APT::CacheFilter::ParsePattern(pattern, &Cache);
if (!compiledPattern)
return false;
for (pkgCache::PkgIterator Pkg = Cache->PkgBegin(); Pkg.end() == false; ++Pkg)
{
if ((*compiledPattern)(Pkg) == false)
continue;
pci->insert(Pkg);
}
return true;
}
/*}}}*/
// PackageFromString - Return all packages matching a specific string /*{{{*/
bool CacheSetHelper::PackageFromString(PackageContainerInterface * const pci, pkgCacheFile &Cache, std::string const &str) {
bool found = true;
_error->PushToStack();
if (PackageFrom(CacheSetHelper::PATTERN, pci, Cache, str) == false &&
PackageFrom(CacheSetHelper::PACKAGENAME, pci, Cache, str) == false &&
PackageFrom(CacheSetHelper::TASK, pci, Cache, str) == false &&
// FIXME: hm, hm, regexp/fnmatch incompatible?
PackageFrom(CacheSetHelper::FNMATCH, pci, Cache, str) == false &&
PackageFrom(CacheSetHelper::REGEX, pci, Cache, str) == false)
{
canNotFindPackage(CacheSetHelper::PACKAGENAME, pci, Cache, str);
found = false;
}
if (found == true)
_error->RevertToStack();
else
_error->MergeWithStack();
return found;
}
/*}}}*/
// PackageFromCommandLine - Return all packages specified on commandline /*{{{*/
bool CacheSetHelper::PackageFromCommandLine(PackageContainerInterface * const pci, pkgCacheFile &Cache, const char **cmdline) {
bool found = false;
for (const char **I = cmdline; *I != 0; ++I)
found |= PackageFrom(CacheSetHelper::STRING, pci, Cache, *I);
return found;
}
/*}}}*/
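// Illustrative sketch (editorial note, not part of upstream): how a frontend
// typically turns argv entries into a package set using the helpers above.
// Assumes CacheSetHelper's default constructor (error reporting enabled), an
// opened pkgCacheFile named Cache and a NULL-terminated cmdline array.
//
//   APT::CacheSetHelper helper;
//   APT::PackageSet pkgset;
//   helper.PackageFromCommandLine(&pkgset, Cache, cmdline);
//   for (APT::PackageSet::const_iterator P = pkgset.begin(); P != pkgset.end(); ++P)
//      std::cout << P.FullName(true) << '\n';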
// FromModifierCommandLine - helper doing the work for PKG:GroupedFromCommandLine /*{{{*/
bool CacheSetHelper::PackageFromModifierCommandLine(unsigned short &modID, PackageContainerInterface * const pci,
pkgCacheFile &Cache, const char * cmdline,
std::list<PkgModifier> const &mods) {
std::string str = cmdline;
unsigned short fallback = modID;
bool modifierPresent = false;
for (std::list<PkgModifier>::const_iterator mod = mods.begin();
mod != mods.end(); ++mod) {
size_t const alength = strlen(mod->Alias);
switch(mod->Pos) {
case PkgModifier::POSTFIX:
if (str.compare(str.length() - alength, alength,
mod->Alias, 0, alength) != 0)
continue;
str.erase(str.length() - alength);
modID = mod->ID;
break;
case PkgModifier::PREFIX:
continue;
case PkgModifier::NONE:
continue;
}
modifierPresent = true;
break;
}
if (modifierPresent == true) {
bool const errors = showErrors(false);
bool const found = PackageFrom(PACKAGENAME, pci, Cache, cmdline);
showErrors(errors);
if (found == true) {
modID = fallback;
return true;
}
}
return PackageFrom(CacheSetHelper::PACKAGENAME, pci, Cache, str);
}
/*}}}*/
// FromModifierCommandLine - helper doing the work for VER:GroupedFromCommandLine /*{{{*/
bool VersionContainerInterface::FromModifierCommandLine(unsigned short &modID,
VersionContainerInterface * const vci,
pkgCacheFile &Cache, const char * cmdline,
std::list<Modifier> const &mods,
CacheSetHelper &helper) {
CacheSetHelper::VerSelector select = CacheSetHelper::NEWEST;
std::string str = cmdline;
if (unlikely(str.empty() == true))
return false;
bool modifierPresent = false;
unsigned short fallback = modID;
for (std::list<Modifier>::const_iterator mod = mods.begin();
mod != mods.end(); ++mod) {
if (modID == fallback && mod->ID == fallback)
select = mod->SelectVersion;
size_t const alength = strlen(mod->Alias);
switch(mod->Pos) {
case Modifier::POSTFIX:
if (str.length() <= alength ||
str.compare(str.length() - alength, alength, mod->Alias, 0, alength) != 0)
continue;
str.erase(str.length() - alength);
modID = mod->ID;
select = mod->SelectVersion;
break;
case Modifier::PREFIX:
continue;
case Modifier::NONE:
continue;
}
modifierPresent = true;
break;
}
if (modifierPresent == true) {
bool const errors = helper.showErrors(false);
bool const found = VersionContainerInterface::FromString(vci, Cache, cmdline, select, helper, true);
helper.showErrors(errors);
if (found == true) {
modID = fallback;
return true;
}
}
return FromString(vci, Cache, str, select, helper);
}
/*}}}*/
// FromCommandLine - Return all versions specified on commandline /*{{{*/
bool VersionContainerInterface::FromCommandLine(VersionContainerInterface * const vci,
pkgCacheFile &Cache, const char **cmdline,
CacheSetHelper::VerSelector const fallback,
CacheSetHelper &helper) {
bool found = false;
for (const char **I = cmdline; *I != 0; ++I)
found |= VersionContainerInterface::FromString(vci, Cache, *I, fallback, helper);
return found;
}
/*}}}*/
// FromString - Returns all versions specified by a string /*{{{*/
bool VersionContainerInterface::FromString(VersionContainerInterface * const vci,
pkgCacheFile &Cache, std::string pkg,
CacheSetHelper::VerSelector const fallback,
CacheSetHelper &helper,
bool const onlyFromName) {
std::string ver;
bool verIsRel = false;
size_t const vertag = pkg.find_last_of("/=");
if (vertag != std::string::npos) {
ver = pkg.substr(vertag+1);
verIsRel = (pkg[vertag] == '/');
pkg.erase(vertag);
}
PackageSet pkgset;
if (onlyFromName == false)
helper.PackageFrom(CacheSetHelper::STRING, &pkgset, Cache, pkg);
else {
helper.PackageFrom(CacheSetHelper::PACKAGENAME, &pkgset, Cache, pkg);
}
bool errors = true;
if (pkgset.getConstructor() != CacheSetHelper::UNKNOWN)
errors = helper.showErrors(false);
bool found = false;
for (PackageSet::const_iterator P = pkgset.begin();
P != pkgset.end(); ++P) {
if (vertag == std::string::npos) {
found |= VersionContainerInterface::FromPackage(vci, Cache, P, fallback, helper);
continue;
}
pkgCache::VerIterator V;
if (ver == "installed")
V = getInstalledVer(Cache, P, helper);
else if (ver == "candidate")
V = getCandidateVer(Cache, P, helper);
else if (ver == "newest") {
if (P->VersionList != 0)
V = P.VersionList();
else
V = helper.canNotGetVersion(CacheSetHelper::NEWEST, Cache, P);
} else {
pkgVersionMatch Match(ver, (verIsRel == true ? pkgVersionMatch::Release :
pkgVersionMatch::Version));
V = Match.Find(P);
if (V.end() == true) {
if (verIsRel == true)
_error->Error(_("Release '%s' for '%s' was not found"),
ver.c_str(), P.FullName(true).c_str());
else
_error->Error(_("Version '%s' for '%s' was not found"),
ver.c_str(), P.FullName(true).c_str());
continue;
}
}
if (V.end() == true)
continue;
if (verIsRel == true)
helper.showVersionSelection(P, V, CacheSetHelper::RELEASE, ver);
else
helper.showVersionSelection(P, V, CacheSetHelper::VERSIONNUMBER, ver);
vci->insert(V);
found = true;
}
if (pkgset.getConstructor() != CacheSetHelper::UNKNOWN)
helper.showErrors(errors);
return found;
}
/*}}}*/
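// Summary of the string forms handled above (derived from the parsing in
// FromString, kept here for quick reference):
//   "pkg=1.2-3"    selects the exact version number 1.2-3
//   "pkg/stable"   selects the version coming from the release 'stable'
//   the literal versions "installed", "candidate" and "newest" are handled
//   specially after either separator, e.g. "pkg=installed"
// A string without '=' or '/' is passed on to FromPackage with the given
// fallback selector instead.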
// FromPackage - versions from package based on fallback /*{{{*/
bool VersionContainerInterface::FromPackage(VersionContainerInterface * const vci,
pkgCacheFile &Cache,
pkgCache::PkgIterator const &P,
CacheSetHelper::VerSelector const fallback,
CacheSetHelper &helper) {
pkgCache::VerIterator V;
bool showErrors;
bool found = false;
switch(fallback) {
case CacheSetHelper::ALL:
if (P->VersionList != 0)
for (V = P.VersionList(); V.end() != true; ++V)
found |= vci->insert(V);
else
helper.canNotFindVersion(CacheSetHelper::ALL, vci, Cache, P);
break;
case CacheSetHelper::CANDANDINST:
found |= vci->insert(getInstalledVer(Cache, P, helper));
found |= vci->insert(getCandidateVer(Cache, P, helper));
break;
case CacheSetHelper::CANDIDATE:
found |= vci->insert(getCandidateVer(Cache, P, helper));
break;
case CacheSetHelper::INSTALLED:
found |= vci->insert(getInstalledVer(Cache, P, helper));
break;
case CacheSetHelper::CANDINST:
showErrors = helper.showErrors(false);
V = getCandidateVer(Cache, P, helper);
if (V.end() == true)
V = getInstalledVer(Cache, P, helper);
helper.showErrors(showErrors);
if (V.end() == false)
found |= vci->insert(V);
else
helper.canNotFindVersion(CacheSetHelper::CANDINST, vci, Cache, P);
break;
case CacheSetHelper::INSTCAND:
showErrors = helper.showErrors(false);
V = getInstalledVer(Cache, P, helper);
if (V.end() == true)
V = getCandidateVer(Cache, P, helper);
helper.showErrors(showErrors);
if (V.end() == false)
found |= vci->insert(V);
else
helper.canNotFindVersion(CacheSetHelper::INSTCAND, vci, Cache, P);
break;
case CacheSetHelper::NEWEST:
if (P->VersionList != 0)
found |= vci->insert(P.VersionList());
else
helper.canNotFindVersion(CacheSetHelper::NEWEST, vci, Cache, P);
break;
case CacheSetHelper::RELEASE:
case CacheSetHelper::VERSIONNUMBER:
// both make no sense here, so always false
return false;
}
return found;
}
/*}}}*/
// FromDependency - versions satisfying a given dependency /*{{{*/
bool VersionContainerInterface::FromDependency(VersionContainerInterface * const vci,
pkgCacheFile &Cache,
pkgCache::DepIterator const &D,
CacheSetHelper::VerSelector const selector,
CacheSetHelper &helper)
{
bool found = false;
switch(selector) {
case CacheSetHelper::ALL:
{
pkgCache::PkgIterator const T = D.TargetPkg();
for (pkgCache::VerIterator Ver = T.VersionList(); Ver.end() == false; ++Ver)
{
if (D.IsSatisfied(Ver) == true)
{
vci->insert(Ver);
found = true;
}
for (pkgCache::PrvIterator Prv = T.ProvidesList(); Prv.end() != true; ++Prv)
{
pkgCache::VerIterator const V = Prv.OwnerVer();
if (unlikely(V.end() == true) || D.IsSatisfied(Prv) == false)
continue;
vci->insert(V);
found = true;
}
}
return found;
}
case CacheSetHelper::CANDANDINST:
{
found = FromDependency(vci, Cache, D, CacheSetHelper::CANDIDATE, helper);
found &= FromDependency(vci, Cache, D, CacheSetHelper::INSTALLED, helper);
return found;
}
case CacheSetHelper::CANDIDATE:
{
pkgCache::PkgIterator const T = D.TargetPkg();
pkgCache::VerIterator const Cand = Cache[T].CandidateVerIter(Cache);
if (Cand.end() == false && D.IsSatisfied(Cand) == true)
{
vci->insert(Cand);
found = true;
}
for (pkgCache::PrvIterator Prv = T.ProvidesList(); Prv.end() != true; ++Prv)
{
pkgCache::VerIterator const V = Prv.OwnerVer();
pkgCache::VerIterator const Cand = Cache[Prv.OwnerPkg()].CandidateVerIter(Cache);
if (Cand.end() == true || V != Cand || D.IsSatisfied(Prv) == false)
continue;
vci->insert(Cand);
found = true;
}
return found;
}
case CacheSetHelper::INSTALLED:
{
pkgCache::PkgIterator const T = D.TargetPkg();
pkgCache::VerIterator const Cand = T.CurrentVer();
if (Cand.end() == false && D.IsSatisfied(Cand) == true)
{
vci->insert(Cand);
found = true;
}
for (pkgCache::PrvIterator Prv = T.ProvidesList(); Prv.end() != true; ++Prv)
{
pkgCache::VerIterator const V = Prv.OwnerVer();
pkgCache::VerIterator const Cand = Prv.OwnerPkg().CurrentVer();
if (Cand.end() == true || V != Cand || D.IsSatisfied(Prv) == false)
continue;
vci->insert(Cand);
found = true;
}
return found;
}
case CacheSetHelper::CANDINST:
return FromDependency(vci, Cache, D, CacheSetHelper::CANDIDATE, helper) ||
FromDependency(vci, Cache, D, CacheSetHelper::INSTALLED, helper);
case CacheSetHelper::INSTCAND:
return FromDependency(vci, Cache, D, CacheSetHelper::INSTALLED, helper) ||
FromDependency(vci, Cache, D, CacheSetHelper::CANDIDATE, helper);
case CacheSetHelper::NEWEST:
{
pkgCache::PkgIterator const T = D.TargetPkg();
pkgCache::VerIterator const Cand = T.VersionList();
if (Cand.end() == false && D.IsSatisfied(Cand) == true)
{
vci->insert(Cand);
found = true;
}
for (pkgCache::PrvIterator Prv = T.ProvidesList(); Prv.end() != true; ++Prv)
{
pkgCache::VerIterator const V = Prv.OwnerVer();
pkgCache::VerIterator const Cand = Prv.OwnerPkg().VersionList();
if (Cand.end() == true || V != Cand || D.IsSatisfied(Prv) == false)
continue;
vci->insert(Cand);
found = true;
}
return found;
}
case CacheSetHelper::RELEASE:
case CacheSetHelper::VERSIONNUMBER:
// both make no sense here, so always false
return false;
}
return found;
}
/*}}}*/
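// A minimal usage sketch (assuming the container types declared in
// cacheset.h, e.g. APT::VersionVector) for collecting the candidate versions
// that can satisfy a dependency D:
//
//   APT::CacheSetHelper helper;
//   APT::VersionVector verset;
//   APT::VersionContainerInterface::FromDependency(&verset, CacheFile, D,
//         APT::CacheSetHelper::CANDIDATE, helper);
//   for (auto const &Ver : verset)
//      std::cout << Ver.ParentPkg().FullName(true) << ' ' << Ver.VerStr() << '\n';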
// getCandidateVer - Returns the candidate version of the given package /*{{{*/
pkgCache::VerIterator VersionContainerInterface::getCandidateVer(pkgCacheFile &Cache,
pkgCache::PkgIterator const &Pkg, CacheSetHelper &helper) {
pkgCache::VerIterator Cand;
if (Cache.IsDepCacheBuilt() == true) {
Cand = Cache[Pkg].CandidateVerIter(Cache);
} else if (unlikely(Cache.GetPolicy() == nullptr)) {
return pkgCache::VerIterator(Cache);
} else {
Cand = Cache.GetPolicy()->GetCandidateVer(Pkg);
}
if (Cand.end() == true)
return helper.canNotGetVersion(CacheSetHelper::CANDIDATE, Cache, Pkg);
return Cand;
}
/*}}}*/
// getInstalledVer - Returns the installed version of the given package /*{{{*/
pkgCache::VerIterator VersionContainerInterface::getInstalledVer(pkgCacheFile &Cache,
pkgCache::PkgIterator const &Pkg, CacheSetHelper &helper) {
if (Pkg->CurrentVer == 0)
return helper.canNotGetVersion(CacheSetHelper::INSTALLED, Cache, Pkg);
return Pkg.CurrentVer();
}
/*}}}*/
// canNotFindPackage - with the given selector and pattern /*{{{*/
void CacheSetHelper::canNotFindPackage(enum PkgSelector const select,
PackageContainerInterface * const pci, pkgCacheFile &Cache,
std::string const &pattern) {
switch (select) {
case REGEX: canNotFindRegEx(pci, Cache, pattern); break;
case TASK: canNotFindTask(pci, Cache, pattern); break;
case FNMATCH: canNotFindFnmatch(pci, Cache, pattern); break;
case PACKAGENAME: canNotFindPackage(pci, Cache, pattern); break;
case STRING: canNotFindPackage(pci, Cache, pattern); break;
case PATTERN: canNotFindPackage(pci, Cache, pattern); break;
case UNKNOWN: break;
}
}
// canNotFindTask - handle the case no package is found for a task /*{{{*/
void CacheSetHelper::canNotFindTask(PackageContainerInterface * const /*pci*/, pkgCacheFile &/*Cache*/, std::string pattern) {
if (ShowError == true)
_error->Insert(ErrorType, _("Couldn't find task '%s'"), pattern.c_str());
}
/*}}}*/
// canNotFindRegEx - handle the case no package is found by a regex /*{{{*/
void CacheSetHelper::canNotFindRegEx(PackageContainerInterface * const /*pci*/, pkgCacheFile &/*Cache*/, std::string pattern) {
if (ShowError == true)
_error->Insert(ErrorType, _("Couldn't find any package by regex '%s'"), pattern.c_str());
}
/*}}}*/
// canNotFindFnmatch - handle the case no package is found by a fnmatch /*{{{*/
void CacheSetHelper::canNotFindFnmatch(PackageContainerInterface * const /*pci*/, pkgCacheFile &/*Cache*/, std::string pattern) {
if (ShowError == true)
_error->Insert(ErrorType, _("Couldn't find any package by glob '%s'"), pattern.c_str());
}
/*}}}*/
// canNotFindPackage - handle the case no package is found from a string/*{{{*/
void CacheSetHelper::canNotFindPackage(PackageContainerInterface * const /*pci*/, pkgCacheFile &/*Cache*/, std::string const &/*str*/) {
}
/*}}}*/
/*}}}*/
// canNotFindPkgName - handle the case no package has this name /*{{{*/
pkgCache::PkgIterator CacheSetHelper::canNotFindPkgName(pkgCacheFile &Cache,
std::string const &str) {
if (ShowError == true)
_error->Insert(ErrorType, _("Unable to locate package %s"), str.c_str());
return pkgCache::PkgIterator(Cache, 0);
}
/*}}}*/
// canNotFindVersion - for package by selector /*{{{*/
void CacheSetHelper::canNotFindVersion(enum VerSelector const select, VersionContainerInterface * const vci, pkgCacheFile &Cache, pkgCache::PkgIterator const &Pkg)
{
switch (select) {
case ALL: canNotFindAllVer(vci, Cache, Pkg); break;
case INSTCAND: canNotFindInstCandVer(vci, Cache, Pkg); break;
case CANDINST: canNotFindCandInstVer(vci, Cache, Pkg); break;
case NEWEST: canNotFindNewestVer(Cache, Pkg); break;
case CANDIDATE: canNotFindCandidateVer(Cache, Pkg); break;
case INSTALLED: canNotFindInstalledVer(Cache, Pkg); break;
case CANDANDINST: canNotGetCandInstVer(Cache, Pkg); break;
case RELEASE:
case VERSIONNUMBER:
// invalid in this branch
break;
}
}
// canNotFindAllVer /*{{{*/
void CacheSetHelper::canNotFindAllVer(VersionContainerInterface * const /*vci*/, pkgCacheFile &/*Cache*/,
pkgCache::PkgIterator const &Pkg) {
if (ShowError == true)
_error->Insert(ErrorType, _("Can't select versions from package '%s' as it is purely virtual"), Pkg.FullName(true).c_str());
}
/*}}}*/
// canNotFindInstCandVer /*{{{*/
void CacheSetHelper::canNotFindInstCandVer(VersionContainerInterface * const /*vci*/, pkgCacheFile &Cache,
pkgCache::PkgIterator const &Pkg) {
canNotGetInstCandVer(Cache, Pkg);
}
/*}}}*/
// canNotFindCandInstVer /*{{{*/
void CacheSetHelper::canNotFindCandInstVer(VersionContainerInterface * const /*vci*/, pkgCacheFile &Cache,
pkgCache::PkgIterator const &Pkg) {
canNotGetCandInstVer(Cache, Pkg);
}
/*}}}*/
/*}}}*/
// canNotGetVersion - for package by selector /*{{{*/
pkgCache::VerIterator CacheSetHelper::canNotGetVersion(enum VerSelector const select, pkgCacheFile &Cache, pkgCache::PkgIterator const &Pkg) {
switch (select) {
case NEWEST: return canNotFindNewestVer(Cache, Pkg);
case CANDIDATE: return canNotFindCandidateVer(Cache, Pkg);
case INSTALLED: return canNotFindInstalledVer(Cache, Pkg);
case CANDINST: return canNotGetCandInstVer(Cache, Pkg);
case INSTCAND: return canNotGetInstCandVer(Cache, Pkg);
case ALL:
case CANDANDINST:
case RELEASE:
case VERSIONNUMBER:
// invalid in this branch
return pkgCache::VerIterator(Cache, 0);
}
return pkgCache::VerIterator(Cache, 0);
}
// canNotFindNewestVer /*{{{*/
pkgCache::VerIterator CacheSetHelper::canNotFindNewestVer(pkgCacheFile &Cache,
pkgCache::PkgIterator const &Pkg) {
if (ShowError == true)
_error->Insert(ErrorType, _("Can't select newest version from package '%s' as it is purely virtual"), Pkg.FullName(true).c_str());
return pkgCache::VerIterator(Cache, 0);
}
/*}}}*/
// canNotFindCandidateVer /*{{{*/
pkgCache::VerIterator CacheSetHelper::canNotFindCandidateVer(pkgCacheFile &Cache,
pkgCache::PkgIterator const &Pkg) {
if (ShowError == true)
_error->Insert(ErrorType, _("Can't select candidate version from package %s as it has no candidate"), Pkg.FullName(true).c_str());
return pkgCache::VerIterator(Cache, 0);
}
/*}}}*/
// canNotFindInstalledVer /*{{{*/
pkgCache::VerIterator CacheSetHelper::canNotFindInstalledVer(pkgCacheFile &Cache,
pkgCache::PkgIterator const &Pkg) {
if (ShowError == true)
_error->Insert(ErrorType, _("Can't select installed version from package %s as it is not installed"), Pkg.FullName(true).c_str());
return pkgCache::VerIterator(Cache, 0);
}
/*}}}*/
// canNotGetInstCandVer /*{{{*/
pkgCache::VerIterator CacheSetHelper::canNotGetInstCandVer(pkgCacheFile &Cache,
pkgCache::PkgIterator const &Pkg) {
if (ShowError == true)
_error->Insert(ErrorType, _("Can't select installed nor candidate version from package '%s' as it has neither of them"), Pkg.FullName(true).c_str());
return pkgCache::VerIterator(Cache, 0);
}
/*}}}*/
// canNotGetCandInstVer /*{{{*/
pkgCache::VerIterator CacheSetHelper::canNotGetCandInstVer(pkgCacheFile &Cache,
pkgCache::PkgIterator const &Pkg) {
if (ShowError == true)
_error->Insert(ErrorType, _("Can't select installed nor candidate version from package '%s' as it has neither of them"), Pkg.FullName(true).c_str());
return pkgCache::VerIterator(Cache, 0);
}
/*}}}*/
/*}}}*/
// showPackageSelection - by selector and given pattern /*{{{*/
void CacheSetHelper::showPackageSelection(pkgCache::PkgIterator const &pkg, enum PkgSelector const select,
std::string const &pattern) {
switch (select) {
case REGEX: showRegExSelection(pkg, pattern); break;
case TASK: showTaskSelection(pkg, pattern); break;
case FNMATCH: showFnmatchSelection(pkg, pattern); break;
case PATTERN: showPatternSelection(pkg, pattern); break;
case PACKAGENAME: /* no surprises here */ break;
case STRING: /* handled by the special cases */ break;
case UNKNOWN: break;
}
}
// showTaskSelection /*{{{*/
void CacheSetHelper::showTaskSelection(pkgCache::PkgIterator const &/*pkg*/,
std::string const &/*pattern*/) {
}
/*}}}*/
// showRegExSelection /*{{{*/
void CacheSetHelper::showRegExSelection(pkgCache::PkgIterator const &/*pkg*/,
std::string const &/*pattern*/) {
}
/*}}}*/
// showFnmatchSelection /*{{{*/
void CacheSetHelper::showFnmatchSelection(pkgCache::PkgIterator const &/*pkg*/,
std::string const &/*pattern*/) {
}
/*}}}*/
// showPatternSelection /*{{{*/
void CacheSetHelper::showPatternSelection(pkgCache::PkgIterator const & /*pkg*/,
std::string const & /*pattern*/)
{
}
/*}}}*/
/*}}}*/
// showVersionSelection /*{{{*/
void CacheSetHelper::showVersionSelection(pkgCache::PkgIterator const &Pkg,
pkgCache::VerIterator const &Ver, enum VerSelector const select, std::string const &pattern) {
switch (select) {
case RELEASE:
showSelectedVersion(Pkg, Ver, pattern, true);
break;
case VERSIONNUMBER:
showSelectedVersion(Pkg, Ver, pattern, false);
break;
case NEWEST:
case CANDIDATE:
case INSTALLED:
case CANDINST:
case INSTCAND:
case ALL:
case CANDANDINST:
// no surprises here, these selectors are simply not implemented
break;
}
}
void CacheSetHelper::showSelectedVersion(pkgCache::PkgIterator const &/*Pkg*/,
pkgCache::VerIterator const /*Ver*/,
std::string const &/*ver*/,
bool const /*verIsRel*/) {
}
/*}}}*/
CacheSetHelper::CacheSetHelper(bool const ShowError, GlobalError::MsgType ErrorType) :
ShowError(ShowError), ErrorType(ErrorType), d(NULL) {}
CacheSetHelper::~CacheSetHelper() {}
PackageContainerInterface::PackageContainerInterface() : ConstructedBy(CacheSetHelper::UNKNOWN), d(NULL) {}
PackageContainerInterface::PackageContainerInterface(PackageContainerInterface const &by) : PackageContainerInterface() { *this = by; }
PackageContainerInterface::PackageContainerInterface(CacheSetHelper::PkgSelector const by) : ConstructedBy(by), d(NULL) {}
PackageContainerInterface& PackageContainerInterface::operator=(PackageContainerInterface const &other) {
if (this != &other)
this->ConstructedBy = other.ConstructedBy;
return *this;
}
PackageContainerInterface::~PackageContainerInterface() {}
PackageUniverse::PackageUniverse(pkgCache * const Owner) : _cont(Owner), d(NULL) {}
PackageUniverse::PackageUniverse(pkgCacheFile * const Owner) : _cont(Owner->GetPkgCache()), d(NULL) {}
PackageUniverse::~PackageUniverse() {}
VersionContainerInterface::VersionContainerInterface() : d(NULL) {}
VersionContainerInterface::VersionContainerInterface(VersionContainerInterface const &other) : VersionContainerInterface() {
*this = other;
}
VersionContainerInterface& VersionContainerInterface::operator=(VersionContainerInterface const &) {
return *this;
}
VersionContainerInterface::~VersionContainerInterface() {}
}

1067
apt-pkg/cacheset.h Normal file

File diff suppressed because it is too large Load Diff

994
apt-pkg/cdrom.cc Normal file
View File

@ -0,0 +1,994 @@
/*
*/
#include <config.h>
#include <apt-pkg/aptconfiguration.h>
#include <apt-pkg/cdrom.h>
#include <apt-pkg/cdromutl.h>
#include <apt-pkg/configuration.h>
#include <apt-pkg/error.h>
#include <apt-pkg/fileutl.h>
#include <apt-pkg/indexcopy.h>
#include <apt-pkg/strutl.h>
#include <algorithm>
#include <fstream>
#include <iostream>
#include <sstream>
#include <string>
#include <vector>
#include <dirent.h>
#include <dlfcn.h>
#include <stdio.h>
#include <string.h>
#include <sys/stat.h>
#include <unistd.h>
#include <apti18n.h>
#ifdef HAVE_UDEV
#include <libudev.h>
#endif
using namespace std;
// FindPackages - Find the package files on the CDROM /*{{{*/
// ---------------------------------------------------------------------
/* We look over the cdrom for package files. This is a recursive
search that short circuits when it hits a package file in the dir.
This speeds it up greatly as the majority of the size is in the
binary-* sub dirs. */
bool pkgCdrom::FindPackages(string CD,
vector<string> &List,
vector<string> &SList,
vector<string> &SigList,
vector<string> &TransList,
string &InfoDir, pkgCdromStatus *log,
unsigned int Depth)
{
static ino_t Inodes[9];
DIR *D;
// if we have a log we "pulse" now
if(log)
log->Update();
if (Depth >= 7)
return true;
if (CD[CD.length()-1] != '/')
CD += '/';
if (chdir(CD.c_str()) != 0)
return _error->Errno("chdir","Unable to change to %s",CD.c_str());
// Look for a .disk subdirectory
if (InfoDir.empty() == true)
{
if (DirectoryExists(".disk") == true)
InfoDir = InfoDir + CD + ".disk/";
}
// Don't look into directories that have been marked to ignore.
if (RealFileExists(".aptignr") == true)
return true;
/* Check _first_ for a signature file as apt-cdrom assumes that all files
under a Packages/Source file are in control of that file and stops
the scanning
*/
if (RealFileExists("Release.gpg") == true || RealFileExists("InRelease") == true)
{
SigList.push_back(CD);
}
/* Aha! We found some package files. We assume that everything under
this dir is controlled by those package files so we don't look down
anymore */
std::vector<APT::Configuration::Compressor> const compressor = APT::Configuration::getCompressors();
for (std::vector<APT::Configuration::Compressor>::const_iterator c = compressor.begin();
c != compressor.end(); ++c)
{
if (RealFileExists(std::string("Packages").append(c->Extension).c_str()) == false)
continue;
if (_config->FindB("Debug::aptcdrom",false) == true)
std::clog << "Found Packages in " << CD << std::endl;
List.push_back(CD);
// Continue down if thorough is given
if (_config->FindB("APT::CDROM::Thorough",false) == false)
return true;
break;
}
for (std::vector<APT::Configuration::Compressor>::const_iterator c = compressor.begin();
c != compressor.end(); ++c)
{
if (RealFileExists(std::string("Sources").append(c->Extension).c_str()) == false)
continue;
if (_config->FindB("Debug::aptcdrom",false) == true)
std::clog << "Found Sources in " << CD << std::endl;
SList.push_back(CD);
// Continue down if thorough is given
if (_config->FindB("APT::CDROM::Thorough",false) == false)
return true;
break;
}
// see if we find translation indices
if (DirectoryExists("i18n") == true)
{
D = opendir("i18n");
for (struct dirent *Dir = readdir(D); Dir != 0; Dir = readdir(D))
{
if(strncmp(Dir->d_name, "Translation-", strlen("Translation-")) != 0)
continue;
string file = Dir->d_name;
for (std::vector<APT::Configuration::Compressor>::const_iterator c = compressor.begin();
c != compressor.end(); ++c)
{
string fileext = flExtension(file);
if (file == fileext)
fileext.clear();
else if (fileext.empty() == false)
fileext = "." + fileext;
if (c->Extension == fileext)
{
if (_config->FindB("Debug::aptcdrom",false) == true)
std::clog << "Found translation " << Dir->d_name << " in " << CD << "i18n/" << std::endl;
file.erase(file.size() - fileext.size());
TransList.push_back(CD + "i18n/" + file);
break;
}
}
}
closedir(D);
}
D = opendir(".");
if (D == 0)
return _error->Errno("opendir","Unable to read %s",CD.c_str());
// Run over the directory
for (struct dirent *Dir = readdir(D); Dir != 0; Dir = readdir(D))
{
// Skip some files..
if (strcmp(Dir->d_name,".") == 0 ||
strcmp(Dir->d_name,"..") == 0 ||
strcmp(Dir->d_name,".disk") == 0 ||
strcmp(Dir->d_name,"debian-installer") == 0)
continue;
// See if the name is a sub directory
struct stat Buf;
if (stat(Dir->d_name,&Buf) != 0)
continue;
if (S_ISDIR(Buf.st_mode) == 0)
continue;
unsigned int I;
for (I = 0; I != Depth; I++)
if (Inodes[I] == Buf.st_ino)
break;
if (I != Depth)
continue;
// Store the inodes we've seen
Inodes[Depth] = Buf.st_ino;
// Descend
if (FindPackages(CD + Dir->d_name,List,SList,SigList,TransList,InfoDir,log,Depth+1) == false)
break;
if (chdir(CD.c_str()) != 0)
{
_error->Errno("chdir","Unable to change to %s", CD.c_str());
closedir(D);
return false;
}
}
closedir(D);
return !_error->PendingError();
}
/*}}}*/
// Score - We compute a 'score' for a path /*{{{*/
// ---------------------------------------------------------------------
/* Paths are scored based on how close they come to what I consider
normal. That is, ones that have 'dists', 'stable' or 'testing' in them will score
higher than ones without. */
int pkgCdrom::Score(string Path)
{
int Res = 0;
if (Path.find("stable/") != string::npos)
Res += 29;
if (Path.find("/binary-") != string::npos)
Res += 20;
if (Path.find("testing/") != string::npos)
Res += 28;
if (Path.find("unstable/") != string::npos)
Res += 27;
if (Path.find("/dists/") != string::npos)
Res += 40;
if (Path.find("/main/") != string::npos)
Res += 20;
if (Path.find("/contrib/") != string::npos)
Res += 20;
if (Path.find("/non-free/") != string::npos)
Res += 20;
if (Path.find("/non-US/") != string::npos)
Res += 20;
if (Path.find("/source/") != string::npos)
Res += 10;
if (Path.find("/debian/") != string::npos)
Res -= 10;
// check for symlinks in the path leading to the actual file
// a symlink gets a big penalty
struct stat Buf;
string statPath = flNotFile(Path);
string cdromPath = _config->FindDir("Acquire::cdrom::mount");
while(statPath != cdromPath && statPath != "./") {
statPath.resize(statPath.size()-1); // remove the trailing '/'
if (lstat(statPath.c_str(),&Buf) == 0) {
if(S_ISLNK(Buf.st_mode)) {
Res -= 60;
break;
}
}
statPath = flNotFile(statPath); // descend
}
return Res;
}
/*}}}*/
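// Worked example for the scoring above (assuming no symlink penalty applies):
// "/media/cdrom/dists/stable/main/binary-amd64/" collects
//    29 (stable/) + 20 (/binary-) + 40 (/dists/) + 20 (/main/) = 109
// while a path containing "/debian/" loses 10 points, so mirror-style copies
// rank below a proper disc layout when duplicate inodes are dropped later.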
// DropBinaryArch - Dump dirs with a string like /binary-<foo>/ /*{{{*/
// ---------------------------------------------------------------------
/* Here we drop everything that is not this machine's arch */
bool pkgCdrom::DropBinaryArch(vector<string> &List)
{
for (unsigned int I = 0; I < List.size(); I++)
{
const char *Str = List[I].c_str();
const char *Start, *End;
if ((Start = strstr(Str,"/binary-")) == 0)
continue;
// Between Start and End is the architecture
Start += 8;
if ((End = strstr(Start,"/")) != 0 && Start != End &&
APT::Configuration::checkArchitecture(string(Start, End)) == true)
continue; // okay, architecture is accepted
// not accepted -> Erase it
List.erase(List.begin() + I);
--I; // the next entry is at the same index after the erase
}
return true;
}
/*}}}*/
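// For example, with only 'amd64' configured as an architecture, an entry like
// "dists/stable/main/binary-i386/" is erased here while
// "dists/stable/main/binary-amd64/" is kept.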
// DropTranslation - Dump unwanted Translation-<lang> files /*{{{*/
// ---------------------------------------------------------------------
/* Here we drop everything that is not configured in Acquire::Languages */
bool pkgCdrom::DropTranslation(vector<string> &List)
{
for (unsigned int I = 0; I < List.size(); I++)
{
const char *Start;
if ((Start = strstr(List[I].c_str(), "/Translation-")) == NULL)
continue;
Start += strlen("/Translation-");
if (APT::Configuration::checkLanguage(Start, true) == true)
continue;
// not accepted -> Erase it
List.erase(List.begin() + I);
--I; // the next entry is at the same index after the erase
}
return true;
}
/*}}}*/
// DropRepeats - Drop repeated files resulting from symlinks /*{{{*/
// ---------------------------------------------------------------------
/* Here we go and stat every file that we found and strip dup inodes. */
bool pkgCdrom::DropRepeats(vector<string> &List,const char *Name)
{
bool couldFindAllFiles = true;
// Get a list of all the inodes
ino_t *Inodes = new ino_t[List.size()];
for (unsigned int I = 0; I != List.size(); ++I)
{
struct stat Buf;
bool found = false;
std::vector<APT::Configuration::Compressor> const compressor = APT::Configuration::getCompressors();
for (std::vector<APT::Configuration::Compressor>::const_iterator c = compressor.begin();
c != compressor.end(); ++c)
{
std::string const filename = List[I] + Name + c->Extension;
if (stat(filename.c_str(), &Buf) != 0)
continue;
Inodes[I] = Buf.st_ino;
found = true;
break;
}
if (found == false)
{
_error->Errno("stat","Failed to stat %s%s",List[I].c_str(), Name);
couldFindAllFiles = false;
Inodes[I] = 0;
}
}
// Look for dups
for (unsigned int I = 0; I != List.size(); I++)
{
if (Inodes[I] == 0)
continue;
for (unsigned int J = I+1; J < List.size(); J++)
{
// No match
if (Inodes[J] == 0 || Inodes[J] != Inodes[I])
continue;
// We score the two paths.. and erase one
int ScoreA = Score(List[I]);
int ScoreB = Score(List[J]);
if (ScoreA < ScoreB)
{
List[I] = string();
break;
}
List[J] = string();
}
}
delete[] Inodes;
// Wipe erased entries
for (unsigned int I = 0; I < List.size();)
{
if (List[I].empty() == false)
I++;
else
List.erase(List.begin()+I);
}
return couldFindAllFiles;
}
/*}}}*/
// ReduceSourceList - Takes the path list and reduces it /*{{{*/
// ---------------------------------------------------------------------
/* This takes the list of source-list-style entries and collects
similar ones to form a single entry for each dist */
void pkgCdrom::ReduceSourcelist(string /*CD*/,vector<string> &List)
{
sort(List.begin(),List.end());
// Collect similar entries
for (vector<string>::iterator I = List.begin(); I != List.end(); ++I)
{
// Find a space..
string::size_type Space = (*I).find(' ');
if (Space == string::npos)
continue;
string::size_type SSpace = (*I).find(' ',Space + 1);
if (SSpace == string::npos)
continue;
string Word1 = string(*I,Space,SSpace-Space);
string Prefix = string(*I,0,Space);
string Component = string(*I,SSpace);
for (vector<string>::iterator J = List.begin(); J != I; ++J)
{
// Find a space..
string::size_type Space2 = (*J).find(' ');
if (Space2 == string::npos)
continue;
string::size_type SSpace2 = (*J).find(' ',Space2 + 1);
if (SSpace2 == string::npos)
continue;
if (string(*J,0,Space2) != Prefix)
continue;
if (string(*J,Space2,SSpace2-Space2) != Word1)
continue;
string Component2 = string(*J, SSpace2) + " ";
if (Component2.find(Component + " ") == std::string::npos)
*J += Component;
I->clear();
}
}
// Wipe erased entries
for (unsigned int I = 0; I < List.size();)
{
if (List[I].empty() == false)
I++;
else
List.erase(List.begin()+I);
}
}
/*}}}*/
// WriteDatabase - Write the CDROM Database file /*{{{*/
// ---------------------------------------------------------------------
/* We rewrite the configuration class associated with the cdrom database. */
bool pkgCdrom::WriteDatabase(Configuration &Cnf)
{
string DFile = _config->FindFile("Dir::State::cdroms");
string NewFile = DFile + ".new";
RemoveFile("WriteDatabase", NewFile);
ofstream Out(NewFile.c_str());
if (!Out)
return _error->Errno("ofstream::ofstream",
"Failed to open %s.new",DFile.c_str());
/* Write out all of the configuration directives by walking the
configuration tree */
Cnf.Dump(Out, NULL, "%F \"%v\";\n", false);
Out.close();
if (FileExists(DFile) == true)
rename(DFile.c_str(), (DFile + '~').c_str());
if (rename(NewFile.c_str(),DFile.c_str()) != 0)
return _error->Errno("rename","Failed to rename %s.new to %s",
DFile.c_str(),DFile.c_str());
return true;
}
/*}}}*/
// WriteSourceList - Write an updated sourcelist /*{{{*/
// ---------------------------------------------------------------------
/* This reads the old source list and copies it into the new one. It
appends the new CDROM entries just after the first block of comments.
This places them first in the file. It also removes any old entries
that were the same. */
bool pkgCdrom::WriteSourceList(string Name,vector<string> &List,bool Source)
{
if (List.empty() == true)
return true;
string File = _config->FindFile("Dir::Etc::sourcelist");
// Open the stream for reading
ifstream F((FileExists(File)?File.c_str():"/dev/null"),
ios::in );
if (F.fail() == true)
return _error->Errno("ifstream::ifstream","Opening %s",File.c_str());
string NewFile = File + ".new";
RemoveFile("WriteDatabase", NewFile);
ofstream Out(NewFile.c_str());
if (!Out)
return _error->Errno("ofstream::ofstream",
"Failed to open %s.new",File.c_str());
// Create a short uri without the path
string ShortURI = "cdrom:[" + Name + "]/";
string ShortURI2 = "cdrom:" + Name + "/"; // For Compatibility
string Type;
if (Source == true)
Type = "deb-src";
else
Type = "deb";
char Buffer[300];
int CurLine = 0;
bool First = true;
while (F.eof() == false)
{
F.getline(Buffer,sizeof(Buffer));
CurLine++;
if (F.fail() && !F.eof())
return _error->Error(_("Line %u too long in source list %s."),
CurLine,File.c_str());
_strtabexpand(Buffer,sizeof(Buffer));
_strstrip(Buffer);
// Comment or blank
if (Buffer[0] == '#' || Buffer[0] == 0)
{
Out << Buffer << endl;
continue;
}
if (First == true)
{
for (vector<string>::iterator I = List.begin(); I != List.end(); ++I)
{
string::size_type Space = (*I).find(' ');
if (Space == string::npos)
return _error->Error("Internal error");
Out << Type << " cdrom:[" << Name << "]/" << string(*I,0,Space) <<
" " << string(*I,Space+1) << endl;
}
}
First = false;
// Grok it
string cType;
string URI;
const char *C = Buffer;
if (ParseQuoteWord(C,cType) == false ||
ParseQuoteWord(C,URI) == false)
{
Out << Buffer << endl;
continue;
}
// Emit lines like this one
if (cType != Type || (string(URI,0,ShortURI.length()) != ShortURI &&
string(URI,0,ShortURI.length()) != ShortURI2))
{
Out << Buffer << endl;
continue;
}
}
// Just in case the file was empty
if (First == true)
{
for (vector<string>::iterator I = List.begin(); I != List.end(); ++I)
{
string::size_type Space = (*I).find(' ');
if (Space == string::npos)
return _error->Error("Internal error");
Out << "deb cdrom:[" << Name << "]/" << string(*I,0,Space) <<
" " << string(*I,Space+1) << endl;
}
}
Out.close();
rename(File.c_str(), (File + '~').c_str());
if (rename(NewFile.c_str(),File.c_str()) != 0)
return _error->Errno("rename","Failed to rename %s.new to %s",
File.c_str(),File.c_str());
return true;
}
/*}}}*/
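// The entries emitted above take the usual sources.list shape; a line written
// for a disc could look like this (label and suite purely illustrative):
//   deb cdrom:[Debian GNU/Linux 10.0.0 _Buster_ - Official amd64 DVD]/ buster main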
bool pkgCdrom::UnmountCDROM(std::string const &CDROM, pkgCdromStatus * const log)/*{{{*/
{
if (_config->FindB("APT::CDROM::NoMount",false) == true)
return true;
if (log != NULL)
log->Update(_("Unmounting CD-ROM...\n"), STEP_LAST);
return UnmountCdrom(CDROM);
}
/*}}}*/
bool pkgCdrom::MountAndIdentCDROM(Configuration &Database, std::string &CDROM, std::string &ident, pkgCdromStatus * const log, bool const interactive)/*{{{*/
{
// Startup
CDROM = _config->FindDir("Acquire::cdrom::mount");
if (CDROM[0] == '.')
CDROM= SafeGetCWD() + '/' + CDROM;
if (log != NULL)
{
string msg;
log->SetTotal(STEP_LAST);
strprintf(msg, _("Using CD-ROM mount point %s\n"), CDROM.c_str());
log->Update(msg, STEP_PREPARE);
}
// Unmount the CD and get the user to put in the one they want
if (_config->FindB("APT::CDROM::NoMount", false) == false)
{
if (interactive == true)
{
UnmountCDROM(CDROM, log);
if(log != NULL)
{
log->Update(_("Waiting for disc...\n"), STEP_WAIT);
if(!log->ChangeCdrom()) {
// user aborted
return false;
}
}
}
// Mount the new CDROM
if(log != NULL)
log->Update(_("Mounting CD-ROM...\n"), STEP_MOUNT);
if (MountCdrom(CDROM) == false)
return _error->Error("Failed to mount the cdrom.");
}
if (IsMounted(CDROM) == false)
return _error->Error("Failed to mount the cdrom.");
// Hash the CD to get an ID
if (log != NULL)
log->Update(_("Identifying... "), STEP_IDENT);
if (IdentCdrom(CDROM,ident) == false)
{
ident = "";
if (log != NULL)
log->Update("\n");
UnmountCDROM(CDROM, NULL);
return false;
}
if (log != NULL)
{
string msg;
strprintf(msg, "[%s]\n", ident.c_str());
log->Update(msg);
}
// Read the database
string DFile = _config->FindFile("Dir::State::cdroms");
if (FileExists(DFile) == true)
{
if (ReadConfigFile(Database,DFile) == false)
{
UnmountCDROM(CDROM, NULL);
return _error->Error("Unable to read the cdrom database %s",
DFile.c_str());
}
}
return true;
}
/*}}}*/
bool pkgCdrom::Ident(string &ident, pkgCdromStatus *log) /*{{{*/
{
Configuration Database;
std::string CDROM;
if (MountAndIdentCDROM(Database, CDROM, ident, log, false) == false)
return false;
if (log != NULL)
{
string msg;
strprintf(msg, _("Stored label: %s\n"),
Database.Find("CD::"+ident).c_str());
log->Update(msg);
}
// Unmount and finish
UnmountCDROM(CDROM, log);
return true;
}
/*}}}*/
bool pkgCdrom::Add(pkgCdromStatus *log) /*{{{*/
{
Configuration Database;
std::string ID, CDROM;
if (MountAndIdentCDROM(Database, CDROM, ID, log, true) == false)
return false;
if(log != NULL)
log->Update(_("Scanning disc for index files...\n"),STEP_SCAN);
// Get the CD structure
vector<string> List;
vector<string> SourceList;
vector<string> SigList;
vector<string> TransList;
string StartDir = SafeGetCWD();
string InfoDir;
if (FindPackages(CDROM,List,SourceList, SigList,TransList,InfoDir,log) == false)
{
if (log != NULL)
log->Update("\n");
UnmountCDROM(CDROM, NULL);
return false;
}
if (chdir(StartDir.c_str()) != 0)
{
UnmountCDROM(CDROM, NULL);
return _error->Errno("chdir","Unable to change to %s", StartDir.c_str());
}
if (_config->FindB("Debug::aptcdrom",false) == true)
{
cout << "I found (binary):" << endl;
for (vector<string>::iterator I = List.begin(); I != List.end(); ++I)
cout << *I << endl;
cout << "I found (source):" << endl;
for (vector<string>::iterator I = SourceList.begin(); I != SourceList.end(); ++I)
cout << *I << endl;
cout << "I found (Signatures):" << endl;
for (vector<string>::iterator I = SigList.begin(); I != SigList.end(); ++I)
cout << *I << endl;
}
//log->Update(_("Cleaning package lists..."), STEP_CLEAN);
// Fix up the list
DropBinaryArch(List);
DropRepeats(List,"Packages");
DropRepeats(SourceList,"Sources");
// FIXME: We ignore stat() errors here as we usually have only one of those in use
// This has little potential to drop 'valid' stat() errors as we know that one of these
// files need to exist, but it would be better if we would check it here
_error->PushToStack();
DropRepeats(SigList,"Release.gpg");
DropRepeats(SigList,"InRelease");
_error->RevertToStack();
DropRepeats(TransList,"");
if (_config->FindB("APT::CDROM::DropTranslation", true) == true)
DropTranslation(TransList);
if(log != NULL) {
string msg;
strprintf(msg, _("Found %zu package indexes, %zu source indexes, "
"%zu translation indexes and %zu signatures\n"),
List.size(), SourceList.size(), TransList.size(),
SigList.size());
log->Update(msg, STEP_SCAN);
}
if (List.empty() == true && SourceList.empty() == true)
{
UnmountCDROM(CDROM, NULL);
return _error->Error(_("Unable to locate any package files, perhaps this is not a Debian Disc or the wrong architecture?"));
}
// Check if the CD is in the database
string Name;
if (Database.Exists("CD::" + ID) == false ||
_config->FindB("APT::CDROM::Rename",false) == true)
{
// Try to use the CD's label if at all possible
if (InfoDir.empty() == false &&
FileExists(InfoDir + "/info") == true)
{
ifstream F((InfoDir + "/info").c_str());
if (F.good() == true)
getline(F,Name);
if (Name.empty() == false)
{
// Escape special characters
string::iterator J = Name.begin();
for (; J != Name.end(); ++J)
if (*J == '"' || *J == ']' || *J == '[')
*J = '_';
if(log != NULL)
{
string msg;
strprintf(msg, _("Found label '%s'\n"), Name.c_str());
log->Update(msg);
}
Database.Set("CD::" + ID + "::Label",Name);
}
}
if (_config->FindB("APT::CDROM::Rename",false) == true ||
Name.empty() == true)
{
if(log == NULL)
{
UnmountCDROM(CDROM, NULL);
return _error->Error("No disc name found and no way to ask for it");
}
while(true) {
if(!log->AskCdromName(Name)) {
// user cancelled
UnmountCDROM(CDROM, NULL);
return false;
}
cout << "Name: '" << Name << "'" << endl;
if (Name.empty() == false &&
Name.find('"') == string::npos &&
Name.find('[') == string::npos &&
Name.find(']') == string::npos)
break;
log->Update(_("That is not a valid name, try again.\n"));
}
}
}
else
Name = Database.Find("CD::" + ID);
// Escape special characters
string::iterator J = Name.begin();
for (; J != Name.end(); ++J)
if (*J == '"' || *J == ']' || *J == '[')
*J = '_';
Database.Set("CD::" + ID,Name);
if(log != NULL)
{
string msg;
strprintf(msg, _("This disc is called: \n'%s'\n"), Name.c_str());
log->Update(msg);
log->Update(_("Copying package lists..."), STEP_COPY);
}
// check for existence and possibly create state directory for copying
string const listDir = _config->FindDir("Dir::State::lists");
string const partialListDir = listDir + "partial/";
mode_t const mode = umask(S_IWGRP | S_IWOTH);
bool const creation_fail = (CreateAPTDirectoryIfNeeded(_config->FindDir("Dir::State"), partialListDir) == false &&
CreateAPTDirectoryIfNeeded(listDir, partialListDir) == false);
umask(mode);
if (creation_fail == true)
{
UnmountCDROM(CDROM, NULL);
return _error->Errno("cdrom", _("List directory %s is missing."), (listDir + "partial").c_str());
}
// take care of the signatures and copy them if they are ok
// (we do this before PackageCopy as it modifies "List" and "SourceList")
SigVerify SignVerify;
SignVerify.CopyAndVerify(CDROM, Name, SigList, List, SourceList);
// Copy the package files to the state directory
PackageCopy Copy;
SourceCopy SrcCopy;
TranslationsCopy TransCopy;
if (Copy.CopyPackages(CDROM,Name,List, log) == false ||
SrcCopy.CopyPackages(CDROM,Name,SourceList, log) == false ||
TransCopy.CopyTranslations(CDROM,Name,TransList, log) == false)
{
UnmountCDROM(CDROM, NULL);
return false;
}
// reduce the List so that it takes less space in sources.list
ReduceSourcelist(CDROM,List);
ReduceSourcelist(CDROM,SourceList);
// Write the database and sourcelist
if (_config->FindB("APT::cdrom::NoAct",false) == false)
{
if (WriteDatabase(Database) == false)
{
UnmountCDROM(CDROM, NULL);
return false;
}
if(log != NULL)
log->Update(_("Writing new source list\n"), STEP_WRITE);
if (WriteSourceList(Name,List,false) == false ||
WriteSourceList(Name,SourceList,true) == false)
{
UnmountCDROM(CDROM, NULL);
return false;
}
}
// Print the sourcelist entries
if(log != NULL)
log->Update(_("Source list entries for this disc are:\n"));
for (vector<string>::iterator I = List.begin(); I != List.end(); ++I)
{
string::size_type Space = (*I).find(' ');
if (Space == string::npos)
{
UnmountCDROM(CDROM, NULL);
return _error->Error("Internal error");
}
if(log != NULL)
{
stringstream msg;
msg << "deb cdrom:[" << Name << "]/" << string(*I,0,Space) <<
" " << string(*I,Space+1) << endl;
log->Update(msg.str());
}
}
for (vector<string>::iterator I = SourceList.begin(); I != SourceList.end(); ++I)
{
string::size_type Space = (*I).find(' ');
if (Space == string::npos)
{
UnmountCDROM(CDROM, NULL);
return _error->Error("Internal error");
}
if(log != NULL) {
stringstream msg;
msg << "deb-src cdrom:[" << Name << "]/" << string(*I,0,Space) <<
" " << string(*I,Space+1) << endl;
log->Update(msg.str());
}
}
// Unmount and finish
UnmountCDROM(CDROM, log);
return true;
}
/*}}}*/
pkgUdevCdromDevices::pkgUdevCdromDevices() /*{{{*/
: d(NULL)
{
}
/*}}}*/
// convenience interface, this will just call ScanForRemovable /*{{{*/
vector<CdromDevice> pkgUdevCdromDevices::Scan()
{
bool CdromOnly = _config->FindB("APT::cdrom::CdromOnly", true);
return ScanForRemovable(CdromOnly);
}
/*}}}*/
vector<CdromDevice> pkgUdevCdromDevices::ScanForRemovable(bool CdromOnly)/*{{{*/
{
vector<CdromDevice> cdrom_devices;
#ifdef HAVE_UDEV
struct udev_enumerate *enumerate;
struct udev_list_entry *l, *devices;
struct udev *udev_ctx;
udev_ctx = udev_new();
enumerate = udev_enumerate_new (udev_ctx);
if (CdromOnly)
udev_enumerate_add_match_property(enumerate, "ID_CDROM", "1");
else {
udev_enumerate_add_match_sysattr(enumerate, "removable", "1");
}
udev_enumerate_scan_devices (enumerate);
devices = udev_enumerate_get_list_entry (enumerate);
for (l = devices; l != NULL; l = udev_list_entry_get_next (l))
{
CdromDevice cdrom;
struct udev_device *udevice;
udevice = udev_device_new_from_syspath (udev_enumerate_get_udev (enumerate), udev_list_entry_get_name (l));
if (udevice == NULL)
continue;
const char* devnode = udev_device_get_devnode(udevice);
// try fstab_dir first
string mountpath;
const char* mp = udev_device_get_property_value(udevice, "FSTAB_DIR");
if (mp)
mountpath = string(mp);
else
mountpath = FindMountPointForDevice(devnode);
// fill in the struct
cdrom.DeviceName = string(devnode);
if (mountpath != "") {
cdrom.MountPath = mountpath;
string s = mountpath;
cdrom.Mounted = IsMounted(s);
} else {
cdrom.Mounted = false;
cdrom.MountPath = "";
}
cdrom_devices.push_back(cdrom);
}
#endif
return cdrom_devices;
}
/*}}}*/
pkgUdevCdromDevices::~pkgUdevCdromDevices() /*{{{*/
{
}
/*}}}*/
pkgCdromStatus::pkgCdromStatus() : d(NULL), totalSteps(0) {}
pkgCdromStatus::~pkgCdromStatus() {}
pkgCdrom::pkgCdrom() : d(NULL) {}
pkgCdrom::~pkgCdrom() {}

110
apt-pkg/cdrom.h Normal file
View File

@ -0,0 +1,110 @@
#ifndef PKGLIB_CDROM_H
#define PKGLIB_CDROM_H
#include <apt-pkg/macros.h>
#include <string>
#include <vector>
#include <stddef.h>
class Configuration;
class OpProgress;
class APT_PUBLIC pkgCdromStatus /*{{{*/
{
void * const d;
protected:
int totalSteps;
public:
pkgCdromStatus();
virtual ~pkgCdromStatus();
// total steps
virtual void SetTotal(int total) { totalSteps = total; };
// update steps, will be called regularly as a "pulse"
virtual void Update(std::string text="", int current=0) = 0;
// ask for cdrom insert
virtual bool ChangeCdrom() = 0;
// ask for cdrom name
virtual bool AskCdromName(std::string &Name) = 0;
// Progress indicator for the Index rewriter
virtual OpProgress* GetOpProgress() {return NULL; };
};
/*}}}*/
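// A minimal sketch of a frontend status implementation (illustrative only;
// assumes <iostream> and <string> are available to the frontend):
//
//   class TextCdromStatus : public pkgCdromStatus
//   {
//      public:
//      void Update(std::string text = "", int current = 0) override
//      { if (text.empty() == false) std::cout << text << std::flush; }
//      bool ChangeCdrom() override
//      { std::cout << "Insert the disc and press enter" << std::endl;
//        std::cin.get(); return std::cin.good(); }
//      bool AskCdromName(std::string &Name) override
//      { return static_cast<bool>(std::getline(std::cin, Name)); }
//   };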
class APT_PUBLIC pkgCdrom /*{{{*/
{
protected:
enum {
STEP_PREPARE = 1,
STEP_UNMOUNT,
STEP_WAIT,
STEP_MOUNT,
STEP_IDENT,
STEP_SCAN,
STEP_COPY,
STEP_WRITE,
STEP_UNMOUNT3,
STEP_LAST
};
bool FindPackages(std::string CD,
std::vector<std::string> &List,
std::vector<std::string> &SList,
std::vector<std::string> &SigList,
std::vector<std::string> &TransList,
std::string &InfoDir, pkgCdromStatus *log,
unsigned int Depth = 0);
bool DropBinaryArch(std::vector<std::string> &List);
bool DropRepeats(std::vector<std::string> &List,const char *Name);
bool DropTranslation(std::vector<std::string> &List);
void ReduceSourcelist(std::string CD,std::vector<std::string> &List);
bool WriteDatabase(Configuration &Cnf);
bool WriteSourceList(std::string Name,std::vector<std::string> &List,bool Source);
int Score(std::string Path);
public:
bool Ident(std::string &ident, pkgCdromStatus *log);
bool Add(pkgCdromStatus *log);
pkgCdrom();
virtual ~pkgCdrom();
private:
void * const d;
APT_HIDDEN bool MountAndIdentCDROM(Configuration &Database, std::string &CDROM,
std::string &ident, pkgCdromStatus * const log, bool const interactive);
APT_HIDDEN bool UnmountCDROM(std::string const &CDROM, pkgCdromStatus * const log);
};
/*}}}*/
// class that uses libudev to find cdrom/removable devices dynamically
struct APT_PUBLIC CdromDevice /*{{{*/
{
std::string DeviceName;
bool Mounted;
std::string MountPath;
};
/*}}}*/
class APT_PUBLIC pkgUdevCdromDevices /*{{{*/
{
void * const d;
public:
pkgUdevCdromDevices();
virtual ~pkgUdevCdromDevices();
// convenience interface, this will just call ScanForRemovable
// with "APT::cdrom::CdromOnly"
std::vector<CdromDevice> Scan();
std::vector<CdromDevice> ScanForRemovable(bool CdromOnly);
};
/*}}}*/
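// A minimal usage sketch (illustrative):
//
//   pkgUdevCdromDevices UdevCdroms;
//   std::vector<CdromDevice> const devices = UdevCdroms.Scan();
//   for (std::vector<CdromDevice>::const_iterator I = devices.begin();
//        I != devices.end(); ++I)
//      std::clog << I->DeviceName
//                << (I->Mounted ? " mounted at " + I->MountPath
//                               : std::string(" not mounted")) << std::endl;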
#endif

134
apt-pkg/clean.cc Normal file
View File

@ -0,0 +1,134 @@
// -*- mode: cpp; mode: fold -*-
// Description /*{{{*/
/* ######################################################################
Clean - Clean out downloaded directories
##################################################################### */
/*}}}*/
// Includes /*{{{*/
#include <config.h>
#include <apt-pkg/aptconfiguration.h>
#include <apt-pkg/clean.h>
#include <apt-pkg/configuration.h>
#include <apt-pkg/error.h>
#include <apt-pkg/fileutl.h>
#include <apt-pkg/pkgcache.h>
#include <apt-pkg/strutl.h>
#include <string>
#include <dirent.h>
#include <fcntl.h>
#include <string.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
#include <apti18n.h>
/*}}}*/
// ArchiveCleaner::Go - Perform smart cleanup of the archive /*{{{*/
// ---------------------------------------------------------------------
/* Scan the directory for files to erase; we check the version information
against our database to see if it is interesting */
bool pkgArchiveCleaner::Go(std::string Dir,pkgCache &Cache)
{
bool CleanInstalled = _config->FindB("APT::Clean-Installed",true);
if(Dir == "/")
return _error->Error(_("Clean of %s is not supported"), Dir.c_str());
// non-existing directories are always clean
// we do not check for a directory explicitly to support symlinks
if (FileExists(Dir) == false)
return true;
int const dirfd = open(Dir.c_str(), O_RDONLY | O_DIRECTORY | O_CLOEXEC);
if (dirfd == -1)
return _error->Errno("open",_("Unable to read %s"),Dir.c_str());
DIR * const D = fdopendir(dirfd);
if (D == nullptr)
return _error->Errno("opendir",_("Unable to read %s"),Dir.c_str());
for (struct dirent *Dir = readdir(D); Dir != 0; Dir = readdir(D))
{
// Skip some files..
if (strcmp(Dir->d_name, "lock") == 0 ||
strcmp(Dir->d_name, "partial") == 0 ||
strcmp(Dir->d_name, "auxfiles") == 0 ||
strcmp(Dir->d_name, "lost+found") == 0 ||
strcmp(Dir->d_name, ".") == 0 ||
strcmp(Dir->d_name, "..") == 0)
continue;
struct stat St;
if (fstatat(dirfd, Dir->d_name,&St, 0) != 0)
{
_error->Errno("stat",_("Unable to stat %s."),Dir->d_name);
closedir(D);
return false;
}
// Grab the package name
const char *I = Dir->d_name;
for (; *I != 0 && *I != '_';I++);
if (*I != '_')
continue;
std::string Pkg = DeQuoteString(std::string(Dir->d_name,I-Dir->d_name));
// Grab the version
const char *Start = I + 1;
for (I = Start; *I != 0 && *I != '_';I++);
if (*I != '_')
continue;
std::string Ver = DeQuoteString(std::string(Start,I-Start));
// Grab the arch
Start = I + 1;
for (I = Start; *I != 0 && *I != '.' ;I++);
if (*I != '.')
continue;
std::string const Arch = DeQuoteString(std::string(Start,I-Start));
// ignore packages of unconfigured architectures
if (APT::Configuration::checkArchitecture(Arch) == false)
continue;
// Lookup the package
pkgCache::PkgIterator P = Cache.FindPkg(Pkg, Arch);
if (P.end() != true)
{
pkgCache::VerIterator V = P.VersionList();
for (; V.end() == false; ++V)
{
// See if we can fetch this version at all
bool IsFetchable = false;
for (pkgCache::VerFileIterator J = V.FileList();
J.end() == false; ++J)
{
if (CleanInstalled == true &&
J.File().Flagged(pkgCache::Flag::NotSource))
continue;
IsFetchable = true;
break;
}
// See if this version matches the file
if (IsFetchable == true && Ver == V.VerStr())
break;
}
// We found a match, keep the file
if (V.end() == false)
continue;
}
Erase(dirfd, Dir->d_name, Pkg, Ver, St);
}
closedir(D);
return true;
}
/*}}}*/
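// Example of the filename split performed above (illustrative): an archive
// named "apt_2.0.2_amd64.deb" yields Pkg="apt", Ver="2.0.2" and Arch="amd64";
// DeQuoteString additionally undoes the %-escaping used in archive names, so
// an epoched version stored as "1%3a2.0" comes back as "1:2.0".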
pkgArchiveCleaner::pkgArchiveCleaner() : d(NULL) {}
pkgArchiveCleaner::~pkgArchiveCleaner() {}

38
apt-pkg/clean.h Normal file
View File

@ -0,0 +1,38 @@
// -*- mode: cpp; mode: fold -*-
// Description /*{{{*/
/* ######################################################################
Clean - Clean out downloaded directories
##################################################################### */
/*}}}*/
#ifndef APTPKG_CLEAN_H
#define APTPKG_CLEAN_H
#include <string>
#include <apt-pkg/macros.h>
class pkgCache;
class APT_PUBLIC pkgArchiveCleaner
{
/** \brief dpointer placeholder (for later in case we need it) */
void * const d;
protected:
virtual void Erase(int const dirfd, char const * const File,
std::string const &Pkg,std::string const &Ver,
struct stat const &St) = 0;
public:
bool Go(std::string Dir,pkgCache &Cache);
pkgArchiveCleaner();
virtual ~pkgArchiveCleaner();
};
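// A minimal sketch of a concrete cleaner (illustrative; a real implementation
// would live in a frontend and likely check a simulation/no-act option first):
//
//   #include <unistd.h>
//   class UnlinkCleaner : public pkgArchiveCleaner
//   {
//      protected:
//      void Erase(int const dirfd, char const * const File,
//                 std::string const &/*Pkg*/, std::string const &/*Ver*/,
//                 struct stat const &/*St*/) override
//      { unlinkat(dirfd, File, 0); }
//   };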
#endif

160
apt-pkg/contrib/arfile.cc Normal file
View File

@ -0,0 +1,160 @@
// -*- mode: cpp; mode: fold -*-
// Description /*{{{*/
/* ######################################################################
AR File - Handle an 'AR' archive
AR Archives have plain text headers at the start of each file
section. The headers are aligned on a 2 byte boundary.
Information about the structure of AR files can be found in ar(5)
on a BSD system, or in the binutils source.
##################################################################### */
/*}}}*/
// Include Files /*{{{*/
#include <config.h>
#include <apt-pkg/arfile.h>
#include <apt-pkg/error.h>
#include <apt-pkg/fileutl.h>
#include <apt-pkg/strutl.h>
#include <string>
#include <string.h>
#include <sys/types.h>
#include <apti18n.h>
/*}}}*/
struct ARArchive::MemberHeader
{
char Name[16];
char MTime[12];
char UID[6];
char GID[6];
char Mode[8];
char Size[10];
char Magic[2];
};
// ARArchive::ARArchive - Constructor /*{{{*/
// ---------------------------------------------------------------------
/* */
ARArchive::ARArchive(FileFd &File) : List(0), File(File)
{
LoadHeaders();
}
/*}}}*/
// ARArchive::~ARArchive - Destructor /*{{{*/
// ---------------------------------------------------------------------
/* */
ARArchive::~ARArchive()
{
while (List != 0)
{
Member *Tmp = List;
List = List->Next;
delete Tmp;
}
}
/*}}}*/
// ARArchive::LoadHeaders - Load the headers from each file /*{{{*/
// ---------------------------------------------------------------------
/* AR files are structured with an 8 byte magic string followed by a 60
byte plain text header then the file data, another header, data, etc */
bool ARArchive::LoadHeaders()
{
off_t Left = File.Size();
// Check the magic byte
char Magic[8];
if (File.Read(Magic,sizeof(Magic)) == false)
return false;
if (memcmp(Magic,"!<arch>\012",sizeof(Magic)) != 0)
return _error->Error(_("Invalid archive signature"));
Left -= sizeof(Magic);
// Read the member list
while (Left > 0)
{
MemberHeader Head;
if (File.Read(&Head,sizeof(Head)) == false)
return _error->Error(_("Error reading archive member header"));
Left -= sizeof(Head);
// Convert all of the integer members
Member *Memb = new Member();
if (StrToNum(Head.MTime,Memb->MTime,sizeof(Head.MTime)) == false ||
StrToNum(Head.UID,Memb->UID,sizeof(Head.UID)) == false ||
StrToNum(Head.GID,Memb->GID,sizeof(Head.GID)) == false ||
StrToNum(Head.Mode,Memb->Mode,sizeof(Head.Mode),8) == false ||
StrToNum(Head.Size,Memb->Size,sizeof(Head.Size)) == false)
{
delete Memb;
return _error->Error(_("Invalid archive member header %s"), Head.Name);
}
// Check for an extra long name string
if (memcmp(Head.Name,"#1/",3) == 0)
{
char S[300];
unsigned long Len;
if (StrToNum(Head.Name+3,Len,sizeof(Head.Name)-3) == false ||
Len >= sizeof(S))
{
delete Memb;
return _error->Error(_("Invalid archive member header"));
}
if (File.Read(S,Len) == false)
{
delete Memb;
return false;
}
S[Len] = 0;
Memb->Name = S;
Memb->Size -= Len;
Left -= Len;
}
else
{
unsigned int I = sizeof(Head.Name) - 1;
for (; Head.Name[I] == ' ' || Head.Name[I] == '/'; I--);
Memb->Name = std::string(Head.Name,I+1);
}
// Account for the AR header alignment
off_t Skip = Memb->Size % 2;
// Add it to the list
Memb->Next = List;
List = Memb;
Memb->Start = File.Tell();
if (File.Skip(Memb->Size + Skip) == false)
return false;
if (Left < (off_t)(Memb->Size + Skip))
return _error->Error(_("Archive is too short"));
Left -= Memb->Size + Skip;
}
if (Left != 0)
return _error->Error(_("Failed to read the archive headers"));
return true;
}
/*}}}*/
// ARArchive::FindMember - Find a name in the member list /*{{{*/
// ---------------------------------------------------------------------
/* Find a member with the given name */
const ARArchive::Member *ARArchive::FindMember(const char *Name) const
{
const Member *Res = List;
while (Res != 0)
{
if (Res->Name == Name)
return Res;
Res = Res->Next;
}
return 0;
}
/*}}}*/

66
apt-pkg/contrib/arfile.h Normal file
View File

@ -0,0 +1,66 @@
// -*- mode: cpp; mode: fold -*-
// Description /*{{{*/
/* ######################################################################
AR File - Handle an 'AR' archive
This is a reader for the usual 4.4 BSD AR format. It allows raw
stream access to a single member at a time. Basically all this class
provides is header parsing and verification. It is up to the client
to correctly make use of the stream start/stop points.
##################################################################### */
/*}}}*/
#ifndef PKGLIB_ARFILE_H
#define PKGLIB_ARFILE_H
#include <apt-pkg/macros.h>
#include <string>
class FileFd;
class APT_PUBLIC ARArchive
{
struct MemberHeader;
public:
struct Member;
protected:
// Linked list of members
Member *List;
bool LoadHeaders();
public:
// The stream file
FileFd &File;
// Locate a member by name
const Member *FindMember(const char *Name) const;
inline Member *Members() { return List; }
APT_PUBLIC explicit ARArchive(FileFd &File);
APT_PUBLIC ~ARArchive();
};
// A member of the archive
struct ARArchive::Member
{
// Fields from the header
std::string Name;
unsigned long MTime;
unsigned long UID;
unsigned long GID;
unsigned long Mode;
unsigned long long Size;
// Location of the data.
unsigned long long Start;
Member *Next;
Member() : Start(0), Next(0) {};
};
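// A minimal usage sketch (illustrative; the member names depend on the
// archive - a .deb, for instance, carries "debian-binary", "control.tar.*"
// and "data.tar.*"):
//
//   FileFd Fd("package.deb", FileFd::ReadOnly);
//   ARArchive AR(Fd);
//   if (_error->PendingError() == false)
//      if (ARArchive::Member const * const M = AR.FindMember("debian-binary"))
//      {
//         Fd.Seek(M->Start);
//         // read M->Size bytes of raw member data from Fd here
//      }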
#endif

293
apt-pkg/contrib/cdromutl.cc Normal file
View File

@ -0,0 +1,293 @@
// -*- mode: cpp; mode: fold -*-
// Description /*{{{*/
/* ######################################################################
CDROM Utilities - Some functions to manipulate CDROM mounts.
These are here for the cdrom method and apt-cdrom.
##################################################################### */
/*}}}*/
// Include Files /*{{{*/
#include <config.h>
#include <apt-pkg/cdromutl.h>
#include <apt-pkg/configuration.h>
#include <apt-pkg/error.h>
#include <apt-pkg/fileutl.h>
#include <apt-pkg/hashes.h>
#include <apt-pkg/strutl.h>
#include <iostream>
#include <string>
#include <vector>
#include <dirent.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
#include <sys/statvfs.h>
#include <unistd.h>
#include <apti18n.h>
/*}}}*/
using std::string;
// IsMounted - Returns true if the mount point is mounted /*{{{*/
// ---------------------------------------------------------------------
/* This is a simple algorithm that should always work: we stat the mount point
and the '..' file in the mount point and see if they are on the same device.
By definition if they are the same then it is not mounted. This should
account for symlinked mount points as well. */
bool IsMounted(string &Path)
{
if (Path.empty() == true)
return false;
// Need that trailing slash for directories
if (Path[Path.length() - 1] != '/')
Path += '/';
// if the path has a ".disk" directory we treat it as mounted
// this way even extracted copies of disks are recognized
if (DirectoryExists(Path + ".disk/") == true)
return true;
/* First we check if the path is actually mounted; we do this by
stat'ing the path and the previous directory (careful of links!)
and comparing their device fields. */
struct stat Buf,Buf2;
if (stat(Path.c_str(),&Buf) != 0 ||
stat((Path + "../").c_str(),&Buf2) != 0)
return _error->Errno("stat",_("Unable to stat the mount point %s"),Path.c_str());
if (Buf.st_dev == Buf2.st_dev)
return false;
return true;
}
/*}}}*/
// UnmountCdrom - Unmount a cdrom /*{{{*/
// ---------------------------------------------------------------------
/* Forking umount works much better than the umount syscall which can
leave /etc/mtab inconsistent. We drop all messages this produces. */
bool UnmountCdrom(string Path)
{
// do not generate errors, even if the mountpoint does not exist
// the mountpoint might be auto-created by the mount command
// and a non-existing mountpoint is surely not mounted
_error->PushToStack();
bool const mounted = IsMounted(Path);
_error->RevertToStack();
if (mounted == false)
return true;
for (int i=0;i<3;i++)
{
int Child = ExecFork();
// The child
if (Child == 0)
{
// Make all the fds /dev/null
int const null_fd = open("/dev/null",O_RDWR);
for (int I = 0; I != 3; ++I)
dup2(null_fd, I);
if (_config->Exists("Acquire::cdrom::"+Path+"::UMount") == true)
{
if (system(_config->Find("Acquire::cdrom::"+Path+"::UMount").c_str()) != 0)
_exit(100);
_exit(0);
}
else
{
const char * const Args[] = {
"umount",
Path.c_str(),
nullptr
};
execvp(Args[0], const_cast<char **>(Args));
_exit(100);
}
}
// if it cannot be umounted, give it a bit more time;
// this can happen when auto-mount magic or a fs/cdrom prober interferes
if (ExecWait(Child,"umount",true) == true)
return true;
sleep(1);
}
return false;
}
/*}}}*/
// MountCdrom - Mount a cdrom /*{{{*/
// ---------------------------------------------------------------------
/* We fork mount and drop all messages */
bool MountCdrom(string Path, string DeviceName)
{
// do not generate errors, even if the mountpoint does not exist
// the mountpoint might be auto-created by the mount command
_error->PushToStack();
bool const mounted = IsMounted(Path);
_error->RevertToStack();
if (mounted == true)
return true;
int Child = ExecFork();
// The child
if (Child == 0)
{
// Make all the fds /dev/null
int const null_fd = open("/dev/null",O_RDWR);
for (int I = 0; I != 3; ++I)
dup2(null_fd, I);
if (_config->Exists("Acquire::cdrom::"+Path+"::Mount") == true)
{
if (system(_config->Find("Acquire::cdrom::"+Path+"::Mount").c_str()) != 0)
_exit(100);
_exit(0);
}
else
{
const char *Args[10];
Args[0] = "mount";
if (DeviceName == "")
{
Args[1] = Path.c_str();
Args[2] = 0;
} else {
Args[1] = DeviceName.c_str();
Args[2] = Path.c_str();
Args[3] = 0;
}
execvp(Args[0],(char **)Args);
_exit(100);
}
}
// Wait for mount
return ExecWait(Child,"mount",true);
}
/*}}}*/
// IdentCdrom - Generate a unique string for this CD /*{{{*/
// ---------------------------------------------------------------------
/* We convert everything we hash into a string; this prevents byte size/order
from affecting the outcome. */
bool IdentCdrom(string CD,string &Res,unsigned int Version)
{
Hashes Hash(Hashes::MD5SUM);
bool writable_media = false;
int dirfd = open(CD.c_str(), O_RDONLY | O_DIRECTORY | O_CLOEXEC);
if (dirfd == -1)
return _error->Errno("open",_("Unable to read %s"),CD.c_str());
// if we are on a writable medium (like a usb-stick) that is just
// used like a cdrom don't use "." as it will constantly change,
// use .disk instead
if (faccessat(dirfd, ".", W_OK, 0) == 0)
{
int diskfd = openat(dirfd, "./.disk", O_RDONLY | O_DIRECTORY | O_CLOEXEC, 0);
if (diskfd != -1)
{
close(dirfd);
dirfd = diskfd;
writable_media = true;
CD = CD.append("/.disk");
if (_config->FindB("Debug::aptcdrom",false) == true)
std::clog << "Found writable cdrom, using alternative path: " << CD
<< std::endl;
}
}
DIR * const D = fdopendir(dirfd);
if (D == nullptr)
return _error->Errno("opendir",_("Unable to read %s"),CD.c_str());
/* Run over the directory; we assume that the read order will never
change as the media is read-only. In theory, if the kernel did
some sort of odd caching this might not be true. */
for (struct dirent *Dir = readdir(D); Dir != 0; Dir = readdir(D))
{
// Skip some files..
if (strcmp(Dir->d_name,".") == 0 ||
strcmp(Dir->d_name,"..") == 0)
continue;
std::string S;
if (Version <= 1)
S = std::to_string(Dir->d_ino);
else
{
struct stat Buf;
if (fstatat(dirfd, Dir->d_name, &Buf, 0) != 0)
continue;
S = std::to_string(Buf.st_mtime);
}
Hash.Add(S.c_str());
Hash.Add(Dir->d_name);
}
// Some stats from the fsys
std::string S;
if (_config->FindB("Debug::identcdrom",false) == false)
{
struct statvfs Buf;
if (fstatvfs(dirfd, &Buf) != 0)
return _error->Errno("statfs",_("Failed to stat the cdrom"));
// We use a kilobyte block size to avoid overflow
S = std::to_string(Buf.f_blocks * (Buf.f_bsize / 1024));
if (writable_media == false)
S.append(" ").append(std::to_string(Buf.f_bfree * (Buf.f_bsize / 1024)));
Hash.Add(S.c_str(), S.length());
strprintf(S, "-%u", Version);
}
else
strprintf(S, "-%u.debug", Version);
closedir(D);
Res = Hash.GetHashString(Hashes::MD5SUM).HashValue().append(std::move(S));
return true;
}
/*}}}*/
// FindMountPointForDevice - Find mountpoint for the given device /*{{{*/
string FindMountPointForDevice(const char *devnode)
{
// this is the order that mount uses as well
std::vector<std::string> const mounts = _config->FindVector("Dir::state::MountPoints", "/etc/mtab,/proc/mount");
for (std::vector<std::string>::const_iterator m = mounts.begin(); m != mounts.end(); ++m)
if (FileExists(*m) == true)
{
char * line = NULL;
size_t line_len = 0;
FILE * f = fopen(m->c_str(), "r");
while(getline(&line, &line_len, f) != -1)
{
char * out[] = { NULL, NULL, NULL };
TokSplitString(' ', line, out, 3);
if (out[2] != NULL || out[1] == NULL || out[0] == NULL)
continue;
if (strcmp(out[0], devnode) != 0)
continue;
fclose(f);
// unescape the \0XXX chars in the path
string mount_point = out[1];
free(line);
return DeEscapeString(mount_point);
}
fclose(f);
free(line);
}
return string();
}
/*}}}*/

24
apt-pkg/contrib/cdromutl.h Normal file

@ -0,0 +1,24 @@
// -*- mode: cpp; mode: fold -*-
// Description /*{{{*/
/* ######################################################################
CDROM Utilities - Some functions to manipulate CDROM mounts.
##################################################################### */
/*}}}*/
#ifndef PKGLIB_CDROMUTL_H
#define PKGLIB_CDROMUTL_H
#include <apt-pkg/macros.h>
#include <string>
// mount cdrom, DeviceName (e.g. /dev/sr0) is optional
APT_PUBLIC bool MountCdrom(std::string Path, std::string DeviceName="");
APT_PUBLIC bool UnmountCdrom(std::string Path);
APT_PUBLIC bool IdentCdrom(std::string CD,std::string &Res,unsigned int Version = 2);
APT_PUBLIC bool IsMounted(std::string &Path);
APT_PUBLIC std::string FindMountPointForDevice(const char *device);
#endif
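// Illustrative sketch only (not part of the upstream sources): the usual
// call sequence for the helpers declared above. The mount point and the
// device node are assumptions for the example; errors surface through the
// global _error object, just as in cdromutl.cc itself.
#include <apt-pkg/cdromutl.h>
#include <apt-pkg/error.h>
#include <iostream>

int main()
{
   std::string MountPoint = "/media/cdrom";           // assumed mount point
   if (MountCdrom(MountPoint, "/dev/sr0") == false)   // assumed device node
   {
      _error->DumpErrors();
      return 1;
   }
   std::string ID;
   if (IdentCdrom(MountPoint, ID) == true)            // Version defaults to 2
      std::cout << "Disc ID: " << ID << std::endl;
   if (IsMounted(MountPoint) == true)
      UnmountCdrom(MountPoint);
   _error->DumpErrors();
   return 0;
}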

441
apt-pkg/contrib/cmndline.cc Normal file

@ -0,0 +1,441 @@
// -*- mode: cpp; mode: fold -*-
// Description /*{{{*/
/* ######################################################################
Command Line Class - Sophisticated command line parser
This source is placed in the Public Domain, do with it what you will
It was originally written by Jason Gunthorpe <jgg@debian.org>.
##################################################################### */
/*}}}*/
// Include files /*{{{*/
#include <config.h>
#include <apt-pkg/cmndline.h>
#include <apt-pkg/configuration.h>
#include <apt-pkg/error.h>
#include <apt-pkg/strutl.h>
#include <string>
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <apti18n.h>
/*}}}*/
using namespace std;
// CommandLine::CommandLine - Constructor /*{{{*/
// ---------------------------------------------------------------------
/* */
CommandLine::CommandLine(Args *AList,Configuration *Conf) : ArgList(AList),
Conf(Conf), FileList(0)
{
}
CommandLine::CommandLine() : ArgList(NULL), Conf(NULL), FileList(0)
{
}
/*}}}*/
// CommandLine::~CommandLine - Destructor /*{{{*/
// ---------------------------------------------------------------------
/* */
CommandLine::~CommandLine()
{
delete [] FileList;
}
/*}}}*/
// CommandLine::GetCommand - return the first non-option word /*{{{*/
char const * CommandLine::GetCommand(Dispatch const * const Map,
unsigned int const argc, char const * const * const argv)
{
// if there is a -- on the line there must be the word we search for either
// before it (as -- marks the end of the options) or right after it (as we can't
// decide if the command is actually an option, given that in theory, you could
// have parameters named like commands)
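// A few illustrative invocations (assuming "install" is a Match in Map):
//   prog install -q            -> "install" is found as the first non-option word
//   prog -q -- install         -> "install" is the token right after the --
//   prog -q -- ./pkg install   -> no command: only the first token after -- is checked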
for (size_t i = 1; i < argc; ++i)
{
if (strcmp(argv[i], "--") != 0)
continue;
// check if command is before --
for (size_t k = 1; k < i; ++k)
for (size_t j = 0; Map[j].Match != NULL; ++j)
if (strcmp(argv[k], Map[j].Match) == 0)
return Map[j].Match;
// see if the next token after -- is the command
++i;
if (i < argc)
for (size_t j = 0; Map[j].Match != NULL; ++j)
if (strcmp(argv[i], Map[j].Match) == 0)
return Map[j].Match;
// we found a --, but not a command
return NULL;
}
// no --, so search for the first word matching a command
// FIXME: How likely is it that an option parameter will also be a valid Match?
for (size_t i = 1; i < argc; ++i)
{
if (*(argv[i]) == '-')
continue;
for (size_t j = 0; Map[j].Match != NULL; ++j)
if (strcmp(argv[i], Map[j].Match) == 0)
return Map[j].Match;
}
return NULL;
}
/*}}}*/
// CommandLine::Parse - Main action member /*{{{*/
// ---------------------------------------------------------------------
/* */
bool CommandLine::Parse(int argc,const char **argv)
{
delete [] FileList;
FileList = new const char *[argc];
const char **Files = FileList;
int I;
for (I = 1; I != argc; I++)
{
const char *Opt = argv[I];
// It is not an option
if (*Opt != '-')
{
*Files++ = Opt;
continue;
}
Opt++;
// Double dash signifies the end of option processing
if (*Opt == '-' && Opt[1] == 0)
{
I++;
break;
}
// Single dash is a short option
if (*Opt != '-')
{
// Iterate over each letter
while (*Opt != 0)
{
// Search for the option
Args *A;
for (A = ArgList; A->end() == false && A->ShortOpt != *Opt; A++);
if (A->end() == true)
return _error->Error(_("Command line option '%c' [from %s] is not understood in combination with the other options."),*Opt,argv[I]);
if (HandleOpt(I,argc,argv,Opt,A) == false)
return false;
if (*Opt != 0)
Opt++;
}
continue;
}
Opt++;
// Match up to a = against the list
Args *A;
const char *OptEnd = strchrnul(Opt, '=');
for (A = ArgList; A->end() == false &&
(A->LongOpt == 0 || stringcasecmp(Opt,OptEnd,A->LongOpt) != 0);
++A);
// Failed, look for a word after the first - (no-foo)
bool PreceedMatch = false;
if (A->end() == true)
{
Opt = (const char*) memchr(Opt, '-', OptEnd - Opt);
if (Opt == NULL)
return _error->Error(_("Command line option %s is not understood in combination with the other options"),argv[I]);
Opt++;
for (A = ArgList; A->end() == false &&
(A->LongOpt == 0 || stringcasecmp(Opt,OptEnd,A->LongOpt) != 0);
++A);
// Failed again..
if (A->end() == true && OptEnd - Opt != 1)
return _error->Error(_("Command line option %s is not understood in combination with the other options"),argv[I]);
// The option could be a single letter option prefixed by a no-..
if (A->end() == true)
{
for (A = ArgList; A->end() == false && A->ShortOpt != *Opt; A++);
if (A->end() == true)
return _error->Error(_("Command line option %s is not understood in combination with the other options"),argv[I]);
}
// The option is not boolean
if (A->IsBoolean() == false)
return _error->Error(_("Command line option %s is not boolean"),argv[I]);
PreceedMatch = true;
}
// Deal with it.
OptEnd--;
if (HandleOpt(I,argc,argv,OptEnd,A,PreceedMatch) == false)
return false;
}
// Copy any remaining file names over
for (; I != argc; I++)
*Files++ = argv[I];
*Files = 0;
SaveInConfig(argc, argv);
return true;
}
/*}}}*/
// CommandLine::HandleOpt - Handle a single option including all flags /*{{{*/
// ---------------------------------------------------------------------
/* This is a helper function for the parser: it looks at a given argument
and looks for specific patterns in the string; it gets tokenized
-roughly- like -*[yes|true|enable]-(o|longopt)[=][ ][argument] */
bool CommandLine::HandleOpt(int &I,int argc,const char *argv[],
const char *&Opt,Args *A,bool PreceedMatch)
{
const char *Argument = 0;
bool CertainArg = false;
int IncI = 0;
/* Determine the possible location of an option, or 0 if there is
no option */
if (Opt[1] == 0)
{
if (I + 1 < argc && argv[I+1][0] != '-')
Argument = argv[I+1];
IncI = 1;
}
else
{
if (Opt[1] == '=')
{
CertainArg = true;
Argument = Opt + 2;
}
else
Argument = Opt + 1;
}
// Option is an argument set
if ((A->Flags & HasArg) == HasArg)
{
if (Argument == 0)
return _error->Error(_("Option %s requires an argument."),argv[I]);
Opt += strlen(Opt);
I += IncI;
// Parse a configuration file
if ((A->Flags & ConfigFile) == ConfigFile)
return ReadConfigFile(*Conf,Argument);
// Arbitrary item specification
if ((A->Flags & ArbItem) == ArbItem)
{
const char * const J = strchr(Argument, '=');
if (J == nullptr)
return _error->Error(_("Option %s: Configuration item specification must have an =<val>."),argv[I]);
Conf->Set(string(Argument,J-Argument), J+1);
return true;
}
const char *I = strchrnul(A->ConfName, ' ');
if (*I == ' ')
Conf->Set(string(A->ConfName,0,I-A->ConfName),string(I+1) + Argument);
else
Conf->Set(A->ConfName,string(I) + Argument);
return true;
}
// Option is an integer level
if ((A->Flags & IntLevel) == IntLevel)
{
// There might be an argument
if (Argument != 0)
{
char *EndPtr;
unsigned long Value = strtol(Argument,&EndPtr,10);
// Conversion failed and the argument was specified with an '='
if (EndPtr == Argument && CertainArg == true)
return _error->Error(_("Option %s requires an integer argument, not '%s'"),argv[I],Argument);
// Conversion was ok, set the value and return
if (EndPtr != 0 && EndPtr != Argument && *EndPtr == 0)
{
Conf->Set(A->ConfName,Value);
Opt += strlen(Opt);
I += IncI;
return true;
}
}
// Increase the level
Conf->Set(A->ConfName,Conf->FindI(A->ConfName)+1);
return true;
}
// Option is a boolean
int Sense = -1; // -1 is unspecified, 1 is yes, 0 is no
// Look for an argument.
while (1)
{
// Look at preceding text
char Buffer[300];
if (Argument == 0)
{
if (PreceedMatch == false)
break;
if (strlen(argv[I]) >= sizeof(Buffer))
return _error->Error(_("Option '%s' is too long"),argv[I]);
// Skip the leading dash
const char *J = argv[I];
for (; *J == '-'; J++)
;
const char *JEnd = strchr(J, '-');
if (JEnd != NULL)
{
strncpy(Buffer,J,JEnd - J);
Buffer[JEnd - J] = 0;
Argument = Buffer;
CertainArg = true;
}
else
break;
}
// Check for boolean
Sense = StringToBool(Argument);
if (Sense >= 0)
{
// Eat the argument
if (Argument != Buffer)
{
Opt += strlen(Opt);
I += IncI;
}
break;
}
if (CertainArg == true)
return _error->Error(_("Sense %s is not understood, try true or false."),Argument);
Argument = 0;
}
// Indeterminate sense depends on the flag
if (Sense == -1)
{
if ((A->Flags & InvBoolean) == InvBoolean)
Sense = 0;
else
Sense = 1;
}
Conf->Set(A->ConfName,Sense);
return true;
}
/*}}}*/
// CommandLine::FileSize - Count the number of filenames /*{{{*/
// ---------------------------------------------------------------------
/* */
unsigned int CommandLine::FileSize() const
{
unsigned int Count = 0;
for (const char **I = FileList; I != 0 && *I != 0; I++)
Count++;
return Count;
}
/*}}}*/
// CommandLine::DispatchArg - Do something with the first arg /*{{{*/
bool CommandLine::DispatchArg(Dispatch const * const Map,bool NoMatch)
{
int I;
for (I = 0; Map[I].Match != 0; I++)
{
if (strcmp(FileList[0],Map[I].Match) == 0)
{
bool Res = Map[I].Handler(*this);
if (Res == false && _error->PendingError() == false)
_error->Error("Handler silently failed");
return Res;
}
}
// No matching name
if (Map[I].Match == 0)
{
if (NoMatch == true)
_error->Error(_("Invalid operation %s"),FileList[0]);
}
return false;
}
/*}}}*/
// CommandLine::SaveInConfig - for output later in a logfile or so /*{{{*/
// ---------------------------------------------------------------------
/* We save the commandline here to have it around later for e.g. logging.
It feels a bit like a hack here and isn't bulletproof, but it is better
than nothing after all. */
void CommandLine::SaveInConfig(unsigned int const &argc, char const * const * const argv)
{
char cmdline[100 + argc * 50];
memset(cmdline, 0, sizeof(cmdline));
unsigned int length = 0;
bool lastWasOption = false;
bool closeQuote = false;
for (unsigned int i = 0; i < argc && length < sizeof(cmdline); ++i, ++length)
{
for (unsigned int j = 0; argv[i][j] != '\0' && length < sizeof(cmdline)-2; ++j)
{
// we can't really sensibly deal with quoting so skip it
if (strchr("\"\'\r\n", argv[i][j]) != nullptr)
continue;
cmdline[length++] = argv[i][j];
if (lastWasOption == true && argv[i][j] == '=')
{
// That is possibly an option: Quote it if it includes spaces;
// the benefit is that this will also eliminate most false positives
const char* c = strchr(&argv[i][j+1], ' ');
if (c == NULL) continue;
cmdline[length++] = '\'';
closeQuote = true;
}
}
if (closeQuote == true)
{
cmdline[length++] = '\'';
closeQuote = false;
}
// Problem: this also detects --hello
if (cmdline[length-1] == 'o')
lastWasOption = true;
cmdline[length] = ' ';
}
cmdline[--length] = '\0';
_config->Set("CommandLine::AsString", cmdline);
}
/*}}}*/
CommandLine::Args CommandLine::MakeArgs(char ShortOpt, char const *LongOpt, char const *ConfName, unsigned long Flags)/*{{{*/
{
/* In theory, this should be a constructor for CommandLine::Args instead,
but this breaks compatibility as gcc thinks this is a c++11 initializer_list */
CommandLine::Args arg;
arg.ShortOpt = ShortOpt;
arg.LongOpt = LongOpt;
arg.ConfName = ConfName;
arg.Flags = Flags;
return arg;
}
/*}}}*/
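// Illustrative sketch only (not upstream code): how the parser above is
// typically driven. The Args/Dispatch tables and the DoHello handler are
// made up for the example; GetCommand() could be used beforehand if the
// help output depended on the chosen command.
#include <apt-pkg/cmndline.h>
#include <apt-pkg/configuration.h>
#include <apt-pkg/error.h>
#include <iostream>

static bool DoHello(CommandLine &CmdL)
{
   std::cout << "hello from " << CmdL.FileList[0] << std::endl;
   return true;
}

static int SketchMain(int argc, const char **argv)
{
   CommandLine::Args Args[] = {
      CommandLine::MakeArgs('q', "quiet", "quiet", CommandLine::IntLevel),
      CommandLine::MakeArgs('c', "config-file", "", CommandLine::ConfigFile),
      {0, 0, 0, 0}};
   CommandLine::Dispatch Cmds[] = {{"hello", &DoHello}, {nullptr, nullptr}};

   CommandLine CmdL(Args, _config);
   if (CmdL.Parse(argc, argv) == false ||    // options land in _config, the rest in FileList
       CmdL.FileSize() == 0 ||               // DispatchArg needs a command word
       CmdL.DispatchArg(Cmds) == false)      // runs the handler matching FileList[0]
   {
      _error->DumpErrors();
      return 100;
   }
   return 0;
}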

113
apt-pkg/contrib/cmndline.h Normal file

@ -0,0 +1,113 @@
// -*- mode: cpp; mode: fold -*-
// Description /*{{{*/
/* ######################################################################
Command Line Class - Sophisticated command line parser
This class provides a unified command line parser/option handler/
configuration mechanism. It allows the caller to specify the option
set and map the option set into the configuration class or other
special handling.
Filenames are stripped from the option stream and put into their
own array.
The argument descriptor array can be initialized as:
CommandLine::Args Args[] =
{{'q',"quiet","apt::get::quiet",CommandLine::IntLevel},
{0,0,0,0}};
The flags mean,
HasArg - Means the argument has a value
IntLevel - Means the argument is an integer level indication, the
following -qqqq (+3) -q5 (=5) -q=5 (=5) are valid
Boolean - Means it is true/false or yes/no.
-d (true) --no-d (false) --yes-d (true)
--long (true) --no-long (false) --yes-long (true)
-d=yes (true) -d=no (false) Words like enable, disable,
true/false, yes/no and on/off are recognized in logical
places.
InvBoolean - Same as boolean but the case with no specified sense
(first case) is set to false.
ConfigFile - Means this flag should be interpreted as the name of
a config file to read in at this point in option processing.
Implies HasArg.
ArbItem - Means the item is an arbitrary configuration string of
the form item=value, where item is passed directly
to the configuration class.
The default, if the flags are 0, is to use Boolean
##################################################################### */
/*}}}*/
#ifndef PKGLIB_CMNDLINE_H
#define PKGLIB_CMNDLINE_H
#include <apt-pkg/macros.h>
class Configuration;
class APT_PUBLIC CommandLine
{
public:
struct Args;
struct Dispatch;
struct DispatchWithHelp;
protected:
Args *ArgList;
Configuration *Conf;
bool HandleOpt(int &I,int argc,const char *argv[],
const char *&Opt,Args *A,bool PreceedeMatch = false);
void static SaveInConfig(unsigned int const &argc, char const * const * const argv);
public:
enum AFlags
{
HasArg = (1 << 0),
IntLevel = (1 << 1),
Boolean = (1 << 2),
InvBoolean = (1 << 3),
ConfigFile = (1 << 4) | HasArg,
ArbItem = (1 << 5) | HasArg
};
const char **FileList;
bool Parse(int argc,const char **argv);
void ShowHelp();
unsigned int FileSize() const APT_PURE;
bool DispatchArg(Dispatch const * const List,bool NoMatch = true);
static char const * GetCommand(Dispatch const * const Map,
unsigned int const argc, char const * const * const argv) APT_PURE;
static CommandLine::Args MakeArgs(char ShortOpt, char const *LongOpt,
char const *ConfName, unsigned long Flags) APT_PURE;
CommandLine();
CommandLine(Args *AList,Configuration *Conf);
~CommandLine();
};
struct CommandLine::Args
{
char ShortOpt;
const char *LongOpt;
const char *ConfName;
unsigned long Flags;
inline bool end() {return ShortOpt == 0 && LongOpt == 0;};
inline bool IsBoolean() {return Flags == 0 || (Flags & (Boolean|InvBoolean)) != 0;};
};
struct CommandLine::Dispatch
{
const char *Match;
bool (*Handler)(CommandLine &);
};
#endif
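// Illustrative sketch only (not upstream code), expanding the example from
// the comment above with a Boolean entry; the option and configuration
// names are made up for the example.
#include <apt-pkg/cmndline.h>
#include <apt-pkg/configuration.h>

static CommandLine::Args SketchArgs[] =
   {{'q', "quiet", "apt::get::quiet", CommandLine::IntLevel},
    {'d', "download", "apt::get::download", CommandLine::Boolean},
    {0, 0, 0, 0}};

// prog -qq            -> apt::get::quiet is raised by 2
// prog -q=5           -> apt::get::quiet is set to 5
// prog -d             -> apt::get::download is set to true
// prog --no-download  -> apt::get::download is set to false
static bool SketchParse(int argc, const char **argv)
{
   CommandLine CmdL(SketchArgs, _config);
   return CmdL.Parse(argc, argv);   // unparsed words end up in CmdL.FileList
}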

File diff suppressed because it is too large

152
apt-pkg/contrib/configuration.h Normal file

@ -0,0 +1,152 @@
// -*- mode: cpp; mode: fold -*-
// Description /*{{{*/
/* ######################################################################
Configuration Class
This class provides a configuration file and command line parser
for a tree-oriented configuration environment. All runtime configuration
is stored in here.
Each configuration name is given as a fully scoped string such as
Foo::Bar
And has associated with it a text string. The Configuration class only
provides storage and lookup for this tree, other classes provide
configuration file formats (and parsers/emitters if needed).
Most things can get by quite happily with,
cout << _config->Find("Foo::Bar") << endl;
A special extension, support for ordered lists, is provided by using the
special syntax "block::list::"; the trailing :: designates the
item as a list. To access the list you must use the tree function on
"block::list".
##################################################################### */
/*}}}*/
#ifndef PKGLIB_CONFIGURATION_H
#define PKGLIB_CONFIGURATION_H
#include <regex.h>
#include <iostream>
#include <string>
#include <vector>
#include <apt-pkg/macros.h>
class APT_PUBLIC Configuration
{
public:
struct Item
{
std::string Value;
std::string Tag;
Item *Parent;
Item *Child;
Item *Next;
std::string FullTag(const Item *Stop = 0) const;
Item() : Parent(0), Child(0), Next(0) {};
};
private:
Item *Root;
bool ToFree;
Item *Lookup(Item *Head,const char *S,unsigned long const &Len,bool const &Create);
Item *Lookup(const char *Name,const bool &Create);
inline const Item *Lookup(const char *Name) const
{
return const_cast<Configuration *>(this)->Lookup(Name,false);
}
public:
std::string Find(const char *Name,const char *Default = 0) const;
std::string Find(std::string const &Name,const char *Default = 0) const {return Find(Name.c_str(),Default);};
std::string Find(std::string const &Name, std::string const &Default) const {return Find(Name.c_str(),Default.c_str());};
std::string FindFile(const char *Name,const char *Default = 0) const;
std::string FindDir(const char *Name,const char *Default = 0) const;
/** return a list of child options
*
* Options like Acquire::Languages are handled as lists which
* can be overridden and have a default. For the latter two a comma
* separated list of values is supported.
*
* \param Name of the parent node
* \param Default list of values separated by commas */
std::vector<std::string> FindVector(const char *Name, std::string const &Default = "", bool const Keys = false) const;
std::vector<std::string> FindVector(std::string const &Name, std::string const &Default = "", bool const Keys = false) const { return FindVector(Name.c_str(), Default, Keys); };
int FindI(const char *Name,int const &Default = 0) const;
int FindI(std::string const &Name,int const &Default = 0) const {return FindI(Name.c_str(),Default);};
bool FindB(const char *Name,bool const &Default = false) const;
bool FindB(std::string const &Name,bool const &Default = false) const {return FindB(Name.c_str(),Default);};
std::string FindAny(const char *Name,const char *Default = 0) const;
inline void Set(const std::string &Name,const std::string &Value) {Set(Name.c_str(),Value);};
void CndSet(const char *Name,const std::string &Value);
void CndSet(const char *Name,const int Value);
void Set(const char *Name,const std::string &Value);
void Set(const char *Name,const int &Value);
inline bool Exists(const std::string &Name) const {return Exists(Name.c_str());};
bool Exists(const char *Name) const;
bool ExistsAny(const char *Name) const;
void MoveSubTree(char const * const OldRoot, char const * const NewRoot);
// clear a whole tree
void Clear(const std::string &Name);
void Clear();
// remove a certain value from a list (e.g. the list of "APT::Keep-Fds")
void Clear(std::string const &List, std::string const &Value);
void Clear(std::string const &List, int const &Value);
inline const Item *Tree(const char *Name) const {return Lookup(Name);};
inline void Dump() { Dump(std::clog); };
void Dump(std::ostream& str);
void Dump(std::ostream& str, char const * const root,
char const * const format, bool const emptyValue);
explicit Configuration(const Item *Root);
Configuration();
~Configuration();
/** \brief match a string against a configurable list of patterns */
class MatchAgainstConfig
{
std::vector<regex_t *> patterns;
APT_HIDDEN void clearPatterns();
public:
explicit MatchAgainstConfig(char const * Config);
virtual ~MatchAgainstConfig();
/** \brief Returns \b true for a string matching one of the patterns */
bool Match(char const * str) const;
bool Match(std::string const &str) const { return Match(str.c_str()); };
/** \brief returns if the matcher setup was successful */
bool wasConstructedSuccessfully() const { return patterns.empty() == false; }
};
};
APT_PUBLIC extern Configuration *_config;
APT_PUBLIC bool ReadConfigFile(Configuration &Conf,const std::string &FName,
bool const &AsSectional = false,
unsigned const &Depth = 0);
APT_PUBLIC bool ReadConfigDir(Configuration &Conf,const std::string &Dir,
bool const &AsSectional = false,
unsigned const &Depth = 0);
#endif
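// Illustrative sketch only (not upstream code) of the conventions described
// above, using the global _config instance; the option names are examples.
#include <apt-pkg/configuration.h>
#include <iostream>

static void ConfigSketch()
{
   _config->Set("Foo::Bar", "baz");
   _config->Set("Acquire::Languages::", "de");   // trailing :: appends a list item
   _config->Set("Acquire::Languages::", "en");

   std::cout << _config->Find("Foo::Bar") << std::endl;            // "baz"
   std::cout << _config->FindB("Foo::Missing", true) << std::endl; // falls back to the default

   // read the list back, either flattened ...
   for (std::string const &lang : _config->FindVector("Acquire::Languages"))
      std::cout << lang << std::endl;
   // ... or by walking the Item tree below the list node
   for (Configuration::Item const *I = _config->Tree("Acquire::Languages")->Child;
        I != nullptr; I = I->Next)
      std::cout << I->Value << std::endl;
}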

332
apt-pkg/contrib/error.cc Normal file

@ -0,0 +1,332 @@
// -*- mode: cpp; mode: fold -*-
// Description /*{{{*/
/* ######################################################################
Global Error Class - Global error mechanism
We use a simple STL list to store each error record. A PendingFlag
is kept which indicates when the list contains a severe error.
This source is placed in the Public Domain, do with it what you will
It was originally written by Jason Gunthorpe.
##################################################################### */
/*}}}*/
// Include Files /*{{{*/
#include <config.h>
#include <apt-pkg/configuration.h>
#include <apt-pkg/error.h>
#include <algorithm>
#include <cstring>
#include <iostream>
#include <list>
#include <string>
#include <errno.h>
#include <stdarg.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
/*}}}*/
// Global Error Object /*{{{*/
GlobalError *_GetErrorObj()
{
static thread_local GlobalError Obj;
return &Obj;
}
/*}}}*/
// GlobalError::GlobalError - Constructor /*{{{*/
GlobalError::GlobalError() : PendingFlag(false) {}
/*}}}*/
// GlobalError::FatalE, Errno, WarningE, NoticeE and DebugE - Add to the list/*{{{*/
#define GEMessage(NAME, TYPE) \
bool GlobalError::NAME (const char *Function, const char *Description,...) { \
va_list args; \
size_t msgSize = 400; \
int const errsv = errno; \
bool retry; \
do { \
va_start(args,Description); \
retry = InsertErrno(TYPE, Function, Description, args, errsv, msgSize); \
va_end(args); \
} while (retry); \
return false; \
}
GEMessage(FatalE, FATAL)
GEMessage(Errno, ERROR)
GEMessage(WarningE, WARNING)
GEMessage(NoticeE, NOTICE)
GEMessage(DebugE, DEBUG)
#undef GEMessage
/*}}}*/
// GlobalError::InsertErrno - Get part of the errortype string from errno/*{{{*/
bool GlobalError::InsertErrno(MsgType const &type, const char *Function,
const char *Description,...) {
va_list args;
size_t msgSize = 400;
int const errsv = errno;
bool retry;
do {
va_start(args,Description);
retry = InsertErrno(type, Function, Description, args, errsv, msgSize);
va_end(args);
} while (retry);
return false;
}
/*}}}*/
// GlobalError::InsertErrno - formats an error message with the errno /*{{{*/
bool GlobalError::InsertErrno(MsgType type, const char* Function,
const char* Description, va_list &args,
int const errsv, size_t &msgSize) {
char* S = (char*) malloc(msgSize);
int const n = snprintf(S, msgSize, "%s - %s (%i: %s)", Description,
Function, errsv, strerror(errsv));
if (n < 0 || ((unsigned int) n) >= msgSize)
{
if (n > -1)
msgSize = n + 1;
else
msgSize *= 2;
free(S);
return true;
}
bool const geins = Insert(type, S, args, msgSize);
free(S);
return geins;
}
/*}}}*/
// GlobalError::Fatal, Error, Warning, Notice and Debug - Add to the list/*{{{*/
#define GEMessage(NAME, TYPE) \
bool GlobalError::NAME (const char *Description,...) { \
va_list args; \
size_t msgSize = 400; \
bool retry; \
do { \
va_start(args,Description); \
retry = Insert(TYPE, Description, args, msgSize); \
va_end(args); \
} while (retry); \
return false; \
}
GEMessage(Fatal, FATAL)
GEMessage(Error, ERROR)
GEMessage(Warning, WARNING)
GEMessage(Notice, NOTICE)
GEMessage(Debug, DEBUG)
#undef GEMessage
/*}}}*/
// GlobalError::Insert - Add a message of the given type to the list /*{{{*/
bool GlobalError::Insert(MsgType const &type, const char *Description,...)
{
va_list args;
size_t msgSize = 400;
bool retry;
do {
va_start(args,Description);
retry = Insert(type, Description, args, msgSize);
va_end(args);
} while (retry);
return false;
}
/*}}}*/
// GlobalError::Insert - Insert a new item at the end /*{{{*/
bool GlobalError::Insert(MsgType type, const char* Description,
va_list &args, size_t &msgSize) {
char* S = (char*) malloc(msgSize);
int const n = vsnprintf(S, msgSize, Description, args);
if (n < 0 || ((unsigned int) n) >= msgSize)
{
if (n > -1)
msgSize = n + 1;
else
msgSize *= 2;
free(S);
return true;
}
Item const m(S, type);
Messages.push_back(m);
if (type == ERROR || type == FATAL)
PendingFlag = true;
if (type == FATAL || type == DEBUG)
std::clog << m << std::endl;
free(S);
return false;
}
/*}}}*/
// GlobalError::PopMessage - Pulls a single message out /*{{{*/
bool GlobalError::PopMessage(std::string &Text) {
if (Messages.empty() == true)
return false;
Item const msg = Messages.front();
Messages.pop_front();
bool const Ret = (msg.Type == ERROR || msg.Type == FATAL);
Text = msg.Text;
if (PendingFlag == false || Ret == false)
return Ret;
// check if another error message is pending
for (std::list<Item>::const_iterator m = Messages.begin();
m != Messages.end(); ++m)
if (m->Type == ERROR || m->Type == FATAL)
return Ret;
PendingFlag = false;
return Ret;
}
/*}}}*/
// GlobalError::DumpErrors - Dump all of the errors/warns to cerr /*{{{*/
void GlobalError::DumpErrors(std::ostream &out, MsgType const &threshold,
bool const &mergeStack) {
if (mergeStack == true)
for (std::list<MsgStack>::const_reverse_iterator s = Stacks.rbegin();
s != Stacks.rend(); ++s)
std::copy(s->Messages.begin(), s->Messages.end(), std::front_inserter(Messages));
std::for_each(Messages.begin(), Messages.end(), [&threshold, &out](Item const &m) {
if (m.Type >= threshold)
out << m << std::endl;
});
Discard();
}
/*}}}*/
// GlobalError::Discard - Discard /*{{{*/
void GlobalError::Discard() {
Messages.clear();
PendingFlag = false;
}
/*}}}*/
// GlobalError::empty - does our error list include anything? /*{{{*/
bool GlobalError::empty(MsgType const &threshold) const {
if (PendingFlag == true)
return false;
if (Messages.empty() == true)
return true;
return std::find_if(Messages.begin(), Messages.end(), [&threshold](Item const &m) {
return m.Type >= threshold;
}) == Messages.end();
}
/*}}}*/
// GlobalError::PushToStack /*{{{*/
void GlobalError::PushToStack() {
Stacks.emplace_back(Messages, PendingFlag);
Discard();
}
/*}}}*/
// GlobalError::RevertToStack /*{{{*/
void GlobalError::RevertToStack() {
Discard();
MsgStack pack = Stacks.back();
Messages = pack.Messages;
PendingFlag = pack.PendingFlag;
Stacks.pop_back();
}
/*}}}*/
// GlobalError::MergeWithStack /*{{{*/
void GlobalError::MergeWithStack() {
MsgStack pack = Stacks.back();
Messages.splice(Messages.begin(), pack.Messages);
PendingFlag = PendingFlag || pack.PendingFlag;
Stacks.pop_back();
}
/*}}}*/
// GlobalError::Item::operator<< /*{{{*/
APT_HIDDEN std::ostream &operator<<(std::ostream &out, GlobalError::Item i)
{
static constexpr auto COLOR_RESET = "\033[0m";
static constexpr auto COLOR_NOTICE = "\033[33m"; // normal yellow
static constexpr auto COLOR_WARN = "\033[1;33m"; // bold yellow
static constexpr auto COLOR_ERROR = "\033[1;31m"; // bold red
bool use_color = _config->FindB("APT::Color", false);
if (use_color)
{
switch (i.Type)
{
case GlobalError::FATAL:
case GlobalError::ERROR:
out << COLOR_ERROR;
break;
case GlobalError::WARNING:
out << COLOR_WARN;
break;
case GlobalError::NOTICE:
out << COLOR_NOTICE;
break;
default:
break;
}
}
switch (i.Type)
{
case GlobalError::FATAL:
case GlobalError::ERROR:
out << 'E';
break;
case GlobalError::WARNING:
out << 'W';
break;
case GlobalError::NOTICE:
out << 'N';
break;
case GlobalError::DEBUG:
out << 'D';
break;
}
out << ": ";
if (use_color)
{
switch (i.Type)
{
case GlobalError::FATAL:
case GlobalError::ERROR:
case GlobalError::WARNING:
case GlobalError::NOTICE:
out << COLOR_RESET;
break;
default:
break;
}
}
std::string::size_type line_start = 0;
std::string::size_type line_end;
while ((line_end = i.Text.find_first_of("\n\r", line_start)) != std::string::npos)
{
if (line_start != 0)
out << std::endl
<< " ";
out << i.Text.substr(line_start, line_end - line_start);
line_start = i.Text.find_first_not_of("\n\r", line_end + 1);
if (line_start == std::string::npos)
break;
}
if (line_start == 0)
out << i.Text;
else if (line_start != std::string::npos)
out << std::endl
<< " " << i.Text.substr(line_start);
if (use_color)
out << COLOR_RESET;
return out;
}
/*}}}*/

345
apt-pkg/contrib/error.h Normal file

@ -0,0 +1,345 @@
// -*- mode: cpp; mode: fold -*-
// Description /*{{{*/
/* ######################################################################
Global Error Class - Global error mechanism
This class has a single global instance. When a function needs to
generate an error condition, such as a read error, it calls a member
in this class to add the error to a stack of errors.
By using a stack the problem with a scheme like errno is removed and
it allows a very detailed account of what went wrong to be transmitted
to the UI for display. (Errno has problems because each function sets
errno to 0 if it didn't have an error, thus erasing errno in the process
of cleanup)
Several predefined error generators are provided to handle common
things like errno. The general idea is that all methods return a bool.
If the bool is true then things are OK, if it is false then things
should start being undone and the stack should unwind under program
control.
A Warning should not force the return of false. Things did not fail, but
they might have had unexpected problems. Errors are stored in a FIFO
so Pop will return the first item..
I have some thoughts about extending this into a more general UI<->
Engine interface, i.e. allowing the Engine to say 'The disk is full' in
a dialog that says 'Panic' and 'Retry'. The error generator functions
like Errno, Warning and Error always return false, so this is normal:
if (open(..))
return _error->Errno(..);
This source is placed in the Public Domain, do with it what you will
It was originally written by Jason Gunthorpe.
##################################################################### */
/*}}}*/
#ifndef PKGLIB_ERROR_H
#define PKGLIB_ERROR_H
#include <apt-pkg/macros.h>
#include <iostream>
#include <list>
#include <string>
#include <stdarg.h>
#include <stddef.h>
class APT_PUBLIC GlobalError /*{{{*/
{
public: /*{{{*/
/** \brief a message can have one of following severity */
enum MsgType {
/** \brief Message will be printed instantly as it is likely that
this error will lead to a complete crash */
FATAL = 40,
/** \brief An error does hinder the correct execution and should be corrected */
ERROR = 30,
/** \brief indicates problem that can lead to errors later on */
WARNING = 20,
/** \brief deprecation warnings, old fallback behavior, … */
NOTICE = 10,
/** \brief for developers only in areas it is hard to print something directly */
DEBUG = 0
};
/** \brief add a fatal error message with errno to the list
*
* \param Function name of the function generating the error
* \param Description format string for the error message
*
* \return \b false
*/
bool FatalE(const char *Function,const char *Description,...) APT_PRINTF(3) APT_COLD;
/** \brief add an Error message with errno to the list
*
* \param Function name of the function generating the error
* \param Description format string for the error message
*
* \return \b false
*/
bool Errno(const char *Function,const char *Description,...) APT_PRINTF(3) APT_COLD;
/** \brief add a warning message with errno to the list
*
* A warning should be considered less severe than an error and
* may be ignored by the client.
*
* \param Function Name of the function that generates the warning.
* \param Description Format string for the warning message.
*
* \return \b false
*/
bool WarningE(const char *Function,const char *Description,...) APT_PRINTF(3) APT_COLD;
/** \brief add a notice message with errno to the list
*
* \param Function name of the function generating the error
* \param Description format string for the error message
*
* \return \b false
*/
bool NoticeE(const char *Function,const char *Description,...) APT_PRINTF(3) APT_COLD;
/** \brief add a debug message with errno to the list
*
* \param Function name of the function generating the error
* \param Description format string for the error message
*
* \return \b false
*/
bool DebugE(const char *Function,const char *Description,...) APT_PRINTF(3) APT_COLD;
/** \brief adds an errno message with the given type
*
* \param type of the error message
* \param Function which failed
* \param Description of the error
*/
bool InsertErrno(MsgType const &type, const char* Function,
const char* Description,...) APT_PRINTF(4) APT_COLD;
/** \brief adds an errno message with the given type
*
* args needs to be initialized with va_start and terminated
* with va_end by the caller. msgSize is also an out-parameter
* in case the msgSize was not enough to store the complete message.
*
* \param type of the error message
* \param Function which failed
* \param Description is the format string for args
* \param args list from a printf-like function
* \param errsv is the errno the error is for
* \param msgSize is the size of the char[] used to store message
* \return true if the message was added, false if not - the caller
* should call this method again in that case
*/
bool InsertErrno(MsgType type, const char* Function,
const char* Description, va_list &args,
int const errsv, size_t &msgSize) APT_COLD;
/** \brief add an fatal error message to the list
*
* Most of the stuff we consider as "error" is also "fatal" for
* the user as the application will not have the expected result,
* but a fatal message here means that it gets printed directly
* to stderr in addition to adding it to the list, as the error
* sometimes leads to crashes and a possibly duplicated message
* is better than "Segfault" as the only displayed text
*
* \param Description Format string for the fatal error message.
*
* \return \b false
*/
bool Fatal(const char *Description,...) APT_PRINTF(2) APT_COLD;
/** \brief add an Error message to the list
*
* \param Description Format string for the error message.
*
* \return \b false
*/
bool Error(const char *Description,...) APT_PRINTF(2) APT_COLD;
/** \brief add a warning message to the list
*
* A warning should be considered less severe than an error and
* may be ignored by the client.
*
* \param Description Format string for the message
*
* \return \b false
*/
bool Warning(const char *Description,...) APT_PRINTF(2) APT_COLD;
/** \brief add a notice message to the list
*
* A notice should be considered less severe than an error or a
* warning and can be ignored by the client without further problems
* for some time, but the client should consider fixing the problem.
* This error type can be used for e.g. deprecation warnings of options.
*
* \param Description Format string for the message
*
* \return \b false
*/
bool Notice(const char *Description,...) APT_PRINTF(2) APT_COLD;
/** \brief add a debug message to the list
*
* \param Description Format string for the message
*
* \return \b false
*/
bool Debug(const char *Description,...) APT_PRINTF(2) APT_COLD;
/** \brief adds an error message with the given type
*
* \param type of the error message
* \param Description of the error
*/
bool Insert(MsgType const &type, const char* Description,...) APT_PRINTF(3) APT_COLD;
/** \brief adds an error message with the given type
*
* args needs to be initialized with va_start and terminated
* with va_end by the caller. msgSize is also an out-parameter
* in case the msgSize was not enough to store the complete message.
*
* \param type of the error message
* \param Description is the format string for args
* \param args list from a printf-like function
* \param msgSize is the size of the char[] used to store message
* \return true if the message was added, false if not - the caller
* should call this method again in that case
*/
bool Insert(MsgType type, const char* Description,
va_list &args, size_t &msgSize) APT_COLD;
/** \brief is an error in the list?
*
* \return \b true if an error is included in the list, \b false otherwise
*/
inline bool PendingError() const APT_PURE {return PendingFlag;};
/** \brief is the list empty?
*
* Can be used to check if the current stack level doesn't include
* anything equal or more severe than a given threshold, defaulting
* to warning level for historic reasons.
*
* \param threshold minimum level considered
*
* \return \b true if the list is empty, \b false otherwise
*/
bool empty(MsgType const &threshold = WARNING) const APT_PURE;
/** \brief returns and removes the first (or last) message in the list
*
* \param[out] Text message of the first/last item
*
* \return \b true if the message was an error, \b false otherwise
*/
bool PopMessage(std::string &Text);
/** \brief clears the list of messages */
void Discard();
/** \brief outputs the list of messages to the given stream
*
* Note that all messages are discarded, even undisplayed ones.
*
* \param[out] out output stream to write the messages in
* \param threshold minimum level considered
* \param mergeStack if true recursively dumps the entire stack
*/
void DumpErrors(std::ostream &out, MsgType const &threshold = WARNING,
bool const &mergeStack = true);
/** \brief dumps the list of messages to std::cerr
*
* Note that all messages are discarded, including notices,
* whether displayed or not.
*
* \param threshold minimum level printed
*/
void inline DumpErrors(MsgType const &threshold) {
DumpErrors(std::cerr, threshold);
}
// mvo: we do this instead of using a default parameter in the
// previous declaration to avoid a (subtle) API break for
// e.g. sigc++ and mem_fun0
/** \brief dumps the messages of type WARNING or higher to std::cerr
*
* Note that all messages are discarded, displayed or not.
*
*/
void inline DumpErrors() {
DumpErrors(WARNING);
}
/** \brief put the current Messages into the stack
*
* All "old" messages will be pushed into a stack to
* them later back, but for now the Message query will be
* empty and performs as no messages were present before.
*
* The stack can be as deep as you want - all stack operations
* will only operate on the last element in the stack.
*/
void PushToStack();
/** \brief throw away the current messages and restore the stacked ones */
void RevertToStack();
/** \brief merge current and stack together */
void MergeWithStack();
/** \brief return the depth of the stack */
size_t StackCount() const APT_PURE {
return Stacks.size();
}
GlobalError();
/*}}}*/
private: /*{{{*/
struct Item {
std::string Text;
MsgType Type;
Item(char const *Text, MsgType const &Type) :
Text(Text), Type(Type) {};
APT_HIDDEN friend std::ostream &operator<<(std::ostream &out, Item i);
};
APT_HIDDEN friend std::ostream &operator<<(std::ostream &out, Item i);
std::list<Item> Messages;
bool PendingFlag;
struct MsgStack {
std::list<Item> Messages;
bool const PendingFlag;
MsgStack(std::list<Item> const &Messages, bool const &Pending) :
Messages(Messages), PendingFlag(Pending) {};
};
std::list<MsgStack> Stacks;
/*}}}*/
};
/*}}}*/
// The 'extra-ansi' syntax is used to help with collisions.
APT_PUBLIC GlobalError *_GetErrorObj();
static struct {
inline GlobalError* operator ->() { return _GetErrorObj(); }
} _error APT_UNUSED;
#endif
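// Illustrative sketch only (not upstream code) of the conventions described
// above: generators always return false so errors bubble up, and a
// Push/RevertToStack pair keeps a speculative probe from polluting the
// caller-visible list. The path is a placeholder.
#include <apt-pkg/error.h>
#include <fcntl.h>
#include <unistd.h>

static bool OpenOrComplain(const char *Path)
{
   int const Fd = open(Path, O_RDONLY);
   if (Fd == -1)
      return _error->Errno("open", "Unable to open %s", Path);   // always false
   close(Fd);
   return true;
}

static void ErrorSketch()
{
   _error->PushToStack();                          // start with a clean list
   bool const ok = OpenOrComplain("/nonexistent");
   _error->RevertToStack();                        // drop whatever the probe produced

   if (ok == false)
      _error->Warning("probe failed, continuing anyway");
   _error->DumpErrors();                           // print and clear warnings and errors
}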

306
apt-pkg/contrib/extracttar.cc Normal file

@ -0,0 +1,306 @@
// -*- mode: cpp; mode: fold -*-
// Description /*{{{*/
/* ######################################################################
Extract a Tar - Tar Extractor
Some performance measurements showed that zlib performed quite poorly
in comparison to a forked gzip process. This tar extractor makes use
of the fact that dup'd file descriptors have the same seek pointer
and that gzip will not read past the end of a compressed stream,
even if there is more data. We use the dup property to track extraction
progress and the gzip feature to just feed gzip a fd in the middle
of an AR file.
##################################################################### */
/*}}}*/
// Include Files /*{{{*/
#include <config.h>
#include <apt-pkg/configuration.h>
#include <apt-pkg/dirstream.h>
#include <apt-pkg/error.h>
#include <apt-pkg/extracttar.h>
#include <apt-pkg/fileutl.h>
#include <apt-pkg/strutl.h>
#include <algorithm>
#include <iostream>
#include <string>
#include <fcntl.h>
#include <signal.h>
#include <string.h>
#include <unistd.h>
#include <apti18n.h>
/*}}}*/
using namespace std;
// The on disk header for a tar file.
struct ExtractTar::TarHeader
{
char Name[100];
char Mode[8];
char UserID[8];
char GroupID[8];
char Size[12];
char MTime[12];
char Checksum[8];
char LinkFlag;
char LinkName[100];
char MagicNumber[8];
char UserName[32];
char GroupName[32];
char Major[8];
char Minor[8];
};
// ExtractTar::ExtractTar - Constructor /*{{{*/
// ---------------------------------------------------------------------
/* */
ExtractTar::ExtractTar(FileFd &Fd,unsigned long long Max,string DecompressionProgram)
: File(Fd), MaxInSize(Max), DecompressProg(DecompressionProgram)
{
GZPid = -1;
Eof = false;
}
/*}}}*/
// ExtractTar::ExtractTar - Destructor /*{{{*/
// ---------------------------------------------------------------------
/* */
ExtractTar::~ExtractTar()
{
// Error close
Done();
}
/*}}}*/
// ExtractTar::Done - Reap the gzip sub process /*{{{*/
bool ExtractTar::Done()
{
return InFd.Close();
}
/*}}}*/
// ExtractTar::StartGzip - Startup gzip /*{{{*/
// ---------------------------------------------------------------------
/* This creates a gzip sub process that has its input as the file itself.
If this tar file is embedded into something like an ar file then
gzip will efficiently ignore the extra bits. */
bool ExtractTar::StartGzip()
{
if (DecompressProg.empty())
{
InFd.OpenDescriptor(File.Fd(), FileFd::ReadOnly, FileFd::None, false);
return true;
}
std::vector<APT::Configuration::Compressor> const compressors = APT::Configuration::getCompressors();
std::vector<APT::Configuration::Compressor>::const_iterator compressor = compressors.begin();
for (; compressor != compressors.end(); ++compressor) {
if (compressor->Name == DecompressProg) {
return InFd.OpenDescriptor(File.Fd(), FileFd::ReadOnly, *compressor, false);
}
}
return _error->Error(_("Cannot find a configured compressor for '%s'"),
DecompressProg.c_str());
}
/*}}}*/
// ExtractTar::Go - Perform extraction /*{{{*/
// ---------------------------------------------------------------------
/* This reads each 512 byte block from the archive and extracts the header
information into the Item structure. Then it resolves the UID/GID and
invokes the correct processing function. */
bool ExtractTar::Go(pkgDirStream &Stream)
{
if (StartGzip() == false)
return false;
// Loop over all blocks
string LastLongLink, ItemLink;
string LastLongName, ItemName;
while (1)
{
bool BadRecord = false;
unsigned char Block[512];
if (InFd.Read(Block,sizeof(Block),true) == false)
return false;
if (InFd.Eof() == true)
break;
// Get the checksum
TarHeader *Tar = (TarHeader *)Block;
unsigned long CheckSum;
if (StrToNum(Tar->Checksum,CheckSum,sizeof(Tar->Checksum),8) == false)
return _error->Error(_("Corrupted archive"));
/* Compute the checksum field. The actual checksum is blanked out
with spaces so it is not included in the computation */
unsigned long NewSum = 0;
memset(Tar->Checksum,' ',sizeof(Tar->Checksum));
for (int I = 0; I != sizeof(Block); I++)
NewSum += Block[I];
/* Check for a block of nulls - in this case we kill gzip, GNU tar
does this.. */
if (NewSum == ' '*sizeof(Tar->Checksum))
return Done();
if (NewSum != CheckSum)
return _error->Error(_("Tar checksum failed, archive corrupted"));
// Decode all of the fields
pkgDirStream::Item Itm;
if (StrToNum(Tar->Mode,Itm.Mode,sizeof(Tar->Mode),8) == false ||
(Base256ToNum(Tar->UserID,Itm.UID,8) == false &&
StrToNum(Tar->UserID,Itm.UID,sizeof(Tar->UserID),8) == false) ||
(Base256ToNum(Tar->GroupID,Itm.GID,8) == false &&
StrToNum(Tar->GroupID,Itm.GID,sizeof(Tar->GroupID),8) == false) ||
(Base256ToNum(Tar->Size,Itm.Size,12) == false &&
StrToNum(Tar->Size,Itm.Size,sizeof(Tar->Size),8) == false) ||
(Base256ToNum(Tar->MTime,Itm.MTime,12) == false &&
StrToNum(Tar->MTime,Itm.MTime,sizeof(Tar->MTime),8) == false) ||
StrToNum(Tar->Major,Itm.Major,sizeof(Tar->Major),8) == false ||
StrToNum(Tar->Minor,Itm.Minor,sizeof(Tar->Minor),8) == false)
return _error->Error(_("Corrupted archive"));
// Grab the filename and link target: use last long name if one was
// set, otherwise use the header value as-is, but remember that it may
// fill the entire 100-byte block and needs to be zero-terminated.
// See Debian Bug #689582.
if (LastLongName.empty() == false)
Itm.Name = (char *)LastLongName.c_str();
else
Itm.Name = (char *)ItemName.assign(Tar->Name, sizeof(Tar->Name)).c_str();
if (Itm.Name[0] == '.' && Itm.Name[1] == '/' && Itm.Name[2] != 0)
Itm.Name += 2;
if (LastLongLink.empty() == false)
Itm.LinkTarget = (char *)LastLongLink.c_str();
else
Itm.LinkTarget = (char *)ItemLink.assign(Tar->LinkName, sizeof(Tar->LinkName)).c_str();
// Convert the type over
switch (Tar->LinkFlag)
{
case NormalFile0:
case NormalFile:
Itm.Type = pkgDirStream::Item::File;
break;
case HardLink:
Itm.Type = pkgDirStream::Item::HardLink;
break;
case SymbolicLink:
Itm.Type = pkgDirStream::Item::SymbolicLink;
break;
case CharacterDevice:
Itm.Type = pkgDirStream::Item::CharDevice;
break;
case BlockDevice:
Itm.Type = pkgDirStream::Item::BlockDevice;
break;
case Directory:
Itm.Type = pkgDirStream::Item::Directory;
break;
case FIFO:
Itm.Type = pkgDirStream::Item::FIFO;
break;
case GNU_LongLink:
{
unsigned long long Length = Itm.Size;
unsigned char Block[512];
while (Length > 0)
{
if (InFd.Read(Block,sizeof(Block),true) == false)
return false;
if (Length <= sizeof(Block))
{
LastLongLink.append(Block,Block+sizeof(Block));
break;
}
LastLongLink.append(Block,Block+sizeof(Block));
Length -= sizeof(Block);
}
continue;
}
case GNU_LongName:
{
unsigned long long Length = Itm.Size;
unsigned char Block[512];
while (Length > 0)
{
if (InFd.Read(Block,sizeof(Block),true) == false)
return false;
if (Length < sizeof(Block))
{
LastLongName.append(Block,Block+sizeof(Block));
break;
}
LastLongName.append(Block,Block+sizeof(Block));
Length -= sizeof(Block);
}
continue;
}
default:
BadRecord = true;
_error->Warning(_("Unknown TAR header type %u, member %s"),(unsigned)Tar->LinkFlag,Tar->Name);
break;
}
int Fd = -1;
if (BadRecord == false)
if (Stream.DoItem(Itm,Fd) == false)
return false;
// Copy the file over the FD
unsigned long long Size = Itm.Size;
while (Size != 0)
{
unsigned char Junk[32*1024];
unsigned long Read = min(Size, (unsigned long long)sizeof(Junk));
if (InFd.Read(Junk,((Read+511)/512)*512) == false)
return false;
if (BadRecord == false)
{
if (Fd > 0)
{
if (write(Fd,Junk,Read) != (signed)Read)
return Stream.Fail(Itm,Fd);
}
else
{
/* An Fd of -2 means to send to a special processing
function */
if (Fd == -2)
if (Stream.Process(Itm,Junk,Read,Itm.Size - Size) == false)
return Stream.Fail(Itm,Fd);
}
}
Size -= Read;
}
// And finish up
if (BadRecord == false)
if (Stream.FinishedFile(Itm,Fd) == false)
return false;
LastLongName.erase();
LastLongLink.erase();
}
return Done();
}
/*}}}*/

55
apt-pkg/contrib/extracttar.h Normal file

@ -0,0 +1,55 @@
// -*- mode: cpp; mode: fold -*-
// Description /*{{{*/
/* ######################################################################
Extract a Tar - Tar Extractor
The tar extractor takes an ordinary gzip compressed tar stream from
the given file and explodes it, passing the individual items to the
given Directory Stream for processing.
##################################################################### */
/*}}}*/
#ifndef PKGLIB_EXTRACTTAR_H
#define PKGLIB_EXTRACTTAR_H
#include <apt-pkg/fileutl.h>
#include <apt-pkg/macros.h>
#include <string>
class pkgDirStream;
class APT_PUBLIC ExtractTar
{
protected:
struct TarHeader;
// The various types an item can be
enum ItemType {NormalFile0 = '\0',NormalFile = '0',HardLink = '1',
SymbolicLink = '2',CharacterDevice = '3',
BlockDevice = '4',Directory = '5',FIFO = '6',
GNU_LongLink = 'K',GNU_LongName = 'L'};
FileFd &File;
unsigned long long MaxInSize;
int GZPid;
FileFd InFd;
bool Eof;
std::string DecompressProg;
// Fork and reap gzip
bool StartGzip();
bool Done();
public:
bool Go(pkgDirStream &Stream);
ExtractTar(FileFd &Fd,unsigned long long Max,std::string DecompressionProgram);
virtual ~ExtractTar();
};
#endif
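// Illustrative sketch only (not upstream code): list the members of a
// gzip-compressed tar by pairing ExtractTar with a minimal pkgDirStream
// (declared in apt-pkg/dirstream.h). The path and the ListStream class are
// made up for the example; "gzip" assumes the default compressor set, and
// an empty string would mean a plain, uncompressed tar.
#include <apt-pkg/dirstream.h>
#include <apt-pkg/extracttar.h>
#include <apt-pkg/fileutl.h>
#include <iostream>

class ListStream : public pkgDirStream
{
   public:
   virtual bool DoItem(Item &Itm, int &Fd) override
   {
      std::cout << Itm.Name << " (" << Itm.Size << " bytes)" << std::endl;
      Fd = -1;   // leave the fd at -1 so the file data is read but discarded
      return true;
   }
};

static bool ListTarMembers(std::string const &Path)
{
   FileFd Fd(Path, FileFd::ReadOnly);
   ExtractTar Tar(Fd, Fd.FileSize(), "gzip");
   ListStream List;
   return Tar.Go(List);
}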

3469
apt-pkg/contrib/fileutl.cc Normal file

File diff suppressed because it is too large

285
apt-pkg/contrib/fileutl.h Normal file

@ -0,0 +1,285 @@
// -*- mode: cpp; mode: fold -*-
// Description /*{{{*/
/* ######################################################################
File Utilities
CopyFile - Buffered copy of a single file
GetLock - dpkg compatible lock file manipulation (fcntl)
FileExists - Returns true if the file exists
SafeGetCWD - Returns the CWD in a string with overrun protection
The file class is a handy abstraction for various functions+classes
that need to accept filenames.
This source is placed in the Public Domain, do with it what you will
It was originally written by Jason Gunthorpe.
##################################################################### */
/*}}}*/
#ifndef PKGLIB_FILEUTL_H
#define PKGLIB_FILEUTL_H
#include <apt-pkg/aptconfiguration.h>
#include <apt-pkg/macros.h>
#include <set>
#include <string>
#include <vector>
#include <time.h>
#include <zlib.h>
/* Define this for python-apt */
#define APT_HAS_GZIP 1
class FileFdPrivate;
class APT_PUBLIC FileFd
{
friend class FileFdPrivate;
friend class GzipFileFdPrivate;
friend class Bz2FileFdPrivate;
friend class LzmaFileFdPrivate;
friend class Lz4FileFdPrivate;
friend class ZstdFileFdPrivate;
friend class DirectFileFdPrivate;
friend class PipedFileFdPrivate;
protected:
int iFd;
enum LocalFlags {AutoClose = (1<<0),Fail = (1<<1),DelOnFail = (1<<2),
HitEof = (1<<3), Replace = (1<<4), Compressed = (1<<5) };
unsigned long Flags;
std::string FileName;
std::string TemporaryFileName;
public:
enum OpenMode {
ReadOnly = (1 << 0),
WriteOnly = (1 << 1),
ReadWrite = ReadOnly | WriteOnly,
Create = (1 << 2),
Exclusive = (1 << 3),
Atomic = Exclusive | (1 << 4),
Empty = (1 << 5),
BufferedWrite = (1 << 6),
WriteEmpty = ReadWrite | Create | Empty,
WriteExists = ReadWrite,
WriteAny = ReadWrite | Create,
WriteTemp = ReadWrite | Create | Exclusive,
ReadOnlyGzip,
WriteAtomic = ReadWrite | Create | Atomic
};
enum CompressMode
{
Auto = 'A',
None = 'N',
Extension = 'E',
Gzip = 'G',
Bzip2 = 'B',
Lzma = 'L',
Xz = 'X',
Lz4 = '4',
Zstd = 'Z'
};
inline bool Read(void *To,unsigned long long Size,bool AllowEof)
{
unsigned long long Jnk;
if (AllowEof)
return Read(To,Size,&Jnk);
return Read(To,Size);
}
bool Read(void *To,unsigned long long Size,unsigned long long *Actual = 0);
bool static Read(int const Fd, void *To, unsigned long long Size, unsigned long long * const Actual = 0);
/** read a complete line or until buffer is full
*
* The buffer will always be \\0 terminated, so at most Size-1 characters are read.
* If the buffer holds a complete line the last character (before \\0) will be
* the newline character \\n otherwise the line was longer than the buffer.
*
* @param To buffer which will hold the line
* @param Size of the buffer to fill
* @return \b nullptr is returned in error cases, otherwise
* the parameter \b To, now filled with the line.
*/
char* ReadLine(char *To, unsigned long long const Size);
/** read a complete line from the file
*
* Similar to std::getline() the string does \b not include
* the newline, but just the content of the line as the newline
* is not needed to distinguish cases as for the other #ReadLine method.
*
* @param To string which will hold the line
* @return \b true if successful, otherwise \b false
*/
bool ReadLine(std::string &To);
bool Flush();
bool Write(const void *From,unsigned long long Size);
bool static Write(int Fd, const void *From, unsigned long long Size);
bool Seek(unsigned long long To);
bool Skip(unsigned long long To);
bool Truncate(unsigned long long To);
unsigned long long Tell();
// the size of the file content (compressed files will be uncompressed first)
unsigned long long Size();
// the size of the file itself
unsigned long long FileSize();
time_t ModificationTime();
bool Open(std::string FileName,unsigned int const Mode,CompressMode Compress,unsigned long const AccessMode = 0666);
bool Open(std::string FileName,unsigned int const Mode,APT::Configuration::Compressor const &compressor,unsigned long const AccessMode = 0666);
inline bool Open(std::string const &FileName,unsigned int const Mode, unsigned long const AccessMode = 0666) {
return Open(FileName, Mode, None, AccessMode);
};
bool OpenDescriptor(int Fd, unsigned int const Mode, CompressMode Compress, bool AutoClose=false);
bool OpenDescriptor(int Fd, unsigned int const Mode, APT::Configuration::Compressor const &compressor, bool AutoClose=false);
inline bool OpenDescriptor(int Fd, unsigned int const Mode, bool AutoClose=false) {
return OpenDescriptor(Fd, Mode, None, AutoClose);
};
bool Close();
bool Sync();
// Simple manipulators
inline int Fd() {return iFd;};
inline void Fd(int fd) { OpenDescriptor(fd, ReadWrite);};
inline bool IsOpen() {return iFd >= 0;};
inline bool Failed() {return (Flags & Fail) == Fail;};
inline void EraseOnFailure() {Flags |= DelOnFail;};
inline void OpFail() {Flags |= Fail;};
inline bool Eof() {return (Flags & HitEof) == HitEof;};
inline bool IsCompressed() {return (Flags & Compressed) == Compressed;};
inline std::string &Name() {return FileName;};
inline void SetFileName(std::string const &name) { FileName = name; };
FileFd(std::string FileName,unsigned int const Mode,unsigned long AccessMode = 0666);
FileFd(std::string FileName,unsigned int const Mode, CompressMode Compress, unsigned long AccessMode = 0666);
FileFd();
FileFd(int const Fd, unsigned int const Mode = ReadWrite, CompressMode Compress = None);
FileFd(int const Fd, bool const AutoClose);
virtual ~FileFd();
private:
FileFdPrivate * d;
APT_HIDDEN FileFd(const FileFd &);
APT_HIDDEN FileFd & operator=(const FileFd &);
APT_HIDDEN bool OpenInternDescriptor(unsigned int const Mode, APT::Configuration::Compressor const &compressor);
// private helpers to set Fail flag and call _error->Error
APT_HIDDEN bool FileFdErrno(const char* Function, const char* Description,...) APT_PRINTF(3) APT_COLD;
APT_HIDDEN bool FileFdError(const char* Description,...) APT_PRINTF(2) APT_COLD;
};
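A minimal usage sketch of the FileFd interface declared above; the path is a placeholder and error handling is reduced to the Failed() flag.
#include <apt-pkg/fileutl.h>
#include <iostream>
#include <string>
static bool DumpTextFile(std::string const &Path)
{
   FileFd Fd(Path, FileFd::ReadOnly);            // plain open, no decompression
   if (not Fd.IsOpen() || Fd.Failed())
      return false;
   std::string Line;
   while (Fd.ReadLine(Line))                     // newline is stripped, like std::getline()
      std::cout << Line << '\n';
   return not Fd.Failed();
}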
APT_PUBLIC bool RunScripts(const char *Cnf);
APT_PUBLIC bool CopyFile(FileFd &From,FileFd &To);
APT_PUBLIC bool RemoveFile(char const * const Function, std::string const &FileName);
APT_PUBLIC bool RemoveFileAt(char const * const Function, int const dirfd, std::string const &FileName);
APT_PUBLIC int GetLock(std::string File,bool Errors = true);
APT_PUBLIC bool FileExists(std::string File);
APT_PUBLIC bool RealFileExists(std::string File);
APT_PUBLIC bool DirectoryExists(std::string const &Path);
APT_PUBLIC bool CreateDirectory(std::string const &Parent, std::string const &Path);
APT_PUBLIC time_t GetModificationTime(std::string const &Path);
APT_PUBLIC bool Rename(std::string From, std::string To);
APT_PUBLIC std::string GetTempDir();
APT_PUBLIC std::string GetTempDir(std::string const &User);
APT_PUBLIC FileFd* GetTempFile(std::string const &Prefix = "",
bool ImmediateUnlink = true,
FileFd * const TmpFd = NULL);
// FIXME: GetTempFile should always return a buffered file
APT_HIDDEN FileFd* GetTempFile(std::string const &Prefix,
bool ImmediateUnlink ,
FileFd * const TmpFd,
bool Buffered);
/** \brief Ensure the existence of the given Path
*
* \param Parent directory of the Path directory - a trailing
* /apt/ will be removed before CreateDirectory call.
* \param Path which should exist after (successful) call
*/
APT_PUBLIC bool CreateAPTDirectoryIfNeeded(std::string const &Parent, std::string const &Path);
APT_PUBLIC std::vector<std::string> GetListOfFilesInDir(std::string const &Dir, std::string const &Ext,
bool const &SortList, bool const &AllowNoExt=false);
APT_PUBLIC std::vector<std::string> GetListOfFilesInDir(std::string const &Dir, std::vector<std::string> const &Ext,
bool const &SortList);
APT_PUBLIC std::vector<std::string> GetListOfFilesInDir(std::string const &Dir, bool SortList);
APT_PUBLIC std::string SafeGetCWD();
APT_PUBLIC void SetCloseExec(int Fd,bool Close);
APT_PUBLIC void SetNonBlock(int Fd,bool Block);
APT_PUBLIC bool WaitFd(int Fd,bool write = false,unsigned long timeout = 0);
APT_PUBLIC pid_t ExecFork();
APT_PUBLIC pid_t ExecFork(std::set<int> keep_fds);
APT_PUBLIC void MergeKeepFdsFromConfiguration(std::set<int> &keep_fds);
APT_PUBLIC bool ExecWait(pid_t Pid,const char *Name,bool Reap = false);
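The usual pattern for these two helpers, sketched under the assumption that /bin/true exists: the child replaces itself via execv and the parent reaps it with ExecWait().
#include <apt-pkg/fileutl.h>
#include <unistd.h>
static bool RunTrue()
{
   pid_t const Child = ExecFork();
   if (Child == 0)
   {
      const char *Args[] = { "/bin/true", nullptr };
      execv(Args[0], const_cast<char **>(Args));
      _exit(100);                                // only reached if execv itself failed
   }
   return ExecWait(Child, "true");               // true only if the child exited with 0
}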
// check if the given file starts with a PGP cleartext signature
APT_PUBLIC bool StartsWithGPGClearTextSignature(std::string const &FileName);
/** change file attributes to requested known good values
*
* The method skips the user:group setting if not root.
*
 * @param requester is printed as function name in error cases
* @param file is the file to be modified
* @param user is the (new) owner of the file, e.g. _apt
* @param group is the (new) group owning the file, e.g. root
* @param mode is the access mode of the file, e.g. 0644
*/
APT_PUBLIC bool ChangeOwnerAndPermissionOfFile(char const * const requester, char const * const file, char const * const user, char const * const group, mode_t const mode);
/**
* \brief Drop privileges
*
* Drop the privileges to the user _apt (or the one specified in
* APT::Sandbox::User). This does not set the supplementary group
* ids up correctly, it only uses the default group. Also prevent
* the process from gaining any new privileges afterwards, at least
* on Linux.
*
* \return true on success, false on failure with _error set
*/
APT_PUBLIC bool DropPrivileges();
// File string manipulators
APT_PUBLIC std::string flNotDir(std::string File);
APT_PUBLIC std::string flNotFile(std::string File);
APT_PUBLIC std::string flNoLink(std::string File);
APT_PUBLIC std::string flExtension(std::string File);
APT_PUBLIC std::string flCombine(std::string Dir,std::string File);
/** \brief Takes a file path and returns the absolute path
*/
APT_PUBLIC std::string flAbsPath(std::string File);
/** \brief removes superfluous /./ and // from path */
APT_HIDDEN std::string flNormalize(std::string file);
// simple c++ glob
APT_PUBLIC std::vector<std::string> Glob(std::string const &pattern, int flags=0);
/** \brief Popen() implementation that execv() instead of using a shell
*
* \param Args the execv style command to run
* \param FileFd is a reference to the FileFd to use for input or output
* \param Child a reference to the integer that stores the child pid
 * Note that you must call ExecWait() or similar to clean up
* \param Mode is either FileFd::ReadOnly or FileFd::WriteOnly
* \param CaptureStderr True if we should capture stderr in addition to stdout.
* (default: True).
* \param Sandbox True if this should run sandboxed
* \return true on success, false on failure with _error set
*/
APT_PUBLIC bool Popen(const char *Args[], FileFd &Fd, pid_t &Child, FileFd::OpenMode Mode, bool CaptureStderr = true, bool Sandbox = false);
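A sketch of driving Popen() in read mode; /bin/echo and the printed text are placeholders, and the child is reaped with ExecWait() as the comment above asks.
#include <apt-pkg/fileutl.h>
#include <iostream>
#include <string>
static bool EchoThroughPopen()
{
   const char *Args[] = { "/bin/echo", "hello world", nullptr };
   FileFd Pipe;
   pid_t Child;
   if (not Popen(Args, Pipe, Child, FileFd::ReadOnly))
      return false;                              // _error already holds the reason
   std::string Line;
   while (Pipe.ReadLine(Line))
      std::cout << Line << '\n';
   Pipe.Close();
   return ExecWait(Child, Args[0]);
}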
APT_HIDDEN bool OpenConfigurationFileFd(std::string const &File, FileFd &Fd);
APT_HIDDEN int Inhibit(const char *what, const char *who, const char *why, const char *mode);
#endif

580
apt-pkg/contrib/gpgv.cc Normal file

@ -0,0 +1,580 @@
// -*- mode: cpp; mode: fold -*-
// Include Files /*{{{*/
#include <config.h>
#include <apt-pkg/configuration.h>
#include <apt-pkg/error.h>
#include <apt-pkg/fileutl.h>
#include <apt-pkg/gpgv.h>
#include <apt-pkg/strutl.h>
#include <errno.h>
#include <fcntl.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/wait.h>
#include <unistd.h>
#include <algorithm>
#include <fstream>
#include <iostream>
#include <memory>
#include <sstream>
#include <string>
#include <vector>
#include <apti18n.h>
/*}}}*/
// syntactic sugar to wrap a raw pointer with a custom deleter in a std::unique_ptr
static std::unique_ptr<char, decltype(&free)> make_unique_char(void *const str = nullptr)
{
return {static_cast<char *>(str), &free};
}
static std::unique_ptr<FILE, decltype(&fclose)> make_unique_FILE(std::string const &filename, char const *const mode)
{
return {fopen(filename.c_str(), mode), &fclose};
}
class LineBuffer /*{{{*/
{
char *buffer = nullptr;
size_t buffer_size = 0;
int line_length = 0;
// a "normal" find_last_not_of returns npos if not found
int find_last_not_of_length(APT::StringView const bad) const
{
for (int result = line_length - 1; result >= 0; --result)
if (bad.find(buffer[result]) == APT::StringView::npos)
return result + 1;
return 0;
}
public:
bool empty() const noexcept { return view().empty(); }
APT::StringView view() const noexcept { return {buffer, static_cast<size_t>(line_length)}; }
bool starts_with(APT::StringView const start) const { return view().substr(0, start.size()) == start; }
bool writeTo(FileFd *const to, size_t offset = 0) const
{
if (to == nullptr)
return true;
return to->Write(buffer + offset, line_length - offset);
}
bool writeLineTo(FileFd *const to) const
{
if (to == nullptr)
return true;
buffer[line_length] = '\n';
bool const result = to->Write(buffer, line_length + 1);
buffer[line_length] = '\0';
return result;
}
bool writeNewLineIf(FileFd *const to, bool const condition) const
{
if (not condition || to == nullptr)
return true;
return to->Write("\n", 1);
}
bool readFrom(FILE *stream, std::string const &InFile, bool acceptEoF = false)
{
errno = 0;
line_length = getline(&buffer, &buffer_size, stream);
if (errno != 0)
return _error->Errno("getline", "Could not read from %s", InFile.c_str());
if (line_length == -1)
{
if (acceptEoF)
return false;
return _error->Error("Splitting of clearsigned file %s failed as it doesn't contain all expected parts", InFile.c_str());
}
// a) remove newline characters, so we can work consistently with lines
line_length = find_last_not_of_length("\n\r");
// b) remove trailing whitespaces as defined by rfc4880 §7.1
line_length = find_last_not_of_length(" \t");
buffer[line_length] = '\0';
return true;
}
~LineBuffer() { free(buffer); }
};
static bool operator==(LineBuffer const &buf, APT::StringView const exp) noexcept
{
return buf.view() == exp;
}
static bool operator!=(LineBuffer const &buf, APT::StringView const exp) noexcept
{
return buf.view() != exp;
}
/*}}}*/
// ExecGPGV - returns the command needed for verify /*{{{*/
// ---------------------------------------------------------------------
/* Generating the commandline for calling gpg is somewhat complicated as
   we need to add multiple keyrings and user supplied options.
   Also, as gpg has no options to enforce a certain reduced style of
   clear-signed files (= the complete content of the file is signed and
   the content isn't encoded) we do a divide and conquer approach here
   and split up the clear-signed file into message and signature for gpg.
   And as a cherry on the cake, we use our apt-key wrapper to do part
   of the heavy lifting in regards to merging keyrings. Fun for the whole family.
*/
static bool iovprintf(std::ostream &out, const char *format,
va_list &args, ssize_t &size) {
auto S = make_unique_char(malloc(size));
ssize_t const n = vsnprintf(S.get(), size, format, args);
if (n > -1 && n < size) {
out << S.get();
return true;
} else {
if (n > -1)
size = n + 1;
else
size *= 2;
}
return false;
}
static void APT_PRINTF(4) apt_error(std::ostream &outterm, int const statusfd, int fd[2], const char *format, ...)
{
std::ostringstream outstr;
std::ostream &out = (statusfd == -1) ? outterm : outstr;
va_list args;
ssize_t size = 400;
while (true) {
bool ret;
va_start(args,format);
ret = iovprintf(out, format, args, size);
va_end(args);
if (ret)
break;
}
if (statusfd != -1)
{
auto const errtag = "[APTKEY:] ERROR ";
outstr << '\n';
auto const errtext = outstr.str();
if (not FileFd::Write(fd[1], errtag, strlen(errtag)) ||
not FileFd::Write(fd[1], errtext.data(), errtext.size()))
outterm << errtext << std::flush;
}
}
void ExecGPGV(std::string const &File, std::string const &FileGPG,
int const &statusfd, int fd[2], std::string const &key)
{
#define EINTERNAL 111
std::string const aptkey = _config->Find("Dir::Bin::apt-key", CMAKE_INSTALL_FULL_BINDIR "/apt-key");
bool const Debug = _config->FindB("Debug::Acquire::gpgv", false);
struct exiter {
std::vector<const char *> files;
void operator ()(int code) APT_NORETURN {
std::for_each(files.begin(), files.end(), unlink);
exit(code);
}
} local_exit;
std::vector<const char *> Args;
Args.reserve(10);
Args.push_back(aptkey.c_str());
Args.push_back("--quiet");
Args.push_back("--readonly");
auto const keysFileFpr = VectorizeString(key, ',');
for (auto const &k: keysFileFpr)
{
if (unlikely(k.empty()))
continue;
if (k[0] == '/')
{
Args.push_back("--keyring");
Args.push_back(k.c_str());
}
else
{
Args.push_back("--keyid");
Args.push_back(k.c_str());
}
}
Args.push_back("verify");
char statusfdstr[10];
if (statusfd != -1)
{
Args.push_back("--status-fd");
snprintf(statusfdstr, sizeof(statusfdstr), "%i", statusfd);
Args.push_back(statusfdstr);
}
Configuration::Item const *Opts;
Opts = _config->Tree("Acquire::gpgv::Options");
if (Opts != 0)
{
Opts = Opts->Child;
for (; Opts != 0; Opts = Opts->Next)
{
if (Opts->Value.empty())
continue;
Args.push_back(Opts->Value.c_str());
}
}
enum { DETACHED, CLEARSIGNED } releaseSignature = (FileGPG != File) ? DETACHED : CLEARSIGNED;
auto sig = make_unique_char();
auto data = make_unique_char();
auto conf = make_unique_char();
// Dump the configuration so apt-key picks up the correct Dir values
{
{
std::string tmpfile;
strprintf(tmpfile, "%s/apt.conf.XXXXXX", GetTempDir().c_str());
conf.reset(strdup(tmpfile.c_str()));
}
if (conf == nullptr) {
apt_error(std::cerr, statusfd, fd, "Couldn't create tempfile names for passing config to apt-key");
local_exit(EINTERNAL);
}
int confFd = mkstemp(conf.get());
if (confFd == -1) {
apt_error(std::cerr, statusfd, fd, "Couldn't create temporary file %s for passing config to apt-key", conf.get());
local_exit(EINTERNAL);
}
local_exit.files.push_back(conf.get());
std::ofstream confStream(conf.get());
close(confFd);
_config->Dump(confStream);
confStream.close();
setenv("APT_CONFIG", conf.get(), 1);
}
if (releaseSignature == DETACHED)
{
auto detached = make_unique_FILE(FileGPG, "r");
if (detached.get() == nullptr)
{
apt_error(std::cerr, statusfd, fd, "Detached signature file '%s' could not be opened", FileGPG.c_str());
local_exit(EINTERNAL);
}
LineBuffer buf;
bool open_signature = false;
bool found_badcontent = false;
size_t found_signatures = 0;
while (buf.readFrom(detached.get(), FileGPG, true))
{
if (open_signature)
{
if (buf == "-----END PGP SIGNATURE-----")
open_signature = false;
else if (buf.starts_with("-"))
{
// the Radix-64 encoding used here never uses a dash for any value, so a valid line can't
// start with one. Header keys could, but no existing one does and new ones seem unlikely.
// Instead it smells a lot like a header the parser didn't recognize.
apt_error(std::cerr, statusfd, fd, "Detached signature file '%s' contains unexpected line starting with a dash", FileGPG.c_str());
local_exit(112);
}
}
else //if (not open_signature)
{
if (buf == "-----BEGIN PGP SIGNATURE-----")
{
open_signature = true;
++found_signatures;
if (found_badcontent)
break;
}
else
{
found_badcontent = true;
if (found_signatures != 0)
break;
}
}
}
if (found_signatures == 0 && statusfd != -1)
{
auto const errtag = "[GNUPG:] NODATA\n";
FileFd::Write(fd[1], errtag, strlen(errtag));
// guess if this is a binary signature, we never officially supported them,
// but silently accepted them by passing them unchecked to gpgv
if (found_badcontent)
{
rewind(detached.get());
auto ptag = fgetc(detached.get());
// §4.2 says that the first bit is always set and gpg seems to generate
// only old format which is indicated by the second bit not set
if (ptag != EOF && (ptag & 0x80) != 0 && (ptag & 0x40) == 0)
{
apt_error(std::cerr, statusfd, fd, "Detached signature file '%s' is in unsupported binary format", FileGPG.c_str());
local_exit(112);
}
}
// This is not an attack attempt but a file even gpgv would complain about,
// most likely the result of a paywall; that case is covered by the gpgv method
local_exit(113);
}
else if (found_badcontent)
{
apt_error(std::cerr, statusfd, fd, "Detached signature file '%s' contains lines not belonging to a signature", FileGPG.c_str());
local_exit(112);
}
if (open_signature)
{
apt_error(std::cerr, statusfd, fd, "Detached signature file '%s' contains unclosed signatures", FileGPG.c_str());
local_exit(112);
}
Args.push_back(FileGPG.c_str());
Args.push_back(File.c_str());
}
else // clear-signed file
{
FileFd signature;
if (GetTempFile("apt.sig", false, &signature) == nullptr)
local_exit(EINTERNAL);
sig.reset(strdup(signature.Name().c_str()));
local_exit.files.push_back(sig.get());
FileFd message;
if (GetTempFile("apt.data", false, &message) == nullptr)
local_exit(EINTERNAL);
data.reset(strdup(message.Name().c_str()));
local_exit.files.push_back(data.get());
if (signature.Failed() || message.Failed() ||
not SplitClearSignedFile(File, &message, nullptr, &signature))
{
apt_error(std::cerr, statusfd, fd, "Splitting up %s into data and signature failed", File.c_str());
local_exit(112);
}
Args.push_back(sig.get());
Args.push_back(data.get());
}
Args.push_back(NULL);
if (Debug)
{
std::clog << "Preparing to exec: ";
for (std::vector<const char *>::const_iterator a = Args.begin(); *a != NULL; ++a)
std::clog << " " << *a;
std::clog << std::endl;
}
if (statusfd != -1)
{
int const nullfd = open("/dev/null", O_WRONLY);
close(fd[0]);
// Redirect output to /dev/null; we read from the status fd
if (statusfd != STDOUT_FILENO)
dup2(nullfd, STDOUT_FILENO);
if (statusfd != STDERR_FILENO)
dup2(nullfd, STDERR_FILENO);
// Redirect the pipe to the status fd (3)
dup2(fd[1], statusfd);
putenv((char *)"LANG=");
putenv((char *)"LC_ALL=");
putenv((char *)"LC_MESSAGES=");
}
// We have created tempfiles we have to clean up
// and we do an additional check, so fork yet another time …
pid_t pid = ExecFork();
if(pid < 0) {
apt_error(std::cerr, statusfd, fd, "Fork failed for %s to check %s", Args[0], File.c_str());
local_exit(EINTERNAL);
}
if(pid == 0)
{
if (statusfd != -1)
dup2(fd[1], statusfd);
execvp(Args[0], (char **) &Args[0]);
apt_error(std::cerr, statusfd, fd, "Couldn't execute %s to check %s", Args[0], File.c_str());
local_exit(EINTERNAL);
}
// Wait and collect the error code - taken from WaitPid as we need the exact Status
int Status;
while (waitpid(pid,&Status,0) != pid)
{
if (errno == EINTR)
continue;
apt_error(std::cerr, statusfd, fd, _("Waited for %s but it wasn't there"), "apt-key");
local_exit(EINTERNAL);
}
// check if it exit'ed normally …
if (not WIFEXITED(Status))
{
apt_error(std::cerr, statusfd, fd, _("Sub-process %s exited unexpectedly"), "apt-key");
local_exit(EINTERNAL);
}
// … and with a good exit code
if (WEXITSTATUS(Status) != 0)
{
// we forward the statuscode, so don't generate a message on the fd in this case
apt_error(std::cerr, -1, fd, _("Sub-process %s returned an error code (%u)"), "apt-key", WEXITSTATUS(Status));
local_exit(WEXITSTATUS(Status));
}
// everything fine
local_exit(0);
}
/*}}}*/
// SplitClearSignedFile - split message into data/signature /*{{{*/
bool SplitClearSignedFile(std::string const &InFile, FileFd * const ContentFile,
std::vector<std::string> * const ContentHeader, FileFd * const SignatureFile)
{
auto in = make_unique_FILE(InFile, "r");
if (in.get() == nullptr)
return _error->Errno("fopen", "can not open %s", InFile.c_str());
struct ScopedErrors
{
ScopedErrors() { _error->PushToStack(); }
~ScopedErrors() { _error->MergeWithStack(); }
} scoped;
LineBuffer buf;
// start of the message
if (not buf.readFrom(in.get(), InFile))
return false; // empty or read error
if (buf != "-----BEGIN PGP SIGNED MESSAGE-----")
{
// this might be an unsigned file we don't want to report errors for,
// but we still finish unsuccessfully nonetheless.
while (buf.readFrom(in.get(), InFile, true))
if (buf == "-----BEGIN PGP SIGNED MESSAGE-----")
return _error->Error("Clearsigned file '%s' does not start with a signed message block.", InFile.c_str());
return false;
}
// save "Hash" Armor Headers
while (true)
{
if (not buf.readFrom(in.get(), InFile))
return false;
if (buf.empty())
break; // empty line ends the Armor Headers
if (buf.starts_with("-"))
// § 6.2 says unknown keys should be reported to the user. We don't go that far,
// but we assume that there will never be a header key starting with a dash
return _error->Error("Clearsigned file '%s' contains unexpected line starting with a dash (%s)", InFile.c_str(), "armor");
if (ContentHeader != nullptr && buf.starts_with("Hash: "))
ContentHeader->push_back(buf.view().to_string());
}
// the message itself
bool first_line = true;
while (true)
{
if (not buf.readFrom(in.get(), InFile))
return false;
if (buf.starts_with("-"))
{
if (buf == "-----BEGIN PGP SIGNATURE-----")
{
if (not buf.writeLineTo(SignatureFile))
return false;
break;
}
else if (buf.starts_with("- "))
{
// we don't have any fields which need to be dash-escaped,
// but implementations are free to escape all lines …
if (not buf.writeNewLineIf(ContentFile, not first_line) || not buf.writeTo(ContentFile, 2))
return false;
}
else
// § 7.1 says a client should warn, but we don't really work with files which
// should contain lines starting with a dash, so it is a lot more likely that
// this is an attempt to trick our parser vs. gpgv parser into ignoring a header
return _error->Error("Clearsigned file '%s' contains unexpected line starting with a dash (%s)", InFile.c_str(), "msg");
}
else if (not buf.writeNewLineIf(ContentFile, not first_line) || not buf.writeTo(ContentFile))
return false;
first_line = false;
}
// collect all signatures
bool open_signature = true;
while (true)
{
if (not buf.readFrom(in.get(), InFile, true))
break;
if (open_signature)
{
if (buf == "-----END PGP SIGNATURE-----")
open_signature = false;
else if (buf.starts_with("-"))
// the Radix-64 encoding used here never uses a dash for any value, so a valid line can't
// start with one. Header keys could, but no existing one does and new ones seem unlikely.
// Instead it smells a lot like a header the parser didn't recognize.
return _error->Error("Clearsigned file '%s' contains unexpected line starting with a dash (%s)", InFile.c_str(), "sig");
}
else //if (not open_signature)
{
if (buf == "-----BEGIN PGP SIGNATURE-----")
open_signature = true;
else
return _error->Error("Clearsigned file '%s' contains unsigned lines.", InFile.c_str());
}
if (not buf.writeLineTo(SignatureFile))
return false;
}
if (open_signature)
return _error->Error("Signature in file %s wasn't closed", InFile.c_str());
// Flush the files
if (SignatureFile != nullptr)
SignatureFile->Flush();
if (ContentFile != nullptr)
ContentFile->Flush();
// Catch-all for "unhandled" read/sync errors
if (_error->PendingError())
return false;
return true;
}
/*}}}*/
bool OpenMaybeClearSignedFile(std::string const &ClearSignedFileName, FileFd &MessageFile) /*{{{*/
{
// Buffered file
if (GetTempFile("clearsigned.message", true, &MessageFile, true) == nullptr)
return false;
if (MessageFile.Failed())
return _error->Error("Couldn't open temporary file to work with %s", ClearSignedFileName.c_str());
_error->PushToStack();
bool const splitDone = SplitClearSignedFile(ClearSignedFileName, &MessageFile, NULL, NULL);
bool const errorDone = _error->PendingError();
_error->MergeWithStack();
if (not splitDone)
{
MessageFile.Close();
if (errorDone)
return false;
// we deal with an unsigned file
MessageFile.Open(ClearSignedFileName, FileFd::ReadOnly);
}
else // clear-signed
{
if (not MessageFile.Seek(0))
return _error->Errno("lseek", "Unable to seek back in message for file %s", ClearSignedFileName.c_str());
}
return not MessageFile.Failed();
}
/*}}}*/

89
apt-pkg/contrib/gpgv.h Normal file

@ -0,0 +1,89 @@
// -*- mode: cpp; mode: fold -*-
// Description /*{{{*/
/* ######################################################################
Helpers to deal with gpgv better and more easily
##################################################################### */
/*}}}*/
#ifndef CONTRIB_GPGV_H
#define CONTRIB_GPGV_H
#include <apt-pkg/macros.h>
#include <string>
#include <vector>
class FileFd;
/** \brief generates and runs the command to verify a file with gpgv
*
* If File and FileSig specify the same file it is assumed that we
* deal with a clear-signed message. Note that the method will accept
* and validate files which include additional (unsigned) messages
* without complaining. Do NOT open files accepted by this method
* for reading. Use #OpenMaybeClearSignedFile to access the message
* instead to ensure you are only reading signed data.
*
* The method does not return, but has some notable exit-codes:
* 111 signals an internal error like the inability to execute gpgv,
* 112 indicates a clear-signed file which doesn't include a message,
* which can happen if APT is run while on a network requiring
 * authentication before usage (e.g. in hotels).
 * All other exit-codes are passed through from gpgv.
*
* @param File is the message (unsigned or clear-signed)
* @param FileSig is the signature (detached or clear-signed)
* @param statusfd is the fd given to gpgv as --status-fd
* @param fd is used as a pipe for the standard output of gpgv
* @param key is the specific one to be used instead of using all
*/
APT_PUBLIC void ExecGPGV(std::string const &File, std::string const &FileSig,
int const &statusfd, int fd[2], std::string const &Key = "") APT_NORETURN;
inline APT_NORETURN void ExecGPGV(std::string const &File, std::string const &FileSig,
int const &statusfd = -1) {
int fd[2];
ExecGPGV(File, FileSig, statusfd, fd);
}
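Because ExecGPGV() never returns, callers fork first; a minimal sketch with placeholder file names and without the status-fd plumbing.
#include <apt-pkg/fileutl.h>
#include <apt-pkg/gpgv.h>
static bool VerifyDetached()
{
   pid_t const Child = ExecFork();
   if (Child == 0)
      ExecGPGV("Release", "Release.gpg");        // detached signature; this call exits on its own
   return ExecWait(Child, "apt-key");            // exit code 0 means verification succeeded
}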
/** \brief Split an inline signature into message and signature
*
* Takes a clear-signed message and puts the first signed message
* in the content file and all signatures following it into the
* second. Unsigned messages, additional messages as well as
* whitespaces are discarded. The resulting files are suitable to
* be checked with gpgv.
*
 * If a FileFd pointer is NULL it will not be used and the content
* which would have been written to it is silently discarded.
*
* The content of the split files is undefined if the splitting was
* unsuccessful.
*
* Note that trying to split an unsigned file will fail, but
* not generate an error message.
*
* @param InFile is the clear-signed file
* @param ContentFile is the FileFd the message will be written to
 * @param ContentHeader is a list of all required Armored Headers for the message
* @param SignatureFile is the FileFd all signatures will be written to
* @return true if the splitting was successful, false otherwise
*/
APT_PUBLIC bool SplitClearSignedFile(std::string const &InFile, FileFd * const ContentFile,
std::vector<std::string> * const ContentHeader, FileFd * const SignatureFile);
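A sketch of the splitting step much like ExecGPGV() performs it itself; the InRelease path is a placeholder and the temporary files are kept on disk so they could be handed to gpgv afterwards.
#include <apt-pkg/fileutl.h>
#include <apt-pkg/gpgv.h>
#include <string>
#include <vector>
static bool SplitInRelease()
{
   FileFd Message, Signature;
   if (GetTempFile("apt.data", false, &Message) == nullptr ||
       GetTempFile("apt.sig", false, &Signature) == nullptr)
      return false;
   std::vector<std::string> Headers;             // collects the "Hash:" armor headers
   if (not SplitClearSignedFile("InRelease", &Message, &Headers, &Signature))
      return false;                              // unsigned file or broken signature layout
   return not Message.Failed() && not Signature.Failed();
}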
/** \brief open a file which might be clear-signed
*
* This method tries to extract the (signed) message of a file.
* If the file isn't signed it will just open the given filename.
* Otherwise the message is extracted to a temporary file which
* will be opened instead.
*
* @param ClearSignedFileName is the name of the file to open
* @param[out] MessageFile is the FileFd in which the file will be opened
* @return true if opening was successful, otherwise false
*/
APT_PUBLIC bool OpenMaybeClearSignedFile(std::string const &ClearSignedFileName, FileFd &MessageFile);
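Reading only the signed payload of a possibly clear-signed file; the path argument is whatever file the caller wants to inspect.
#include <apt-pkg/fileutl.h>
#include <apt-pkg/gpgv.h>
#include <iostream>
#include <string>
static bool PrintSignedPayload(std::string const &Path)
{
   FileFd Message;
   if (not OpenMaybeClearSignedFile(Path, Message))
      return false;
   std::string Line;
   while (Message.ReadLine(Line))                // armor headers and signatures are gone
      std::cout << Line << '\n';
   return not Message.Failed();
}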
#endif

452
apt-pkg/contrib/hashes.cc Normal file

@ -0,0 +1,452 @@
// -*- mode: cpp; mode: fold -*-
// Description /*{{{*/
/* ######################################################################
Hashes - Simple wrapper around the hash functions
This is just used to make building the methods simpler, this is the
only interface required..
##################################################################### */
/*}}}*/
// Include Files /*{{{*/
#include <config.h>
#include <apt-pkg/configuration.h>
#include <apt-pkg/fileutl.h>
#include <apt-pkg/hashes.h>
#include <apt-pkg/macros.h>
#include <apt-pkg/strutl.h>
#include <algorithm>
#include <iostream>
#include <string>
#include <assert.h>
#include <stddef.h>
#include <stdlib.h>
#include <unistd.h>
#include <gcrypt.h>
/*}}}*/
static const constexpr struct HashAlgo
{
const char *name;
int gcryAlgo;
Hashes::SupportedHashes ourAlgo;
} Algorithms[] = {
{"MD5Sum", GCRY_MD_MD5, Hashes::MD5SUM},
{"SHA1", GCRY_MD_SHA1, Hashes::SHA1SUM},
{"SHA256", GCRY_MD_SHA256, Hashes::SHA256SUM},
{"SHA512", GCRY_MD_SHA512, Hashes::SHA512SUM},
};
const char * HashString::_SupportedHashes[] =
{
"SHA512", "SHA256", "SHA1", "MD5Sum", "Checksum-FileSize", NULL
};
HashString::HashString()
{
}
HashString::HashString(std::string Type, std::string Hash) : Type(Type), Hash(Hash)
{
}
HashString::HashString(std::string StringedHash) /*{{{*/
{
if (StringedHash.find(":") == std::string::npos)
{
// legacy: md5sum without "MD5Sum:" prefix
if (StringedHash.size() == 32)
{
Type = "MD5Sum";
Hash = StringedHash;
}
if(_config->FindB("Debug::Hashes",false) == true)
std::clog << "HashString(string): invalid StringedHash " << StringedHash << std::endl;
return;
}
std::string::size_type pos = StringedHash.find(":");
Type = StringedHash.substr(0,pos);
Hash = StringedHash.substr(pos+1, StringedHash.size() - pos);
if(_config->FindB("Debug::Hashes",false) == true)
std::clog << "HashString(string): " << Type << " : " << Hash << std::endl;
}
/*}}}*/
bool HashString::VerifyFile(std::string filename) const /*{{{*/
{
std::string fileHash = GetHashForFile(filename);
if(_config->FindB("Debug::Hashes",false) == true)
std::clog << "HashString::VerifyFile: got: " << fileHash << " expected: " << toStr() << std::endl;
return (fileHash == Hash);
}
/*}}}*/
bool HashString::FromFile(std::string filename) /*{{{*/
{
// pick the strongest hash
if (Type == "")
Type = _SupportedHashes[0];
Hash = GetHashForFile(filename);
return true;
}
/*}}}*/
std::string HashString::GetHashForFile(std::string filename) const /*{{{*/
{
std::string fileHash;
FileFd Fd(filename, FileFd::ReadOnly);
if(strcasecmp(Type.c_str(), "MD5Sum") == 0)
{
Hashes MD5(Hashes::MD5SUM);
MD5.AddFD(Fd);
fileHash = MD5.GetHashString(Hashes::MD5SUM).Hash;
}
else if (strcasecmp(Type.c_str(), "SHA1") == 0)
{
Hashes SHA1(Hashes::SHA1SUM);
SHA1.AddFD(Fd);
fileHash = SHA1.GetHashString(Hashes::SHA1SUM).Hash;
}
else if (strcasecmp(Type.c_str(), "SHA256") == 0)
{
Hashes SHA256(Hashes::SHA256SUM);
SHA256.AddFD(Fd);
fileHash = SHA256.GetHashString(Hashes::SHA256SUM).Hash;
}
else if (strcasecmp(Type.c_str(), "SHA512") == 0)
{
Hashes SHA512(Hashes::SHA512SUM);
SHA512.AddFD(Fd);
fileHash = SHA512.GetHashString(Hashes::SHA512SUM).Hash;
}
else if (strcasecmp(Type.c_str(), "Checksum-FileSize") == 0)
strprintf(fileHash, "%llu", Fd.FileSize());
Fd.Close();
return fileHash;
}
/*}}}*/
const char** HashString::SupportedHashes() /*{{{*/
{
return _SupportedHashes;
}
/*}}}*/
APT_PURE bool HashString::empty() const /*{{{*/
{
return (Type.empty() || Hash.empty());
}
/*}}}*/
APT_PURE static bool IsConfigured(const char *name, const char *what)
{
std::string option;
strprintf(option, "APT::Hashes::%s::%s", name, what);
return _config->FindB(option, false);
}
APT_PURE bool HashString::usable() const /*{{{*/
{
return (
(Type != "Checksum-FileSize") &&
(Type != "MD5Sum") &&
(Type != "SHA1") &&
!IsConfigured(Type.c_str(), "Untrusted")
);
}
/*}}}*/
std::string HashString::toStr() const /*{{{*/
{
return Type + ":" + Hash;
}
/*}}}*/
APT_PURE bool HashString::operator==(HashString const &other) const /*{{{*/
{
return (strcasecmp(Type.c_str(), other.Type.c_str()) == 0 && Hash == other.Hash);
}
APT_PURE bool HashString::operator!=(HashString const &other) const
{
return !(*this == other);
}
/*}}}*/
bool HashStringList::usable() const /*{{{*/
{
if (empty() == true)
return false;
std::string const forcedType = _config->Find("Acquire::ForceHash", "");
if (forcedType.empty() == true)
{
// See if there is at least one usable hash
return std::any_of(list.begin(), list.end(), [](auto const &hs) { return hs.usable(); });
}
return find(forcedType) != NULL;
}
/*}}}*/
HashString const * HashStringList::find(char const * const type) const /*{{{*/
{
if (type == NULL || type[0] == '\0')
{
std::string const forcedType = _config->Find("Acquire::ForceHash", "");
if (forcedType.empty() == false)
return find(forcedType.c_str());
for (char const * const * t = HashString::SupportedHashes(); *t != NULL; ++t)
for (std::vector<HashString>::const_iterator hs = list.begin(); hs != list.end(); ++hs)
if (strcasecmp(hs->HashType().c_str(), *t) == 0)
return &*hs;
return NULL;
}
for (std::vector<HashString>::const_iterator hs = list.begin(); hs != list.end(); ++hs)
if (strcasecmp(hs->HashType().c_str(), type) == 0)
return &*hs;
return NULL;
}
/*}}}*/
unsigned long long HashStringList::FileSize() const /*{{{*/
{
HashString const * const hsf = find("Checksum-FileSize");
if (hsf == NULL)
return 0;
std::string const hv = hsf->HashValue();
return strtoull(hv.c_str(), NULL, 10);
}
/*}}}*/
bool HashStringList::FileSize(unsigned long long const Size) /*{{{*/
{
return push_back(HashString("Checksum-FileSize", std::to_string(Size)));
}
/*}}}*/
bool HashStringList::supported(char const * const type) /*{{{*/
{
for (char const * const * t = HashString::SupportedHashes(); *t != NULL; ++t)
if (strcasecmp(*t, type) == 0)
return true;
return false;
}
/*}}}*/
bool HashStringList::push_back(const HashString &hashString) /*{{{*/
{
if (hashString.HashType().empty() == true ||
hashString.HashValue().empty() == true ||
supported(hashString.HashType().c_str()) == false)
return false;
// ensure that each type is added only once
HashString const * const hs = find(hashString.HashType().c_str());
if (hs != NULL)
return *hs == hashString;
list.push_back(hashString);
return true;
}
/*}}}*/
bool HashStringList::VerifyFile(std::string filename) const /*{{{*/
{
if (usable() == false)
return false;
Hashes hashes(*this);
FileFd file(filename, FileFd::ReadOnly);
HashString const * const hsf = find("Checksum-FileSize");
if (hsf != NULL)
{
std::string fileSize;
strprintf(fileSize, "%llu", file.FileSize());
if (hsf->HashValue() != fileSize)
return false;
}
hashes.AddFD(file);
HashStringList const hsl = hashes.GetHashStringList();
return hsl == *this;
}
/*}}}*/
bool HashStringList::operator==(HashStringList const &other) const /*{{{*/
{
std::string const forcedType = _config->Find("Acquire::ForceHash", "");
if (forcedType.empty() == false)
{
HashString const * const hs = find(forcedType);
HashString const * const ohs = other.find(forcedType);
if (hs == NULL || ohs == NULL)
return false;
return *hs == *ohs;
}
short matches = 0;
for (const_iterator hs = begin(); hs != end(); ++hs)
{
HashString const * const ohs = other.find(hs->HashType());
if (ohs == NULL)
continue;
if (*hs != *ohs)
return false;
++matches;
}
if (matches == 0)
return false;
return true;
}
bool HashStringList::operator!=(HashStringList const &other) const
{
return !(*this == other);
}
/*}}}*/
// PrivateHashes /*{{{*/
class PrivateHashes {
public:
unsigned long long FileSize;
gcry_md_hd_t hd;
void maybeInit()
{
// Yikes, we have to initialize libgcrypt, or we get warnings. But we
// abstract away libgcrypt in Hashes from our users - they are not
// supposed to know what the hashing backend is, so we can't force
// them to init themselves as the libgcrypt folks want us to. So this
// leaves us with only this option...
if (!gcry_control(GCRYCTL_INITIALIZATION_FINISHED_P))
{
if (!gcry_check_version(nullptr))
{
fprintf(stderr, "libgcrypt is too old (need %s, have %s)\n",
"nullptr", gcry_check_version(NULL));
exit(2);
}
gcry_control(GCRYCTL_INITIALIZATION_FINISHED, 0);
}
}
explicit PrivateHashes(unsigned int const CalcHashes) : FileSize(0)
{
maybeInit();
gcry_md_open(&hd, 0, 0);
for (auto & Algo : Algorithms)
{
if ((CalcHashes & Algo.ourAlgo) == Algo.ourAlgo)
gcry_md_enable(hd, Algo.gcryAlgo);
}
}
explicit PrivateHashes(HashStringList const &Hashes) : FileSize(0) {
maybeInit();
gcry_md_open(&hd, 0, 0);
for (auto & Algo : Algorithms)
{
if (not Hashes.usable() || Hashes.find(Algo.name) != NULL)
gcry_md_enable(hd, Algo.gcryAlgo);
}
}
~PrivateHashes()
{
gcry_md_close(hd);
}
};
/*}}}*/
// Hashes::Add* - Add the contents of data or FD /*{{{*/
bool Hashes::Add(const unsigned char * const Data, unsigned long long const Size)
{
if (Size != 0)
{
gcry_md_write(d->hd, Data, Size);
d->FileSize += Size;
}
return true;
}
bool Hashes::AddFD(int const Fd,unsigned long long Size)
{
unsigned char Buf[APT_BUFFER_SIZE];
bool const ToEOF = (Size == UntilEOF);
while (Size != 0 || ToEOF)
{
decltype(Size) n = sizeof(Buf);
if (!ToEOF) n = std::min(Size, n);
ssize_t const Res = read(Fd,Buf,n);
if (Res < 0 || (!ToEOF && Res != (ssize_t) n)) // error, or short read
return false;
if (ToEOF && Res == 0) // EOF
break;
Size -= Res;
if (Add(Buf, Res) == false)
return false;
}
return true;
}
bool Hashes::AddFD(FileFd &Fd,unsigned long long Size)
{
unsigned char Buf[APT_BUFFER_SIZE];
bool const ToEOF = (Size == 0);
while (Size != 0 || ToEOF)
{
decltype(Size) n = sizeof(Buf);
if (!ToEOF) n = std::min(Size, n);
decltype(Size) a = 0;
if (Fd.Read(Buf, n, &a) == false) // error
return false;
if (ToEOF == false)
{
if (a != n) // short read
return false;
}
else if (a == 0) // EOF
break;
Size -= a;
if (Add(Buf, a) == false)
return false;
}
return true;
}
/*}}}*/
static APT_PURE std::string HexDigest(gcry_md_hd_t hd, int algo)
{
char Conv[16] =
{'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b',
'c', 'd', 'e', 'f'};
auto Size = gcry_md_get_algo_dlen(algo);
char Result[((Size)*2) + 1];
Result[(Size)*2] = 0;
auto Sum = gcry_md_read(hd, algo);
// Convert each char into two letters
size_t J = 0;
size_t I = 0;
for (; I != (Size)*2; J++, I += 2)
{
Result[I] = Conv[Sum[J] >> 4];
Result[I + 1] = Conv[Sum[J] & 0xF];
}
return std::string(Result);
};
HashStringList Hashes::GetHashStringList()
{
HashStringList hashes;
for (auto & Algo : Algorithms)
if (gcry_md_is_enabled(d->hd, Algo.gcryAlgo))
hashes.push_back(HashString(Algo.name, HexDigest(d->hd, Algo.gcryAlgo)));
hashes.FileSize(d->FileSize);
return hashes;
}
HashString Hashes::GetHashString(SupportedHashes hash)
{
for (auto & Algo : Algorithms)
if (hash == Algo.ourAlgo)
return HashString(Algo.name, HexDigest(d->hd, Algo.gcryAlgo));
abort();
}
Hashes::Hashes() : d(new PrivateHashes(~0)) { }
Hashes::Hashes(unsigned int const Hashes) : d(new PrivateHashes(Hashes)) {}
Hashes::Hashes(HashStringList const &Hashes) : d(new PrivateHashes(Hashes)) {}
Hashes::~Hashes() { delete d; }

208
apt-pkg/contrib/hashes.h Normal file

@ -0,0 +1,208 @@
// -*- mode: cpp; mode: fold -*-
// Description /*{{{*/
/* ######################################################################
   Hashes - Simple wrapper around the hash functions
   This is just used to make building the methods simpler; this is the
   only interface required.
##################################################################### */
/*}}}*/
#ifndef APTPKG_HASHES_H
#define APTPKG_HASHES_H
#include <apt-pkg/macros.h>
#include <cstring>
#include <string>
#include <vector>
class FileFd;
// helper class that contains hash function name
// and hash
class APT_PUBLIC HashString
{
protected:
std::string Type;
std::string Hash;
static const char * _SupportedHashes[10];
// internal helper
std::string GetHashForFile(std::string filename) const;
public:
HashString(std::string Type, std::string Hash);
explicit HashString(std::string StringedHashString); // init from str as "type:hash"
HashString();
// get hash type used
std::string HashType() const { return Type; };
std::string HashValue() const { return Hash; };
// verify the given filename against the currently loaded hash
bool VerifyFile(std::string filename) const;
// generate a hash string from the given filename
bool FromFile(std::string filename);
// helper
std::string toStr() const; // convert to str as "type:hash"
bool empty() const;
bool usable() const;
bool operator==(HashString const &other) const;
bool operator!=(HashString const &other) const;
// return the list of hashes we support
static APT_PURE const char** SupportedHashes();
};
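A small sketch of HashString; the digest and the file path are placeholders.
#include <apt-pkg/hashes.h>
#include <iostream>
static void CheckFile()
{
   // "type:hash" form, as accepted by the explicit constructor above
   HashString Expected("SHA256:9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a08");
   if (Expected.usable() && Expected.VerifyFile("./data.txt"))
      std::cout << "match: " << Expected.toStr() << std::endl;
}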
class APT_PUBLIC HashStringList
{
public:
/** find best hash if no specific one is requested
*
* @param type of the checksum to return, can be \b NULL
* @return If type is \b NULL (or the empty string) it will
* return the 'best' hash; otherwise the hash which was
* specifically requested. If no hash is found \b NULL will be returned.
*/
HashString const * find(char const * const type) const;
HashString const * find(std::string const &type) const { return find(type.c_str()); }
/** finds the filesize hash and returns it as number
*
    * @return beware: if the size isn't known we return \b 0 here,
    * just like we would do for an empty file. If that is a problem
    * for you, you have to get the size manually out of the list.
*/
unsigned long long FileSize() const;
/** sets the filesize hash
*
* @param Size of the file
* @return @see #push_back
*/
bool FileSize(unsigned long long const Size);
/** check if the given hash type is supported
*
* @param type to check
* @return true if supported, otherwise false
*/
static APT_PURE bool supported(char const * const type);
/** add the given #HashString to the list
*
* @param hashString to add
    * @return true if the hash was added because it is supported and
    * no different hash of the same type was already included, otherwise false
*/
bool push_back(const HashString &hashString);
/** @return size of the list of HashStrings */
size_t size() const { return list.size(); }
/** verify file against all hashes in the list
*
* @param filename to verify
* @return true if the file matches the hashsum, otherwise false
*/
bool VerifyFile(std::string filename) const;
/** is the list empty ?
*
* @return \b true if the list is empty, otherwise \b false
*/
bool empty() const { return list.empty(); }
/** has the list at least one good entry
*
* similar to #empty, but handles forced hashes.
*
    * @return if no hash is forced, same result as #empty;
    * if one is forced, \b true if this hash is available, \b false otherwise
*/
bool usable() const;
typedef std::vector<HashString>::const_iterator const_iterator;
/** iterator to the first element */
const_iterator begin() const { return list.begin(); }
/** iterator to the end element */
const_iterator end() const { return list.end(); }
/** start fresh with a clear list */
void clear() { list.clear(); }
/** compare two HashStringList for similarity.
*
* Two lists are similar if at least one hashtype is in both lists
* and the hashsum matches. All hashes are checked by default,
* if one doesn't match false is returned regardless of how many
* matched before. If a hash is forced, only this hash is compared,
* all others are ignored.
*/
bool operator==(HashStringList const &other) const;
bool operator!=(HashStringList const &other) const;
HashStringList() {}
// simplifying API-compatibility constructors
explicit HashStringList(std::string const &hash) {
if (hash.empty() == false)
list.push_back(HashString(hash));
}
explicit HashStringList(char const * const hash) {
if (hash != NULL && hash[0] != '\0')
list.push_back(HashString(hash));
}
private:
std::vector<HashString> list;
};
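A sketch of collecting several hashes for one file and verifying against them; digest, size and path are placeholders.
#include <apt-pkg/hashes.h>
static bool CheckAgainstList()
{
   HashStringList Sums;
   Sums.push_back(HashString("SHA256", "9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a08"));
   Sums.FileSize(4);                             // stored as the Checksum-FileSize entry
   HashString const * const Best = Sums.find(""); // empty type requests the 'best' hash
   return Best != nullptr && Sums.usable() && Sums.VerifyFile("./data.txt");
}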
class PrivateHashes;
class APT_PUBLIC Hashes
{
PrivateHashes * const d;
public:
static const int UntilEOF = 0;
bool Add(const unsigned char * const Data, unsigned long long const Size) APT_NONNULL(2);
inline bool Add(const char * const Data) APT_NONNULL(2)
{return Add(reinterpret_cast<unsigned char const *>(Data),strlen(Data));};
inline bool Add(const char *const Data, unsigned long long const Size) APT_NONNULL(2)
{
return Add(reinterpret_cast<unsigned char const *>(Data), Size);
};
inline bool Add(const unsigned char * const Beg,const unsigned char * const End) APT_NONNULL(2,3)
{return Add(Beg,End-Beg);};
enum SupportedHashes { MD5SUM = (1 << 0), SHA1SUM = (1 << 1), SHA256SUM = (1 << 2),
SHA512SUM = (1 << 3) };
bool AddFD(int const Fd,unsigned long long Size = 0);
bool AddFD(FileFd &Fd,unsigned long long Size = 0);
HashStringList GetHashStringList();
   /** Get a specific hash. It is an error to request a hash type that was not calculated */
HashString GetHashString(SupportedHashes hash);
/** create a Hashes object to calculate all supported hashes
*
* If ALL is too much, you can limit which Hashes are calculated
* with the following other constructors which mention explicitly
* which hashes to generate. */
Hashes();
/** @param Hashes bitflag composed of #SupportedHashes */
explicit Hashes(unsigned int const Hashes);
/** @param Hashes is a list of hashes */
explicit Hashes(HashStringList const &Hashes);
virtual ~Hashes();
};
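A sketch of the Hashes facade itself, limited to SHA256 and SHA512; the file path is a placeholder.
#include <apt-pkg/fileutl.h>
#include <apt-pkg/hashes.h>
#include <iostream>
static void HashAFile()
{
   Hashes Hasher(Hashes::SHA256SUM | Hashes::SHA512SUM);
   FileFd Fd("./data.txt", FileFd::ReadOnly);
   if (Fd.IsOpen() && Hasher.AddFD(Fd))          // Size = 0 means read until EOF
   {
      std::cout << Hasher.GetHashString(Hashes::SHA256SUM).toStr() << std::endl;
      for (auto const &hs : Hasher.GetHashStringList())
         std::cout << hs.toStr() << std::endl;   // includes the Checksum-FileSize entry
   }
}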
#endif

127
apt-pkg/contrib/macros.h Normal file

@ -0,0 +1,127 @@
// -*- mode: cpp; mode: fold -*-
// Description /*{{{*/
/* ######################################################################
Macros Header - Various useful macro definitions
This source is placed in the Public Domain, do with it what you will
It was originally written by Brian C. White.
##################################################################### */
/*}}}*/
// Private header
#ifndef MACROS_H
#define MACROS_H
/* Useful count macro, use on an array of things and it will return the
number of items in the array */
#define APT_ARRAY_SIZE(a) (sizeof(a) / sizeof(a[0]))
// Flag Macros
#define FLAG(f) (1L << (f))
#define SETFLAG(v,f) ((v) |= FLAG(f))
#define CLRFLAG(v,f) ((v) &=~FLAG(f))
#define CHKFLAG(v,f) ((v) & FLAG(f) ? true : false)
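A brief illustration of the flag helpers above; Status and the bit number are arbitrary example values.
static inline void FlagExample()
{
   unsigned long Status = 0;
   SETFLAG(Status, 3);                           // FLAG(3) == (1L << 3) == 8
   if (CHKFLAG(Status, 3))
      CLRFLAG(Status, 3);                        // back to 0
}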
#ifdef __GNUC__
#define APT_GCC_VERSION (__GNUC__ << 8 | __GNUC_MINOR__)
#else
#define APT_GCC_VERSION 0
#endif
#ifdef APT_COMPILING_APT
/* likely() and unlikely() can be used to mark boolean expressions
as (not) likely true which will help the compiler to optimise */
#if APT_GCC_VERSION >= 0x0300
#define likely(x) __builtin_expect (!!(x), 1)
#define unlikely(x) __builtin_expect (!!(x), 0)
#else
#define likely(x) (x)
#define unlikely(x) (x)
#endif
#endif
#if APT_GCC_VERSION >= 0x0300
#define APT_DEPRECATED __attribute__ ((deprecated))
#define APT_DEPRECATED_MSG(X) __attribute__ ((deprecated(X)))
// __attribute__((const)) is too dangerous for us, we end up using it wrongly
#define APT_PURE __attribute__((pure))
#define APT_NORETURN __attribute__((noreturn))
#define APT_PRINTF(n) __attribute__((format(printf, n, n + 1)))
#define APT_WEAK __attribute__((weak));
#define APT_UNUSED __attribute__((unused))
#else
#define APT_DEPRECATED
#define APT_DEPRECATED_MSG
#define APT_PURE
#define APT_NORETURN
#define APT_PRINTF(n)
#define APT_WEAK
#define APT_UNUSED
#endif
#if APT_GCC_VERSION > 0x0302
#define APT_NONNULL(...) __attribute__((nonnull(__VA_ARGS__)))
#define APT_MUSTCHECK __attribute__((warn_unused_result))
#else
#define APT_NONNULL(...)
#define APT_MUSTCHECK
#endif
#if APT_GCC_VERSION >= 0x0400
#define APT_SENTINEL __attribute__((sentinel))
#define APT_PUBLIC __attribute__ ((visibility ("default")))
#define APT_HIDDEN __attribute__ ((visibility ("hidden")))
#else
#define APT_SENTINEL
#define APT_PUBLIC
#define APT_HIDDEN
#endif
// cold functions are unlikely() to be called
#if APT_GCC_VERSION >= 0x0403
#define APT_COLD __attribute__ ((__cold__))
#define APT_HOT __attribute__ ((__hot__))
#else
#define APT_COLD
#define APT_HOT
#endif
#if __GNUC__ >= 4
#define APT_IGNORE_DEPRECATED_PUSH \
_Pragma("GCC diagnostic push") \
_Pragma("GCC diagnostic ignored \"-Wdeprecated-declarations\"")
#define APT_IGNORE_DEPRECATED_POP \
_Pragma("GCC diagnostic pop")
/* gcc has various problems with this shortcut, so prefer the long form */
#define APT_IGNORE_DEPRECATED(XXX) \
APT_IGNORE_DEPRECATED_PUSH \
XXX \
APT_IGNORE_DEPRECATED_POP
#else
#define APT_IGNORE_DEPRECATED_PUSH
#define APT_IGNORE_DEPRECATED_POP
#define APT_IGNORE_DEPRECATED(XXX) XXX
#endif
#if __cplusplus >= 201103L
#define APT_OVERRIDE override
#else
#define APT_OVERRIDE /* no c++11 standard */
#endif
// These lines are extracted by the makefiles and the buildsystem
// Increasing MAJOR or MINOR results in the need of recompiling all
// reverse-dependencies of libapt-pkg against the new SONAME.
// Non-ABI-Breaks should only increase RELEASE number.
// See also buildlib/libversion.mak
#define APT_PKG_MAJOR 6
#define APT_PKG_MINOR 0
#define APT_PKG_RELEASE 0
#define APT_PKG_ABI ((APT_PKG_MAJOR * 100) + APT_PKG_MINOR)
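// With the values above APT_PKG_ABI evaluates to (6 * 100) + 0 = 600; bumping
// only RELEASE leaves it unchanged, which is why ABI breaks require a MAJOR or
// MINOR change as described in the comment above.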
/* Should be a multiple of the common page size (4096) */
static constexpr unsigned long long APT_BUFFER_SIZE = 64 * 1024;
#endif

503
apt-pkg/contrib/mmap.cc Normal file

@ -0,0 +1,503 @@
// -*- mode: cpp; mode: fold -*-
// Description /*{{{*/
/* ######################################################################
MMap Class - Provides 'real' mmap or a faked mmap using read().
MMap cover class.
Some broken versions of glibc2 (libc6) have a broken definition
of mmap that accepts a char * -- all other systems (and libc5) use
void *. We can't safely do anything here that would be portable, so
libc6 generates warnings -- which should be errors, g++ isn't properly
strict.
##################################################################### */
/*}}}*/
// Include Files /*{{{*/
#define _DEFAULT_SOURCE
#include <config.h>
#include <apt-pkg/error.h>
#include <apt-pkg/fileutl.h>
#include <apt-pkg/macros.h>
#include <apt-pkg/mmap.h>
#include <cstring>
#include <string>
#include <errno.h>
#include <stdlib.h>
#include <unistd.h>
#include <apti18n.h>
/*}}}*/
// MMap::MMap - Constructor /*{{{*/
// ---------------------------------------------------------------------
/* */
MMap::MMap(FileFd &F,unsigned long Flags) : Flags(Flags), iSize(0),
Base(nullptr), SyncToFd(nullptr)
{
if ((Flags & NoImmMap) != NoImmMap)
Map(F);
}
/*}}}*/
// MMap::MMap - Constructor /*{{{*/
// ---------------------------------------------------------------------
/* */
MMap::MMap(unsigned long Flags) : Flags(Flags), iSize(0),
Base(nullptr), SyncToFd(nullptr)
{
}
/*}}}*/
// MMap::~MMap - Destructor /*{{{*/
// ---------------------------------------------------------------------
/* */
MMap::~MMap()
{
Close();
}
/*}}}*/
// MMap::Map - Perform the mapping /*{{{*/
// ---------------------------------------------------------------------
/* */
bool MMap::Map(FileFd &Fd)
{
iSize = Fd.Size();
// Set the permissions.
int Prot = PROT_READ;
int Map = MAP_SHARED;
if ((Flags & ReadOnly) != ReadOnly)
Prot |= PROT_WRITE;
if ((Flags & Public) != Public)
Map = MAP_PRIVATE;
if (iSize == 0)
return _error->Error(_("Can't mmap an empty file"));
   // We can't mmap compressed fds directly, so we need to read them completely
if (Fd.IsCompressed() == true)
{
if ((Flags & ReadOnly) != ReadOnly)
return _error->Error("Compressed file %s can only be mapped readonly", Fd.Name().c_str());
Base = malloc(iSize);
if (unlikely(Base == nullptr))
return _error->Errno("MMap-compressed-malloc", _("Couldn't make mmap of %llu bytes"), iSize);
SyncToFd = new FileFd();
if (Fd.Seek(0L) == false || Fd.Read(Base, iSize) == false)
return _error->Error("Compressed file %s can't be read into mmap", Fd.Name().c_str());
return true;
}
// Map it.
Base = (Flags & Fallback) ? MAP_FAILED : mmap(0,iSize,Prot,Map,Fd.Fd(),0);
if (Base == MAP_FAILED)
{
if (errno == ENODEV || errno == EINVAL || (Flags & Fallback))
{
// The filesystem doesn't support this particular kind of mmap.
// So we allocate a buffer and read the whole file into it.
if ((Flags & ReadOnly) == ReadOnly)
{
// for readonly, we don't need sync, so make it simple
Base = malloc(iSize);
if (unlikely(Base == nullptr))
return _error->Errno("MMap-malloc", _("Couldn't make mmap of %llu bytes"), iSize);
SyncToFd = new FileFd();
return Fd.Read(Base, iSize);
}
// FIXME: Writing to compressed fd's ?
int const dupped_fd = dup(Fd.Fd());
if (dupped_fd == -1)
return _error->Errno("mmap", _("Couldn't duplicate file descriptor %i"), Fd.Fd());
Base = calloc(iSize, 1);
if (unlikely(Base == nullptr))
return _error->Errno("MMap-calloc", _("Couldn't make mmap of %llu bytes"), iSize);
SyncToFd = new FileFd (dupped_fd);
if (!SyncToFd->Seek(0L) || !SyncToFd->Read(Base, iSize))
return false;
}
else
return _error->Errno("MMap-mmap", _("Couldn't make mmap of %llu bytes"), iSize);
}
return true;
}
/*}}}*/
// MMap::Close - Close the map /*{{{*/
// ---------------------------------------------------------------------
/* */
bool MMap::Close(bool DoSync)
{
if ((Flags & UnMapped) == UnMapped || validData() == false || iSize == 0)
return true;
if (DoSync == true)
Sync();
if (SyncToFd != NULL)
{
free(Base);
delete SyncToFd;
SyncToFd = NULL;
}
else
{
if (munmap((char *)Base, iSize) != 0)
_error->WarningE("mmap", _("Unable to close mmap"));
}
iSize = 0;
Base = 0;
return true;
}
/*}}}*/
// MMap::Sync - Synchronize the map with the disk /*{{{*/
// ---------------------------------------------------------------------
/* This is done in synchronous mode - the docs indicate that this will
not return till all IO is complete */
bool MMap::Sync()
{
if ((Flags & UnMapped) == UnMapped)
return true;
if ((Flags & ReadOnly) != ReadOnly)
{
if (SyncToFd != NULL)
{
if (!SyncToFd->Seek(0) || !SyncToFd->Write(Base, iSize))
return false;
}
else
{
#ifdef _POSIX_SYNCHRONIZED_IO
if (msync((char *)Base, iSize, MS_SYNC) < 0)
return _error->Errno("msync", _("Unable to synchronize mmap"));
#endif
}
}
return true;
}
/*}}}*/
// MMap::Sync - Synchronize a section of the file to disk /*{{{*/
// ---------------------------------------------------------------------
/* */
bool MMap::Sync(unsigned long Start,unsigned long Stop)
{
if ((Flags & UnMapped) == UnMapped)
return true;
if ((Flags & ReadOnly) != ReadOnly)
{
if (SyncToFd != 0)
{
if (!SyncToFd->Seek(0) ||
!SyncToFd->Write (((char *)Base)+Start, Stop-Start))
return false;
}
else
{
#ifdef _POSIX_SYNCHRONIZED_IO
unsigned long long const PSize = sysconf(_SC_PAGESIZE);
if (msync((char *)Base+(Start/PSize)*PSize, Stop - Start, MS_SYNC) < 0)
return _error->Errno("msync", _("Unable to synchronize mmap"));
#endif
}
}
return true;
}
/*}}}*/
// DynamicMMap::DynamicMMap - Constructor /*{{{*/
// ---------------------------------------------------------------------
/* */
DynamicMMap::DynamicMMap(FileFd &F,unsigned long Flags,unsigned long const &Workspace,
unsigned long const &Grow, unsigned long const &Limit) :
MMap(F,Flags | NoImmMap), Fd(&F), WorkSpace(Workspace),
GrowFactor(Grow), Limit(Limit)
{
// disable Moveable if we don't grow
if (Grow == 0)
this->Flags &= ~Moveable;
#ifndef __linux__
// kfreebsd doesn't have mremap, so we use the fallback
if ((this->Flags & Moveable) == Moveable)
this->Flags |= Fallback;
#endif
unsigned long long EndOfFile = Fd->Size();
if (EndOfFile > WorkSpace)
WorkSpace = EndOfFile;
else if(WorkSpace > 0)
{
Fd->Seek(WorkSpace - 1);
char C = 0;
Fd->Write(&C,sizeof(C));
}
Map(F);
iSize = EndOfFile;
}
/*}}}*/
// DynamicMMap::DynamicMMap - Constructor for a non-file backed map /*{{{*/
// ---------------------------------------------------------------------
/* We try here to use mmap to reserve some space - this is much
   cooler than the fallback solution of simply allocating a char array
   and could come in handy later when we are able to grow such an mmap */
DynamicMMap::DynamicMMap(unsigned long Flags,unsigned long const &WorkSpace,
unsigned long const &Grow, unsigned long const &Limit) :
MMap(Flags | NoImmMap | UnMapped), Fd(0), WorkSpace(WorkSpace),
GrowFactor(Grow), Limit(Limit)
{
// disable Moveable if we don't grow
if (Grow == 0)
this->Flags &= ~Moveable;
#ifndef __linux__
// kfreebsd doesn't have mremap, so we use the fallback
if ((this->Flags & Moveable) == Moveable)
this->Flags |= Fallback;
#endif
#ifdef _POSIX_MAPPED_FILES
if ((this->Flags & Fallback) != Fallback) {
// Set the permissions.
int Prot = PROT_READ;
#ifdef MAP_ANONYMOUS
int Map = MAP_PRIVATE | MAP_ANONYMOUS;
#else
int Map = MAP_PRIVATE | MAP_ANON;
#endif
if ((this->Flags & ReadOnly) != ReadOnly)
Prot |= PROT_WRITE;
if ((this->Flags & Public) == Public)
#ifdef MAP_ANONYMOUS
Map = MAP_SHARED | MAP_ANONYMOUS;
#else
Map = MAP_SHARED | MAP_ANON;
#endif
// use anonymous mmap() to get the memory
Base = (unsigned char*) mmap(0, WorkSpace, Prot, Map, -1, 0);
if(Base == MAP_FAILED)
_error->Errno("DynamicMMap",_("Couldn't make mmap of %lu bytes"),WorkSpace);
iSize = 0;
return;
}
#endif
   // fallback to a statically allocated space
Base = calloc(WorkSpace, 1);
iSize = 0;
}
/*}}}*/
// DynamicMMap::~DynamicMMap - Destructor /*{{{*/
// ---------------------------------------------------------------------
/* We truncate the file to the size of the memory data set */
DynamicMMap::~DynamicMMap()
{
if (Fd == 0)
{
if (validData() == false)
return;
#ifdef _POSIX_MAPPED_FILES
munmap(Base, WorkSpace);
#else
free(Base);
#endif
return;
}
unsigned long long EndOfFile = iSize;
iSize = WorkSpace;
Close(false);
if(ftruncate(Fd->Fd(),EndOfFile) < 0)
_error->Errno("ftruncate", _("Failed to truncate file"));
}
/*}}}*/
// DynamicMMap::RawAllocate - Allocate a raw chunk of unaligned space /*{{{*/
// ---------------------------------------------------------------------
/* This allocates a block of memory aligned to the given size */
unsigned long DynamicMMap::RawAllocate(unsigned long long Size,unsigned long Aln)
{
unsigned long long Result = iSize;
if (Aln != 0)
Result += Aln - (iSize%Aln);
iSize = Result + Size;
// try to grow the buffer
while(Result + Size > WorkSpace)
{
if(!Grow())
{
_error->Fatal(_("Dynamic MMap ran out of room. Please increase the size "
"of APT::Cache-Start. Current value: %lu. (man 5 apt.conf)"), WorkSpace);
return 0;
}
}
return Result;
}
/*}}}*/
// DynamicMMap::Allocate - Pooled aligned allocation /*{{{*/
// ---------------------------------------------------------------------
/* This allocates an Item of size ItemSize so that it is aligned to its
size in the file. */
unsigned long DynamicMMap::Allocate(unsigned long ItemSize)
{
if (unlikely(ItemSize == 0))
{
_error->Fatal("Can't allocate an item of size zero");
return 0;
}
// Look for a matching pool entry
Pool *I;
for (I = Pools; I != Pools + PoolCount; ++I)
{
if (I->ItemSize == ItemSize)
break;
}
// No pool is allocated, use an unallocated one.
if (unlikely(I == Pools + PoolCount))
{
for (I = Pools; I != Pools + PoolCount; ++I)
{
if (I->ItemSize == 0)
break;
}
      // Whoops, we ran out; the calling code should allocate more.
if (I == Pools + PoolCount)
{
_error->Error("Ran out of allocation pools");
return 0;
}
I->ItemSize = ItemSize;
I->Count = 0;
}
unsigned long Result = 0;
// Out of space, allocate some more
if (I->Count == 0)
{
const unsigned long size = 20*1024;
I->Count = size/ItemSize;
Pool* oldPools = Pools;
_error->PushToStack();
Result = RawAllocate(size,ItemSize);
bool const newError = _error->PendingError();
_error->MergeWithStack();
if (Pools != oldPools)
I += Pools - oldPools;
// Did the allocation fail?
if (Result == 0 && newError)
return 0;
I->Start = Result;
}
else
Result = I->Start;
I->Count--;
I->Start += ItemSize;
return Result/ItemSize;
}
/*}}}*/
// DynamicMMap::WriteString - Write a string to the file /*{{{*/
// ---------------------------------------------------------------------
/* Strings are aligned to 16 bytes */
unsigned long DynamicMMap::WriteString(const char *String,
unsigned long Len)
{
if (Len == std::numeric_limits<unsigned long>::max())
Len = strlen(String);
_error->PushToStack();
unsigned long Result = RawAllocate(Len+1+sizeof(uint16_t),sizeof(uint16_t));
bool const newError = _error->PendingError();
_error->MergeWithStack();
if (Base == NULL || (Result == 0 && newError))
return 0;
if (Len >= std::numeric_limits<uint16_t>::max())
abort();
uint16_t LenToWrite = Len;
memcpy((char *)Base + Result, &LenToWrite, sizeof(LenToWrite));
Result += sizeof(LenToWrite);
memcpy((char *)Base + Result,String,Len);
((char *)Base)[Result + Len] = 0;
return Result;
}
/*}}}*/
// DynamicMMap::Grow - Grow the mmap /*{{{*/
// ---------------------------------------------------------------------
/* This method is a wrapper around different ways to (try to) grow
   an mmap (or our char[] fallback). Possible environments:
   1. Moveable + !Fallback + linux -> mremap with MREMAP_MAYMOVE
   2. Moveable + !Fallback + !linux -> not possible (forbidden by constructor)
   3. Moveable + Fallback -> realloc
   4. !Moveable + !Fallback + linux -> plain mremap - which will fail in 99.9% of cases
   5. !Moveable + !Fallback + !linux -> not possible (forbidden by constructor)
   6. !Moveable + Fallback -> not possible
   [ Moveable and Fallback refer to the flags of the same name, while
   "linux" means a Linux kernel rather than a FreeBSD kernel. ]
   In short: an mmap that wants to be growable needs to be moveable to have
   a real chance, but this method will at least attempt the nearly impossible
   case 4 before it finally gives up: never say never. */
bool DynamicMMap::Grow() {
if (Limit != 0 && WorkSpace >= Limit)
return _error->Error(_("Unable to increase the size of the MMap as the "
"limit of %lu bytes is already reached."), Limit);
if (GrowFactor <= 0)
return _error->Error(_("Unable to increase size of the MMap as automatic growing is disabled by user."));
unsigned long long const newSize = WorkSpace + GrowFactor;
if(Fd != 0) {
Fd->Seek(newSize - 1);
char C = 0;
Fd->Write(&C,sizeof(C));
}
unsigned long const poolOffset = Pools - ((Pool*) Base);
if ((Flags & Fallback) != Fallback) {
#if defined(_POSIX_MAPPED_FILES) && defined(__linux__)
#ifdef MREMAP_MAYMOVE
if ((Flags & Moveable) == Moveable)
Base = mremap(Base, WorkSpace, newSize, MREMAP_MAYMOVE);
else
#endif
Base = mremap(Base, WorkSpace, newSize, 0);
if(Base == MAP_FAILED)
return false;
#else
return false;
#endif
} else {
if ((Flags & Moveable) != Moveable)
return false;
Base = realloc(Base, newSize);
if (Base == NULL)
return false;
else
/* Set new memory to 0 */
memset((char*)Base + WorkSpace, 0, newSize - WorkSpace);
}
Pools =(Pool*) Base + poolOffset;
WorkSpace = newSize;
return true;
}
/*}}}*/
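
A minimal usage sketch of the pooled allocation pattern implemented above (hypothetical, not part of the shipped sources; only the DynamicMMap/Pool interface is taken from mmap.h below, the Node struct and the program around it are invented for illustration):

#include <apt-pkg/mmap.h>

struct Node { map_ptrloc Next; map_ptrloc Data; }; // hypothetical 8-byte item

int main()
{
   // Growable anonymous map with the default 2 MiB workspace.
   DynamicMMap Map(MMap::Moveable);
   if (Map.validData() == false)
      return 1;
   // Register two empty pools; Allocate() claims one per distinct item size
   // and refills it in 20 KiB chunks via RawAllocate().
   DynamicMMap::Pool Pools[2] = {};
   Map.UsePools(*Pools, 2);
   // Allocate() returns an item index (byte offset divided by item size);
   // 0 signals failure.
   unsigned long const First = Map.Allocate(sizeof(Node));
   return First == 0 ? 1 : 0;
}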

116
apt-pkg/contrib/mmap.h Normal file
View File

@ -0,0 +1,116 @@
// -*- mode: cpp; mode: fold -*-
// Description /*{{{*/
/* ######################################################################
MMap Class - Provides 'real' mmap or a faked mmap using read().
The purpose of this code is to provide a generic way for clients to
access the mmap function. In environments that do not support mmap
from file fds this class will use read() and normally allocated
memory.
Writing to a public mmap will always fully commit all changes when the
class is deleted, i.e. it will rewrite the file, unless it is read-only.
The DynamicMMap class is used to help the on-disk data structure
generators. It provides a large allocated workspace and members
to allocate space from the workspace in an efficient fashion.
This source is placed in the Public Domain, do with it what you will
It was originally written by Jason Gunthorpe.
##################################################################### */
/*}}}*/
#ifndef PKGLIB_MMAP_H
#define PKGLIB_MMAP_H
#include <string>
#include <limits>
#include <sys/mman.h>
class FileFd;
/* This should be a 32 bit type; larger types use too much RAM and smaller
   types are too small. Wherever possible 'unsigned long' should be used
   instead of this internal type. */
typedef unsigned int map_ptrloc;
class MMap
{
protected:
unsigned long Flags;
unsigned long long iSize;
void *Base;
// In case mmap can not be used, we keep a dup of the file
// descriptor that should have been mmaped so that we can write to
// the file in Sync().
FileFd *SyncToFd;
bool Map(FileFd &Fd);
bool Close(bool DoSync = true);
public:
enum OpenFlags {NoImmMap = (1<<0),Public = (1<<1),ReadOnly = (1<<2),
UnMapped = (1<<3), Moveable = (1<<4), Fallback = (1 << 5)};
// Simple accessors
inline operator void *() {return Base;};
inline void *Data() {return Base;};
inline unsigned long long Size() {return iSize;};
inline void AddSize(unsigned long long const size) {iSize += size;};
inline bool validData() const { return Base != MAP_FAILED && Base != 0; };
// File manipulators
bool Sync();
bool Sync(unsigned long Start,unsigned long Stop);
MMap(FileFd &F,unsigned long Flags);
explicit MMap(unsigned long Flags);
virtual ~MMap();
};
class DynamicMMap : public MMap
{
public:
// This is the allocation pool structure
struct Pool
{
unsigned long ItemSize;
unsigned long Start;
unsigned long Count;
};
protected:
FileFd *Fd;
unsigned long WorkSpace;
unsigned long const GrowFactor;
unsigned long const Limit;
Pool *Pools;
unsigned int PoolCount;
bool Grow();
public:
// Allocation
unsigned long RawAllocate(unsigned long long Size,unsigned long Aln = 0);
unsigned long Allocate(unsigned long ItemSize);
unsigned long WriteString(const char *String,unsigned long Len = std::numeric_limits<unsigned long>::max());
inline unsigned long WriteString(const std::string &S) {return WriteString(S.c_str(),S.length());};
void UsePools(Pool &P,unsigned int Count) {Pools = &P; PoolCount = Count;};
DynamicMMap(FileFd &F,unsigned long Flags,unsigned long const &WorkSpace = 2*1024*1024,
unsigned long const &Grow = 1024*1024, unsigned long const &Limit = 0);
DynamicMMap(unsigned long Flags,unsigned long const &WorkSpace = 2*1024*1024,
unsigned long const &Grow = 1024*1024, unsigned long const &Limit = 0);
virtual ~DynamicMMap();
};
#endif
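
A second hypothetical sketch, assuming the apt-pkg headers and library are available, showing the anonymous DynamicMMap declared above together with the length-prefixed WriteString() helper:

#include <apt-pkg/error.h>
#include <apt-pkg/mmap.h>
#include <iostream>

int main()
{
   // 1 MiB workspace, growing in 256 KiB steps, no upper limit.
   DynamicMMap Map(MMap::Moveable, 1024*1024, 256*1024, 0);
   if (Map.validData() == false)
   {
      _error->DumpErrors();
      return 1;
   }
   // Strings are stored with a uint16_t length prefix and a trailing NUL;
   // the returned offset points at the character data itself.
   unsigned long const Off = Map.WriteString("hello cache");
   std::cout << "stored at offset " << Off << ", used " << Map.Size() << " bytes" << std::endl;
   return 0;
}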

169
apt-pkg/contrib/netrc.cc Normal file
View File

@ -0,0 +1,169 @@
// -*- mode: cpp; mode: fold -*-
// Description /*{{{*/
/* ######################################################################
netrc file parser - returns the login and password of a given host
from a specified netrc-type file
Originally written by Daniel Stenberg, <daniel@haxx.se>, et al. and
placed into the Public Domain, do with it what you will.
##################################################################### */
/*}}}*/
#include <config.h>
#include <apti18n.h>
#include <apt-pkg/configuration.h>
#include <apt-pkg/error.h>
#include <apt-pkg/fileutl.h>
#include <apt-pkg/strutl.h>
#include <iostream>
#include "netrc.h"
/* Get user and password from .netrc when given a machine name */
bool MaybeAddAuth(FileFd &NetRCFile, URI &Uri)
{
if (Uri.User.empty() == false || Uri.Password.empty() == false)
return true;
if (NetRCFile.IsOpen() == false || NetRCFile.Failed())
return false;
auto const Debug = _config->FindB("Debug::Acquire::netrc", false);
std::string lookfor;
if (Uri.Port != 0)
strprintf(lookfor, "%s:%i%s", Uri.Host.c_str(), Uri.Port, Uri.Path.c_str());
else
lookfor.append(Uri.Host).append(Uri.Path);
enum
{
NO,
MACHINE,
GOOD_MACHINE,
LOGIN,
PASSWORD
} active_token = NO;
std::string line;
while (NetRCFile.Eof() == false || line.empty() == false)
{
bool protocolSpecified = false;
if (line.empty())
{
if (NetRCFile.ReadLine(line) == false)
break;
else if (line.empty())
continue;
}
auto tokenend = line.find_first_of("\t ");
std::string token;
if (tokenend != std::string::npos)
{
token = line.substr(0, tokenend);
line.erase(0, tokenend + 1);
}
else
std::swap(line, token);
if (token.empty())
continue;
switch (active_token)
{
case NO:
if (token == "machine")
active_token = MACHINE;
break;
case MACHINE:
// If token contains a protocol: Check it first, and strip it away if
// it matches. If it does not match, ignore this stanza.
// If there is no protocol, only allow https protocols.
protocolSpecified = token.find("://") != std::string::npos;
if (protocolSpecified)
{
if (not APT::String::Startswith(token, Uri.Access + "://"))
{
active_token = NO;
break;
}
token.erase(0, Uri.Access.length() + 3);
}
if (token.find('/') == std::string::npos)
{
if (Uri.Port != 0 && Uri.Host == token)
active_token = GOOD_MACHINE;
else if (lookfor.compare(0, lookfor.length() - Uri.Path.length(), token) == 0)
active_token = GOOD_MACHINE;
else
active_token = NO;
}
else
{
if (APT::String::Startswith(lookfor, token))
active_token = GOOD_MACHINE;
else
active_token = NO;
}
if (active_token == GOOD_MACHINE && not protocolSpecified)
{
if (Uri.Access != "https" && Uri.Access != "tor+https")
{
_error->Warning(_("%s: Credentials for %s match, but the protocol is not encrypted. Annotate with %s:// to use."), NetRCFile.Name().c_str(), token.c_str(), Uri.Access.c_str());
active_token = NO;
}
}
break;
case GOOD_MACHINE:
if (token == "login")
active_token = LOGIN;
else if (token == "password")
active_token = PASSWORD;
else if (token == "machine")
{
if (Debug)
std::clog << "MaybeAddAuth: Found matching host adding '" << Uri.User << "' and '" << Uri.Password << "' for "
<< (std::string)Uri << " from " << NetRCFile.Name() << std::endl;
return true;
}
break;
case LOGIN:
std::swap(Uri.User, token);
active_token = GOOD_MACHINE;
break;
case PASSWORD:
std::swap(Uri.Password, token);
active_token = GOOD_MACHINE;
break;
}
}
if (active_token == GOOD_MACHINE)
{
if (Debug)
std::clog << "MaybeAddAuth: Found matching host adding '" << Uri.User << "' and '" << Uri.Password << "' for "
<< (std::string)Uri << " from " << NetRCFile.Name() << std::endl;
return true;
}
else if (active_token == NO)
{
if (Debug)
std::clog << "MaybeAddAuth: Found no matching host for "
<< (std::string)Uri << " from " << NetRCFile.Name() << std::endl;
return true;
}
else if (Debug)
{
std::clog << "MaybeAddAuth: Found no matching host (syntax error: token:";
switch (active_token)
{
case NO: std::clog << "NO"; break;
case MACHINE: std::clog << "MACHINE"; break;
case GOOD_MACHINE: std::clog << "GOOD_MACHINE"; break;
case LOGIN: std::clog << "LOGIN"; break;
case PASSWORD: std::clog << "PASSWORD"; break;
}
std::clog << ") for " << (std::string)Uri << " from " << NetRCFile.Name() << std::endl;
}
return false;
}

26
apt-pkg/contrib/netrc.h Normal file
View File

@ -0,0 +1,26 @@
// -*- mode: cpp; mode: fold -*-
// Description /*{{{*/
/* ######################################################################
netrc file parser - returns the login and password of a given host
from a specified netrc-type file
Originally written by Daniel Stenberg, <daniel@haxx.se>, et al. and
placed into the Public Domain, do with it what you will.
##################################################################### */
/*}}}*/
#ifndef NETRC_H
#define NETRC_H
#include <string>
#include <apt-pkg/macros.h>
class URI;
class FileFd;
APT_PUBLIC bool MaybeAddAuth(FileFd &NetRCFile, URI &Uri);
#endif
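
A short hypothetical example of the MaybeAddAuth() entry point declared above; the auth file path and the URL are placeholders, and FileFd and URI come from fileutl.h and strutl.h:

#include <apt-pkg/fileutl.h>
#include <apt-pkg/netrc.h>
#include <apt-pkg/strutl.h>
#include <iostream>

int main()
{
   FileFd AuthConf("/etc/apt/auth.conf", FileFd::ReadOnly);
   URI Target("https://deb.example.org/debian/dists/stable/InRelease");
   // Fills Target.User/Target.Password from the first matching machine entry.
   if (MaybeAddAuth(AuthConf, Target) && Target.User.empty() == false)
      std::cout << "using login '" << Target.User << "' for " << Target.Host << std::endl;
   return 0;
}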

229
apt-pkg/contrib/progress.cc Normal file
View File

@ -0,0 +1,229 @@
// -*- mode: cpp; mode: fold -*-
// Description /*{{{*/
/* ######################################################################
OpProgress - Operation Progress
##################################################################### */
/*}}}*/
// Include Files /*{{{*/
#include <config.h>
#include <apt-pkg/configuration.h>
#include <apt-pkg/error.h>
#include <apt-pkg/progress.h>
#include <cmath>
#include <chrono>
#include <cstring>
#include <iostream>
#include <string>
#include <stdio.h>
#include <sys/time.h>
#include <apti18n.h>
/*}}}*/
using namespace std;
// OpProgress::OpProgress - Constructor /*{{{*/
// ---------------------------------------------------------------------
/* */
OpProgress::OpProgress() : Current(0), Total(0), Size(0), SubTotal(1),
LastPercent(0), Percent(0)
{
memset(&LastTime,0,sizeof(LastTime));
}
/*}}}*/
// OpProgress::Progress - Sub progress with no state change /*{{{*/
// ---------------------------------------------------------------------
/* Current is the Base Overall progress in units of Total. Cur is the sub
progress in units of SubTotal. Size is a scaling factor that says what
percent of Total SubTotal is. */
void OpProgress::Progress(unsigned long long Cur)
{
if (Total == 0 || Size == 0 || SubTotal == 0)
Percent = 0;
else
Percent = (Current + Cur/((double)SubTotal)*Size)*100.0/Total;
Update();
}
/*}}}*/
// OpProgress::OverallProgress - Set the overall progress /*{{{*/
// ---------------------------------------------------------------------
/* */
void OpProgress::OverallProgress(unsigned long long Current, unsigned long long Total,
unsigned long long Size,const string &Op)
{
this->Current = Current;
this->Total = Total;
this->Size = Size;
this->Op = Op;
SubOp = string();
if (Total == 0)
Percent = 0;
else
Percent = Current*100.0/Total;
Update();
}
/*}}}*/
// OpProgress::SubProgress - Set the sub progress state /*{{{*/
// ---------------------------------------------------------------------
/* */
void OpProgress::SubProgress(unsigned long long SubTotal,const string &Op,
float const Percent)
{
this->SubTotal = SubTotal;
if (Op.empty() == false)
SubOp = Op;
if (Total == 0 || Percent == 0)
this->Percent = 0;
else if (Percent != -1)
this->Percent = this->Current += (Size*Percent)/SubTotal;
else
this->Percent = Current*100.0/Total;
Update();
}
/*}}}*/
// OpProgress::CheckChange - See if the display should be updated /*{{{*/
// ---------------------------------------------------------------------
/* Progress calls are made so frequently that if every one resulted in
an update the display would be swamped and the system much slower.
This provides an upper bound on the update rate. */
bool OpProgress::CheckChange(float Interval)
{
// For absolute progress, we assume every call is relevant.
if (_config->FindB("APT::Internal::OpProgress::Absolute", false))
return true;
// New major progress indication
if (Op != LastOp)
{
MajorChange = true;
LastOp = Op;
return true;
}
MajorChange = false;
if (SubOp != LastSubOp)
{
LastSubOp = SubOp;
return true;
}
if (std::lround(LastPercent) == std::lround(Percent))
return false;
LastPercent = Percent;
if (Interval == 0)
return false;
// Check time delta
auto const Now = std::chrono::steady_clock::now().time_since_epoch();
auto const Now_sec = std::chrono::duration_cast<std::chrono::seconds>(Now);
auto const Now_usec = std::chrono::duration_cast<std::chrono::microseconds>(Now - Now_sec);
struct timeval NowTime = { Now_sec.count(), Now_usec.count() };
std::chrono::duration<decltype(Interval)> Delta =
std::chrono::seconds(NowTime.tv_sec - LastTime.tv_sec) +
std::chrono::microseconds(NowTime.tv_usec - LastTime.tv_usec);
if (Delta.count() < Interval)
return false;
LastTime = NowTime;
return true;
}
/*}}}*/
// OpTextProgress::OpTextProgress - Constructor /*{{{*/
// ---------------------------------------------------------------------
/* */
OpTextProgress::OpTextProgress(Configuration &Config) :
NoUpdate(false), NoDisplay(false), LastLen(0)
{
if (Config.FindI("quiet",0) >= 1 || Config.FindB("quiet::NoUpdate", false) == true)
NoUpdate = true;
if (Config.FindI("quiet",0) >= 2 || Config.FindB("quiet::NoProgress", false) == true)
NoDisplay = true;
}
/*}}}*/
// OpTextProgress::Done - Clean up the display /*{{{*/
// ---------------------------------------------------------------------
/* */
void OpTextProgress::Done()
{
if (NoUpdate == false && OldOp.empty() == false)
{
char S[300];
if (_error->PendingError() == true)
snprintf(S,sizeof(S),_("%c%s... Error!"),'\r',OldOp.c_str());
else
snprintf(S,sizeof(S),_("%c%s... Done"),'\r',OldOp.c_str());
Write(S);
cout << endl;
OldOp = string();
}
if (NoUpdate == true && NoDisplay == false && OldOp.empty() == false)
{
OldOp = string();
cout << endl;
}
}
/*}}}*/
// OpTextProgress::Update - Simple text spinner /*{{{*/
// ---------------------------------------------------------------------
/* */
void OpTextProgress::Update()
{
if (CheckChange((NoUpdate == true?0:0.7)) == false)
return;
// No percent spinner
if (NoUpdate == true)
{
if (MajorChange == false)
return;
if (NoDisplay == false)
{
if (OldOp.empty() == false)
cout << endl;
OldOp = "a";
cout << Op << _("...") << flush;
}
return;
}
// Erase the old text and 'log' the event
char S[300];
if (MajorChange == true && OldOp.empty() == false)
{
snprintf(S,sizeof(S),"\r%s",OldOp.c_str());
Write(S);
cout << endl;
}
// Print the spinner. Absolute progress shows us a time progress.
if (_config->FindB("APT::Internal::OpProgress::Absolute", false) && Total != -1llu)
snprintf(S, sizeof(S), _("%c%s... %llu/%llus"), '\r', Op.c_str(), Current, Total);
else if (_config->FindB("APT::Internal::OpProgress::Absolute", false))
snprintf(S, sizeof(S), _("%c%s... %llus"), '\r', Op.c_str(), Current);
else
snprintf(S, sizeof(S), _("%c%s... %u%%"), '\r', Op.c_str(), (unsigned int)Percent);
Write(S);
OldOp = Op;
}
/*}}}*/
// OpTextProgress::Write - Write the progress string /*{{{*/
// ---------------------------------------------------------------------
/* This pads the end with spaces to overwrite the previous text */
void OpTextProgress::Write(const char *S)
{
cout << S;
for (unsigned int I = strlen(S); I < LastLen; I++)
cout << ' ';
cout << '\r' << flush;
LastLen = strlen(S);
}
/*}}}*/

87
apt-pkg/contrib/progress.h Normal file
View File

@ -0,0 +1,87 @@
// -*- mode: cpp; mode: fold -*-
// Description /*{{{*/
/* ######################################################################
OpProgress - Operation Progress
This class allows lengthy operations to communicate their progress
to the GUI. The progress model is simple and is not designed to handle
the complex case of the multi-activity acquire class.
The model is based on the concept of an overall operation consisting
of a series of small sub operations. Each sub operation has its own
completion status and the overall operation has its own completion status.
The units of the two are not mixed and are completely independent.
The UI is expected to subclass this to provide the visuals to the user.
##################################################################### */
/*}}}*/
#ifndef PKGLIB_PROGRESS_H
#define PKGLIB_PROGRESS_H
#include <apt-pkg/macros.h>
#include <string>
#include <sys/time.h>
class Configuration;
class APT_PUBLIC OpProgress
{
friend class OpTextProgress;
unsigned long long Current;
unsigned long long Total;
unsigned long long Size;
unsigned long long SubTotal;
float LastPercent;
// Change reduction code
struct timeval LastTime;
std::string LastOp;
std::string LastSubOp;
protected:
std::string Op;
std::string SubOp;
float Percent;
bool MajorChange;
bool CheckChange(float Interval = 0.7);
virtual void Update() {};
public:
void Progress(unsigned long long Current);
void SubProgress(unsigned long long SubTotal, const std::string &Op = "", float const Percent = -1);
void OverallProgress(unsigned long long Current,unsigned long long Total,
unsigned long long Size,const std::string &Op);
virtual void Done() {};
OpProgress();
virtual ~OpProgress() {};
};
class APT_PUBLIC OpTextProgress : public OpProgress
{
protected:
std::string OldOp;
bool NoUpdate;
bool NoDisplay;
unsigned long LastLen;
virtual void Update() APT_OVERRIDE;
void Write(const char *S);
public:
virtual void Done() APT_OVERRIDE;
explicit OpTextProgress(bool NoUpdate = false) : NoUpdate(NoUpdate),
NoDisplay(false), LastLen(0) {};
explicit OpTextProgress(Configuration &Config);
virtual ~OpTextProgress() {Done();};
};
#endif
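
A hypothetical driver for the progress interface above, mirroring how long cache operations report: one overall operation made of two sub operations of 100 units each:

#include <apt-pkg/progress.h>

int main()
{
   OpTextProgress Progress;
   // Overall operation: 2 units of work, each sub operation is worth Size=1.
   Progress.OverallProgress(0, 2, 1, "Reading package lists");
   Progress.SubProgress(100, "Parsing Packages");
   for (unsigned long long i = 0; i <= 100; ++i)
      Progress.Progress(i);
   Progress.OverallProgress(1, 2, 1, "Reading package lists");
   Progress.SubProgress(100, "Parsing Translations");
   for (unsigned long long i = 0; i <= 100; ++i)
      Progress.Progress(i);
   Progress.Done();
   return 0;
}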

97
apt-pkg/contrib/proxy.cc Normal file
View File

@ -0,0 +1,97 @@
// -*- mode: cpp; mode: fold -*-
// Description /*{{{*/
/* ######################################################################
Proxy - Proxy related functions
##################################################################### */
/*}}}*/
// Include Files /*{{{*/
#include <apt-pkg/configuration.h>
#include <apt-pkg/error.h>
#include <apt-pkg/fileutl.h>
#include <apt-pkg/strutl.h>
#include <algorithm>
#include <iostream>
#include <fcntl.h>
#include <unistd.h>
#include "proxy.h"
/*}}}*/
// AutoDetectProxy - auto detect proxy /*{{{*/
// ---------------------------------------------------------------------
/* */
static std::vector<std::string> CompatibleProxies(URI const &URL)
{
if (URL.Access == "http" || URL.Access == "https")
return {"http", "https", "socks5h"};
return {URL.Access};
}
bool AutoDetectProxy(URI &URL)
{
// we support both http/https debug options
bool Debug = _config->FindB("Debug::Acquire::"+URL.Access,false);
// the user already explicitly set a proxy for this host
if(_config->Find("Acquire::"+URL.Access+"::proxy::"+URL.Host, "") != "")
return true;
// option is "Acquire::http::Proxy-Auto-Detect" but we allow the old
// name without the dash ("-")
std::string AutoDetectProxyCmd = _config->Find("Acquire::"+URL.Access+"::Proxy-Auto-Detect",
_config->Find("Acquire::"+URL.Access+"::ProxyAutoDetect"));
if (AutoDetectProxyCmd.empty())
return true;
if (Debug)
std::clog << "Using auto proxy detect command: " << AutoDetectProxyCmd << std::endl;
if (faccessat(AT_FDCWD, AutoDetectProxyCmd.c_str(), R_OK | X_OK, AT_EACCESS) != 0)
return _error->Errno("access", "ProxyAutoDetect command '%s' can not be executed!", AutoDetectProxyCmd.c_str());
std::string const urlstring = URL;
std::vector<const char *> Args;
Args.push_back(AutoDetectProxyCmd.c_str());
Args.push_back(urlstring.c_str());
Args.push_back(nullptr);
FileFd PipeFd;
pid_t Child;
if (Popen(&Args[0], PipeFd, Child, FileFd::ReadOnly, false, true) == false)
return _error->Error("ProxyAutoDetect command '%s' failed!", AutoDetectProxyCmd.c_str());
char buf[512];
bool const goodread = PipeFd.ReadLine(buf, sizeof(buf)) != nullptr;
PipeFd.Close();
if (ExecWait(Child, "ProxyAutoDetect") == false)
return false;
// no output means the detector has no idea which proxy to use
// and apt will use the generic proxy settings
if (goodread == false)
return true;
auto const cleanedbuf = _strstrip(buf);
// We warn about this as the implementor probably meant to use DIRECT instead
if (cleanedbuf[0] == '\0')
{
_error->Warning("ProxyAutoDetect command returned an empty line");
return true;
}
if (Debug)
std::clog << "auto detect command returned: '" << cleanedbuf << "'" << std::endl;
auto compatibleTypes = CompatibleProxies(URL);
bool compatible = strcmp(cleanedbuf, "DIRECT") == 0 ||
compatibleTypes.end() != std::find_if(compatibleTypes.begin(),
compatibleTypes.end(), [cleanedbuf](std::string &compat) {
return strstr(cleanedbuf, compat.c_str()) == cleanedbuf;
});
if (compatible)
_config->Set("Acquire::"+URL.Access+"::proxy::"+URL.Host, cleanedbuf);
return true;
}
/*}}}*/

16
apt-pkg/contrib/proxy.h Normal file
View File

@ -0,0 +1,16 @@
// -*- mode: cpp; mode: fold -*-
// Description /*{{{*/
/* ######################################################################
Proxy - Proxy operations
##################################################################### */
/*}}}*/
#ifndef PKGLIB_PROXY_H
#define PKGLIB_PROXY_H
class URI;
APT_PUBLIC bool AutoDetectProxy(URI &URL);
#endif
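
A hypothetical caller of AutoDetectProxy(): when the user has configured an Acquire::http::Proxy-Auto-Detect command, the per-host proxy option is filled in as a side effect and can be read back from the configuration afterwards:

#include <apt-pkg/configuration.h>
#include <apt-pkg/proxy.h>
#include <apt-pkg/strutl.h>
#include <iostream>

int main()
{
   URI Url("http://deb.example.org/debian/");   // placeholder URL
   if (AutoDetectProxy(Url))
      std::cout << "proxy for " << Url.Host << ": "
                << _config->Find("Acquire::http::proxy::" + Url.Host, "(none)") << std::endl;
   return 0;
}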

209
apt-pkg/contrib/srvrec.cc Normal file
View File

@ -0,0 +1,209 @@
// -*- mode: cpp; mode: fold -*-
// Description /*{{{*/
/* ######################################################################
SRV record support
##################################################################### */
/*}}}*/
#include <config.h>
#include <netdb.h>
#include <arpa/nameser.h>
#include <arpa/inet.h>
#include <netinet/in.h>
#include <resolv.h>
#include <time.h>
#include <algorithm>
#include <memory>
#include <tuple>
#include <apt-pkg/configuration.h>
#include <apt-pkg/error.h>
#include <apt-pkg/strutl.h>
#include "srvrec.h"
bool SrvRec::operator==(SrvRec const &other) const
{
return (std::tie(target, priority, weight, port) ==
std::tie(other.target, other.priority, other.weight, other.port));
}
bool GetSrvRecords(std::string host, int port, std::vector<SrvRec> &Result)
{
// try SRV only for hostnames, not for IP addresses
{
struct in_addr addr4;
struct in6_addr addr6;
if (inet_pton(AF_INET, host.c_str(), &addr4) == 1 ||
inet_pton(AF_INET6, host.c_str(), &addr6) == 1)
return true;
}
std::string target;
int res;
struct servent s_ent_buf;
struct servent *s_ent = nullptr;
std::vector<char> buf(1024);
res = getservbyport_r(htons(port), "tcp", &s_ent_buf, buf.data(), buf.size(), &s_ent);
if (res != 0 || s_ent == nullptr)
return false;
strprintf(target, "_%s._tcp.%s", s_ent->s_name, host.c_str());
return GetSrvRecords(target, Result);
}
bool GetSrvRecords(std::string name, std::vector<SrvRec> &Result)
{
unsigned char answer[PACKETSZ];
int answer_len, compressed_name_len;
int answer_count;
struct __res_state res;
if (res_ninit(&res) != 0)
return _error->Errno("res_init", "Failed to init resolver");
// Close on return
std::shared_ptr<void> guard(&res, res_nclose);
answer_len = res_nquery(&res, name.c_str(), C_IN, T_SRV, answer, sizeof(answer));
if (answer_len == -1)
return false;
if (answer_len < (int)sizeof(HEADER))
return _error->Warning("Not enough data from res_query (%i)", answer_len);
// check the header
HEADER *header = (HEADER*)answer;
if (header->rcode != NOERROR)
return _error->Warning("res_query returned rcode %i", header->rcode);
answer_count = ntohs(header->ancount);
if (answer_count <= 0)
return _error->Warning("res_query returned no answers (%i) ", answer_count);
// skip the header
compressed_name_len = dn_skipname(answer+sizeof(HEADER), answer+answer_len);
if(compressed_name_len < 0)
return _error->Warning("dn_skipname failed %i", compressed_name_len);
// pt points to the first answer record, go over all of them now
unsigned char *pt = answer+sizeof(HEADER)+compressed_name_len+QFIXEDSZ;
while ((int)Result.size() < answer_count && pt < answer+answer_len)
{
u_int16_t type, klass, priority, weight, port, dlen;
char buf[MAXDNAME];
compressed_name_len = dn_skipname(pt, answer+answer_len);
if (compressed_name_len < 0)
return _error->Warning("dn_skipname failed (2): %i",
compressed_name_len);
pt += compressed_name_len;
if (((answer+answer_len) - pt) < 16)
return _error->Warning("packet too short");
// extract the data out of the result buffer
#define extract_u16(target, p) target = *p++ << 8; target |= *p++;
extract_u16(type, pt);
if(type != T_SRV)
return _error->Warning("Unexpected type excepted %x != %x",
T_SRV, type);
extract_u16(klass, pt);
if(klass != C_IN)
return _error->Warning("Unexpected class excepted %x != %x",
C_IN, klass);
pt += 4; // ttl
extract_u16(dlen, pt);
extract_u16(priority, pt);
extract_u16(weight, pt);
extract_u16(port, pt);
#undef extract_u16
compressed_name_len = dn_expand(answer, answer+answer_len, pt, buf, sizeof(buf));
if(compressed_name_len < 0)
return _error->Warning("dn_expand failed %i", compressed_name_len);
pt += compressed_name_len;
// add it to our class
Result.emplace_back(buf, priority, weight, port);
}
// implement load balancing as specified in RFC-2782
// sort them by priority
std::stable_sort(Result.begin(), Result.end());
for(std::vector<SrvRec>::iterator I = Result.begin();
I != Result.end(); ++I)
{
if (_config->FindB("Debug::Acquire::SrvRecs", false) == true)
{
std::cerr << "SrvRecs: got " << I->target
<< " prio: " << I->priority
<< " weight: " << I->weight
<< std::endl;
}
}
return true;
}
SrvRec PopFromSrvRecs(std::vector<SrvRec> &Recs)
{
// FIXME: instead of the simplistic shuffle below use the algorithm
// described in rfc2782 (with weights)
// and figure out how the weights need to be adjusted if
// a host refuses connections
#if 0 // all code below is only needed for the weight adjusted selection
// assign random number ranges
int prev_weight = 0;
int prev_priority = 0;
for(std::vector<SrvRec>::iterator I = Result.begin();
I != Result.end(); ++I)
{
if(prev_priority != I->priority)
prev_weight = 0;
I->random_number_range_start = prev_weight;
I->random_number_range_end = prev_weight + I->weight;
prev_weight = I->random_number_range_end;
prev_priority = I->priority;
if (_config->FindB("Debug::Acquire::SrvRecs", false) == true)
std::cerr << "SrvRecs: got " << I->target
<< " prio: " << I->priority
<< " weight: " << I->weight
<< std::endl;
}
// go over the code in reverse order and note the max random range
int max = 0;
prev_priority = 0;
for(std::vector<SrvRec>::iterator I = Result.end();
I != Result.begin(); --I)
{
if(prev_priority != I->priority)
max = I->random_number_range_end;
I->random_number_range_max = max;
}
#endif
// shuffle in a very simplistic way for now (equal weights)
std::vector<SrvRec>::iterator I = Recs.begin();
std::vector<SrvRec>::iterator const J = std::find_if(Recs.begin(), Recs.end(),
[&I](SrvRec const &J) { return I->priority != J.priority; });
// clock seems random enough.
I += std::max(static_cast<clock_t>(0), clock()) % std::distance(I, J);
SrvRec const selected = std::move(*I);
Recs.erase(I);
if (_config->FindB("Debug::Acquire::SrvRecs", false) == true)
std::cerr << "PopFromSrvRecs: selecting " << selected.target << std::endl;
return selected;
}

56
apt-pkg/contrib/srvrec.h Normal file
View File

@ -0,0 +1,56 @@
// -*- mode: cpp; mode: fold -*-
// Description /*{{{*/
/* ######################################################################
SRV record support
##################################################################### */
/*}}}*/
#ifndef SRVREC_H
#define SRVREC_H
#include <string>
#include <vector>
#include <arpa/nameser.h>
#include <apt-pkg/macros.h>
class APT_PUBLIC SrvRec
{
public:
std::string target;
u_int16_t priority;
u_int16_t weight;
u_int16_t port;
// each server is assigned an interval [start, end] in the space of [0, max]
int random_number_range_start;
int random_number_range_end;
int random_number_range_max;
bool operator<(SrvRec const &other) const {
return this->priority < other.priority;
}
bool operator==(SrvRec const &other) const;
SrvRec(std::string const Target, u_int16_t const Priority,
u_int16_t const Weight, u_int16_t const Port) :
target(Target), priority(Priority), weight(Weight), port(Port),
random_number_range_start(0), random_number_range_end(0),
random_number_range_max(0) {}
};
/** \brief Get SRV records for a query string like: _http._tcp.example.com
 */
APT_PUBLIC bool GetSrvRecords(std::string name, std::vector<SrvRec> &Result);
/** \brief Get SRV records from host/port (builds the query string internally)
 */
APT_PUBLIC bool GetSrvRecords(std::string host, int port, std::vector<SrvRec> &Result);
/** \brief Pop a single SRV record from the vector of SrvRec taking
* priority and weight into account
*/
APT_PUBLIC SrvRec PopFromSrvRecs(std::vector<SrvRec> &Recs);
#endif
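
A hypothetical lookup using the helpers above: resolve the SRV records for an HTTP service and pop one target, honouring the priority ordering (weights are currently shuffled uniformly, as noted in srvrec.cc):

#include <apt-pkg/srvrec.h>
#include <iostream>
#include <vector>

int main()
{
   std::vector<SrvRec> Records;
   // Builds and queries _http._tcp.example.org internally.
   if (GetSrvRecords("example.org", 80, Records) && Records.empty() == false)
   {
      SrvRec const Chosen = PopFromSrvRecs(Records);
      std::cout << "connect to " << Chosen.target << ":" << Chosen.port << std::endl;
   }
   return 0;
}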

137
apt-pkg/contrib/string_view.h Normal file
View File

@ -0,0 +1,137 @@
/*
* Basic implementation of string_view
*
* (C) 2015 Julian Andres Klode <jak@debian.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#if !defined(APT_STRINGVIEW_H)
#define APT_STRINGVIEW_H
#include <apt-pkg/macros.h>
#include <string>
#include <string.h>
namespace APT {
/**
* \brief Simple subset of std::string_view from C++17
*
* This is an internal implementation of the subset of std::string_view
* used by APT. It is not meant to be used in programs, only inside the
* library for performance critical paths.
*/
class StringView {
const char *data_;
size_t size_;
public:
static constexpr size_t npos = static_cast<size_t>(-1);
static_assert(APT::StringView::npos == std::string::npos, "npos values are different");
/* Constructors */
constexpr StringView() : data_(""), size_(0) {}
constexpr StringView(const char *data, size_t size) : data_(data), size_(size) {}
StringView(const char *data) : data_(data), size_(strlen(data)) {}
StringView(std::string const & str): data_(str.data()), size_(str.size()) {}
/* Viewers */
constexpr StringView substr(size_t pos, size_t n = npos) const {
return StringView(data_ + pos, n > (size_ - pos) ? (size_ - pos) : n);
}
size_t find(int c, size_t pos) const {
if (pos == 0)
return find(c);
size_t const found = substr(pos).find(c);
if (found == npos)
return npos;
return pos + found;
}
size_t find(int c) const {
const char *found = static_cast<const char*>(memchr(data_, c, size_));
if (found == NULL)
return npos;
return found - data_;
}
size_t rfind(int c, size_t pos) const {
if (pos == npos)
return rfind(c);
return APT::StringView(data_, pos).rfind(c);
}
size_t rfind(int c) const {
const char *found = static_cast<const char*>(memrchr(data_, c, size_));
if (found == NULL)
return npos;
return found - data_;
}
/* Conversions */
std::string to_string() const {
return std::string(data_, size_);
}
/* Comparisons */
int compare(size_t pos, size_t n, StringView other) const {
return substr(pos, n).compare(other);
}
int compare(StringView other) const {
int res;
res = memcmp(data_, other.data_, std::min(size_, other.size_));
if (res != 0)
return res;
if (size_ == other.size_)
return res;
return (size_ > other.size_) ? 1 : -1;
}
/* Optimization: If size not equal, string cannot be equal */
bool operator ==(StringView other) const { return size_ == other.size_ && compare(other) == 0; }
bool operator !=(StringView other) const { return !(*this == other); }
/* Accessors */
constexpr bool empty() const { return size_ == 0; }
constexpr const char* data() const { return data_; }
constexpr const char* begin() const { return data_; }
constexpr const char* end() const { return data_ + size_; }
constexpr char operator [](size_t i) const { return data_[i]; }
constexpr size_t size() const { return size_; }
constexpr size_t length() const { return size_; }
};
/**
* \brief Faster comparison for string views (compare size before data)
*
* Still stable, but faster than the normal ordering. */
static inline int StringViewCompareFast(StringView a, StringView b) {
if (a.size() != b.size())
return a.size() - b.size();
return memcmp(a.data(), b.data(), a.size());
}
static constexpr inline APT::StringView operator""_sv(const char *data, size_t size)
{
return APT::StringView(data, size);
}
}
inline bool operator ==(const char *other, APT::StringView that);
inline bool operator ==(const char *other, APT::StringView that) { return that.operator==(other); }
inline bool operator ==(std::string const &other, APT::StringView that);
inline bool operator ==(std::string const &other, APT::StringView that) { return that.operator==(other); }
#endif
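
The header above is explicitly internal to the library, but a small hypothetical snippet illustrates the zero-copy slicing it is meant for - locating a separator and comparing the pieces without allocating intermediate std::strings:

#include <apt-pkg/string_view.h>
#include <iostream>

int main()
{
   APT::StringView const Line("Package: apt");
   size_t const Colon = Line.find(':');
   if (Colon != APT::StringView::npos)
   {
      APT::StringView const Tag = Line.substr(0, Colon);
      APT::StringView const Value = Line.substr(Colon + 2);
      if (Tag == "Package")
         std::cout << "value is '" << Value.to_string() << "'" << std::endl;
   }
   return 0;
}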

1831
apt-pkg/contrib/strutl.cc Normal file

File diff suppressed because it is too large

231
apt-pkg/contrib/strutl.h Normal file
View File

@ -0,0 +1,231 @@
// -*- mode: cpp; mode: fold -*-
// Description /*{{{*/
/* ######################################################################
String Util - These are some useful string functions
_strstrip is a function to remove whitespace from the front and end
of a string.
This source is placed in the Public Domain, do with it what you will
It was originally written by Jason Gunthorpe <jgg@gpu.srv.ualberta.ca>
##################################################################### */
/*}}}*/
#ifndef STRUTL_H
#define STRUTL_H
#include <cstring>
#include <iostream>
#include <limits>
#include <string>
#include <vector>
#include <apt-pkg/string_view.h>
#include <stddef.h>
#include <time.h>
#include "macros.h"
namespace APT {
namespace String {
APT_PUBLIC std::string Strip(const std::string &s);
APT_PUBLIC bool Endswith(const std::string &s, const std::string &ending);
APT_PUBLIC bool Startswith(const std::string &s, const std::string &starting);
APT_PUBLIC std::string Join(std::vector<std::string> list, const std::string &sep);
// Returns string display length honoring multi-byte characters
APT_PUBLIC size_t DisplayLength(StringView str);
}
}
APT_PUBLIC bool UTF8ToCodeset(const char *codeset, const std::string &orig, std::string *dest);
APT_PUBLIC char *_strstrip(char *String);
APT_PUBLIC char *_strrstrip(char *String); // right strip only
APT_PUBLIC char *_strtabexpand(char *String,size_t Len);
APT_PUBLIC bool ParseQuoteWord(const char *&String,std::string &Res);
APT_PUBLIC bool ParseCWord(const char *&String,std::string &Res);
APT_PUBLIC std::string QuoteString(const std::string &Str,const char *Bad);
APT_PUBLIC std::string DeQuoteString(const std::string &Str);
APT_PUBLIC std::string DeQuoteString(std::string::const_iterator const &begin, std::string::const_iterator const &end);
// unescape (\0XX and \xXX) from a string
APT_PUBLIC std::string DeEscapeString(const std::string &input);
APT_PUBLIC std::string SizeToStr(double Bytes);
APT_PUBLIC std::string TimeToStr(unsigned long Sec);
APT_PUBLIC std::string Base64Encode(const std::string &Str);
APT_PUBLIC std::string OutputInDepth(const unsigned long Depth, const char* Separator=" ");
APT_PUBLIC std::string URItoFileName(const std::string &URI);
/** returns a datetime string as needed by HTTP/1.1 and Debian files.
*
* Note: The date will always be represented in a UTC timezone
*
* @param Date to be represented as a string
* @param NumericTimezone is preferred in general, but HTTP/1.1 requires the use
* of GMT as timezone instead. \b true means that the timezone should be denoted
* as "+0000" while \b false uses "GMT".
*/
APT_PUBLIC std::string TimeRFC1123(time_t Date, bool const NumericTimezone);
/** parses time as needed by HTTP/1.1 and Debian files.
*
* HTTP/1.1 prefers dates in RFC1123 format (but the other two obsolete date formats
are supported too) and e.g. Release files use the same format in Date & Valid-Until
* fields.
*
* Note: datetime strings need to be in UTC timezones (GMT, UTC, Z, +/-0000) to be
* parsed. Other timezones will be rejected as invalid. Previous implementations
* accepted other timezones, but treated them as UTC.
*
* @param str is the datetime string to parse
* @param[out] time will be the seconds since epoch of the given datetime if
* parsing is successful, undefined otherwise.
* @return \b true if parsing was successful, otherwise \b false.
*/
APT_PUBLIC bool RFC1123StrToTime(const std::string &str,time_t &time) APT_MUSTCHECK;
APT_PUBLIC bool FTPMDTMStrToTime(const char* const str,time_t &time) APT_MUSTCHECK;
APT_PUBLIC std::string LookupTag(const std::string &Message,const char *Tag,const char *Default = 0);
APT_PUBLIC int StringToBool(const std::string &Text,int Default = -1);
APT_PUBLIC bool ReadMessages(int Fd, std::vector<std::string> &List);
APT_PUBLIC bool StrToNum(const char *Str,unsigned long &Res,unsigned Len,unsigned Base = 0);
APT_PUBLIC bool StrToNum(const char *Str,unsigned long long &Res,unsigned Len,unsigned Base = 0);
APT_PUBLIC bool Base256ToNum(const char *Str,unsigned long &Res,unsigned int Len);
APT_PUBLIC bool Base256ToNum(const char *Str,unsigned long long &Res,unsigned int Len);
APT_PUBLIC bool Hex2Num(const APT::StringView Str,unsigned char *Num,unsigned int Length);
// input changing string split
APT_PUBLIC bool TokSplitString(char Tok,char *Input,char **List,
unsigned long ListMax);
// split a given string by a char
APT_PUBLIC std::vector<std::string> VectorizeString(std::string const &haystack, char const &split) APT_PURE;
/* \brief Return a vector of strings from string "input" where "sep"
* is used as the delimiter string.
*
* \param input The input string.
*
* \param sep The separator to use.
*
* \param maxsplit (optional) The maximum amount of splitting that
should be done.
*
* The optional "maxsplit" argument can be used to limit the splitting,
* if used the string is only split on maxsplit places and the last
* item in the vector contains the remainder string.
*/
APT_PUBLIC std::vector<std::string> StringSplit(std::string const &input,
std::string const &sep,
unsigned int maxsplit=std::numeric_limits<unsigned int>::max()) APT_PURE;
APT_PUBLIC void ioprintf(std::ostream &out,const char *format,...) APT_PRINTF(2);
APT_PUBLIC void strprintf(std::string &out,const char *format,...) APT_PRINTF(2);
APT_PUBLIC char *safe_snprintf(char *Buffer,char *End,const char *Format,...) APT_PRINTF(3);
APT_PUBLIC bool CheckDomainList(const std::string &Host, const std::string &List);
/* Do some compat mumbo jumbo */
#define tolower_ascii tolower_ascii_inline
#define isspace_ascii isspace_ascii_inline
APT_PURE APT_HOT
static inline int tolower_ascii_unsafe(int const c)
{
return c | 0x20;
}
APT_PURE APT_HOT
static inline int tolower_ascii_inline(int const c)
{
return (c >= 'A' && c <= 'Z') ? c + 32 : c;
}
APT_PURE APT_HOT
static inline int isspace_ascii_inline(int const c)
{
// 9='\t',10='\n',11='\v',12='\f',13='\r',32=' '
return (c >= 9 && c <= 13) || c == ' ';
}
APT_PUBLIC std::string StripEpoch(const std::string &VerStr);
#define APT_MKSTRCMP(name,func) \
inline APT_PURE int name(const char *A,const char *B) {return func(A,A+strlen(A),B,B+strlen(B));} \
inline APT_PURE int name(const char *A,const char *AEnd,const char *B) {return func(A,AEnd,B,B+strlen(B));} \
inline APT_PURE int name(const std::string& A,const char *B) {return func(A.c_str(),A.c_str()+A.length(),B,B+strlen(B));} \
inline APT_PURE int name(const std::string& A,const std::string& B) {return func(A.c_str(),A.c_str()+A.length(),B.c_str(),B.c_str()+B.length());} \
inline APT_PURE int name(const std::string& A,const char *B,const char *BEnd) {return func(A.c_str(),A.c_str()+A.length(),B,BEnd);}
#define APT_MKSTRCMP2(name,func) \
inline APT_PURE int name(const char *A,const char *AEnd,const char *B) {return func(A,AEnd,B,B+strlen(B));} \
inline APT_PURE int name(const std::string& A,const char *B) {return func(A.begin(),A.end(),B,B+strlen(B));} \
inline APT_PURE int name(const std::string& A,const std::string& B) {return func(A.begin(),A.end(),B.begin(),B.end());} \
inline APT_PURE int name(const std::string& A,const char *B,const char *BEnd) {return func(A.begin(),A.end(),B,BEnd);}
APT_PUBLIC int APT_PURE stringcmp(const char *A,const char *AEnd,const char *B,const char *BEnd);
APT_PUBLIC int APT_PURE stringcasecmp(const char *A,const char *AEnd,const char *B,const char *BEnd);
/* We assume that GCC 3 indicates that libstdc++3 is in use too. In that
case the definition of string::const_iterator is not the same as
const char * and we need these extra functions */
#if __GNUC__ >= 3
APT_PUBLIC int APT_PURE stringcmp(std::string::const_iterator A,std::string::const_iterator AEnd,
const char *B,const char *BEnd);
APT_PUBLIC int APT_PURE stringcmp(std::string::const_iterator A,std::string::const_iterator AEnd,
std::string::const_iterator B,std::string::const_iterator BEnd);
APT_PUBLIC int APT_PURE stringcasecmp(std::string::const_iterator A,std::string::const_iterator AEnd,
const char *B,const char *BEnd);
APT_PUBLIC int APT_PURE stringcasecmp(std::string::const_iterator A,std::string::const_iterator AEnd,
std::string::const_iterator B,std::string::const_iterator BEnd);
inline APT_PURE int stringcmp(std::string::const_iterator A,std::string::const_iterator Aend,const char *B) {return stringcmp(A,Aend,B,B+strlen(B));}
inline APT_PURE int stringcasecmp(std::string::const_iterator A,std::string::const_iterator Aend,const char *B) {return stringcasecmp(A,Aend,B,B+strlen(B));}
#endif
APT_MKSTRCMP2(stringcmp,stringcmp)
APT_MKSTRCMP2(stringcasecmp,stringcasecmp)
// Return the length of a NULL-terminated string array
APT_PUBLIC size_t APT_PURE strv_length(const char **str_array);
inline const char *DeNull(const char *s) {return (s == 0?"(null)":s);}
class APT_PUBLIC URI
{
void CopyFrom(const std::string &From);
public:
std::string Access;
std::string User;
std::string Password;
std::string Host;
std::string Path;
unsigned int Port;
operator std::string();
inline void operator =(const std::string &From) {CopyFrom(From);}
inline bool empty() {return Access.empty();};
static std::string SiteOnly(const std::string &URI);
static std::string ArchiveOnly(const std::string &URI);
static std::string NoUserPassword(const std::string &URI);
explicit URI(std::string Path) { CopyFrom(Path); }
URI() : Port(0) {}
};
struct SubstVar
{
const char *Subst;
const std::string *Contents;
};
APT_PUBLIC std::string SubstVar(std::string Str,const struct SubstVar *Vars);
APT_PUBLIC std::string SubstVar(const std::string &Str,const std::string &Subst,const std::string &Contents);
struct RxChoiceList
{
void *UserData;
const char *Str;
bool Hit;
};
APT_PUBLIC unsigned long RegexChoice(RxChoiceList *Rxs,const char **ListBegin,
const char **ListEnd);
#endif
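
A hypothetical round trip through a few of the helpers declared above - URI parsing plus the human-readable size and time formatters; the URL is a placeholder:

#include <apt-pkg/strutl.h>
#include <iostream>

int main()
{
   URI Url("http://user:secret@deb.example.org:8080/debian/");
   std::cout << "host " << Url.Host << ", port " << Url.Port << std::endl;
   std::cout << "site only: " << URI::SiteOnly(Url) << std::endl;
   std::cout << "size: " << SizeToStr(1536 * 1024) << "B" << std::endl; // callers append the unit
   std::cout << "time: " << TimeToStr(90 * 60) << std::endl;
   return 0;
}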

64
apt-pkg/contrib/weakptr.h Normal file
View File

@ -0,0 +1,64 @@
/* weakptr.h - An object which supports weak pointers.
*
* Copyright (C) 2010 Julian Andres Klode <jak@debian.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
* MA 02110-1301, USA.
*/
#ifndef WEAK_POINTER_H
#define WEAK_POINTER_H
#include <set>
#include <stddef.h>
/**
* Class for objects providing support for weak pointers.
*
* This class allows for the registration of certain pointers as weak,
* which will cause them to be set to NULL when the destructor of the
* object is called.
*/
class WeakPointable {
private:
std::set<WeakPointable**> pointers;
public:
/**
* Add a new weak pointer.
*/
inline void AddWeakPointer(WeakPointable** weakptr) {
pointers.insert(weakptr);
}
/**
* Remove the weak pointer from the list of weak pointers.
*/
inline void RemoveWeakPointer(WeakPointable **weakptr) {
pointers.erase(weakptr);
}
/**
* Deconstruct the object, set all weak pointers to NULL.
*/
~WeakPointable() {
std::set<WeakPointable**>::iterator iter = pointers.begin();
while (iter != pointers.end())
**(iter++) = NULL;
}
};
#endif // WEAK_POINTER_H
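
A hypothetical demonstration of the contract described above: a pointer registered via AddWeakPointer() is reset to NULL when the pointed-to object is destroyed:

#include <apt-pkg/weakptr.h>
#include <iostream>

class Cache : public WeakPointable {}; // invented subject class

int main()
{
   WeakPointable *weak = nullptr;
   {
      Cache owned;
      weak = &owned;
      owned.AddWeakPointer(&weak);
      std::cout << "inside scope: " << (weak != nullptr) << std::endl; // prints 1
   } // ~WeakPointable() nulls every registered pointer
   std::cout << "after scope: " << (weak != nullptr) << std::endl;     // prints 0
   return 0;
}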

250
apt-pkg/deb/debfile.cc Normal file
View File

@ -0,0 +1,250 @@
// -*- mode: cpp; mode: fold -*-
// Description /*{{{*/
/* ######################################################################
Debian Archive File (.deb)
.DEB archives are AR files containing two tars and an empty marker
member called 'debian-binary'. The two tars contain the meta data and
the actual archive contents. Thus this class is a very simple wrapper
around ar/tar to simply extract the right tar files.
It also uses the deb package list parser to parse the control file
into the cache.
##################################################################### */
/*}}}*/
// Include Files /*{{{*/
#include <config.h>
#include <apt-pkg/aptconfiguration.h>
#include <apt-pkg/arfile.h>
#include <apt-pkg/debfile.h>
#include <apt-pkg/dirstream.h>
#include <apt-pkg/error.h>
#include <apt-pkg/extracttar.h>
#include <apt-pkg/fileutl.h>
#include <apt-pkg/tagfile.h>
#include <string>
#include <vector>
#include <string.h>
#include <sys/stat.h>
#include <apti18n.h>
/*}}}*/
// DebFile::debDebFile - Constructor /*{{{*/
// ---------------------------------------------------------------------
/* Open the AR file and check for consistency */
debDebFile::debDebFile(FileFd &File) : File(File), AR(File)
{
if (_error->PendingError() == true)
return;
if (!CheckMember("debian-binary")) {
_error->Error(_("This is not a valid DEB archive, missing '%s' member"), "debian-binary");
return;
}
if (!CheckMember("control.tar") &&
!CheckMember("control.tar.gz") &&
!CheckMember("control.tar.xz") &&
!CheckMember("control.tar.zst"))
{
_error->Error(_("This is not a valid DEB archive, missing '%s' member"), "control.tar");
return;
}
if (!CheckMember("data.tar") &&
!CheckMember("data.tar.gz") &&
!CheckMember("data.tar.bz2") &&
!CheckMember("data.tar.lzma") &&
!CheckMember("data.tar.xz") &&
!CheckMember("data.tar.zst"))
{
_error->Error(_("This is not a valid DEB archive, missing '%s' member"), "data.tar");
return;
}
}
/*}}}*/
// DebFile::CheckMember - Check if a named member is in the archive /*{{{*/
// ---------------------------------------------------------------------
/* This is used to check for a correct deb and to give nicer error messages
for people playing around. */
bool debDebFile::CheckMember(const char *Name)
{
if (AR.FindMember(Name) == 0)
return false;
return true;
}
/*}}}*/
// DebFile::GotoMember - Jump to a Member /*{{{*/
// ---------------------------------------------------------------------
/* Jump in the file to the start of a named member and return the information
about that member. The caller can then read from the file up to the
returned size. Note: since this relies on the file position, this is
a destructive operation; it also changes the last returned Member
structure, so don't nest calls!
const ARArchive::Member *debDebFile::GotoMember(const char *Name)
{
// Get the archive member and position the file
const ARArchive::Member *Member = AR.FindMember(Name);
if (Member == 0)
{
return 0;
}
if (File.Seek(Member->Start) == false)
return 0;
return Member;
}
/*}}}*/
// DebFile::ExtractTarMember - Extract the contents of a tar member /*{{{*/
// ---------------------------------------------------------------------
/* Simple wrapper around tar.. */
bool debDebFile::ExtractTarMember(pkgDirStream &Stream,const char *Name)
{
// Get the archive member
const ARArchive::Member *Member = NULL;
std::string Compressor;
std::vector<APT::Configuration::Compressor> compressor = APT::Configuration::getCompressors();
for (std::vector<APT::Configuration::Compressor>::const_iterator c = compressor.begin();
c != compressor.end(); ++c)
{
Member = AR.FindMember(std::string(Name).append(c->Extension).c_str());
if (Member == NULL)
continue;
Compressor = c->Name;
break;
}
if (Member == NULL)
Member = AR.FindMember(std::string(Name).c_str());
if (Member == NULL)
{
std::string ext = std::string(Name) + ".{";
for (std::vector<APT::Configuration::Compressor>::const_iterator c = compressor.begin();
c != compressor.end(); ++c) {
if (!c->Extension.empty())
ext.append(c->Extension.substr(1));
}
ext.append("}");
return _error->Error(_("Internal error, could not locate member %s"), ext.c_str());
}
if (File.Seek(Member->Start) == false)
return false;
// Prepare Tar
ExtractTar Tar(File,Member->Size,Compressor);
if (_error->PendingError() == true)
return false;
return Tar.Go(Stream);
}
/*}}}*/
// DebFile::ExtractArchive - Extract the archive data itself /*{{{*/
// ---------------------------------------------------------------------
/* Simple wrapper around DebFile::ExtractTarMember. */
bool debDebFile::ExtractArchive(pkgDirStream &Stream)
{
return ExtractTarMember(Stream, "data.tar");
}
/*}}}*/
// DebFile::ControlExtract::DoItem - Control Tar Extraction /*{{{*/
// ---------------------------------------------------------------------
/* This directory stream handler for the control tar handles extracting
it into the temporary meta directory. It only extracts files, it does
not create directories, links or anything else. */
bool debDebFile::ControlExtract::DoItem(Item &Itm,int &Fd)
{
if (Itm.Type != Item::File)
return true;
/* Cleanse the file name, prevent people from trying to unpack into
absolute paths, .., etc */
for (char *I = Itm.Name; *I != 0; I++)
if (*I == '/')
*I = '_';
/* Force the ownership to be root and ensure correct permissions,
go-w, the rest are left untouched */
Itm.UID = 0;
Itm.GID = 0;
Itm.Mode &= ~(S_IWGRP | S_IWOTH);
return pkgDirStream::DoItem(Itm,Fd);
}
/*}}}*/
// MemControlExtract::DoItem - Check if it is the control file /*{{{*/
// ---------------------------------------------------------------------
/* This sets up to extract the control block member file into a memory
block of just the right size. All other files go into the bit bucket. */
bool debDebFile::MemControlExtract::DoItem(Item &Itm,int &Fd)
{
// At the control file, allocate buffer memory.
if (Member == Itm.Name)
{
delete [] Control;
Control = new char[Itm.Size+2];
IsControl = true;
Fd = -2; // Signal to pass to Process
Length = Itm.Size;
}
else
IsControl = false;
return true;
}
/*}}}*/
// MemControlExtract::Process - Process extracting the control file /*{{{*/
// ---------------------------------------------------------------------
/* Just memcpy the block from the tar extractor and put it in the right
place in the pre-allocated memory block. */
bool debDebFile::MemControlExtract::Process(Item &/*Itm*/,const unsigned char *Data,
unsigned long long Size,unsigned long long Pos)
{
memcpy(Control + Pos, Data,Size);
return true;
}
/*}}}*/
// MemControlExtract::Read - Read the control information from the deb /*{{{*/
// ---------------------------------------------------------------------
/* This uses the internal tar extractor to fetch the control file, and then
it parses it into a tag section parser. */
bool debDebFile::MemControlExtract::Read(debDebFile &Deb)
{
if (Deb.ExtractTarMember(*this, "control.tar") == false)
return false;
if (Control == 0)
return true;
Control[Length] = '\n';
Control[Length+1] = '\n';
if (Section.Scan(Control,Length+2) == false)
return _error->Error(_("Unparsable control file"));
return true;
}
/*}}}*/
// MemControlExtract::TakeControl - Parse a memory block /*{{{*/
// ---------------------------------------------------------------------
/* The given memory block is loaded into the parser and parsed as a control
record. */
bool debDebFile::MemControlExtract::TakeControl(const void *Data,unsigned long long Size)
{
delete [] Control;
Control = new char[Size+2];
Length = Size;
memcpy(Control,Data,Size);
Control[Length] = '\n';
Control[Length+1] = '\n';
return Section.Scan(Control,Length+2);
}
/*}}}*/

89
apt-pkg/deb/debfile.h Normal file
View File

@ -0,0 +1,89 @@
// -*- mode: cpp; mode: fold -*-
// Description /*{{{*/
/* ######################################################################
Debian Archive File (.deb)
This Class handles all the operations performed directly on .deb
files. It makes use of the AR and TAR classes to give the necessary
external interface.
There are only two things that can be done with a raw package:
extract its control information and extract the contents themselves.
This should probably subclass an as-yet unwritten super class to
produce a generic archive mechanism.
The memory control file extractor is useful to extract a single file
into memory from the control.tar.gz
##################################################################### */
/*}}}*/
#ifndef PKGLIB_DEBFILE_H
#define PKGLIB_DEBFILE_H
#include <apt-pkg/arfile.h>
#include <apt-pkg/dirstream.h>
#include <apt-pkg/macros.h>
#include <apt-pkg/tagfile.h>
#include <string>
class FileFd;
class APT_PUBLIC debDebFile
{
protected:
FileFd &File;
ARArchive AR;
bool CheckMember(const char *Name);
public:
class ControlExtract;
class MemControlExtract;
bool ExtractTarMember(pkgDirStream &Stream, const char *Name);
bool ExtractArchive(pkgDirStream &Stream);
const ARArchive::Member *GotoMember(const char *Name);
inline FileFd &GetFile() {return File;};
explicit debDebFile(FileFd &File);
};
class APT_PUBLIC debDebFile::ControlExtract : public pkgDirStream
{
public:
virtual bool DoItem(Item &Itm,int &Fd) APT_OVERRIDE;
};
class APT_PUBLIC debDebFile::MemControlExtract : public pkgDirStream
{
bool IsControl;
public:
char *Control;
pkgTagSection Section;
unsigned long Length;
std::string Member;
// Members from DirStream
virtual bool DoItem(Item &Itm,int &Fd) APT_OVERRIDE;
virtual bool Process(Item &Itm,const unsigned char *Data,
unsigned long long Size,unsigned long long Pos) APT_OVERRIDE;
// Helpers
bool Read(debDebFile &Deb);
bool TakeControl(const void *Data,unsigned long long Size);
MemControlExtract() : IsControl(false), Control(0), Length(0), Member("control") {};
explicit MemControlExtract(std::string Member) : IsControl(false), Control(0), Length(0), Member(Member) {};
~MemControlExtract() {delete [] Control;};
};
/*}}}*/
#endif
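
A hypothetical reader built on the classes above: open a .deb, pull the control member into memory with MemControlExtract and print one field via the embedded pkgTagSection (FindS comes from tagfile.h); the path is a placeholder:

#include <apt-pkg/debfile.h>
#include <apt-pkg/error.h>
#include <apt-pkg/fileutl.h>
#include <iostream>

int main()
{
   FileFd Fd("/tmp/example.deb", FileFd::ReadOnly);
   debDebFile Deb(Fd);
   debDebFile::MemControlExtract Control("control");
   if (_error->PendingError() == false && Control.Read(Deb) == true)
      std::cout << "Package: " << Control.Section.FindS("Package") << std::endl;
   else
      _error->DumpErrors();
   return 0;
}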

421
apt-pkg/deb/debindexfile.cc Normal file
View File

@ -0,0 +1,421 @@
// -*- mode: cpp; mode: fold -*-
// Description /*{{{*/
/* ######################################################################
Debian-specific sources.list types and the three sorts of Debian
index files.
##################################################################### */
/*}}}*/
// Include Files /*{{{*/
#include <config.h>
#include <apti18n.h>
#include <apt-pkg/configuration.h>
#include <apt-pkg/debfile.h>
#include <apt-pkg/debindexfile.h>
#include <apt-pkg/deblistparser.h>
#include <apt-pkg/debrecords.h>
#include <apt-pkg/debsrcrecords.h>
#include <apt-pkg/error.h>
#include <apt-pkg/fileutl.h>
#include <apt-pkg/indexfile.h>
#include <apt-pkg/pkgcache.h>
#include <apt-pkg/pkgrecords.h>
#include <apt-pkg/srcrecords.h>
#include <apt-pkg/strutl.h>
#include <iostream>
#include <memory>
#include <sstream>
#include <string>
#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>
/*}}}*/
// Sources Index /*{{{*/
debSourcesIndex::debSourcesIndex(IndexTarget const &Target,bool const Trusted) :
pkgDebianIndexTargetFile(Target, Trusted), d(NULL)
{
}
std::string debSourcesIndex::SourceInfo(pkgSrcRecords::Parser const &Record,
pkgSrcRecords::File const &File) const
{
// The result looks like: http://foo/debian/ stable/main src 1.1.1 (dsc)
std::string Res = Target.Description;
auto const space = Target.Description.rfind(' ');
if (space != std::string::npos)
Res.erase(space);
Res += " ";
Res += Record.Package();
Res += " ";
Res += Record.Version();
if (File.Type.empty() == false)
Res += " (" + File.Type + ")";
return Res;
}
pkgSrcRecords::Parser *debSourcesIndex::CreateSrcParser() const
{
std::string const SourcesURI = IndexFileName();
if (FileExists(SourcesURI))
return new debSrcRecordParser(SourcesURI, this);
return NULL;
}
bool debSourcesIndex::OpenListFile(FileFd &, std::string const &)
{
return true;
}
pkgCacheListParser * debSourcesIndex::CreateListParser(FileFd &)
{
return nullptr;
}
uint8_t debSourcesIndex::GetIndexFlags() const
{
return 0;
}
/*}}}*/
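// Worked example of the SourceInfo() format documented above (editor's
// illustration with made-up values): for a Target.Description of
// "http://foo/debian/ stable/main Sources" and a dsc entry of package apt at
// version 1.1.1, the composed string is "http://foo/debian/ stable/main apt 1.1.1 (dsc)".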
// Packages Index /*{{{*/
debPackagesIndex::debPackagesIndex(IndexTarget const &Target, bool const Trusted) :
pkgDebianIndexTargetFile(Target, Trusted), d(NULL)
{
}
std::string debPackagesIndex::ArchiveInfo(pkgCache::VerIterator const &Ver) const
{
std::string Res = Target.Description;
{
auto const space = Target.Description.rfind(' ');
if (space != std::string::npos)
Res.erase(space);
}
Res += " ";
Res += Ver.ParentPkg().Name();
Res += " ";
std::string const Dist = Target.Option(IndexTarget::RELEASE);
if (Dist.empty() == false && Dist[Dist.size() - 1] != '/')
Res.append(Ver.Arch()).append(" ");
Res += Ver.VerStr();
return Res;
}
uint8_t debPackagesIndex::GetIndexFlags() const
{
return 0;
}
/*}}}*/
// Translation-* Index /*{{{*/
debTranslationsIndex::debTranslationsIndex(IndexTarget const &Target) :
pkgDebianIndexTargetFile(Target, true), d(NULL)
{}
bool debTranslationsIndex::HasPackages() const
{
return Exists();
}
bool debTranslationsIndex::OpenListFile(FileFd &Pkg, std::string const &FileName)
{
if (FileExists(FileName))
return pkgDebianIndexTargetFile::OpenListFile(Pkg, FileName);
return true;
}
uint8_t debTranslationsIndex::GetIndexFlags() const
{
return pkgCache::Flag::NotSource | pkgCache::Flag::NoPackages;
}
std::string debTranslationsIndex::GetArchitecture() const
{
return std::string();
}
pkgCacheListParser * debTranslationsIndex::CreateListParser(FileFd &Pkg)
{
if (Pkg.IsOpen() == false)
return nullptr;
_error->PushToStack();
std::unique_ptr<pkgCacheListParser> Parser(new debTranslationsParser(&Pkg));
bool const newError = _error->PendingError();
_error->MergeWithStack();
return newError ? nullptr : Parser.release();
}
/*}}}*/
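// The CreateListParser() implementations in this file all follow the same
// GlobalError idiom: collect whatever gets reported while the parser is being
// constructed on a private stack, and only a pending error (not a mere
// warning) discards the result. A sketch of the bare pattern (editor's
// illustration; MakeParser stands in for any noisy construction step):
#if 0
_error->PushToStack();                          // open a fresh error scope
std::unique_ptr<pkgCacheListParser> P(MakeParser());
bool const newError = _error->PendingError();   // did an *error* get queued?
_error->MergeWithStack();                       // hand the messages back upwards
return newError ? nullptr : P.release();
#endif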
// dpkg/status Index /*{{{*/
debStatusIndex::debStatusIndex(std::string const &File) : pkgDebianIndexRealFile(File, true), d(NULL)
{
}
std::string debStatusIndex::GetArchitecture() const
{
return std::string();
}
std::string debStatusIndex::GetComponent() const
{
return "now";
}
uint8_t debStatusIndex::GetIndexFlags() const
{
return pkgCache::Flag::NotSource;
}
pkgCacheListParser * debStatusIndex::CreateListParser(FileFd &Pkg)
{
if (Pkg.IsOpen() == false)
return nullptr;
_error->PushToStack();
std::unique_ptr<pkgCacheListParser> Parser(new debStatusListParser(&Pkg));
bool const newError = _error->PendingError();
_error->MergeWithStack();
return newError ? nullptr : Parser.release();
}
/*}}}*/
// DebPkgFile Index - a single .deb file as an index /*{{{*/
debDebPkgFileIndex::debDebPkgFileIndex(std::string const &DebFile)
: pkgDebianIndexRealFile(DebFile, true), d(NULL), DebFile(DebFile)
{
}
bool debDebPkgFileIndex::GetContent(std::ostream &content, std::string const &debfile)
{
struct stat Buf;
if (stat(debfile.c_str(), &Buf) != 0)
return false;
FileFd debFd(debfile, FileFd::ReadOnly);
debDebFile deb(debFd);
debDebFile::MemControlExtract extractor("control");
if (not extractor.Read(deb))
return _error->Error(_("Could not read meta data from %s"), debfile.c_str());
// trim off newlines
while (extractor.Control[extractor.Length] == '\n')
extractor.Control[extractor.Length--] = '\0';
const char *Control = extractor.Control;
while (isspace_ascii(Control[0]))
Control++;
content << Control << '\n';
content << "Filename: " << debfile << "\n";
content << "Size: " << std::to_string(Buf.st_size) << "\n";
return true;
}
bool debDebPkgFileIndex::OpenListFile(FileFd &Pkg, std::string const &FileName)
{
// write the control data to a tempfile
if (GetTempFile("deb-file-" + flNotDir(FileName), true, &Pkg) == NULL)
return false;
std::ostringstream content;
if (GetContent(content, FileName) == false)
return false;
std::string const contentstr = content.str();
if (contentstr.empty())
return true;
if (Pkg.Write(contentstr.c_str(), contentstr.length()) == false || Pkg.Seek(0) == false)
return false;
return true;
}
pkgCacheListParser * debDebPkgFileIndex::CreateListParser(FileFd &Pkg)
{
if (Pkg.IsOpen() == false)
return nullptr;
_error->PushToStack();
std::unique_ptr<pkgCacheListParser> Parser(new debDebFileParser(&Pkg, DebFile));
bool const newError = _error->PendingError();
_error->MergeWithStack();
return newError ? nullptr : Parser.release();
}
uint8_t debDebPkgFileIndex::GetIndexFlags() const
{
return pkgCache::Flag::LocalSource;
}
std::string debDebPkgFileIndex::GetArchitecture() const
{
return std::string();
}
std::string debDebPkgFileIndex::GetComponent() const
{
return "local-deb";
}
pkgCache::PkgFileIterator debDebPkgFileIndex::FindInCache(pkgCache &Cache) const
{
std::string const FileName = IndexFileName();
pkgCache::PkgFileIterator File = Cache.FileBegin();
for (; File.end() == false; ++File)
{
if (File.FileName() == NULL || FileName != File.FileName())
continue;
// we can't do size checks here as file size != content size
return File;
}
return File;
}
std::string debDebPkgFileIndex::ArchiveInfo(pkgCache::VerIterator const &Ver) const
{
std::string Res = IndexFileName() + " ";
Res.append(Ver.ParentPkg().Name()).append(" ");
Res.append(Ver.Arch()).append(" ");
Res.append(Ver.VerStr());
return Res;
}
/*}}}*/
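// GetContent() above synthesizes a Packages-style stanza for a local .deb:
// the control file is copied verbatim (trailing newlines trimmed) and
// Filename:/Size: fields are appended so the file can be located later.
// Editor's sketch of calling the static helper directly (the path is made up):
#if 0
std::ostringstream Stanza;
if (debDebPkgFileIndex::GetContent(Stanza, "/tmp/example_1.0_amd64.deb"))
   std::cout << Stanza.str();
#endif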
// DscFile Index - a single .dsc file as an index /*{{{*/
debDscFileIndex::debDscFileIndex(std::string const &DscFile)
: pkgDebianIndexRealFile(DscFile, true), d(NULL)
{
}
pkgSrcRecords::Parser *debDscFileIndex::CreateSrcParser() const
{
if (Exists() == false)
return NULL;
return new debDscRecordParser(File, this);
}
std::string debDscFileIndex::GetComponent() const
{
return "local-dsc";
}
std::string debDscFileIndex::GetArchitecture() const
{
return "source";
}
uint8_t debDscFileIndex::GetIndexFlags() const
{
return pkgCache::Flag::LocalSource;
}
/*}}}*/
// ControlFile Index - a directory with a debian/control file /*{{{*/
std::string debDebianSourceDirIndex::GetComponent() const
{
return "local-control";
}
/*}}}*/
// String Package Index - a string of Packages file content /*{{{*/
std::string debStringPackageIndex::GetArchitecture() const
{
return std::string();
}
std::string debStringPackageIndex::GetComponent() const
{
return "apt-tmp-index";
}
uint8_t debStringPackageIndex::GetIndexFlags() const
{
return pkgCache::Flag::NotSource;
}
const pkgIndexFile::Type *debStringPackageIndex::GetType() const
{
return pkgIndexFile::Type::GetType("Debian Package Index");
}
debStringPackageIndex::debStringPackageIndex(std::string const &content) :
pkgDebianIndexRealFile("", false), d(NULL)
{
FileFd fd;
GetTempFile("apt-tmp-index", false, &fd);
fd.Write(content.data(), content.length());
File = fd.Name();
}
debStringPackageIndex::~debStringPackageIndex()
{
RemoveFile("~debStringPackageIndex", File);
}
/*}}}*/
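// debStringPackageIndex (above) turns an in-memory Packages stanza into a
// temporary on-disk index which the destructor removes again. Editor's
// sketch with a made-up stanza:
#if 0
std::string const Stanza =
   "Package: example\nVersion: 1.0\nArchitecture: amd64\n\n";
debStringPackageIndex Index(Stanza);   // writes the stanza to a temp file
// Index can now be merged into the cache like any other Packages index;
// the temporary file disappears when Index goes out of scope.
#endif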
// Index File types for Debian /*{{{*/
class APT_HIDDEN debIFTypeSrc : public pkgIndexFile::Type
{
public:
debIFTypeSrc() {Label = "Debian Source Index";};
};
class APT_HIDDEN debIFTypePkg : public pkgIndexFile::Type
{
public:
virtual pkgRecords::Parser *CreatePkgParser(pkgCache::PkgFileIterator const &File) const APT_OVERRIDE
{
return new debRecordParser(File.FileName(),*File.Cache());
};
debIFTypePkg() {Label = "Debian Package Index";};
};
class APT_HIDDEN debIFTypeTrans : public debIFTypePkg
{
public:
debIFTypeTrans() {Label = "Debian Translation Index";};
};
class APT_HIDDEN debIFTypeStatus : public pkgIndexFile::Type
{
public:
virtual pkgRecords::Parser *CreatePkgParser(pkgCache::PkgFileIterator const &File) const APT_OVERRIDE
{
return new debRecordParser(File.FileName(),*File.Cache());
};
debIFTypeStatus() {Label = "Debian dpkg status file";};
};
class APT_HIDDEN debIFTypeDebPkgFile : public pkgIndexFile::Type
{
public:
virtual pkgRecords::Parser *CreatePkgParser(pkgCache::PkgFileIterator const &File) const APT_OVERRIDE
{
return new debDebFileRecordParser(File.FileName());
};
debIFTypeDebPkgFile() {Label = "Debian deb file";};
};
class APT_HIDDEN debIFTypeDscFile : public pkgIndexFile::Type
{
public:
virtual pkgSrcRecords::Parser *CreateSrcPkgParser(std::string const &DscFile) const APT_OVERRIDE
{
return new debDscRecordParser(DscFile, NULL);
};
debIFTypeDscFile() {Label = "Debian dsc file";};
};
class APT_HIDDEN debIFTypeDebianSourceDir : public pkgIndexFile::Type
{
public:
virtual pkgSrcRecords::Parser *CreateSrcPkgParser(std::string const &SourceDir) const APT_OVERRIDE
{
return new debDscRecordParser(SourceDir + std::string("/debian/control"), NULL);
};
debIFTypeDebianSourceDir() {Label = "Debian control file";};
};
APT_HIDDEN debIFTypeSrc _apt_Src;
APT_HIDDEN debIFTypePkg _apt_Pkg;
APT_HIDDEN debIFTypeTrans _apt_Trans;
APT_HIDDEN debIFTypeStatus _apt_Status;
APT_HIDDEN debIFTypeDebPkgFile _apt_DebPkgFile;
// file based pseudo indexes
APT_HIDDEN debIFTypeDscFile _apt_DscFile;
APT_HIDDEN debIFTypeDebianSourceDir _apt_DebianSourceDir;
const pkgIndexFile::Type *debSourcesIndex::GetType() const
{
return &_apt_Src;
}
const pkgIndexFile::Type *debPackagesIndex::GetType() const
{
return &_apt_Pkg;
}
const pkgIndexFile::Type *debTranslationsIndex::GetType() const
{
return &_apt_Trans;
}
const pkgIndexFile::Type *debStatusIndex::GetType() const
{
return &_apt_Status;
}
const pkgIndexFile::Type *debDebPkgFileIndex::GetType() const
{
return &_apt_DebPkgFile;
}
const pkgIndexFile::Type *debDscFileIndex::GetType() const
{
return &_apt_DscFile;
}
const pkgIndexFile::Type *debDebianSourceDirIndex::GetType() const
{
return &_apt_DebianSourceDir;
}
/*}}}*/
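// The pkgIndexFile::Type instances above register themselves by Label, which
// is what debStringPackageIndex::GetType() relies on when it resolves
// "Debian Package Index" by name. Editor's sketch of that lookup:
#if 0
const pkgIndexFile::Type *T = pkgIndexFile::Type::GetType("Debian Package Index");
if (T != nullptr)
   std::cout << T->Label << '\n';     // prints the label it was found under
#endif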
debStatusIndex::~debStatusIndex() {}
debPackagesIndex::~debPackagesIndex() {}
debTranslationsIndex::~debTranslationsIndex() {}
debSourcesIndex::~debSourcesIndex() {}
debDebPkgFileIndex::~debDebPkgFileIndex() {}
debDscFileIndex::~debDscFileIndex() {}

196
apt-pkg/deb/debindexfile.h Normal file

@ -0,0 +1,196 @@
// -*- mode: cpp; mode: fold -*-
// Description /*{{{*/
/* ######################################################################
Debian Index Files
There are three sorts currently:
Package files that have File: tags
Package files that don't (/var/lib/dpkg/status)
Source files
##################################################################### */
/*}}}*/
#ifndef PKGLIB_DEBINDEXFILE_H
#define PKGLIB_DEBINDEXFILE_H
#include <apt-pkg/indexfile.h>
#include <apt-pkg/pkgcache.h>
#include <apt-pkg/srcrecords.h>
#include <string>
class OpProgress;
class pkgAcquire;
class pkgCacheGenerator;
class debStatusIndex : public pkgDebianIndexRealFile
{
void * const d;
protected:
virtual std::string GetArchitecture() const APT_OVERRIDE;
virtual std::string GetComponent() const APT_OVERRIDE;
virtual uint8_t GetIndexFlags() const APT_OVERRIDE;
public:
virtual const Type *GetType() const APT_OVERRIDE APT_PURE;
// Interface for the Cache Generator
virtual bool HasPackages() const APT_OVERRIDE {return true;};
// Abort if the file does not exist.
virtual bool Exists() const APT_OVERRIDE {return true;};
virtual pkgCacheListParser * CreateListParser(FileFd &Pkg) APT_OVERRIDE;
explicit debStatusIndex(std::string const &File);
virtual ~debStatusIndex();
};
class debPackagesIndex : public pkgDebianIndexTargetFile
{
void * const d;
protected:
virtual uint8_t GetIndexFlags() const APT_OVERRIDE;
public:
virtual const Type *GetType() const APT_OVERRIDE APT_PURE;
// Stuff for accessing files on remote items
virtual std::string ArchiveInfo(pkgCache::VerIterator const &Ver) const APT_OVERRIDE;
// Interface for the Cache Generator
virtual bool HasPackages() const APT_OVERRIDE {return true;};
debPackagesIndex(IndexTarget const &Target, bool const Trusted);
virtual ~debPackagesIndex();
};
class debTranslationsIndex : public pkgDebianIndexTargetFile
{
void * const d;
protected:
virtual std::string GetArchitecture() const APT_OVERRIDE;
virtual uint8_t GetIndexFlags() const APT_OVERRIDE;
virtual bool OpenListFile(FileFd &Pkg, std::string const &FileName) APT_OVERRIDE;
APT_HIDDEN virtual pkgCacheListParser * CreateListParser(FileFd &Pkg) APT_OVERRIDE;
public:
virtual const Type *GetType() const APT_OVERRIDE APT_PURE;
// Interface for the Cache Generator
virtual bool HasPackages() const APT_OVERRIDE;
explicit debTranslationsIndex(IndexTarget const &Target);
virtual ~debTranslationsIndex();
};
class debSourcesIndex : public pkgDebianIndexTargetFile
{
void * const d;
virtual uint8_t GetIndexFlags() const APT_OVERRIDE;
virtual bool OpenListFile(FileFd &Pkg, std::string const &FileName) APT_OVERRIDE;
APT_HIDDEN virtual pkgCacheListParser * CreateListParser(FileFd &Pkg) APT_OVERRIDE;
public:
virtual const Type *GetType() const APT_OVERRIDE APT_PURE;
// Stuff for accessing files on remote items
virtual std::string SourceInfo(pkgSrcRecords::Parser const &Record,
pkgSrcRecords::File const &File) const APT_OVERRIDE;
// Interface for the record parsers
virtual pkgSrcRecords::Parser *CreateSrcParser() const APT_OVERRIDE;
// Interface for the Cache Generator
virtual bool HasPackages() const APT_OVERRIDE {return false;};
debSourcesIndex(IndexTarget const &Target, bool const Trusted);
virtual ~debSourcesIndex();
};
class debDebPkgFileIndex : public pkgDebianIndexRealFile
{
void * const d;
std::string DebFile;
protected:
virtual std::string GetComponent() const APT_OVERRIDE;
virtual std::string GetArchitecture() const APT_OVERRIDE;
virtual uint8_t GetIndexFlags() const APT_OVERRIDE;
virtual bool OpenListFile(FileFd &Pkg, std::string const &FileName) APT_OVERRIDE;
APT_HIDDEN virtual pkgCacheListParser * CreateListParser(FileFd &Pkg) APT_OVERRIDE;
public:
virtual const Type *GetType() const APT_OVERRIDE APT_PURE;
/** Get the control (file) content of the deb file
*
* @param[out] content stream the control file stanza is written to
* @param debfile is the filename of the .deb-file
* @return \b true if successful, otherwise \b false.
*/
static bool GetContent(std::ostream &content, std::string const &debfile);
// Interface for the Cache Generator
virtual bool HasPackages() const APT_OVERRIDE {return true;}
virtual pkgCache::PkgFileIterator FindInCache(pkgCache &Cache) const APT_OVERRIDE;
// Interface for acquire
explicit debDebPkgFileIndex(std::string const &DebFile);
virtual ~debDebPkgFileIndex();
std::string ArchiveInfo(pkgCache::VerIterator const &Ver) const override;
};
class APT_PUBLIC debDscFileIndex : public pkgDebianIndexRealFile
{
void * const d;
protected:
virtual std::string GetComponent() const APT_OVERRIDE;
virtual std::string GetArchitecture() const APT_OVERRIDE;
virtual uint8_t GetIndexFlags() const APT_OVERRIDE;
public:
virtual const Type *GetType() const APT_OVERRIDE APT_PURE;
virtual pkgSrcRecords::Parser *CreateSrcParser() const APT_OVERRIDE;
virtual bool HasPackages() const APT_OVERRIDE {return false;};
explicit debDscFileIndex(std::string const &DscFile);
virtual ~debDscFileIndex();
};
class debDebianSourceDirIndex : public debDscFileIndex
{
protected:
virtual std::string GetComponent() const APT_OVERRIDE;
public:
virtual const Type *GetType() const APT_OVERRIDE APT_PURE;
};
class APT_PUBLIC debStringPackageIndex : public pkgDebianIndexRealFile
{
void * const d;
protected:
virtual std::string GetArchitecture() const APT_OVERRIDE;
virtual std::string GetComponent() const APT_OVERRIDE;
virtual uint8_t GetIndexFlags() const APT_OVERRIDE;
public:
virtual const Type *GetType() const APT_OVERRIDE APT_PURE;
// Interface for the Cache Generator
virtual bool HasPackages() const APT_OVERRIDE {return true;};
// Abort if the file does not exist.
virtual bool Exists() const APT_OVERRIDE {return true;};
explicit debStringPackageIndex(std::string const &content);
virtual ~debStringPackageIndex();
};
#endif

1029
apt-pkg/deb/deblistparser.cc Normal file

File diff suppressed because it is too large

124
apt-pkg/deb/deblistparser.h Normal file

@ -0,0 +1,124 @@
// -*- mode: cpp; mode: fold -*-
// Description /*{{{*/
/* ######################################################################
Debian Package List Parser - This implements the abstract parser
interface for Debian package files
##################################################################### */
/*}}}*/
#ifndef PKGLIB_DEBLISTPARSER_H
#define PKGLIB_DEBLISTPARSER_H
#include <apt-pkg/macros.h>
#include <apt-pkg/pkgcache.h>
#include <apt-pkg/pkgcachegen.h>
#include <apt-pkg/tagfile.h>
#include <string>
#include <vector>
#include <apt-pkg/string_view.h>
class FileFd;
class APT_HIDDEN debListParser : public pkgCacheListParser
{
public:
// Parser Helper
struct WordList
{
APT::StringView Str;
unsigned char Val;
};
private:
std::vector<std::string> forceEssential;
std::vector<std::string> forceImportant;
std::string MD5Buffer;
std::string myArch;
protected:
pkgTagFile Tags;
pkgTagSection Section;
map_filesize_t iOffset;
virtual bool ParseStatus(pkgCache::PkgIterator &Pkg,pkgCache::VerIterator &Ver);
bool ParseDepends(pkgCache::VerIterator &Ver, pkgTagSection::Key Key,
unsigned int Type);
bool ParseProvides(pkgCache::VerIterator &Ver);
APT_HIDDEN static bool GrabWord(APT::StringView Word,const WordList *List,unsigned char &Out);
APT_HIDDEN unsigned char ParseMultiArch(bool const showErrors);
public:
APT_PUBLIC static unsigned char GetPrio(std::string Str);
// These all operate against the current section
virtual std::string Package() APT_OVERRIDE;
virtual bool ArchitectureAll() APT_OVERRIDE;
virtual APT::StringView Architecture() APT_OVERRIDE;
virtual APT::StringView Version() APT_OVERRIDE;
virtual bool NewVersion(pkgCache::VerIterator &Ver) APT_OVERRIDE;
virtual std::vector<std::string> AvailableDescriptionLanguages() APT_OVERRIDE;
virtual APT::StringView Description_md5() APT_OVERRIDE;
virtual uint32_t VersionHash() APT_OVERRIDE;
virtual bool SameVersion(uint32_t Hash, pkgCache::VerIterator const &Ver) APT_OVERRIDE;
virtual bool UsePackage(pkgCache::PkgIterator &Pkg,
pkgCache::VerIterator &Ver) APT_OVERRIDE;
virtual map_filesize_t Offset() APT_OVERRIDE {return iOffset;};
virtual map_filesize_t Size() APT_OVERRIDE {return Section.size();};
virtual bool Step() APT_OVERRIDE;
APT_PUBLIC static const char *ParseDepends(const char *Start, const char *Stop,
std::string &Package, std::string &Ver, unsigned int &Op,
bool const &ParseArchFlags = false, bool const &StripMultiArch = true,
bool const &ParseRestrictionsList = false,
std::string const &Arch = "");
APT_PUBLIC static const char *ParseDepends(const char *Start, const char *Stop,
APT::StringView &Package,
APT::StringView &Ver, unsigned int &Op,
bool const ParseArchFlags = false, bool StripMultiArch = true,
bool const ParseRestrictionsList = false,
std::string Arch = "");
APT_PUBLIC static const char *ConvertRelation(const char *I,unsigned int &Op);
explicit debListParser(FileFd *File);
virtual ~debListParser();
};
class APT_HIDDEN debDebFileParser : public debListParser
{
private:
std::string DebFile;
public:
debDebFileParser(FileFd *File, std::string const &DebFile);
virtual bool UsePackage(pkgCache::PkgIterator &Pkg,
pkgCache::VerIterator &Ver) APT_OVERRIDE;
};
class APT_HIDDEN debTranslationsParser : public debListParser
{
public:
// a translation can never be a real package
virtual APT::StringView Architecture() APT_OVERRIDE { return ""; }
virtual APT::StringView Version() APT_OVERRIDE { return ""; }
explicit debTranslationsParser(FileFd *File)
: debListParser(File) {};
};
class APT_HIDDEN debStatusListParser : public debListParser
{
public:
virtual bool ParseStatus(pkgCache::PkgIterator &Pkg,pkgCache::VerIterator &Ver) APT_OVERRIDE;
explicit debStatusListParser(FileFd *File)
: debListParser(File) {};
};
#endif
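The two static ParseDepends() overloads declared above consume one element of
a dependency list at a time and hand back a pointer just past it (the in-tree
callers treat a null return as a parse error). A small sketch of driving the
std::string overload over a single element (editor's illustration; Op ends up
as one of the pkgCache::Dep comparison constants):

#include <apt-pkg/deblistparser.h>
#include <iostream>
#include <string>

int main()
{
   std::string const Element = "libc6 (>= 2.28)";
   std::string Package, Version;
   unsigned int Op = 0;
   const char *Start = Element.c_str();
   const char *Stop = Start + Element.size();
   if (debListParser::ParseDepends(Start, Stop, Package, Version, Op) == 0)
      return 1;                              // element could not be parsed
   std::cout << Package << " " << Version << " op=" << Op << '\n';
   return 0;
}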

1312
apt-pkg/deb/debmetaindex.cc Normal file

File diff suppressed because it is too large

72
apt-pkg/deb/debmetaindex.h Normal file

@ -0,0 +1,72 @@
#ifndef PKGLIB_DEBMETAINDEX_H
#define PKGLIB_DEBMETAINDEX_H
#include <apt-pkg/macros.h>
#include <apt-pkg/metaindex.h>
#include <map>
#include <string>
#include <vector>
class pkgAcquire;
class pkgIndexFile;
class IndexTarget;
class pkgCacheGenerator;
class OpProgress;
class debReleaseIndexPrivate;
class APT_HIDDEN debReleaseIndex : public metaIndex
{
debReleaseIndexPrivate * const d;
APT_HIDDEN bool parseSumData(const char *&Start, const char *End, std::string &Name,
std::string &Hash, unsigned long long &Size);
public:
APT_HIDDEN std::string MetaIndexInfo(const char *Type) const;
APT_HIDDEN std::string MetaIndexFile(const char *Types) const;
APT_HIDDEN std::string MetaIndexURI(const char *Type) const;
debReleaseIndex(std::string const &URI, std::string const &Dist, std::map<std::string,std::string> const &Options);
debReleaseIndex(std::string const &URI, std::string const &Dist, bool const Trusted, std::map<std::string,std::string> const &Options);
virtual ~debReleaseIndex();
virtual std::string ArchiveURI(std::string const &File) const APT_OVERRIDE {return URI + File;};
virtual bool GetIndexes(pkgAcquire *Owner, bool const &GetAll=false) APT_OVERRIDE;
virtual std::vector<IndexTarget> GetIndexTargets() const APT_OVERRIDE;
virtual std::string Describe() const APT_OVERRIDE;
virtual pkgCache::RlsFileIterator FindInCache(pkgCache &Cache, bool const ModifyCheck) const APT_OVERRIDE;
virtual bool Merge(pkgCacheGenerator &Gen,OpProgress *Prog) const APT_OVERRIDE;
virtual bool Load(std::string const &Filename, std::string * const ErrorText) APT_OVERRIDE;
virtual metaIndex * UnloadedClone() const APT_OVERRIDE;
virtual std::vector <pkgIndexFile *> *GetIndexFiles() APT_OVERRIDE;
bool SetTrusted(TriState const Trusted);
bool SetCheckValidUntil(TriState const Trusted);
bool SetValidUntilMin(time_t const Valid);
bool SetValidUntilMax(time_t const Valid);
bool SetCheckDate(TriState const CheckDate);
bool SetDateMaxFuture(time_t const DateMaxFuture);
bool SetSignedBy(std::string const &SignedBy);
std::map<std::string, std::string> GetReleaseOptions();
virtual bool IsTrusted() const APT_OVERRIDE;
bool IsArchitectureSupported(std::string const &arch) const override;
bool IsArchitectureAllSupportedFor(IndexTarget const &target) const override;
bool HasSupportForComponent(std::string const &component) const override;
APT_PURE time_t GetNotBefore() const override;
void AddComponent(std::string const &sourcesEntry,
bool const isSrc, std::string const &Name,
std::vector<std::string> const &Targets,
std::vector<std::string> const &Architectures,
std::vector<std::string> Languages,
bool const usePDiffs, std::string const &useByHash);
};
#endif
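For context on how the pieces above fit together: each deb/deb-src line in
sources.list becomes at most one debReleaseIndex per URI/Dist pair, and
AddComponent() is called once per listed component. A rough sketch of that
wiring for a single line (editor's illustration only: the class is APT_HIDDEN,
the real mapping lives in the sources.list parser, and the target,
architecture and language lists are normally filled from configuration):

#include <apt-pkg/debmetaindex.h>
#include <map>
#include <string>
#include <vector>

// deb http://deb.debian.org/debian unstable main
static metaIndex *BuildExample()
{
   std::map<std::string, std::string> Options;           // no options given
   debReleaseIndex *Release =
      new debReleaseIndex("http://deb.debian.org/debian/", "unstable", Options);
   Release->AddComponent("/etc/apt/sources.list:1",      // entry origin (assumed format)
                         false,                          // a deb line, not deb-src
                         "main",                         // component name
                         {"Packages"},                   // targets (assumed)
                         {"amd64"},                      // architectures (assumed)
                         {"en"},                         // languages (assumed)
                         true,                           // use PDiffs
                         "");                            // by-hash: library default
   return Release;
}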

Some files were not shown because too many files have changed in this diff